Sat Oct 3 03:59:39 2009 UTC
Move KAUTH_NETWORK_BIND::KAUTH_REQ_NETWORK_BIND_PORT policy back to the
subsystem (or close to it).

Note: Revisit KAUTH_REQ_NETWORK_BIND_PRIVPORT.


(elad)
diff -r1.192 -r1.193 src/sys/kern/uipc_socket.c
diff -r1.25 -r1.26 src/sys/secmodel/suser/secmodel_suser.c
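
As a caller-side illustration of the policy move (not part of this commit): the sketch below is hypothetical -- example_bind_authorize() and its arguments are stand-ins for a protocol's bind path -- and it only assumes the kauth_authorize_network() interface already used elsewhere in uipc_socket.c. With this change, the socket layer's own kauth listener (socket_listener_cb(), see the diff below) answers KAUTH_REQ_NETWORK_BIND_PORT with KAUTH_RESULT_ALLOW itself, while KAUTH_REQ_NETWORK_BIND_PRIVPORT is still left to the secmodels (hence the note above).

/*
 * Illustrative sketch only -- not part of this diff.  Assumes a kernel
 * context (<sys/kauth.h>, <sys/socket.h>, <netinet/in.h>); the function
 * and its arguments are hypothetical stand-ins for a protocol's bind path.
 */
static int
example_bind_authorize(struct lwp *l, struct socket *so,
    struct sockaddr_in *sin, bool reserved_port)
{
	enum kauth_network_req req;

	req = reserved_port ? KAUTH_REQ_NETWORK_BIND_PRIVPORT
	    : KAUTH_REQ_NETWORK_BIND_PORT;

	/*
	 * KAUTH_REQ_NETWORK_BIND_PORT is now allowed by the socket
	 * subsystem's listener; KAUTH_REQ_NETWORK_BIND_PRIVPORT still
	 * defers to the active secmodel(s).
	 */
	return kauth_authorize_network(l->l_cred, KAUTH_NETWORK_BIND,
	    req, so, sin, NULL);
}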

cvs diff -r1.192 -r1.193 src/sys/kern/uipc_socket.c

--- src/sys/kern/uipc_socket.c 2009/10/03 01:41:39 1.192
+++ src/sys/kern/uipc_socket.c 2009/10/03 03:59:39 1.193
@@ -1,4 +1,4 @@
-/*	$NetBSD: uipc_socket.c,v 1.192 2009/10/03 01:41:39 elad Exp $	*/
+/*	$NetBSD: uipc_socket.c,v 1.193 2009/10/03 03:59:39 elad Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -63,7 +63,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.192 2009/10/03 01:41:39 elad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.193 2009/10/03 03:59:39 elad Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_sock_counters.h"
@@ -440,10 +440,15 @@
 	result = KAUTH_RESULT_DEFER;
 	req = (enum kauth_network_req)arg0;
 
-	if (action != KAUTH_NETWORK_SOCKET)
+	if ((action != KAUTH_NETWORK_SOCKET) &&
+	    (action != KAUTH_NETWORK_BIND))
 		return result;
 
 	switch (req) {
+	case KAUTH_REQ_NETWORK_BIND_PORT:
+		result = KAUTH_RESULT_ALLOW;
+		break;
+
 	case KAUTH_REQ_NETWORK_SOCKET_DROP: {
 		/* Normal users can only drop their own connections. */
 		struct socket *so = (struct socket *)arg1;
1136 * and then zero or more mbufs of data. 1141 * and then zero or more mbufs of data.
1137 * In order to avoid blocking network interrupts for the entire time here, 1142 * In order to avoid blocking network interrupts for the entire time here,
1138 * we splx() while doing the actual copy to user space. 1143 * we splx() while doing the actual copy to user space.
1139 * Although the sockbuf is locked, new data may still be appended, 1144 * Although the sockbuf is locked, new data may still be appended,
1140 * and thus we must maintain consistency of the sockbuf during that time. 1145 * and thus we must maintain consistency of the sockbuf during that time.
1141 * 1146 *
1142 * The caller may receive the data as a single mbuf chain by supplying 1147 * The caller may receive the data as a single mbuf chain by supplying
1143 * an mbuf **mp0 for use in returning the chain. The uio is then used 1148 * an mbuf **mp0 for use in returning the chain. The uio is then used
1144 * only for the count in uio_resid. 1149 * only for the count in uio_resid.
1145 */ 1150 */
1146int 1151int
1147soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, 1152soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
1148 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1153 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1149{ 1154{
1150 struct lwp *l = curlwp; 1155 struct lwp *l = curlwp;
1151 struct mbuf *m, **mp, *mt; 1156 struct mbuf *m, **mp, *mt;
1152 int atomic, flags, len, error, s, offset, moff, type, orig_resid; 1157 int atomic, flags, len, error, s, offset, moff, type, orig_resid;
1153 const struct protosw *pr; 1158 const struct protosw *pr;
1154 struct mbuf *nextrecord; 1159 struct mbuf *nextrecord;
1155 int mbuf_removed = 0; 1160 int mbuf_removed = 0;
1156 const struct domain *dom; 1161 const struct domain *dom;
1157 1162
1158 pr = so->so_proto; 1163 pr = so->so_proto;
1159 atomic = pr->pr_flags & PR_ATOMIC; 1164 atomic = pr->pr_flags & PR_ATOMIC;
1160 dom = pr->pr_domain; 1165 dom = pr->pr_domain;
1161 mp = mp0; 1166 mp = mp0;
1162 type = 0; 1167 type = 0;
1163 orig_resid = uio->uio_resid; 1168 orig_resid = uio->uio_resid;
1164 1169
1165 if (paddr != NULL) 1170 if (paddr != NULL)
1166 *paddr = NULL; 1171 *paddr = NULL;
1167 if (controlp != NULL) 1172 if (controlp != NULL)
1168 *controlp = NULL; 1173 *controlp = NULL;
1169 if (flagsp != NULL) 1174 if (flagsp != NULL)
1170 flags = *flagsp &~ MSG_EOR; 1175 flags = *flagsp &~ MSG_EOR;
1171 else 1176 else
1172 flags = 0; 1177 flags = 0;
1173 1178
1174 if ((flags & MSG_DONTWAIT) == 0) 1179 if ((flags & MSG_DONTWAIT) == 0)
1175 sodopendfree(); 1180 sodopendfree();
1176 1181
1177 if (flags & MSG_OOB) { 1182 if (flags & MSG_OOB) {
1178 m = m_get(M_WAIT, MT_DATA); 1183 m = m_get(M_WAIT, MT_DATA);
1179 solock(so); 1184 solock(so);
1180 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, 1185 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
1181 (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l); 1186 (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l);
1182 sounlock(so); 1187 sounlock(so);
1183 if (error) 1188 if (error)
1184 goto bad; 1189 goto bad;
1185 do { 1190 do {
1186 error = uiomove(mtod(m, void *), 1191 error = uiomove(mtod(m, void *),
1187 (int) min(uio->uio_resid, m->m_len), uio); 1192 (int) min(uio->uio_resid, m->m_len), uio);
1188 m = m_free(m); 1193 m = m_free(m);
1189 } while (uio->uio_resid > 0 && error == 0 && m); 1194 } while (uio->uio_resid > 0 && error == 0 && m);
1190 bad: 1195 bad:
1191 if (m != NULL) 1196 if (m != NULL)
1192 m_freem(m); 1197 m_freem(m);
1193 return error; 1198 return error;
1194 } 1199 }
1195 if (mp != NULL) 1200 if (mp != NULL)
1196 *mp = NULL; 1201 *mp = NULL;
1197 1202
1198 /* 1203 /*
1199 * solock() provides atomicity of access. splsoftnet() prevents 1204 * solock() provides atomicity of access. splsoftnet() prevents
1200 * protocol processing soft interrupts from interrupting us and 1205 * protocol processing soft interrupts from interrupting us and
1201 * blocking (expensive). 1206 * blocking (expensive).
1202 */ 1207 */
1203 s = splsoftnet(); 1208 s = splsoftnet();
1204 solock(so); 1209 solock(so);
1205 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) 1210 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
1206 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l); 1211 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l);
1207 1212
1208 restart: 1213 restart:
1209 if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) { 1214 if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
1210 sounlock(so); 1215 sounlock(so);
1211 splx(s); 1216 splx(s);
1212 return error; 1217 return error;
1213 } 1218 }
1214 1219
1215 m = so->so_rcv.sb_mb; 1220 m = so->so_rcv.sb_mb;
1216 /* 1221 /*
1217 * If we have less data than requested, block awaiting more 1222 * If we have less data than requested, block awaiting more
1218 * (subject to any timeout) if: 1223 * (subject to any timeout) if:
1219 * 1. the current count is less than the low water mark, 1224 * 1. the current count is less than the low water mark,
1220 * 2. MSG_WAITALL is set, and it is possible to do the entire 1225 * 2. MSG_WAITALL is set, and it is possible to do the entire
1221 * receive operation at once if we block (resid <= hiwat), or 1226 * receive operation at once if we block (resid <= hiwat), or
1222 * 3. MSG_DONTWAIT is not set. 1227 * 3. MSG_DONTWAIT is not set.
1223 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1228 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1224 * we have to do the receive in sections, and thus risk returning 1229 * we have to do the receive in sections, and thus risk returning
1225 * a short count if a timeout or signal occurs after we start. 1230 * a short count if a timeout or signal occurs after we start.
1226 */ 1231 */
1227 if (m == NULL || 1232 if (m == NULL ||
1228 ((flags & MSG_DONTWAIT) == 0 && 1233 ((flags & MSG_DONTWAIT) == 0 &&
1229 so->so_rcv.sb_cc < uio->uio_resid && 1234 so->so_rcv.sb_cc < uio->uio_resid &&
1230 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 1235 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1231 ((flags & MSG_WAITALL) && 1236 ((flags & MSG_WAITALL) &&
1232 uio->uio_resid <= so->so_rcv.sb_hiwat)) && 1237 uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1233 m->m_nextpkt == NULL && !atomic)) { 1238 m->m_nextpkt == NULL && !atomic)) {
1234#ifdef DIAGNOSTIC 1239#ifdef DIAGNOSTIC
1235 if (m == NULL && so->so_rcv.sb_cc) 1240 if (m == NULL && so->so_rcv.sb_cc)
1236 panic("receive 1"); 1241 panic("receive 1");
1237#endif 1242#endif
1238 if (so->so_error) { 1243 if (so->so_error) {
1239 if (m != NULL) 1244 if (m != NULL)
1240 goto dontblock; 1245 goto dontblock;
1241 error = so->so_error; 1246 error = so->so_error;
1242 if ((flags & MSG_PEEK) == 0) 1247 if ((flags & MSG_PEEK) == 0)
1243 so->so_error = 0; 1248 so->so_error = 0;
1244 goto release; 1249 goto release;
1245 } 1250 }
1246 if (so->so_state & SS_CANTRCVMORE) { 1251 if (so->so_state & SS_CANTRCVMORE) {
1247 if (m != NULL) 1252 if (m != NULL)
1248 goto dontblock; 1253 goto dontblock;
1249 else 1254 else
1250 goto release; 1255 goto release;
1251 } 1256 }
1252 for (; m != NULL; m = m->m_next) 1257 for (; m != NULL; m = m->m_next)
1253 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1258 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1254 m = so->so_rcv.sb_mb; 1259 m = so->so_rcv.sb_mb;
1255 goto dontblock; 1260 goto dontblock;
1256 } 1261 }
1257 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1262 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1258 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1263 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1259 error = ENOTCONN; 1264 error = ENOTCONN;
1260 goto release; 1265 goto release;
1261 } 1266 }
1262 if (uio->uio_resid == 0) 1267 if (uio->uio_resid == 0)
1263 goto release; 1268 goto release;
1264 if (so->so_nbio || (flags & MSG_DONTWAIT)) { 1269 if (so->so_nbio || (flags & MSG_DONTWAIT)) {
1265 error = EWOULDBLOCK; 1270 error = EWOULDBLOCK;
1266 goto release; 1271 goto release;
1267 } 1272 }
1268 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 1273 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
1269 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 1274 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
1270 sbunlock(&so->so_rcv); 1275 sbunlock(&so->so_rcv);
1271 error = sbwait(&so->so_rcv); 1276 error = sbwait(&so->so_rcv);
1272 if (error != 0) { 1277 if (error != 0) {
1273 sounlock(so); 1278 sounlock(so);
1274 splx(s); 1279 splx(s);
1275 return error; 1280 return error;
1276 } 1281 }
1277 goto restart; 1282 goto restart;
1278 } 1283 }
1279 dontblock: 1284 dontblock:
1280 /* 1285 /*
1281 * On entry here, m points to the first record of the socket buffer. 1286 * On entry here, m points to the first record of the socket buffer.
1282 * From this point onward, we maintain 'nextrecord' as a cache of the 1287 * From this point onward, we maintain 'nextrecord' as a cache of the
1283 * pointer to the next record in the socket buffer. We must keep the 1288 * pointer to the next record in the socket buffer. We must keep the
1284 * various socket buffer pointers and local stack versions of the 1289 * various socket buffer pointers and local stack versions of the
1285 * pointers in sync, pushing out modifications before dropping the 1290 * pointers in sync, pushing out modifications before dropping the
1286 * socket lock, and re-reading them when picking it up. 1291 * socket lock, and re-reading them when picking it up.
1287 * 1292 *
1288 * Otherwise, we will race with the network stack appending new data 1293 * Otherwise, we will race with the network stack appending new data
1289 * or records onto the socket buffer by using inconsistent/stale 1294 * or records onto the socket buffer by using inconsistent/stale
1290 * versions of the field, possibly resulting in socket buffer 1295 * versions of the field, possibly resulting in socket buffer
1291 * corruption. 1296 * corruption.
1292 * 1297 *
1293 * By holding the high-level sblock(), we prevent simultaneous 1298 * By holding the high-level sblock(), we prevent simultaneous
1294 * readers from pulling off the front of the socket buffer. 1299 * readers from pulling off the front of the socket buffer.
1295 */ 1300 */
1296 if (l != NULL) 1301 if (l != NULL)
1297 l->l_ru.ru_msgrcv++; 1302 l->l_ru.ru_msgrcv++;
1298 KASSERT(m == so->so_rcv.sb_mb); 1303 KASSERT(m == so->so_rcv.sb_mb);
1299 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 1304 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
1300 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 1305 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
1301 nextrecord = m->m_nextpkt; 1306 nextrecord = m->m_nextpkt;
1302 if (pr->pr_flags & PR_ADDR) { 1307 if (pr->pr_flags & PR_ADDR) {
1303#ifdef DIAGNOSTIC 1308#ifdef DIAGNOSTIC
1304 if (m->m_type != MT_SONAME) 1309 if (m->m_type != MT_SONAME)
1305 panic("receive 1a"); 1310 panic("receive 1a");
1306#endif 1311#endif
1307 orig_resid = 0; 1312 orig_resid = 0;
1308 if (flags & MSG_PEEK) { 1313 if (flags & MSG_PEEK) {
1309 if (paddr) 1314 if (paddr)
1310 *paddr = m_copy(m, 0, m->m_len); 1315 *paddr = m_copy(m, 0, m->m_len);
1311 m = m->m_next; 1316 m = m->m_next;
1312 } else { 1317 } else {
1313 sbfree(&so->so_rcv, m); 1318 sbfree(&so->so_rcv, m);
1314 mbuf_removed = 1; 1319 mbuf_removed = 1;
1315 if (paddr != NULL) { 1320 if (paddr != NULL) {
1316 *paddr = m; 1321 *paddr = m;
1317 so->so_rcv.sb_mb = m->m_next; 1322 so->so_rcv.sb_mb = m->m_next;
1318 m->m_next = NULL; 1323 m->m_next = NULL;
1319 m = so->so_rcv.sb_mb; 1324 m = so->so_rcv.sb_mb;
1320 } else { 1325 } else {
1321 MFREE(m, so->so_rcv.sb_mb); 1326 MFREE(m, so->so_rcv.sb_mb);
1322 m = so->so_rcv.sb_mb; 1327 m = so->so_rcv.sb_mb;
1323 } 1328 }
1324 sbsync(&so->so_rcv, nextrecord); 1329 sbsync(&so->so_rcv, nextrecord);
1325 } 1330 }
1326 } 1331 }
1327 1332
1328 /* 1333 /*
1329 * Process one or more MT_CONTROL mbufs present before any data mbufs 1334 * Process one or more MT_CONTROL mbufs present before any data mbufs
1330 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we 1335 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1331 * just copy the data; if !MSG_PEEK, we call into the protocol to 1336 * just copy the data; if !MSG_PEEK, we call into the protocol to
1332 * perform externalization (or freeing if controlp == NULL). 1337 * perform externalization (or freeing if controlp == NULL).
1333 */ 1338 */
1334 if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) { 1339 if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) {
1335 struct mbuf *cm = NULL, *cmn; 1340 struct mbuf *cm = NULL, *cmn;
1336 struct mbuf **cme = &cm; 1341 struct mbuf **cme = &cm;
1337 1342
1338 do { 1343 do {
1339 if (flags & MSG_PEEK) { 1344 if (flags & MSG_PEEK) {
1340 if (controlp != NULL) { 1345 if (controlp != NULL) {
1341 *controlp = m_copy(m, 0, m->m_len); 1346 *controlp = m_copy(m, 0, m->m_len);
1342 controlp = &(*controlp)->m_next; 1347 controlp = &(*controlp)->m_next;
1343 } 1348 }
1344 m = m->m_next; 1349 m = m->m_next;
1345 } else { 1350 } else {
1346 sbfree(&so->so_rcv, m); 1351 sbfree(&so->so_rcv, m);
1347 so->so_rcv.sb_mb = m->m_next; 1352 so->so_rcv.sb_mb = m->m_next;
1348 m->m_next = NULL; 1353 m->m_next = NULL;
1349 *cme = m; 1354 *cme = m;
1350 cme = &(*cme)->m_next; 1355 cme = &(*cme)->m_next;
1351 m = so->so_rcv.sb_mb; 1356 m = so->so_rcv.sb_mb;
1352 } 1357 }
1353 } while (m != NULL && m->m_type == MT_CONTROL); 1358 } while (m != NULL && m->m_type == MT_CONTROL);
1354 if ((flags & MSG_PEEK) == 0) 1359 if ((flags & MSG_PEEK) == 0)
1355 sbsync(&so->so_rcv, nextrecord); 1360 sbsync(&so->so_rcv, nextrecord);
1356 for (; cm != NULL; cm = cmn) { 1361 for (; cm != NULL; cm = cmn) {
1357 cmn = cm->m_next; 1362 cmn = cm->m_next;
1358 cm->m_next = NULL; 1363 cm->m_next = NULL;
1359 type = mtod(cm, struct cmsghdr *)->cmsg_type; 1364 type = mtod(cm, struct cmsghdr *)->cmsg_type;
1360 if (controlp != NULL) { 1365 if (controlp != NULL) {
1361 if (dom->dom_externalize != NULL && 1366 if (dom->dom_externalize != NULL &&
1362 type == SCM_RIGHTS) { 1367 type == SCM_RIGHTS) {
1363 sounlock(so); 1368 sounlock(so);
1364 splx(s); 1369 splx(s);
1365 error = (*dom->dom_externalize)(cm, l); 1370 error = (*dom->dom_externalize)(cm, l);
1366 s = splsoftnet(); 1371 s = splsoftnet();
1367 solock(so); 1372 solock(so);
1368 } 1373 }
1369 *controlp = cm; 1374 *controlp = cm;
1370 while (*controlp != NULL) 1375 while (*controlp != NULL)
1371 controlp = &(*controlp)->m_next; 1376 controlp = &(*controlp)->m_next;
1372 } else { 1377 } else {
1373 /* 1378 /*
1374 * Dispose of any SCM_RIGHTS message that went 1379 * Dispose of any SCM_RIGHTS message that went
1375 * through the read path rather than recv. 1380 * through the read path rather than recv.
1376 */ 1381 */
1377 if (dom->dom_dispose != NULL && 1382 if (dom->dom_dispose != NULL &&
1378 type == SCM_RIGHTS) { 1383 type == SCM_RIGHTS) {
1379 sounlock(so); 1384 sounlock(so);
1380 (*dom->dom_dispose)(cm); 1385 (*dom->dom_dispose)(cm);
1381 solock(so); 1386 solock(so);
1382 } 1387 }
1383 m_freem(cm); 1388 m_freem(cm);
1384 } 1389 }
1385 } 1390 }
1386 if (m != NULL) 1391 if (m != NULL)
1387 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1392 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1388 else 1393 else
1389 nextrecord = so->so_rcv.sb_mb; 1394 nextrecord = so->so_rcv.sb_mb;
1390 orig_resid = 0; 1395 orig_resid = 0;
1391 } 1396 }
1392 1397
1393 /* If m is non-NULL, we have some data to read. */ 1398 /* If m is non-NULL, we have some data to read. */
1394 if (__predict_true(m != NULL)) { 1399 if (__predict_true(m != NULL)) {
1395 type = m->m_type; 1400 type = m->m_type;
1396 if (type == MT_OOBDATA) 1401 if (type == MT_OOBDATA)
1397 flags |= MSG_OOB; 1402 flags |= MSG_OOB;
1398 } 1403 }
1399 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 1404 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
1400 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 1405 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");
1401 1406
1402 moff = 0; 1407 moff = 0;
1403 offset = 0; 1408 offset = 0;
1404 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1409 while (m != NULL && uio->uio_resid > 0 && error == 0) {
1405 if (m->m_type == MT_OOBDATA) { 1410 if (m->m_type == MT_OOBDATA) {
1406 if (type != MT_OOBDATA) 1411 if (type != MT_OOBDATA)
1407 break; 1412 break;
1408 } else if (type == MT_OOBDATA) 1413 } else if (type == MT_OOBDATA)
1409 break; 1414 break;
1410#ifdef DIAGNOSTIC 1415#ifdef DIAGNOSTIC
1411 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) 1416 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
1412 panic("receive 3"); 1417 panic("receive 3");
1413#endif 1418#endif
1414 so->so_state &= ~SS_RCVATMARK; 1419 so->so_state &= ~SS_RCVATMARK;
1415 len = uio->uio_resid; 1420 len = uio->uio_resid;
1416 if (so->so_oobmark && len > so->so_oobmark - offset) 1421 if (so->so_oobmark && len > so->so_oobmark - offset)
1417 len = so->so_oobmark - offset; 1422 len = so->so_oobmark - offset;
1418 if (len > m->m_len - moff) 1423 if (len > m->m_len - moff)
1419 len = m->m_len - moff; 1424 len = m->m_len - moff;
1420 /* 1425 /*
1421 * If mp is set, just pass back the mbufs. 1426 * If mp is set, just pass back the mbufs.
1422 * Otherwise copy them out via the uio, then free. 1427 * Otherwise copy them out via the uio, then free.
1423 * Sockbuf must be consistent here (points to current mbuf, 1428 * Sockbuf must be consistent here (points to current mbuf,
1424 * it points to next record) when we drop priority; 1429 * it points to next record) when we drop priority;
1425 * we must note any additions to the sockbuf when we 1430 * we must note any additions to the sockbuf when we
1426 * block interrupts again. 1431 * block interrupts again.
1427 */ 1432 */
1428 if (mp == NULL) { 1433 if (mp == NULL) {
1429 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 1434 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
1430 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 1435 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
1431 sounlock(so); 1436 sounlock(so);
1432 splx(s); 1437 splx(s);
1433 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1438 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1434 s = splsoftnet(); 1439 s = splsoftnet();
1435 solock(so); 1440 solock(so);
1436 if (error != 0) { 1441 if (error != 0) {
1437 /* 1442 /*
1438 * If any part of the record has been removed 1443 * If any part of the record has been removed
1439 * (such as the MT_SONAME mbuf, which will 1444 * (such as the MT_SONAME mbuf, which will
1440 * happen when PR_ADDR, and thus also 1445 * happen when PR_ADDR, and thus also
1441 * PR_ATOMIC, is set), then drop the entire 1446 * PR_ATOMIC, is set), then drop the entire
1442 * record to maintain the atomicity of the 1447 * record to maintain the atomicity of the
1443 * receive operation. 1448 * receive operation.
1444 * 1449 *
1445 * This avoids a later panic("receive 1a") 1450 * This avoids a later panic("receive 1a")

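The uipc_socket.c hunks excerpted above only show sosend()/soreceive() context; the change that actually issues the bind authorization is not part of the excerpt. As a rough sketch of the pattern the log message describes (the networking subsystem, rather than the secmodel, decides that ordinary ports may be bound, and only privileged ports are referred to the kauth(9) network scope), a check along the following lines could be issued by the subsystem. The helper name, argument list, and include set below are assumptions for illustration, not the committed code:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/socketvar.h>
#include <netinet/in.h>

/*
 * Hypothetical sketch, not the committed hunk: the subsystem itself allows
 * binds to non-privileged ports and only asks the kauth(9) network scope
 * about privileged ones, so no secmodel needs an unconditional allow for
 * KAUTH_REQ_NETWORK_BIND_PORT.
 */
static int
example_bind_check(kauth_cred_t cred, struct socket *so,
    struct sockaddr_in *sin)
{

	if (ntohs(sin->sin_port) >= IPPORT_RESERVED)
		return 0;	/* policy lives here, not in the secmodel */

	return kauth_authorize_network(cred, KAUTH_NETWORK_BIND,
	    KAUTH_REQ_NETWORK_BIND_PRIVPORT, so, sin, NULL);
}
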
cvs diff -r1.25 -r1.26 src/sys/secmodel/suser/secmodel_suser.c

--- src/sys/secmodel/suser/secmodel_suser.c 2009/10/03 03:38:31 1.25
+++ src/sys/secmodel/suser/secmodel_suser.c 2009/10/03 03:59:39 1.26
@@ -1,890 +1,886 @@ @@ -1,890 +1,886 @@
1/* $NetBSD: secmodel_suser.c,v 1.25 2009/10/03 03:38:31 elad Exp $ */ 1/* $NetBSD: secmodel_suser.c,v 1.26 2009/10/03 03:59:39 elad Exp $ */
2/*- 2/*-
3 * Copyright (c) 2006 Elad Efrat <elad@NetBSD.org> 3 * Copyright (c) 2006 Elad Efrat <elad@NetBSD.org>
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products 14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission. 15 * derived from this software without specific prior written permission.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * This file contains kauth(9) listeners needed to implement the traditional 30 * This file contains kauth(9) listeners needed to implement the traditional
31 * NetBSD superuser access restrictions. 31 * NetBSD superuser access restrictions.
32 * 32 *
33 * There are two main resources a request can be issued to: user-owned and 33 * There are two main resources a request can be issued to: user-owned and
34 * system owned. For the first, traditional Unix access checks are done, as 34 * system owned. For the first, traditional Unix access checks are done, as
35 * well as superuser checks. If needed, the request context is examined before 35 * well as superuser checks. If needed, the request context is examined before
36 * a decision is made. For the latter, usually only superuser checks are done 36 * a decision is made. For the latter, usually only superuser checks are done
37 * as normal users are not allowed to access system resources. 37 * as normal users are not allowed to access system resources.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: secmodel_suser.c,v 1.25 2009/10/03 03:38:31 elad Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: secmodel_suser.c,v 1.26 2009/10/03 03:59:39 elad Exp $");
42 42
43#include <sys/types.h> 43#include <sys/types.h>
44#include <sys/param.h> 44#include <sys/param.h>
45#include <sys/kauth.h> 45#include <sys/kauth.h>
46 46
47#include <sys/mutex.h> 47#include <sys/mutex.h>
48#include <sys/mount.h> 48#include <sys/mount.h>
49#include <sys/socketvar.h> 49#include <sys/socketvar.h>
50#include <sys/sysctl.h> 50#include <sys/sysctl.h>
51#include <sys/vnode.h> 51#include <sys/vnode.h>
52#include <sys/proc.h> 52#include <sys/proc.h>
53#include <sys/uidinfo.h> 53#include <sys/uidinfo.h>
54#include <sys/module.h> 54#include <sys/module.h>
55 55
56#include <secmodel/suser/suser.h> 56#include <secmodel/suser/suser.h>
57 57
58MODULE(MODULE_CLASS_SECMODEL, suser, NULL); 58MODULE(MODULE_CLASS_SECMODEL, suser, NULL);
59 59
60static int secmodel_suser_curtain; 60static int secmodel_suser_curtain;
61/* static */ int dovfsusermount; 61/* static */ int dovfsusermount;
62 62
63static kauth_listener_t l_generic, l_system, l_process, l_network, l_machdep, 63static kauth_listener_t l_generic, l_system, l_process, l_network, l_machdep,
64 l_device, l_vnode; 64 l_device, l_vnode;
65 65
66static struct sysctllog *suser_sysctl_log; 66static struct sysctllog *suser_sysctl_log;
67 67
68void 68void
69sysctl_security_suser_setup(struct sysctllog **clog) 69sysctl_security_suser_setup(struct sysctllog **clog)
70{ 70{
71 const struct sysctlnode *rnode; 71 const struct sysctlnode *rnode;
72 72
73 sysctl_createv(clog, 0, NULL, &rnode, 73 sysctl_createv(clog, 0, NULL, &rnode,
74 CTLFLAG_PERMANENT, 74 CTLFLAG_PERMANENT,
75 CTLTYPE_NODE, "security", NULL, 75 CTLTYPE_NODE, "security", NULL,
76 NULL, 0, NULL, 0, 76 NULL, 0, NULL, 0,
77 CTL_SECURITY, CTL_EOL); 77 CTL_SECURITY, CTL_EOL);
78 78
79 sysctl_createv(clog, 0, &rnode, &rnode, 79 sysctl_createv(clog, 0, &rnode, &rnode,
80 CTLFLAG_PERMANENT, 80 CTLFLAG_PERMANENT,
81 CTLTYPE_NODE, "models", NULL, 81 CTLTYPE_NODE, "models", NULL,
82 NULL, 0, NULL, 0, 82 NULL, 0, NULL, 0,
83 CTL_CREATE, CTL_EOL); 83 CTL_CREATE, CTL_EOL);
84 84
85 sysctl_createv(clog, 0, &rnode, &rnode, 85 sysctl_createv(clog, 0, &rnode, &rnode,
86 CTLFLAG_PERMANENT, 86 CTLFLAG_PERMANENT,
87 CTLTYPE_NODE, "suser", NULL, 87 CTLTYPE_NODE, "suser", NULL,
88 NULL, 0, NULL, 0, 88 NULL, 0, NULL, 0,
89 CTL_CREATE, CTL_EOL); 89 CTL_CREATE, CTL_EOL);
90 90
91 sysctl_createv(clog, 0, &rnode, NULL, 91 sysctl_createv(clog, 0, &rnode, NULL,
92 CTLFLAG_PERMANENT, 92 CTLFLAG_PERMANENT,
93 CTLTYPE_STRING, "name", NULL, 93 CTLTYPE_STRING, "name", NULL,
94 NULL, 0, __UNCONST("Traditional NetBSD: Superuser"), 0, 94 NULL, 0, __UNCONST("Traditional NetBSD: Superuser"), 0,
95 CTL_CREATE, CTL_EOL); 95 CTL_CREATE, CTL_EOL);
96 96
97 sysctl_createv(clog, 0, &rnode, NULL, 97 sysctl_createv(clog, 0, &rnode, NULL,
98 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 98 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
99 CTLTYPE_INT, "curtain", 99 CTLTYPE_INT, "curtain",
100 SYSCTL_DESCR("Curtain information about objects to "\ 100 SYSCTL_DESCR("Curtain information about objects to "\
101 "users not owning them."), 101 "users not owning them."),
102 NULL, 0, &secmodel_suser_curtain, 0, 102 NULL, 0, &secmodel_suser_curtain, 0,
103 CTL_CREATE, CTL_EOL); 103 CTL_CREATE, CTL_EOL);
104 104
105 sysctl_createv(clog, 0, &rnode, NULL, 105 sysctl_createv(clog, 0, &rnode, NULL,
106 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 106 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
107 CTLTYPE_INT, "usermount", 107 CTLTYPE_INT, "usermount",
108 SYSCTL_DESCR("Whether unprivileged users may mount " 108 SYSCTL_DESCR("Whether unprivileged users may mount "
109 "filesystems"), 109 "filesystems"),
110 NULL, 0, &dovfsusermount, 0, 110 NULL, 0, &dovfsusermount, 0,
111 CTL_CREATE, CTL_EOL); 111 CTL_CREATE, CTL_EOL);
112 112
113 /* Compatibility: security.curtain */ 113 /* Compatibility: security.curtain */
114 sysctl_createv(clog, 0, NULL, &rnode, 114 sysctl_createv(clog, 0, NULL, &rnode,
115 CTLFLAG_PERMANENT, 115 CTLFLAG_PERMANENT,
116 CTLTYPE_NODE, "security", NULL, 116 CTLTYPE_NODE, "security", NULL,
117 NULL, 0, NULL, 0, 117 NULL, 0, NULL, 0,
118 CTL_SECURITY, CTL_EOL); 118 CTL_SECURITY, CTL_EOL);
119 119
120 sysctl_createv(clog, 0, &rnode, NULL, 120 sysctl_createv(clog, 0, &rnode, NULL,
121 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 121 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
122 CTLTYPE_INT, "curtain", 122 CTLTYPE_INT, "curtain",
123 SYSCTL_DESCR("Curtain information about objects to "\ 123 SYSCTL_DESCR("Curtain information about objects to "\
124 "users not owning them."), 124 "users not owning them."),
125 NULL, 0, &secmodel_suser_curtain, 0, 125 NULL, 0, &secmodel_suser_curtain, 0,
126 CTL_CREATE, CTL_EOL); 126 CTL_CREATE, CTL_EOL);
127 127
128 /* Compatibility: vfs.generic.usermount */ 128 /* Compatibility: vfs.generic.usermount */
129 sysctl_createv(clog, 0, NULL, NULL, 129 sysctl_createv(clog, 0, NULL, NULL,
130 CTLFLAG_PERMANENT, 130 CTLFLAG_PERMANENT,
131 CTLTYPE_NODE, "vfs", NULL, 131 CTLTYPE_NODE, "vfs", NULL,
132 NULL, 0, NULL, 0, 132 NULL, 0, NULL, 0,
133 CTL_VFS, CTL_EOL); 133 CTL_VFS, CTL_EOL);
134 134
135 sysctl_createv(clog, 0, NULL, NULL, 135 sysctl_createv(clog, 0, NULL, NULL,
136 CTLFLAG_PERMANENT, 136 CTLFLAG_PERMANENT,
137 CTLTYPE_NODE, "generic", 137 CTLTYPE_NODE, "generic",
138 SYSCTL_DESCR("Non-specific vfs related information"), 138 SYSCTL_DESCR("Non-specific vfs related information"),
139 NULL, 0, NULL, 0, 139 NULL, 0, NULL, 0,
140 CTL_VFS, VFS_GENERIC, CTL_EOL); 140 CTL_VFS, VFS_GENERIC, CTL_EOL);
141 141
142 sysctl_createv(clog, 0, NULL, NULL, 142 sysctl_createv(clog, 0, NULL, NULL,
143 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 143 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
144 CTLTYPE_INT, "usermount", 144 CTLTYPE_INT, "usermount",
145 SYSCTL_DESCR("Whether unprivileged users may mount " 145 SYSCTL_DESCR("Whether unprivileged users may mount "
146 "filesystems"), 146 "filesystems"),
147 NULL, 0, &dovfsusermount, 0, 147 NULL, 0, &dovfsusermount, 0,
148 CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL); 148 CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
149} 149}
150 150
151void 151void
152secmodel_suser_init(void) 152secmodel_suser_init(void)
153{ 153{
154 secmodel_suser_curtain = 0; 154 secmodel_suser_curtain = 0;
155 dovfsusermount = 0; 155 dovfsusermount = 0;
156} 156}
157 157
158void 158void
159secmodel_suser_start(void) 159secmodel_suser_start(void)
160{ 160{
161 l_generic = kauth_listen_scope(KAUTH_SCOPE_GENERIC, 161 l_generic = kauth_listen_scope(KAUTH_SCOPE_GENERIC,
162 secmodel_suser_generic_cb, NULL); 162 secmodel_suser_generic_cb, NULL);
163 l_system = kauth_listen_scope(KAUTH_SCOPE_SYSTEM, 163 l_system = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
164 secmodel_suser_system_cb, NULL); 164 secmodel_suser_system_cb, NULL);
165 l_process = kauth_listen_scope(KAUTH_SCOPE_PROCESS, 165 l_process = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
166 secmodel_suser_process_cb, NULL); 166 secmodel_suser_process_cb, NULL);
167 l_network = kauth_listen_scope(KAUTH_SCOPE_NETWORK, 167 l_network = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
168 secmodel_suser_network_cb, NULL); 168 secmodel_suser_network_cb, NULL);
169 l_machdep = kauth_listen_scope(KAUTH_SCOPE_MACHDEP, 169 l_machdep = kauth_listen_scope(KAUTH_SCOPE_MACHDEP,
170 secmodel_suser_machdep_cb, NULL); 170 secmodel_suser_machdep_cb, NULL);
171 l_device = kauth_listen_scope(KAUTH_SCOPE_DEVICE, 171 l_device = kauth_listen_scope(KAUTH_SCOPE_DEVICE,
172 secmodel_suser_device_cb, NULL); 172 secmodel_suser_device_cb, NULL);
173 l_vnode = kauth_listen_scope(KAUTH_SCOPE_VNODE, 173 l_vnode = kauth_listen_scope(KAUTH_SCOPE_VNODE,
174 secmodel_suser_vnode_cb, NULL); 174 secmodel_suser_vnode_cb, NULL);
175} 175}
176 176
177void 177void
178secmodel_suser_stop(void) 178secmodel_suser_stop(void)
179{ 179{
180 kauth_unlisten_scope(l_generic); 180 kauth_unlisten_scope(l_generic);
181 kauth_unlisten_scope(l_system); 181 kauth_unlisten_scope(l_system);
182 kauth_unlisten_scope(l_process); 182 kauth_unlisten_scope(l_process);
183 kauth_unlisten_scope(l_network); 183 kauth_unlisten_scope(l_network);
184 kauth_unlisten_scope(l_machdep); 184 kauth_unlisten_scope(l_machdep);
185 kauth_unlisten_scope(l_device); 185 kauth_unlisten_scope(l_device);
186 kauth_unlisten_scope(l_vnode); 186 kauth_unlisten_scope(l_vnode);
187} 187}
188 188
189static int 189static int
190suser_modcmd(modcmd_t cmd, void *arg) 190suser_modcmd(modcmd_t cmd, void *arg)
191{ 191{
192 int error = 0; 192 int error = 0;
193 193
194 switch (cmd) { 194 switch (cmd) {
195 case MODULE_CMD_INIT: 195 case MODULE_CMD_INIT:
196 secmodel_suser_init(); 196 secmodel_suser_init();
197 secmodel_suser_start(); 197 secmodel_suser_start();
198 sysctl_security_suser_setup(&suser_sysctl_log); 198 sysctl_security_suser_setup(&suser_sysctl_log);
199 break; 199 break;
200 200
201 case MODULE_CMD_FINI: 201 case MODULE_CMD_FINI:
202 sysctl_teardown(&suser_sysctl_log); 202 sysctl_teardown(&suser_sysctl_log);
203 secmodel_suser_stop(); 203 secmodel_suser_stop();
204 break; 204 break;
205 205
206 case MODULE_CMD_AUTOUNLOAD: 206 case MODULE_CMD_AUTOUNLOAD:
207 error = EPERM; 207 error = EPERM;
208 break; 208 break;
209 209
210 default: 210 default:
211 error = ENOTTY; 211 error = ENOTTY;
212 break; 212 break;
213 } 213 }
214 214
215 return (error); 215 return (error);
216} 216}
217 217
218/* 218/*
219 * kauth(9) listener 219 * kauth(9) listener
220 * 220 *
221 * Security model: Traditional NetBSD 221 * Security model: Traditional NetBSD
222 * Scope: Generic 222 * Scope: Generic
223 * Responsibility: Superuser access 223 * Responsibility: Superuser access
224 */ 224 */
225int 225int
226secmodel_suser_generic_cb(kauth_cred_t cred, kauth_action_t action, 226secmodel_suser_generic_cb(kauth_cred_t cred, kauth_action_t action,
227 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 227 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
228{ 228{
229 bool isroot; 229 bool isroot;
230 int result; 230 int result;
231 231
232 isroot = (kauth_cred_geteuid(cred) == 0); 232 isroot = (kauth_cred_geteuid(cred) == 0);
233 result = KAUTH_RESULT_DEFER; 233 result = KAUTH_RESULT_DEFER;
234 234
235 switch (action) { 235 switch (action) {
236 case KAUTH_GENERIC_ISSUSER: 236 case KAUTH_GENERIC_ISSUSER:
237 if (isroot) 237 if (isroot)
238 result = KAUTH_RESULT_ALLOW; 238 result = KAUTH_RESULT_ALLOW;
239 break; 239 break;
240 240
241 case KAUTH_GENERIC_CANSEE:  241 case KAUTH_GENERIC_CANSEE:
242 if (!secmodel_suser_curtain) 242 if (!secmodel_suser_curtain)
243 result = KAUTH_RESULT_ALLOW; 243 result = KAUTH_RESULT_ALLOW;
244 else if (isroot || kauth_cred_uidmatch(cred, arg0)) 244 else if (isroot || kauth_cred_uidmatch(cred, arg0))
245 result = KAUTH_RESULT_ALLOW; 245 result = KAUTH_RESULT_ALLOW;
246 246
247 break; 247 break;
248 248
249 default: 249 default:
250 break; 250 break;
251 } 251 }
252 252
253 return (result); 253 return (result);
254} 254}
255 255
256/* 256/*
257 * kauth(9) listener 257 * kauth(9) listener
258 * 258 *
259 * Security model: Traditional NetBSD 259 * Security model: Traditional NetBSD
260 * Scope: System 260 * Scope: System
261 * Responsibility: Superuser access 261 * Responsibility: Superuser access
262 */ 262 */
263int 263int
264secmodel_suser_system_cb(kauth_cred_t cred, kauth_action_t action, 264secmodel_suser_system_cb(kauth_cred_t cred, kauth_action_t action,
265 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 265 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
266{ 266{
267 bool isroot; 267 bool isroot;
268 int result; 268 int result;
269 enum kauth_system_req req; 269 enum kauth_system_req req;
270 270
271 isroot = (kauth_cred_geteuid(cred) == 0); 271 isroot = (kauth_cred_geteuid(cred) == 0);
272 result = KAUTH_RESULT_DEFER; 272 result = KAUTH_RESULT_DEFER;
273 req = (enum kauth_system_req)arg0; 273 req = (enum kauth_system_req)arg0;
274 274
275 switch (action) { 275 switch (action) {
276 case KAUTH_SYSTEM_CPU: 276 case KAUTH_SYSTEM_CPU:
277 switch (req) { 277 switch (req) {
278 case KAUTH_REQ_SYSTEM_CPU_SETSTATE: 278 case KAUTH_REQ_SYSTEM_CPU_SETSTATE:
279 if (isroot) 279 if (isroot)
280 result = KAUTH_RESULT_ALLOW; 280 result = KAUTH_RESULT_ALLOW;
281 281
282 break; 282 break;
283 283
284 default: 284 default:
285 break; 285 break;
286 } 286 }
287 287
288 break; 288 break;
289 289
290 case KAUTH_SYSTEM_FS_QUOTA: 290 case KAUTH_SYSTEM_FS_QUOTA:
291 switch (req) { 291 switch (req) {
292 case KAUTH_REQ_SYSTEM_FS_QUOTA_GET: 292 case KAUTH_REQ_SYSTEM_FS_QUOTA_GET:
293 case KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF: 293 case KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF:
294 case KAUTH_REQ_SYSTEM_FS_QUOTA_MANAGE: 294 case KAUTH_REQ_SYSTEM_FS_QUOTA_MANAGE:
295 case KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT: 295 case KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT:
296 if (isroot) 296 if (isroot)
297 result = KAUTH_RESULT_ALLOW; 297 result = KAUTH_RESULT_ALLOW;
298 break; 298 break;
299 299
300 default: 300 default:
301 break; 301 break;
302 } 302 }
303 303
304 break; 304 break;
305 305
306 case KAUTH_SYSTEM_FS_RESERVEDSPACE: 306 case KAUTH_SYSTEM_FS_RESERVEDSPACE:
307 if (isroot) 307 if (isroot)
308 result = KAUTH_RESULT_ALLOW; 308 result = KAUTH_RESULT_ALLOW;
309 break; 309 break;
310 310
311 case KAUTH_SYSTEM_MOUNT: 311 case KAUTH_SYSTEM_MOUNT:
312 switch (req) { 312 switch (req) {
313 case KAUTH_REQ_SYSTEM_MOUNT_GET: 313 case KAUTH_REQ_SYSTEM_MOUNT_GET:
314 result = KAUTH_RESULT_ALLOW; 314 result = KAUTH_RESULT_ALLOW;
315 break; 315 break;
316 316
317 case KAUTH_REQ_SYSTEM_MOUNT_NEW: 317 case KAUTH_REQ_SYSTEM_MOUNT_NEW:
318 if (isroot) 318 if (isroot)
319 result = KAUTH_RESULT_ALLOW; 319 result = KAUTH_RESULT_ALLOW;
320 else if (dovfsusermount) { 320 else if (dovfsusermount) {
321 struct vnode *vp = arg1; 321 struct vnode *vp = arg1;
322 u_long flags = (u_long)arg2; 322 u_long flags = (u_long)arg2;
323 323
324 if (!(flags & MNT_NODEV) || 324 if (!(flags & MNT_NODEV) ||
325 !(flags & MNT_NOSUID)) 325 !(flags & MNT_NOSUID))
326 break; 326 break;
327 327
328 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) && 328 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) &&
329 !(flags & MNT_NOEXEC)) 329 !(flags & MNT_NOEXEC))
330 break; 330 break;
331 331
332 result = KAUTH_RESULT_ALLOW; 332 result = KAUTH_RESULT_ALLOW;
333 } 333 }
334 334
335 break; 335 break;
336 336
337 case KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT: 337 case KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT:
338 if (isroot) 338 if (isroot)
339 result = KAUTH_RESULT_ALLOW; 339 result = KAUTH_RESULT_ALLOW;
340 else { 340 else {
341 struct mount *mp = arg1; 341 struct mount *mp = arg1;
342 342
343 if (mp->mnt_stat.f_owner == 343 if (mp->mnt_stat.f_owner ==
344 kauth_cred_geteuid(cred)) 344 kauth_cred_geteuid(cred))
345 result = KAUTH_RESULT_ALLOW; 345 result = KAUTH_RESULT_ALLOW;
346 } 346 }
347 347
348 break; 348 break;
349 349
350 case KAUTH_REQ_SYSTEM_MOUNT_UPDATE: 350 case KAUTH_REQ_SYSTEM_MOUNT_UPDATE:
351 if (isroot) 351 if (isroot)
352 result = KAUTH_RESULT_ALLOW; 352 result = KAUTH_RESULT_ALLOW;
353 else if (dovfsusermount) { 353 else if (dovfsusermount) {
354 struct mount *mp = arg1; 354 struct mount *mp = arg1;
355 u_long flags = (u_long)arg2; 355 u_long flags = (u_long)arg2;
356 356
357 /* No exporting for non-root. */ 357 /* No exporting for non-root. */
358 if (flags & MNT_EXPORTED) 358 if (flags & MNT_EXPORTED)
359 break; 359 break;
360 360
361 if (!(flags & MNT_NODEV) || 361 if (!(flags & MNT_NODEV) ||
362 !(flags & MNT_NOSUID)) 362 !(flags & MNT_NOSUID))
363 break; 363 break;
364 364
365 /* 365 /*
366 * Only super-user, or user that did the mount, 366 * Only super-user, or user that did the mount,
367 * can update. 367 * can update.
368 */ 368 */
369 if (mp->mnt_stat.f_owner != 369 if (mp->mnt_stat.f_owner !=
370 kauth_cred_geteuid(cred)) 370 kauth_cred_geteuid(cred))
371 break; 371 break;
372 372
373 /* Retain 'noexec'. */ 373 /* Retain 'noexec'. */
374 if ((mp->mnt_flag & MNT_NOEXEC) && 374 if ((mp->mnt_flag & MNT_NOEXEC) &&
375 !(flags & MNT_NOEXEC)) 375 !(flags & MNT_NOEXEC))
376 break; 376 break;
377 377
378 result = KAUTH_RESULT_ALLOW; 378 result = KAUTH_RESULT_ALLOW;
379 } 379 }
380 380
381 break; 381 break;
382 382
383 default: 383 default:
384 break; 384 break;
385 } 385 }
386 386
387 break; 387 break;
388 388
389 case KAUTH_SYSTEM_PSET: 389 case KAUTH_SYSTEM_PSET:
390 switch (req) { 390 switch (req) {
391 case KAUTH_REQ_SYSTEM_PSET_ASSIGN: 391 case KAUTH_REQ_SYSTEM_PSET_ASSIGN:
392 case KAUTH_REQ_SYSTEM_PSET_BIND: 392 case KAUTH_REQ_SYSTEM_PSET_BIND:
393 case KAUTH_REQ_SYSTEM_PSET_CREATE: 393 case KAUTH_REQ_SYSTEM_PSET_CREATE:
394 case KAUTH_REQ_SYSTEM_PSET_DESTROY: 394 case KAUTH_REQ_SYSTEM_PSET_DESTROY:
395 if (isroot) 395 if (isroot)
396 result = KAUTH_RESULT_ALLOW; 396 result = KAUTH_RESULT_ALLOW;
397 397
398 break; 398 break;
399 399
400 default: 400 default:
401 break; 401 break;
402 } 402 }
403 403
404 break; 404 break;
405 405
406 case KAUTH_SYSTEM_TIME: 406 case KAUTH_SYSTEM_TIME:
407 switch (req) { 407 switch (req) {
408 case KAUTH_REQ_SYSTEM_TIME_ADJTIME: 408 case KAUTH_REQ_SYSTEM_TIME_ADJTIME:
409 case KAUTH_REQ_SYSTEM_TIME_NTPADJTIME: 409 case KAUTH_REQ_SYSTEM_TIME_NTPADJTIME:
410 case KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS: 410 case KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS:
411 case KAUTH_REQ_SYSTEM_TIME_SYSTEM: 411 case KAUTH_REQ_SYSTEM_TIME_SYSTEM:
412 case KAUTH_REQ_SYSTEM_TIME_RTCOFFSET: 412 case KAUTH_REQ_SYSTEM_TIME_RTCOFFSET:
413 if (isroot) 413 if (isroot)
414 result = KAUTH_RESULT_ALLOW; 414 result = KAUTH_RESULT_ALLOW;
415 break; 415 break;
416 416
417 default: 417 default:
418 break; 418 break;
419 } 419 }
420 break; 420 break;
421 421
422 case KAUTH_SYSTEM_SYSCTL: 422 case KAUTH_SYSTEM_SYSCTL:
423 switch (req) { 423 switch (req) {
424 case KAUTH_REQ_SYSTEM_SYSCTL_ADD: 424 case KAUTH_REQ_SYSTEM_SYSCTL_ADD:
425 case KAUTH_REQ_SYSTEM_SYSCTL_DELETE: 425 case KAUTH_REQ_SYSTEM_SYSCTL_DELETE:
426 case KAUTH_REQ_SYSTEM_SYSCTL_DESC: 426 case KAUTH_REQ_SYSTEM_SYSCTL_DESC:
427 case KAUTH_REQ_SYSTEM_SYSCTL_MODIFY: 427 case KAUTH_REQ_SYSTEM_SYSCTL_MODIFY:
428 case KAUTH_REQ_SYSTEM_SYSCTL_PRVT: 428 case KAUTH_REQ_SYSTEM_SYSCTL_PRVT:
429 if (isroot) 429 if (isroot)
430 result = KAUTH_RESULT_ALLOW; 430 result = KAUTH_RESULT_ALLOW;
431 break; 431 break;
432 432
433 default: 433 default:
434 break; 434 break;
435 } 435 }
436 436
437 break; 437 break;
438 438
439 case KAUTH_SYSTEM_SWAPCTL: 439 case KAUTH_SYSTEM_SWAPCTL:
440 case KAUTH_SYSTEM_ACCOUNTING: 440 case KAUTH_SYSTEM_ACCOUNTING:
441 case KAUTH_SYSTEM_REBOOT: 441 case KAUTH_SYSTEM_REBOOT:
442 case KAUTH_SYSTEM_CHROOT: 442 case KAUTH_SYSTEM_CHROOT:
443 case KAUTH_SYSTEM_FILEHANDLE: 443 case KAUTH_SYSTEM_FILEHANDLE:
444 case KAUTH_SYSTEM_MKNOD: 444 case KAUTH_SYSTEM_MKNOD:
445 case KAUTH_SYSTEM_SETIDCORE: 445 case KAUTH_SYSTEM_SETIDCORE:
446 case KAUTH_SYSTEM_MODULE: 446 case KAUTH_SYSTEM_MODULE:
447 if (isroot) 447 if (isroot)
448 result = KAUTH_RESULT_ALLOW; 448 result = KAUTH_RESULT_ALLOW;
449 break; 449 break;
450 450
451 case KAUTH_SYSTEM_CHSYSFLAGS: 451 case KAUTH_SYSTEM_CHSYSFLAGS:
452 /* 452 /*
453 * Needs to be checked in conjunction with the immutable and 453 * Needs to be checked in conjunction with the immutable and
454 * append-only flags (usually). Should be handled differently. 454 * append-only flags (usually). Should be handled differently.
455 * Infects ufs, ext2fs, tmpfs, and rump. 455 * Infects ufs, ext2fs, tmpfs, and rump.
456 */ 456 */
457 if (isroot) 457 if (isroot)
458 result = KAUTH_RESULT_ALLOW; 458 result = KAUTH_RESULT_ALLOW;
459 459
460 break; 460 break;
461 461
462 default: 462 default:
463 break; 463 break;
464 } 464 }
465 465
466 return (result); 466 return (result);
467} 467}
468 468
469/* 469/*
470 * kauth(9) listener 470 * kauth(9) listener
471 * 471 *
472 * Security model: Traditional NetBSD 472 * Security model: Traditional NetBSD
473 * Scope: Process 473 * Scope: Process
474 * Responsibility: Superuser access 474 * Responsibility: Superuser access
475 */ 475 */
476int 476int
477secmodel_suser_process_cb(kauth_cred_t cred, kauth_action_t action, 477secmodel_suser_process_cb(kauth_cred_t cred, kauth_action_t action,
478 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 478 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
479{ 479{
480 struct proc *p; 480 struct proc *p;
481 bool isroot; 481 bool isroot;
482 int result; 482 int result;
483 483
484 isroot = (kauth_cred_geteuid(cred) == 0); 484 isroot = (kauth_cred_geteuid(cred) == 0);
485 result = KAUTH_RESULT_DEFER; 485 result = KAUTH_RESULT_DEFER;
486 p = arg0; 486 p = arg0;
487 487
488 switch (action) { 488 switch (action) {
489 case KAUTH_PROCESS_SIGNAL: 489 case KAUTH_PROCESS_SIGNAL:
490 case KAUTH_PROCESS_KTRACE: 490 case KAUTH_PROCESS_KTRACE:
491 case KAUTH_PROCESS_PROCFS: 491 case KAUTH_PROCESS_PROCFS:
492 case KAUTH_PROCESS_PTRACE: 492 case KAUTH_PROCESS_PTRACE:
493 case KAUTH_PROCESS_SCHEDULER_GETPARAM: 493 case KAUTH_PROCESS_SCHEDULER_GETPARAM:
494 case KAUTH_PROCESS_SCHEDULER_SETPARAM: 494 case KAUTH_PROCESS_SCHEDULER_SETPARAM:
495 case KAUTH_PROCESS_SCHEDULER_SETAFFINITY: 495 case KAUTH_PROCESS_SCHEDULER_SETAFFINITY:
496 case KAUTH_PROCESS_SETID: 496 case KAUTH_PROCESS_SETID:
497 case KAUTH_PROCESS_KEVENT_FILTER: 497 case KAUTH_PROCESS_KEVENT_FILTER:
498 case KAUTH_PROCESS_NICE: 498 case KAUTH_PROCESS_NICE:
499 case KAUTH_PROCESS_FORK: 499 case KAUTH_PROCESS_FORK:
500 case KAUTH_PROCESS_CORENAME: 500 case KAUTH_PROCESS_CORENAME:
501 case KAUTH_PROCESS_STOPFLAG: 501 case KAUTH_PROCESS_STOPFLAG:
502 if (isroot) 502 if (isroot)
503 result = KAUTH_RESULT_ALLOW; 503 result = KAUTH_RESULT_ALLOW;
504 504
505 break; 505 break;
506 506
507 case KAUTH_PROCESS_CANSEE: { 507 case KAUTH_PROCESS_CANSEE: {
508 unsigned long req; 508 unsigned long req;
509 509
510 req = (unsigned long)arg1; 510 req = (unsigned long)arg1;
511 511
512 switch (req) { 512 switch (req) {
513 case KAUTH_REQ_PROCESS_CANSEE_ARGS: 513 case KAUTH_REQ_PROCESS_CANSEE_ARGS:
514 case KAUTH_REQ_PROCESS_CANSEE_ENTRY: 514 case KAUTH_REQ_PROCESS_CANSEE_ENTRY:
515 case KAUTH_REQ_PROCESS_CANSEE_OPENFILES: 515 case KAUTH_REQ_PROCESS_CANSEE_OPENFILES:
516 if (isroot) { 516 if (isroot) {
517 result = KAUTH_RESULT_ALLOW; 517 result = KAUTH_RESULT_ALLOW;
518 break; 518 break;
519 } 519 }
520 520
521 if (secmodel_suser_curtain) { 521 if (secmodel_suser_curtain) {
522 if (kauth_cred_uidmatch(cred, p->p_cred) != 0) 522 if (kauth_cred_uidmatch(cred, p->p_cred) != 0)
523 result = KAUTH_RESULT_DENY; 523 result = KAUTH_RESULT_DENY;
524 } 524 }
525 525
526 break; 526 break;
527 527
528 case KAUTH_REQ_PROCESS_CANSEE_ENV: 528 case KAUTH_REQ_PROCESS_CANSEE_ENV:
529 if (isroot) 529 if (isroot)
530 result = KAUTH_RESULT_ALLOW; 530 result = KAUTH_RESULT_ALLOW;
531 531
532 break; 532 break;
533 533
534 default: 534 default:
535 break; 535 break;
536 } 536 }
537 537
538 break; 538 break;
539 } 539 }
540 540
541 case KAUTH_PROCESS_RLIMIT: { 541 case KAUTH_PROCESS_RLIMIT: {
542 enum kauth_process_req req; 542 enum kauth_process_req req;
543 543
544 req = (enum kauth_process_req)(unsigned long)arg1; 544 req = (enum kauth_process_req)(unsigned long)arg1;
545 545
546 switch (req) { 546 switch (req) {
547 case KAUTH_REQ_PROCESS_RLIMIT_SET: 547 case KAUTH_REQ_PROCESS_RLIMIT_SET:
548 case KAUTH_REQ_PROCESS_RLIMIT_GET: 548 case KAUTH_REQ_PROCESS_RLIMIT_GET:
549 if (isroot) 549 if (isroot)
550 result = KAUTH_RESULT_ALLOW; 550 result = KAUTH_RESULT_ALLOW;
551 551
552 break; 552 break;
553 553
554 default: 554 default:
555 break; 555 break;
556 } 556 }
557 557
558 break; 558 break;
559 } 559 }
560 560
561 default: 561 default:
562 break; 562 break;
563 } 563 }
564 564
565 return (result); 565 return (result);
566} 566}
567 567
568/* 568/*
569 * kauth(9) listener 569 * kauth(9) listener
570 * 570 *
571 * Security model: Traditional NetBSD 571 * Security model: Traditional NetBSD
572 * Scope: Network 572 * Scope: Network
573 * Responsibility: Superuser access 573 * Responsibility: Superuser access
574 */ 574 */
575int 575int
576secmodel_suser_network_cb(kauth_cred_t cred, kauth_action_t action, 576secmodel_suser_network_cb(kauth_cred_t cred, kauth_action_t action,
577 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 577 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
578{ 578{
579 bool isroot; 579 bool isroot;
580 int result; 580 int result;
581 enum kauth_network_req req; 581 enum kauth_network_req req;
582 582
583 isroot = (kauth_cred_geteuid(cred) == 0); 583 isroot = (kauth_cred_geteuid(cred) == 0);
584 result = KAUTH_RESULT_DEFER; 584 result = KAUTH_RESULT_DEFER;
585 req = (enum kauth_network_req)arg0; 585 req = (enum kauth_network_req)arg0;
586 586
587 switch (action) { 587 switch (action) {
588 case KAUTH_NETWORK_ALTQ: 588 case KAUTH_NETWORK_ALTQ:
589 switch (req) { 589 switch (req) {
590 case KAUTH_REQ_NETWORK_ALTQ_AFMAP: 590 case KAUTH_REQ_NETWORK_ALTQ_AFMAP:
591 case KAUTH_REQ_NETWORK_ALTQ_BLUE: 591 case KAUTH_REQ_NETWORK_ALTQ_BLUE:
592 case KAUTH_REQ_NETWORK_ALTQ_CBQ: 592 case KAUTH_REQ_NETWORK_ALTQ_CBQ:
593 case KAUTH_REQ_NETWORK_ALTQ_CDNR: 593 case KAUTH_REQ_NETWORK_ALTQ_CDNR:
594 case KAUTH_REQ_NETWORK_ALTQ_CONF: 594 case KAUTH_REQ_NETWORK_ALTQ_CONF:
595 case KAUTH_REQ_NETWORK_ALTQ_FIFOQ: 595 case KAUTH_REQ_NETWORK_ALTQ_FIFOQ:
596 case KAUTH_REQ_NETWORK_ALTQ_HFSC: 596 case KAUTH_REQ_NETWORK_ALTQ_HFSC:
597 case KAUTH_REQ_NETWORK_ALTQ_JOBS: 597 case KAUTH_REQ_NETWORK_ALTQ_JOBS:
598 case KAUTH_REQ_NETWORK_ALTQ_PRIQ: 598 case KAUTH_REQ_NETWORK_ALTQ_PRIQ:
599 case KAUTH_REQ_NETWORK_ALTQ_RED: 599 case KAUTH_REQ_NETWORK_ALTQ_RED:
600 case KAUTH_REQ_NETWORK_ALTQ_RIO: 600 case KAUTH_REQ_NETWORK_ALTQ_RIO:
601 case KAUTH_REQ_NETWORK_ALTQ_WFQ: 601 case KAUTH_REQ_NETWORK_ALTQ_WFQ:
602 if (isroot) 602 if (isroot)
603 result = KAUTH_RESULT_ALLOW; 603 result = KAUTH_RESULT_ALLOW;
604 break; 604 break;
605 605
606 default: 606 default:
607 break; 607 break;
608 } 608 }
609 609
610 break; 610 break;
611 611
612 case KAUTH_NETWORK_BIND: 612 case KAUTH_NETWORK_BIND:
613 switch (req) { 613 switch (req) {
614 case KAUTH_REQ_NETWORK_BIND_PORT: 
615 result = KAUTH_RESULT_ALLOW; 
-			break;
-
 		case KAUTH_REQ_NETWORK_BIND_PRIVPORT:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 		break;
 
 	case KAUTH_NETWORK_FORWSRCRT:
 	case KAUTH_NETWORK_ROUTE:
 		if (isroot)
 			result = KAUTH_RESULT_ALLOW;
 
 		break;
 
 	case KAUTH_NETWORK_INTERFACE:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_INTERFACE_GETPRIV:
 		case KAUTH_REQ_NETWORK_INTERFACE_SETPRIV:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 		break;
 
 	case KAUTH_NETWORK_INTERFACE_PPP:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_INTERFACE_PPP_ADD:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 
 	case KAUTH_NETWORK_INTERFACE_SLIP:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_INTERFACE_SLIP_ADD:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 
 	case KAUTH_NETWORK_INTERFACE_STRIP:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_INTERFACE_STRIP_ADD:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 
 	case KAUTH_NETWORK_INTERFACE_TUN:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 
 	case KAUTH_NETWORK_NFS:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_NFS_EXPORT:
 		case KAUTH_REQ_NETWORK_NFS_SVC:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 
 			break;
 
 		default:
 			break;
 		}
 		break;
 
 	case KAUTH_NETWORK_SOCKET:
 		switch (req) {
 		case KAUTH_REQ_NETWORK_SOCKET_DROP:
 		case KAUTH_REQ_NETWORK_SOCKET_OPEN:
 		case KAUTH_REQ_NETWORK_SOCKET_RAWSOCK:
 		case KAUTH_REQ_NETWORK_SOCKET_SETPRIV:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		case KAUTH_REQ_NETWORK_SOCKET_CANSEE:
 			if (isroot) {
 				result = KAUTH_RESULT_ALLOW;
 				break;
 			}
 
 			if (secmodel_suser_curtain) {
 				struct socket *so;
 				uid_t so_uid;
 
 				so = (struct socket *)arg1;
 				so_uid = so->so_uidinfo->ui_uid;
 				if (kauth_cred_geteuid(cred) != so_uid)
 					result = KAUTH_RESULT_DENY;
 			}
 
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 
 
 	default:
 		break;
 	}
 
 	return (result);
 }
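
For context, the network-scope listener above only answers bind requests; the request itself is raised by the socket layer. A minimal sketch of what such a caller-side check can look like is below. kauth_authorize_network(9), KAUTH_NETWORK_BIND, KAUTH_REQ_NETWORK_BIND_PORT and KAUTH_REQ_NETWORK_BIND_PRIVPORT are real kauth(9) names; the helper name, the way the port class is chosen, and the arguments passed through are illustrative assumptions, not the exact code in uipc_socket.c.

/*
 * Illustrative sketch only: a socket-layer caller asking the network
 * scope whether a bind may proceed.  The helper name and argument
 * choices are assumptions made for this example.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socketvar.h>
#include <sys/kauth.h>
#include <netinet/in.h>

static int
example_bind_check(kauth_cred_t cred, struct socket *so,
    struct sockaddr_in *sin)
{
	enum kauth_network_req req;

	/* Reserved ports get the stricter "privileged port" request. */
	if (sin->sin_port != 0 &&
	    ntohs(sin->sin_port) < IPPORT_RESERVED)
		req = KAUTH_REQ_NETWORK_BIND_PRIVPORT;
	else
		req = KAUTH_REQ_NETWORK_BIND_PORT;

	return (kauth_authorize_network(cred, KAUTH_NETWORK_BIND, req,
	    so, sin, NULL));
}

With the listener above, the PRIVPORT request is granted only when the credential's effective uid is 0; everything else is deferred to other secmodels.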
 
 /*
  * kauth(9) listener
  *
  * Security model: Traditional NetBSD
  * Scope: Machdep
  * Responsibility: Superuser access
  */
 int
 secmodel_suser_machdep_cb(kauth_cred_t cred, kauth_action_t action,
     void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
 {
 	bool isroot;
 	int result;
 
 	isroot = (kauth_cred_geteuid(cred) == 0);
 	result = KAUTH_RESULT_DEFER;
 
 	switch (action) {
 	case KAUTH_MACHDEP_IOPERM_GET:
 	case KAUTH_MACHDEP_LDT_GET:
 	case KAUTH_MACHDEP_LDT_SET:
 	case KAUTH_MACHDEP_MTRR_GET:
 		result = KAUTH_RESULT_ALLOW;
 		break;
 
 	case KAUTH_MACHDEP_CACHEFLUSH:
 	case KAUTH_MACHDEP_IOPERM_SET:
 	case KAUTH_MACHDEP_IOPL:
 	case KAUTH_MACHDEP_MTRR_SET:
 	case KAUTH_MACHDEP_NVRAM:
 	case KAUTH_MACHDEP_UNMANAGEDMEM:
 		if (isroot)
 			result = KAUTH_RESULT_ALLOW;
 		break;
 
 	default:
 		break;
 	}
 
 	return (result);
 }
 
 /*
  * kauth(9) listener
  *
  * Security model: Traditional NetBSD
  * Scope: Device
  * Responsibility: Superuser access
  */
 int
 secmodel_suser_device_cb(kauth_cred_t cred, kauth_action_t action,
     void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
 {
 	bool isroot;
 	int result;
 
 	isroot = (kauth_cred_geteuid(cred) == 0);
 	result = KAUTH_RESULT_DEFER;
 
 	switch (action) {
 	case KAUTH_DEVICE_BLUETOOTH_SETPRIV:
 	case KAUTH_DEVICE_BLUETOOTH_SEND:
 	case KAUTH_DEVICE_BLUETOOTH_RECV:
 	case KAUTH_DEVICE_TTY_OPEN:
 	case KAUTH_DEVICE_TTY_PRIVSET:
 	case KAUTH_DEVICE_TTY_STI:
 	case KAUTH_DEVICE_RND_ADDDATA:
 	case KAUTH_DEVICE_RND_GETPRIV:
 	case KAUTH_DEVICE_RND_SETPRIV:
 		if (isroot)
 			result = KAUTH_RESULT_ALLOW;
 		break;
 
 	case KAUTH_DEVICE_BLUETOOTH_BCSP:
 	case KAUTH_DEVICE_BLUETOOTH_BTUART: {
 		enum kauth_device_req req;
 
 		req = (enum kauth_device_req)arg0;
 		switch (req) {
 		case KAUTH_REQ_DEVICE_BLUETOOTH_BCSP_ADD:
 		case KAUTH_REQ_DEVICE_BLUETOOTH_BTUART_ADD:
 			if (isroot)
 				result = KAUTH_RESULT_ALLOW;
 			break;
 
 		default:
 			break;
 		}
 
 		break;
 	}
 
 	case KAUTH_DEVICE_RAWIO_SPEC:
 	case KAUTH_DEVICE_RAWIO_PASSTHRU:
 		/*
 		 * Decision is root-agnostic.
 		 *
 		 * Both requests can be issued on devices subject to their
 		 * permission bits.
 		 */
 		result = KAUTH_RESULT_ALLOW;
 		break;
 
 	case KAUTH_DEVICE_GPIO_PINSET:
 		/*
 		 * root can access gpio pins, secmodel_securlevel can veto
 		 * this decision.
 		 */
 		if (isroot)
 			result = KAUTH_RESULT_ALLOW;
 		break;
 
 	default:
 		break;
 	}
 
 	return (result);
 }
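
The RAWIO cases above allow the request regardless of uid because the device node's own permission bits (checked separately by the file system code) are what actually gate access; the kauth request exists so other listeners, such as securelevel, can veto it. A sketch of the caller side follows; kauth_authorize_device_spec(9) and the KAUTH_REQ_DEVICE_RAWIO_SPEC_* requests are real, but the helper name and flag handling are assumptions for illustration.

/*
 * Illustrative sketch only: spec_open()-style code asking the device
 * scope for raw access to a block/character device before honouring
 * the open.  Helper name and flag plumbing are assumptions.
 */
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>

static int
example_rawio_check(kauth_cred_t cred, struct vnode *vp, int fflags)
{
	enum kauth_device_req req;

	if ((fflags & (FREAD | FWRITE)) == (FREAD | FWRITE))
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_RW;
	else if (fflags & FWRITE)
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_WRITE;
	else
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_READ;

	return (kauth_authorize_device_spec(cred, req, vp));
}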
 
 int
 secmodel_suser_vnode_cb(kauth_cred_t cred, kauth_action_t action,
     void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
 {
 	bool isroot;
 	int result;
 
 	isroot = (kauth_cred_geteuid(cred) == 0);
 	result = KAUTH_RESULT_DEFER;
 
 	if (isroot)
 		result = KAUTH_RESULT_ALLOW;
 
 	return (result);
 }
 
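These callbacks only take effect once they are attached to their kauth(9) scopes. A minimal sketch of that registration is below, assuming the standard kauth_listen_scope(9) interface; the listener variables and the wrapper function are illustrative, and the real registration lives elsewhere in secmodel_suser.c, outside this hunk.

/*
 * Illustrative sketch only: attaching the listeners shown above to the
 * network, machdep, device and vnode scopes.  kauth_listen_scope(9)
 * and the KAUTH_SCOPE_* names are real; the variable and function
 * names here are assumptions.
 */
#include <sys/kauth.h>

static kauth_listener_t l_network, l_machdep, l_device, l_vnode;

static void
example_suser_listen(void)
{
	l_network = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    secmodel_suser_network_cb, NULL);
	l_machdep = kauth_listen_scope(KAUTH_SCOPE_MACHDEP,
	    secmodel_suser_machdep_cb, NULL);
	l_device = kauth_listen_scope(KAUTH_SCOPE_DEVICE,
	    secmodel_suser_device_cb, NULL);
	l_vnode = kauth_listen_scope(KAUTH_SCOPE_VNODE,
	    secmodel_suser_vnode_cb, NULL);
}

Tearing the model down would hand each kauth_listener_t back to kauth_unlisten_scope().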