Sun Jul 26 21:28:33 2020 UTC (christos)
use the right printing function


cvs diff -r1.488 -r1.489 src/sys/kern/vfs_subr.c
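
Only the two $NetBSD$ RCS identification lines differ in the portion of the diff reproduced below; the functional change named in the log message is presumably in vprint_common(), whose signature is visible at the end of this excerpt: it takes a printf-like callback so vnode descriptions can be routed either to the console via printf or to the ddb debugger via db_printf. A minimal sketch of that pattern (describe() is a hypothetical name, not the kernel routine):

	static void
	describe(struct vnode *vp, void (*pr)(const char *, ...))
	{
		/* Send every line through the caller-supplied printer,
		 * never through printf directly. */
		(*pr)("vnode %p, type %d\n", vp, vp->v_type);
	}

Called as describe(vp, printf) from ordinary kernel context, or as describe(vp, db_printf) from ddb, so that "the right printing function" receives all of the output.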

--- src/sys/kern/vfs_subr.c 2020/05/26 18:38:37 1.488
+++ src/sys/kern/vfs_subr.c 2020/07/26 21:28:33 1.489
@@ -1,1698 +1,1698 @@
-/*	$NetBSD: vfs_subr.c,v 1.488 2020/05/26 18:38:37 ad Exp $	*/
+/*	$NetBSD: vfs_subr.c,v 1.489 2020/07/26 21:28:33 christos Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, by Andrew Doran,
 * by Marshall Kirk McKusick and Greg Ganger at the University of Michigan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.488 2020/05/26 18:38:37 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.489 2020/07/26 21:28:33 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/vnode_impl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/module.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>
#include <uvm/uvm_ddb.h>

const enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

int doforce = 1;		/* 1 => permit forcible unmounting */

extern struct mount *dead_rootmount;

/*
 * Local declarations.
 */

static void vn_initialize_syncerd(void);

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	vn_initialize_syncerd();
	vfs_mount_sysinit();
	vfs_vnode_sysinit();
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l,
	  bool catch_p, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT | PGO_RECLAIM : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
	if (error) {
		return error;
	}

	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0);
		if (error)
			return (error);
		KASSERT(LIST_EMPTY(&vp->v_dirtyblkhd));
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_oflags & BO_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			VOP_BWRITE(bp->b_vp, bp);
			mutex_enter(&bufcache_lock);
			goto restart;
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(struct vnode *vp, daddr_t lbn, bool catch_p, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	voff_t off;

	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
	if (error) {
		return error;
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}
	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Flush all dirty buffers from a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vflushbuf(struct vnode *vp, int flags)
{
	struct buf *bp, *nbp;
	int error, pflags;
	bool dirty, sync;

	sync = (flags & FSYNC_WAIT) != 0;
	pflags = PGO_CLEANIT | PGO_ALLPAGES |
		(sync ? PGO_SYNCIO : 0) |
		((flags & FSYNC_LAZY) ? PGO_LAZY : 0);
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	(void) VOP_PUTPAGES(vp, 0, 0, pflags);

loop:
	mutex_enter(&bufcache_lock);
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_cflags & BC_BUSY))
			continue;
		if ((bp->b_oflags & BO_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_cflags |= BC_BUSY | BC_VFLUSH;
		mutex_exit(&bufcache_lock);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || !sync)
			(void) bawrite(bp);
		else {
			error = bwrite(bp);
			if (error)
				return error;
		}
		goto loop;
	}
	mutex_exit(&bufcache_lock);

	if (!sync)
		return 0;

	mutex_enter(vp->v_interlock);
	while (vp->v_numoutput != 0)
		cv_wait(&vp->v_cv, vp->v_interlock);
	dirty = !LIST_EMPTY(&vp->v_dirtyblkhd);
	mutex_exit(vp->v_interlock);

	if (dirty) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}

	return 0;
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, vnode_t **vpp)
{
	struct vattr va;

	vattr_null(&va);
	va.va_type = VBLK;
	va.va_rdev = dev;

	return vcache_new(dead_rootmount, NULL, &va, NOCRED, NULL, vpp);
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev_t dev, vnode_t **vpp)
{
	struct vattr va;

	vattr_null(&va);
	va.va_type = VCHR;
	va.va_rdev = dev;

	return vcache_new(dead_rootmount, NULL, &va, NOCRED, NULL, vpp);
}

/*
 * Associate a buffer with a vnode.  There must already be a hold on
 * the vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL);
	KASSERT(bp->b_objlock == &buffer_lock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	vholdl(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;

	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	bp->b_objlock = vp->v_interlock;
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;

	KASSERT(vp != NULL);
	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
		vn_syncer_remove_from_worklist(vp);

	bp->b_objlock = &buffer_lock;
	bp->b_vp = NULL;
	holdrelel(vp);
}

/*
 * Reassign a buffer from one vnode list to another.
 * The list reassignment must be within the same vnode.
 * Used to assign file specific control information
 * (indirect blocks) to the list to which they belong.
 */
void
reassignbuf(struct buf *bp, struct vnode *vp)
{
	struct buflists *listheadp;
	int delayx;

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_oflags & BO_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) ==
		    VI_ONWORKLST &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delayx = dirdelay;
				break;
			case VBLK:
				if (spec_node_getmountedfs(vp) != NULL) {
					delayx = metadelay;
					break;
				}
				/* fall through */
			default:
				delayx = filedelay;
				break;
			}
			if (!vp->v_mount ||
			    (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(vp, delayx);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Lookup a vnode by device number and return it referenced.
 */
int
vfinddev(dev_t dev, enum vtype type, vnode_t **vpp)
{

	return (spec_node_lookup_by_dev(type, dev, vpp) == 0);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	vnode_t *vp;
	dev_t dev;
	int mn;

	for (mn = minl; mn <= minh; mn++) {
		dev = makedev(maj, mn);
		while (spec_node_lookup_by_dev(type, dev, &vp) == 0) {
			VOP_REVOKE(vp, REVOKEALL);
			vrele(vp);
		}
	}
}

/*
 * The filesystem synchronizer mechanism - syncer.
 *
 * It is useful to delay writes of file data and filesystem metadata for
 * a certain amount of time so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To implement this,
 * vnodes are appended to a "workitem" queue.
 *
 * Most pending metadata should not wait for more than ten seconds.  Thus,
 * filesystems mounted on block devices are delayed only about half the
 * time that file data is delayed.  Similarly, directory updates are more
 * critical, so they are delayed only about a third the time that file
 * data is delayed.
 *
 * There are SYNCER_MAXDELAY queues that are processed in a round-robin
 * manner at a rate of one each second (driven off the filesystem syncer
 * thread).  The syncer_delayno variable indicates the next queue that is
 * to be processed.  Items that need to be processed soon are placed in
 * this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of e.g. fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) % syncer_last]
 *
 * Flag VI_ONWORKLST indicates that the vnode is on the queue.
 */
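/*
 * (Illustrative note, not part of the original file: the table below is
 * allocated with syncer_last = SYNCER_MAXDELAY + 2 = 34 slots, so with
 * syncer_delayno == 30 a fifteen-second delay selects slot
 * (30 + 15) % 34 == 11, i.e.
 *
 *	slot = (syncer_delayno + delayx) % syncer_last;
 *	TAILQ_INSERT_TAIL(&syncer_workitem_pending[slot], vip, vi_synclist);
 *
 * which is what sync_delay_slot() and vn_syncer_add1() below do.)
 */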
549 549
550#define SYNCER_MAXDELAY 32 550#define SYNCER_MAXDELAY 32
551 551
552typedef TAILQ_HEAD(synclist, vnode_impl) synclist_t; 552typedef TAILQ_HEAD(synclist, vnode_impl) synclist_t;
553 553
554static void vn_syncer_add1(struct vnode *, int); 554static void vn_syncer_add1(struct vnode *, int);
555static void sysctl_vfs_syncfs_setup(struct sysctllog **); 555static void sysctl_vfs_syncfs_setup(struct sysctllog **);
556 556
557/* 557/*
558 * Defines and variables for the syncer process. 558 * Defines and variables for the syncer process.
559 */ 559 */
560int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 560int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
561time_t syncdelay = 30; /* max time to delay syncing data */ 561time_t syncdelay = 30; /* max time to delay syncing data */
562time_t filedelay = 30; /* time to delay syncing files */ 562time_t filedelay = 30; /* time to delay syncing files */
563time_t dirdelay = 15; /* time to delay syncing directories */ 563time_t dirdelay = 15; /* time to delay syncing directories */
564time_t metadelay = 10; /* time to delay syncing metadata */ 564time_t metadelay = 10; /* time to delay syncing metadata */
565time_t lockdelay = 1; /* time to delay if locking fails */ 565time_t lockdelay = 1; /* time to delay if locking fails */
566 566
567static kmutex_t syncer_data_lock; /* short term lock on data structs */ 567static kmutex_t syncer_data_lock; /* short term lock on data structs */
568 568
569static int syncer_delayno = 0; 569static int syncer_delayno = 0;
570static long syncer_last; 570static long syncer_last;
571static synclist_t * syncer_workitem_pending; 571static synclist_t * syncer_workitem_pending;
572 572
573static void 573static void
574vn_initialize_syncerd(void) 574vn_initialize_syncerd(void)
575{ 575{
576 int i; 576 int i;
577 577
578 syncer_last = SYNCER_MAXDELAY + 2; 578 syncer_last = SYNCER_MAXDELAY + 2;
579 579
580 sysctl_vfs_syncfs_setup(NULL); 580 sysctl_vfs_syncfs_setup(NULL);
581 581
582 syncer_workitem_pending = 582 syncer_workitem_pending =
583 kmem_alloc(syncer_last * sizeof (struct synclist), KM_SLEEP); 583 kmem_alloc(syncer_last * sizeof (struct synclist), KM_SLEEP);
584 584
585 for (i = 0; i < syncer_last; i++) 585 for (i = 0; i < syncer_last; i++)
586 TAILQ_INIT(&syncer_workitem_pending[i]); 586 TAILQ_INIT(&syncer_workitem_pending[i]);
587 587
588 mutex_init(&syncer_data_lock, MUTEX_DEFAULT, IPL_NONE); 588 mutex_init(&syncer_data_lock, MUTEX_DEFAULT, IPL_NONE);
589} 589}
590 590
591/* 591/*
592 * Return delay factor appropriate for the given file system. For 592 * Return delay factor appropriate for the given file system. For
593 * WAPBL we use the sync vnode to burst out metadata updates: sync 593 * WAPBL we use the sync vnode to burst out metadata updates: sync
594 * those file systems more frequently. 594 * those file systems more frequently.
595 */ 595 */
596static inline int 596static inline int
597sync_delay(struct mount *mp) 597sync_delay(struct mount *mp)
598{ 598{
599 599
600 return mp->mnt_wapbl != NULL ? metadelay : syncdelay; 600 return mp->mnt_wapbl != NULL ? metadelay : syncdelay;
601} 601}
602 602
603/* 603/*
604 * Compute the next slot index from delay. 604 * Compute the next slot index from delay.
605 */ 605 */
606static inline int 606static inline int
607sync_delay_slot(int delayx) 607sync_delay_slot(int delayx)
608{ 608{
609 609
610 if (delayx > syncer_maxdelay - 2) 610 if (delayx > syncer_maxdelay - 2)
611 delayx = syncer_maxdelay - 2; 611 delayx = syncer_maxdelay - 2;
612 return (syncer_delayno + delayx) % syncer_last; 612 return (syncer_delayno + delayx) % syncer_last;
613} 613}
614 614
615/* 615/*
616 * Add an item to the syncer work queue. 616 * Add an item to the syncer work queue.
617 */ 617 */
618static void 618static void
619vn_syncer_add1(struct vnode *vp, int delayx) 619vn_syncer_add1(struct vnode *vp, int delayx)
620{ 620{
621 synclist_t *slp; 621 synclist_t *slp;
622 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 622 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
623 623
624 KASSERT(mutex_owned(&syncer_data_lock)); 624 KASSERT(mutex_owned(&syncer_data_lock));
625 625
626 if (vp->v_iflag & VI_ONWORKLST) { 626 if (vp->v_iflag & VI_ONWORKLST) {
627 /* 627 /*
628 * Remove in order to adjust the position of the vnode. 628 * Remove in order to adjust the position of the vnode.
629 * Note: called from sched_sync(), which will not hold 629 * Note: called from sched_sync(), which will not hold
630 * interlock, therefore we cannot modify v_iflag here. 630 * interlock, therefore we cannot modify v_iflag here.
631 */ 631 */
632 slp = &syncer_workitem_pending[vip->vi_synclist_slot]; 632 slp = &syncer_workitem_pending[vip->vi_synclist_slot];
633 TAILQ_REMOVE(slp, vip, vi_synclist); 633 TAILQ_REMOVE(slp, vip, vi_synclist);
634 } else { 634 } else {
635 KASSERT(mutex_owned(vp->v_interlock)); 635 KASSERT(mutex_owned(vp->v_interlock));
636 vp->v_iflag |= VI_ONWORKLST; 636 vp->v_iflag |= VI_ONWORKLST;
637 } 637 }
638 638
639 vip->vi_synclist_slot = sync_delay_slot(delayx); 639 vip->vi_synclist_slot = sync_delay_slot(delayx);
640 640
641 slp = &syncer_workitem_pending[vip->vi_synclist_slot]; 641 slp = &syncer_workitem_pending[vip->vi_synclist_slot];
642 TAILQ_INSERT_TAIL(slp, vip, vi_synclist); 642 TAILQ_INSERT_TAIL(slp, vip, vi_synclist);
643} 643}
644 644
645void 645void
646vn_syncer_add_to_worklist(struct vnode *vp, int delayx) 646vn_syncer_add_to_worklist(struct vnode *vp, int delayx)
647{ 647{
648 648
649 KASSERT(mutex_owned(vp->v_interlock)); 649 KASSERT(mutex_owned(vp->v_interlock));
650 650
651 mutex_enter(&syncer_data_lock); 651 mutex_enter(&syncer_data_lock);
652 vn_syncer_add1(vp, delayx); 652 vn_syncer_add1(vp, delayx);
653 mutex_exit(&syncer_data_lock); 653 mutex_exit(&syncer_data_lock);
654} 654}
655 655
656/* 656/*
657 * Remove an item from the syncer work queue. 657 * Remove an item from the syncer work queue.
658 */ 658 */
659void 659void
660vn_syncer_remove_from_worklist(struct vnode *vp) 660vn_syncer_remove_from_worklist(struct vnode *vp)
661{ 661{
662 synclist_t *slp; 662 synclist_t *slp;
663 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 663 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
664 664
665 KASSERT(mutex_owned(vp->v_interlock)); 665 KASSERT(mutex_owned(vp->v_interlock));
666 666
667 if (vp->v_iflag & VI_ONWORKLST) { 667 if (vp->v_iflag & VI_ONWORKLST) {
668 mutex_enter(&syncer_data_lock); 668 mutex_enter(&syncer_data_lock);
669 vp->v_iflag &= ~VI_ONWORKLST; 669 vp->v_iflag &= ~VI_ONWORKLST;
670 slp = &syncer_workitem_pending[vip->vi_synclist_slot]; 670 slp = &syncer_workitem_pending[vip->vi_synclist_slot];
671 TAILQ_REMOVE(slp, vip, vi_synclist); 671 TAILQ_REMOVE(slp, vip, vi_synclist);
672 mutex_exit(&syncer_data_lock); 672 mutex_exit(&syncer_data_lock);
673 } 673 }
674} 674}
675 675
676/* 676/*
677 * Add this mount point to the syncer. 677 * Add this mount point to the syncer.
678 */ 678 */
679void 679void
680vfs_syncer_add_to_worklist(struct mount *mp) 680vfs_syncer_add_to_worklist(struct mount *mp)
681{ 681{
682 static int start, incr, next; 682 static int start, incr, next;
683 int vdelay; 683 int vdelay;
684 684
685 KASSERT(mutex_owned(mp->mnt_updating)); 685 KASSERT(mutex_owned(mp->mnt_updating));
686 KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) == 0); 686 KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) == 0);
687 687
688 /* 688 /*
689 * We attempt to scatter the mount points on the list 689 * We attempt to scatter the mount points on the list
690 * so that they will go off at evenly distributed times 690 * so that they will go off at evenly distributed times
691 * even if all the filesystems are mounted at once. 691 * even if all the filesystems are mounted at once.
692 */ 692 */
693 693
694 next += incr; 694 next += incr;
695 if (next == 0 || next > syncer_maxdelay) { 695 if (next == 0 || next > syncer_maxdelay) {
696 start /= 2; 696 start /= 2;
697 incr /= 2; 697 incr /= 2;
698 if (start == 0) { 698 if (start == 0) {
699 start = syncer_maxdelay / 2; 699 start = syncer_maxdelay / 2;
700 incr = syncer_maxdelay; 700 incr = syncer_maxdelay;
701 } 701 }
702 next = start; 702 next = start;
703 } 703 }
704 mp->mnt_iflag |= IMNT_ONWORKLIST; 704 mp->mnt_iflag |= IMNT_ONWORKLIST;
705 vdelay = sync_delay(mp); 705 vdelay = sync_delay(mp);
706 mp->mnt_synclist_slot = vdelay > 0 ? next % vdelay : 0; 706 mp->mnt_synclist_slot = vdelay > 0 ? next % vdelay : 0;
707} 707}
708 708
709/* 709/*
710 * Remove the mount point from the syncer. 710 * Remove the mount point from the syncer.
711 */ 711 */
712void 712void
713vfs_syncer_remove_from_worklist(struct mount *mp) 713vfs_syncer_remove_from_worklist(struct mount *mp)
714{ 714{
715 715
716 KASSERT(mutex_owned(mp->mnt_updating)); 716 KASSERT(mutex_owned(mp->mnt_updating));
717 KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) != 0); 717 KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) != 0);
718 718
719 mp->mnt_iflag &= ~IMNT_ONWORKLIST; 719 mp->mnt_iflag &= ~IMNT_ONWORKLIST;
720} 720}
721 721
722/* 722/*
723 * Try lazy sync, return true on success. 723 * Try lazy sync, return true on success.
724 */ 724 */
725static bool 725static bool
726lazy_sync_vnode(struct vnode *vp) 726lazy_sync_vnode(struct vnode *vp)
727{ 727{
728 bool synced; 728 bool synced;
729 729
730 KASSERT(mutex_owned(&syncer_data_lock)); 730 KASSERT(mutex_owned(&syncer_data_lock));
731 731
732 synced = false; 732 synced = false;
733 if (vcache_tryvget(vp) == 0) { 733 if (vcache_tryvget(vp) == 0) {
734 mutex_exit(&syncer_data_lock); 734 mutex_exit(&syncer_data_lock);
735 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 735 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
736 synced = true; 736 synced = true;
737 (void) VOP_FSYNC(vp, curlwp->l_cred, 737 (void) VOP_FSYNC(vp, curlwp->l_cred,
738 FSYNC_LAZY, 0, 0); 738 FSYNC_LAZY, 0, 0);
739 vput(vp); 739 vput(vp);
740 } else 740 } else
741 vrele(vp); 741 vrele(vp);
742 mutex_enter(&syncer_data_lock); 742 mutex_enter(&syncer_data_lock);
743 } 743 }
744 return synced; 744 return synced;
745} 745}
746 746
747/* 747/*
748 * System filesystem synchronizer daemon. 748 * System filesystem synchronizer daemon.
749 */ 749 */
750void 750void
751sched_sync(void *arg) 751sched_sync(void *arg)
752{ 752{
753 mount_iterator_t *iter; 753 mount_iterator_t *iter;
754 synclist_t *slp; 754 synclist_t *slp;
755 struct vnode_impl *vi; 755 struct vnode_impl *vi;
756 struct vnode *vp; 756 struct vnode *vp;
757 struct mount *mp; 757 struct mount *mp;
758 time_t starttime; 758 time_t starttime;
759 bool synced; 759 bool synced;
760 760
761 for (;;) { 761 for (;;) {
762 starttime = time_second; 762 starttime = time_second;
763 763
764 /* 764 /*
765 * Sync mounts whose dirty time has expired. 765 * Sync mounts whose dirty time has expired.
766 */ 766 */
767 mountlist_iterator_init(&iter); 767 mountlist_iterator_init(&iter);
768 while ((mp = mountlist_iterator_trynext(iter)) != NULL) { 768 while ((mp = mountlist_iterator_trynext(iter)) != NULL) {
769 if ((mp->mnt_iflag & IMNT_ONWORKLIST) == 0 || 769 if ((mp->mnt_iflag & IMNT_ONWORKLIST) == 0 ||
770 mp->mnt_synclist_slot != syncer_delayno) { 770 mp->mnt_synclist_slot != syncer_delayno) {
771 continue; 771 continue;
772 } 772 }
773 mp->mnt_synclist_slot = sync_delay_slot(sync_delay(mp)); 773 mp->mnt_synclist_slot = sync_delay_slot(sync_delay(mp));
774 VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred); 774 VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred);
775 } 775 }
776 mountlist_iterator_destroy(iter); 776 mountlist_iterator_destroy(iter);
777 777
778 mutex_enter(&syncer_data_lock); 778 mutex_enter(&syncer_data_lock);
779 779
780 /* 780 /*
781 * Push files whose dirty time has expired. 781 * Push files whose dirty time has expired.
782 */ 782 */
783 slp = &syncer_workitem_pending[syncer_delayno]; 783 slp = &syncer_workitem_pending[syncer_delayno];
784 syncer_delayno += 1; 784 syncer_delayno += 1;
785 if (syncer_delayno >= syncer_last) 785 if (syncer_delayno >= syncer_last)
786 syncer_delayno = 0; 786 syncer_delayno = 0;
787 787
788 while ((vi = TAILQ_FIRST(slp)) != NULL) { 788 while ((vi = TAILQ_FIRST(slp)) != NULL) {
789 vp = VIMPL_TO_VNODE(vi); 789 vp = VIMPL_TO_VNODE(vi);
790 synced = lazy_sync_vnode(vp); 790 synced = lazy_sync_vnode(vp);
791 791
792 /* 792 /*
793 * XXX The vnode may have been recycled, in which 793 * XXX The vnode may have been recycled, in which
794 * case it may have a new identity. 794 * case it may have a new identity.
795 */ 795 */
796 vi = TAILQ_FIRST(slp); 796 vi = TAILQ_FIRST(slp);
797 if (vi != NULL && VIMPL_TO_VNODE(vi) == vp) { 797 if (vi != NULL && VIMPL_TO_VNODE(vi) == vp) {
798 /* 798 /*
799 * Put us back on the worklist. The worklist 799 * Put us back on the worklist. The worklist
800 * routine will remove us from our current 800 * routine will remove us from our current
801 * position and then add us back in at a later 801 * position and then add us back in at a later
802 * position. 802 * position.
803 * 803 *
804 * Try again sooner rather than later if 804 * Try again sooner rather than later if
805 * we were unable to lock the vnode. Lock 805 * we were unable to lock the vnode. Lock
806 * failure should not prevent us from doing 806 * failure should not prevent us from doing
807 * the sync "soon". 807 * the sync "soon".
808 * 808 *
809 * If we locked it yet arrive here, it's 809 * If we locked it yet arrive here, it's
810 * likely that lazy sync is in progress and 810 * likely that lazy sync is in progress and
811 * so the vnode still has dirty metadata.  811 * so the vnode still has dirty metadata.
812 * syncdelay is mainly to get this vnode out 812 * syncdelay is mainly to get this vnode out
813 * of the way so we do not consider it again 813 * of the way so we do not consider it again
814 * "soon" in this loop, so the delay time is 814 * "soon" in this loop, so the delay time is
815 * not critical as long as it is not "soon".  815 * not critical as long as it is not "soon".
816 * While write-back strategy is the file 816 * While write-back strategy is the file
817 * system's domain, we expect write-back to 817 * system's domain, we expect write-back to
818 * occur no later than syncdelay seconds 818 * occur no later than syncdelay seconds
819 * into the future. 819 * into the future.
820 */ 820 */
821 vn_syncer_add1(vp, 821 vn_syncer_add1(vp,
822 synced ? syncdelay : lockdelay); 822 synced ? syncdelay : lockdelay);
823 } 823 }
824 } 824 }
825 825
826 /* 826 /*
827 * If it has taken us less than a second to process the 827 * If it has taken us less than a second to process the
828 * current work, then wait. Otherwise start right over 828 * current work, then wait. Otherwise start right over
829 * again. We can still lose time if any single round 829 * again. We can still lose time if any single round
830 * takes more than two seconds, but it does not really 830 * takes more than two seconds, but it does not really
831 * matter as we are just trying to generally pace the 831 * matter as we are just trying to generally pace the
832 * filesystem activity. 832 * filesystem activity.
833 */ 833 */
834 if (time_second == starttime) { 834 if (time_second == starttime) {
835 kpause("syncer", false, hz, &syncer_data_lock); 835 kpause("syncer", false, hz, &syncer_data_lock);
836 } 836 }
837 mutex_exit(&syncer_data_lock); 837 mutex_exit(&syncer_data_lock);
838 } 838 }
839} 839}
840 840
841static void 841static void
842sysctl_vfs_syncfs_setup(struct sysctllog **clog) 842sysctl_vfs_syncfs_setup(struct sysctllog **clog)
843{ 843{
844 const struct sysctlnode *rnode, *cnode; 844 const struct sysctlnode *rnode, *cnode;
845 845
846 sysctl_createv(clog, 0, NULL, &rnode, 846 sysctl_createv(clog, 0, NULL, &rnode,
847 CTLFLAG_PERMANENT, 847 CTLFLAG_PERMANENT,
848 CTLTYPE_NODE, "sync", 848 CTLTYPE_NODE, "sync",
849 SYSCTL_DESCR("syncer options"), 849 SYSCTL_DESCR("syncer options"),
850 NULL, 0, NULL, 0, 850 NULL, 0, NULL, 0,
851 CTL_VFS, CTL_CREATE, CTL_EOL); 851 CTL_VFS, CTL_CREATE, CTL_EOL);
852 852
853 sysctl_createv(clog, 0, &rnode, &cnode, 853 sysctl_createv(clog, 0, &rnode, &cnode,
854 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 854 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
855 CTLTYPE_QUAD, "delay", 855 CTLTYPE_QUAD, "delay",
856 SYSCTL_DESCR("max time to delay syncing data"), 856 SYSCTL_DESCR("max time to delay syncing data"),
857 NULL, 0, &syncdelay, 0, 857 NULL, 0, &syncdelay, 0,
858 CTL_CREATE, CTL_EOL); 858 CTL_CREATE, CTL_EOL);
859 859
860 sysctl_createv(clog, 0, &rnode, &cnode, 860 sysctl_createv(clog, 0, &rnode, &cnode,
861 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 861 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
862 CTLTYPE_QUAD, "filedelay", 862 CTLTYPE_QUAD, "filedelay",
863 SYSCTL_DESCR("time to delay syncing files"), 863 SYSCTL_DESCR("time to delay syncing files"),
864 NULL, 0, &filedelay, 0, 864 NULL, 0, &filedelay, 0,
865 CTL_CREATE, CTL_EOL); 865 CTL_CREATE, CTL_EOL);
866 866
867 sysctl_createv(clog, 0, &rnode, &cnode, 867 sysctl_createv(clog, 0, &rnode, &cnode,
868 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 868 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
869 CTLTYPE_QUAD, "dirdelay", 869 CTLTYPE_QUAD, "dirdelay",
870 SYSCTL_DESCR("time to delay syncing directories"), 870 SYSCTL_DESCR("time to delay syncing directories"),
871 NULL, 0, &dirdelay, 0, 871 NULL, 0, &dirdelay, 0,
872 CTL_CREATE, CTL_EOL); 872 CTL_CREATE, CTL_EOL);
873 873
874 sysctl_createv(clog, 0, &rnode, &cnode, 874 sysctl_createv(clog, 0, &rnode, &cnode,
875 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 875 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
876 CTLTYPE_QUAD, "metadelay", 876 CTLTYPE_QUAD, "metadelay",
877 SYSCTL_DESCR("time to delay syncing metadata"), 877 SYSCTL_DESCR("time to delay syncing metadata"),
878 NULL, 0, &metadelay, 0, 878 NULL, 0, &metadelay, 0,
879 CTL_CREATE, CTL_EOL); 879 CTL_CREATE, CTL_EOL);
880} 880}
881 881
882/* 882/*
883 * sysctl helper routine to return list of supported fstypes 883 * sysctl helper routine to return list of supported fstypes
884 */ 884 */
885int 885int
886sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS) 886sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
887{ 887{
888 char bf[sizeof(((struct statvfs *)NULL)->f_fstypename)]; 888 char bf[sizeof(((struct statvfs *)NULL)->f_fstypename)];
889 char *where = oldp; 889 char *where = oldp;
890 struct vfsops *v; 890 struct vfsops *v;
891 size_t needed, left, slen; 891 size_t needed, left, slen;
892 int error, first; 892 int error, first;
893 893
894 if (newp != NULL) 894 if (newp != NULL)
895 return (EPERM); 895 return (EPERM);
896 if (namelen != 0) 896 if (namelen != 0)
897 return (EINVAL); 897 return (EINVAL);
898 898
899 first = 1; 899 first = 1;
900 error = 0; 900 error = 0;
901 needed = 0; 901 needed = 0;
902 left = *oldlenp; 902 left = *oldlenp;
903 903
904 sysctl_unlock(); 904 sysctl_unlock();
905 mutex_enter(&vfs_list_lock); 905 mutex_enter(&vfs_list_lock);
906 LIST_FOREACH(v, &vfs_list, vfs_list) { 906 LIST_FOREACH(v, &vfs_list, vfs_list) {
907 if (where == NULL) 907 if (where == NULL)
908 needed += strlen(v->vfs_name) + 1; 908 needed += strlen(v->vfs_name) + 1;
909 else { 909 else {
910 memset(bf, 0, sizeof(bf)); 910 memset(bf, 0, sizeof(bf));
911 if (first) { 911 if (first) {
912 strncpy(bf, v->vfs_name, sizeof(bf)); 912 strncpy(bf, v->vfs_name, sizeof(bf));
913 first = 0; 913 first = 0;
914 } else { 914 } else {
915 bf[0] = ' '; 915 bf[0] = ' ';
916 strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1); 916 strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1);
917 } 917 }
918 bf[sizeof(bf)-1] = '\0'; 918 bf[sizeof(bf)-1] = '\0';
919 slen = strlen(bf); 919 slen = strlen(bf);
920 if (left < slen + 1) 920 if (left < slen + 1)
921 break; 921 break;
922 v->vfs_refcount++; 922 v->vfs_refcount++;
923 mutex_exit(&vfs_list_lock); 923 mutex_exit(&vfs_list_lock);
924 /* +1 to copy out the trailing NUL byte */ 924 /* +1 to copy out the trailing NUL byte */
925 error = copyout(bf, where, slen + 1); 925 error = copyout(bf, where, slen + 1);
926 mutex_enter(&vfs_list_lock); 926 mutex_enter(&vfs_list_lock);
927 v->vfs_refcount--; 927 v->vfs_refcount--;
928 if (error) 928 if (error)
929 break; 929 break;
930 where += slen; 930 where += slen;
931 needed += slen; 931 needed += slen;
932 left -= slen; 932 left -= slen;
933 } 933 }
934 } 934 }
935 mutex_exit(&vfs_list_lock); 935 mutex_exit(&vfs_list_lock);
936 sysctl_relock(); 936 sysctl_relock();
937 *oldlenp = needed; 937 *oldlenp = needed;
938 return (error); 938 return (error);
939} 939}
940 940
941int kinfo_vdebug = 1; 941int kinfo_vdebug = 1;
942int kinfo_vgetfailed; 942int kinfo_vgetfailed;
943 943
944#define KINFO_VNODESLOP 10 944#define KINFO_VNODESLOP 10
945 945
946/* 946/*
947 * Dump vnode list (via sysctl). 947 * Dump vnode list (via sysctl).
948 * Copyout address of vnode followed by vnode. 948 * Copyout address of vnode followed by vnode.
949 */ 949 */
950int 950int
951sysctl_kern_vnode(SYSCTLFN_ARGS) 951sysctl_kern_vnode(SYSCTLFN_ARGS)
952{ 952{
953 char *where = oldp; 953 char *where = oldp;
954 size_t *sizep = oldlenp; 954 size_t *sizep = oldlenp;
955 struct mount *mp; 955 struct mount *mp;
956 vnode_t *vp, vbuf; 956 vnode_t *vp, vbuf;
957 mount_iterator_t *iter; 957 mount_iterator_t *iter;
958 struct vnode_iterator *marker; 958 struct vnode_iterator *marker;
959 char *bp = where; 959 char *bp = where;
960 char *ewhere; 960 char *ewhere;
961 int error; 961 int error;
962 962
963 if (namelen != 0) 963 if (namelen != 0)
964 return (EOPNOTSUPP); 964 return (EOPNOTSUPP);
965 if (newp != NULL) 965 if (newp != NULL)
966 return (EPERM); 966 return (EPERM);
967 967
968#define VPTRSZ sizeof(vnode_t *) 968#define VPTRSZ sizeof(vnode_t *)
969#define VNODESZ sizeof(vnode_t) 969#define VNODESZ sizeof(vnode_t)
970 if (where == NULL) { 970 if (where == NULL) {
971 *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ); 971 *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
972 return (0); 972 return (0);
973 } 973 }
974 ewhere = where + *sizep; 974 ewhere = where + *sizep;
975 975
976 sysctl_unlock(); 976 sysctl_unlock();
977 mountlist_iterator_init(&iter); 977 mountlist_iterator_init(&iter);
978 while ((mp = mountlist_iterator_next(iter)) != NULL) { 978 while ((mp = mountlist_iterator_next(iter)) != NULL) {
979 vfs_vnode_iterator_init(mp, &marker); 979 vfs_vnode_iterator_init(mp, &marker);
980 while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) { 980 while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
981 if (bp + VPTRSZ + VNODESZ > ewhere) { 981 if (bp + VPTRSZ + VNODESZ > ewhere) {
982 vrele(vp); 982 vrele(vp);
983 vfs_vnode_iterator_destroy(marker); 983 vfs_vnode_iterator_destroy(marker);
984 mountlist_iterator_destroy(iter); 984 mountlist_iterator_destroy(iter);
985 sysctl_relock(); 985 sysctl_relock();
986 *sizep = bp - where; 986 *sizep = bp - where;
987 return (ENOMEM); 987 return (ENOMEM);
988 } 988 }
989 memcpy(&vbuf, vp, VNODESZ); 989 memcpy(&vbuf, vp, VNODESZ);
990 if ((error = copyout(&vp, bp, VPTRSZ)) || 990 if ((error = copyout(&vp, bp, VPTRSZ)) ||
991 (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) { 991 (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) {
992 vrele(vp); 992 vrele(vp);
993 vfs_vnode_iterator_destroy(marker); 993 vfs_vnode_iterator_destroy(marker);
994 mountlist_iterator_destroy(iter); 994 mountlist_iterator_destroy(iter);
995 sysctl_relock(); 995 sysctl_relock();
996 return (error); 996 return (error);
997 } 997 }
998 vrele(vp); 998 vrele(vp);
999 bp += VPTRSZ + VNODESZ; 999 bp += VPTRSZ + VNODESZ;
1000 } 1000 }
1001 vfs_vnode_iterator_destroy(marker); 1001 vfs_vnode_iterator_destroy(marker);
1002 } 1002 }
1003 mountlist_iterator_destroy(iter); 1003 mountlist_iterator_destroy(iter);
1004 sysctl_relock(); 1004 sysctl_relock();
1005 1005
1006 *sizep = bp - where; 1006 *sizep = bp - where;
1007 return (0); 1007 return (0);
1008} 1008}
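
A minimal userland sketch of driving this handler, assuming the classic
CTL_KERN/KERN_VNODE MIB still reaches it: the NULL-oldp probe returns the
padded estimate computed above (the KINFO_VNODESLOP extra records absorb
vnodes created between the two passes), and the second pass fetches a
stream of (vnode pointer, vnode) pairs.

	#include <sys/param.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		int mib[2] = { CTL_KERN, KERN_VNODE };
		size_t len;
		char *buf;

		/* Pass 1: oldp == NULL asks only for a size estimate. */
		if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
			err(1, "sysctl (probe)");
		if ((buf = malloc(len)) == NULL)
			err(1, "malloc");
		/* Pass 2: fetch the (vnode pointer, vnode) records. */
		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
			err(1, "sysctl (fetch)");
		printf("%zu bytes of vnode records\n", len);
		free(buf);
		return 0;
	}
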
1009 1009
1010/* 1010/*
1011 * Set vnode attributes to VNOVAL 1011 * Set vnode attributes to VNOVAL
1012 */ 1012 */
1013void 1013void
1014vattr_null(struct vattr *vap) 1014vattr_null(struct vattr *vap)
1015{ 1015{
1016 1016
1017 memset(vap, 0, sizeof(*vap)); 1017 memset(vap, 0, sizeof(*vap));
1018 1018
1019 vap->va_type = VNON; 1019 vap->va_type = VNON;
1020 1020
1021 /* 1021 /*
1022 	 * Assign individually so that it is safe even if the size and 1022 	 * Assign individually so that it is safe even if the size and
1023 	 * sign of each member vary. 1023 	 * sign of each member vary.
1024 */ 1024 */
1025 vap->va_mode = VNOVAL; 1025 vap->va_mode = VNOVAL;
1026 vap->va_nlink = VNOVAL; 1026 vap->va_nlink = VNOVAL;
1027 vap->va_uid = VNOVAL; 1027 vap->va_uid = VNOVAL;
1028 vap->va_gid = VNOVAL; 1028 vap->va_gid = VNOVAL;
1029 vap->va_fsid = VNOVAL; 1029 vap->va_fsid = VNOVAL;
1030 vap->va_fileid = VNOVAL; 1030 vap->va_fileid = VNOVAL;
1031 vap->va_size = VNOVAL; 1031 vap->va_size = VNOVAL;
1032 vap->va_blocksize = VNOVAL; 1032 vap->va_blocksize = VNOVAL;
1033 vap->va_atime.tv_sec = 1033 vap->va_atime.tv_sec =
1034 vap->va_mtime.tv_sec = 1034 vap->va_mtime.tv_sec =
1035 vap->va_ctime.tv_sec = 1035 vap->va_ctime.tv_sec =
1036 vap->va_birthtime.tv_sec = VNOVAL; 1036 vap->va_birthtime.tv_sec = VNOVAL;
1037 vap->va_atime.tv_nsec = 1037 vap->va_atime.tv_nsec =
1038 vap->va_mtime.tv_nsec = 1038 vap->va_mtime.tv_nsec =
1039 vap->va_ctime.tv_nsec = 1039 vap->va_ctime.tv_nsec =
1040 vap->va_birthtime.tv_nsec = VNOVAL; 1040 vap->va_birthtime.tv_nsec = VNOVAL;
1041 vap->va_gen = VNOVAL; 1041 vap->va_gen = VNOVAL;
1042 vap->va_flags = VNOVAL; 1042 vap->va_flags = VNOVAL;
1043 vap->va_rdev = VNOVAL; 1043 vap->va_rdev = VNOVAL;
1044 vap->va_bytes = VNOVAL; 1044 vap->va_bytes = VNOVAL;
1045} 1045}
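
A hedged sketch of the canonical calling pattern: initialize every field
to "no change" with vattr_null() and then set only what should change,
here a file truncation. The helper name is illustrative, and the vnode is
assumed to be locked as VOP_SETATTR requires.

	#include <sys/param.h>
	#include <sys/vnode.h>
	#include <sys/kauth.h>

	/* Sketch: truncate a locked vnode to 'length', changing nothing else. */
	static int
	example_truncate(struct vnode *vp, off_t length, kauth_cred_t cred)
	{
		struct vattr va;

		vattr_null(&va);		/* everything starts as VNOVAL */
		va.va_size = length;		/* the one attribute to change */
		return VOP_SETATTR(vp, &va, cred);
	}
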
1046 1046
1047/* 1047/*
1048 * Vnode state to string. 1048 * Vnode state to string.
1049 */ 1049 */
1050const char * 1050const char *
1051vstate_name(enum vnode_state state) 1051vstate_name(enum vnode_state state)
1052{ 1052{
1053 1053
1054 switch (state) { 1054 switch (state) {
1055 case VS_ACTIVE: 1055 case VS_ACTIVE:
1056 return "ACTIVE"; 1056 return "ACTIVE";
1057 case VS_MARKER: 1057 case VS_MARKER:
1058 return "MARKER"; 1058 return "MARKER";
1059 case VS_LOADING: 1059 case VS_LOADING:
1060 return "LOADING"; 1060 return "LOADING";
1061 case VS_LOADED: 1061 case VS_LOADED:
1062 return "LOADED"; 1062 return "LOADED";
1063 case VS_BLOCKED: 1063 case VS_BLOCKED:
1064 return "BLOCKED"; 1064 return "BLOCKED";
1065 case VS_RECLAIMING: 1065 case VS_RECLAIMING:
1066 return "RECLAIMING"; 1066 return "RECLAIMING";
1067 case VS_RECLAIMED: 1067 case VS_RECLAIMED:
1068 return "RECLAIMED"; 1068 return "RECLAIMED";
1069 default: 1069 default:
1070 return "ILLEGAL"; 1070 return "ILLEGAL";
1071 } 1071 }
1072} 1072}
1073 1073
1074/* 1074/*
1075 * Print a description of a vnode (common part). 1075 * Print a description of a vnode (common part).
1076 */ 1076 */
1077static void 1077static void
1078vprint_common(struct vnode *vp, const char *prefix, 1078vprint_common(struct vnode *vp, const char *prefix,
1079 void (*pr)(const char *, ...) __printflike(1, 2)) 1079 void (*pr)(const char *, ...) __printflike(1, 2))
1080{ 1080{
1081 int n; 1081 int n;
1082 char bf[96]; 1082 char bf[96];
1083 const uint8_t *cp; 1083 const uint8_t *cp;
1084 vnode_impl_t *vip; 1084 vnode_impl_t *vip;
1085 const char * const vnode_tags[] = { VNODE_TAGS }; 1085 const char * const vnode_tags[] = { VNODE_TAGS };
1086 const char * const vnode_types[] = { VNODE_TYPES }; 1086 const char * const vnode_types[] = { VNODE_TYPES };
1087 const char vnode_flagbits[] = VNODE_FLAGBITS; 1087 const char vnode_flagbits[] = VNODE_FLAGBITS;
1088 1088
1089#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) 1089#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
1090#define ARRAY_PRINT(idx, arr) \ 1090#define ARRAY_PRINT(idx, arr) \
1091 ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN") 1091 ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN")
1092 1092
1093 vip = VNODE_TO_VIMPL(vp); 1093 vip = VNODE_TO_VIMPL(vp);
1094 1094
1095 snprintb(bf, sizeof(bf), 1095 snprintb(bf, sizeof(bf),
1096 vnode_flagbits, vp->v_iflag | vp->v_vflag | vp->v_uflag); 1096 vnode_flagbits, vp->v_iflag | vp->v_vflag | vp->v_uflag);
1097 1097
1098 (*pr)("vnode %p flags %s\n", vp, bf); 1098 (*pr)("vnode %p flags %s\n", vp, bf);
1099 (*pr)("%stag %s(%d) type %s(%d) mount %p typedata %p\n", prefix, 1099 (*pr)("%stag %s(%d) type %s(%d) mount %p typedata %p\n", prefix,
1100 ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag, 1100 ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
1101 ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type, 1101 ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
1102 vp->v_mount, vp->v_mountedhere); 1102 vp->v_mount, vp->v_mountedhere);
1103 (*pr)("%susecount %d writecount %d holdcount %d\n", prefix, 1103 (*pr)("%susecount %d writecount %d holdcount %d\n", prefix,
1104 vrefcnt(vp), vp->v_writecount, vp->v_holdcnt); 1104 vrefcnt(vp), vp->v_writecount, vp->v_holdcnt);
1105 (*pr)("%ssize %" PRIx64 " writesize %" PRIx64 " numoutput %d\n", 1105 (*pr)("%ssize %" PRIx64 " writesize %" PRIx64 " numoutput %d\n",
1106 prefix, vp->v_size, vp->v_writesize, vp->v_numoutput); 1106 prefix, vp->v_size, vp->v_writesize, vp->v_numoutput);
1107 (*pr)("%sdata %p lock %p\n", prefix, vp->v_data, &vip->vi_lock); 1107 (*pr)("%sdata %p lock %p\n", prefix, vp->v_data, &vip->vi_lock);
1108 1108
1109 (*pr)("%sstate %s key(%p %zd)", prefix, vstate_name(vip->vi_state), 1109 (*pr)("%sstate %s key(%p %zd)", prefix, vstate_name(vip->vi_state),
1110 vip->vi_key.vk_mount, vip->vi_key.vk_key_len); 1110 vip->vi_key.vk_mount, vip->vi_key.vk_key_len);
1111 n = vip->vi_key.vk_key_len; 1111 n = vip->vi_key.vk_key_len;
1112 cp = vip->vi_key.vk_key; 1112 cp = vip->vi_key.vk_key;
1113 while (n-- > 0) 1113 while (n-- > 0)
1114 (*pr)(" %02x", *cp++); 1114 (*pr)(" %02x", *cp++);
1115 (*pr)("\n"); 1115 (*pr)("\n");
1116 (*pr)("%slrulisthd %p\n", prefix, vip->vi_lrulisthd); 1116 (*pr)("%slrulisthd %p\n", prefix, vip->vi_lrulisthd);
1117 1117
1118#undef ARRAY_PRINT 1118#undef ARRAY_PRINT
1119#undef ARRAY_SIZE 1119#undef ARRAY_SIZE
1120} 1120}
1121 1121
1122/* 1122/*
1123 * Print out a description of a vnode. 1123 * Print out a description of a vnode.
1124 */ 1124 */
1125void 1125void
1126vprint(const char *label, struct vnode *vp) 1126vprint(const char *label, struct vnode *vp)
1127{ 1127{
1128 1128
1129 if (label != NULL) 1129 if (label != NULL)
1130 printf("%s: ", label); 1130 printf("%s: ", label);
1131 vprint_common(vp, "\t", printf); 1131 vprint_common(vp, "\t", printf);
1132 if (vp->v_data != NULL) { 1132 if (vp->v_data != NULL) {
1133 printf("\t"); 1133 printf("\t");
1134 VOP_PRINT(vp); 1134 VOP_PRINT(vp);
1135 } 1135 }
1136} 1136}
1137 1137
1138/* 1138/*
1139 * Given a file system name, look up the vfsops for that 1139 * Given a file system name, look up the vfsops for that
1140 * file system, or return NULL if the file system isn't present 1140 * file system, or return NULL if the file system isn't present
1141 * in the kernel. 1141 * in the kernel.
1142 */ 1142 */
1143struct vfsops * 1143struct vfsops *
1144vfs_getopsbyname(const char *name) 1144vfs_getopsbyname(const char *name)
1145{ 1145{
1146 struct vfsops *v; 1146 struct vfsops *v;
1147 1147
1148 mutex_enter(&vfs_list_lock); 1148 mutex_enter(&vfs_list_lock);
1149 LIST_FOREACH(v, &vfs_list, vfs_list) { 1149 LIST_FOREACH(v, &vfs_list, vfs_list) {
1150 if (strcmp(v->vfs_name, name) == 0) 1150 if (strcmp(v->vfs_name, name) == 0)
1151 break; 1151 break;
1152 } 1152 }
1153 if (v != NULL) 1153 if (v != NULL)
1154 v->vfs_refcount++; 1154 v->vfs_refcount++;
1155 mutex_exit(&vfs_list_lock); 1155 mutex_exit(&vfs_list_lock);
1156 1156
1157 return (v); 1157 return (v);
1158} 1158}
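
Because a successful lookup bumps vfs_refcount, the caller must release
the reference when done; a sketch, assuming vfs_delref() is the matching
release routine as elsewhere in the NetBSD VFS code.

	#include <sys/param.h>
	#include <sys/mount.h>

	/* Sketch: probe whether a file system type is available. */
	static bool
	example_have_fs(const char *name)
	{
		struct vfsops *v;

		if ((v = vfs_getopsbyname(name)) == NULL)
			return false;	/* not compiled in or loaded */
		vfs_delref(v);		/* drop the reference taken above */
		return true;
	}
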
1159 1159
1160void 1160void
1161copy_statvfs_info(struct statvfs *sbp, const struct mount *mp) 1161copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
1162{ 1162{
1163 const struct statvfs *mbp; 1163 const struct statvfs *mbp;
1164 1164
1165 if (sbp == (mbp = &mp->mnt_stat)) 1165 if (sbp == (mbp = &mp->mnt_stat))
1166 return; 1166 return;
1167 1167
1168 (void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx)); 1168 (void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
1169 sbp->f_fsid = mbp->f_fsid; 1169 sbp->f_fsid = mbp->f_fsid;
1170 sbp->f_owner = mbp->f_owner; 1170 sbp->f_owner = mbp->f_owner;
1171 sbp->f_flag = mbp->f_flag; 1171 sbp->f_flag = mbp->f_flag;
1172 sbp->f_syncwrites = mbp->f_syncwrites; 1172 sbp->f_syncwrites = mbp->f_syncwrites;
1173 sbp->f_asyncwrites = mbp->f_asyncwrites; 1173 sbp->f_asyncwrites = mbp->f_asyncwrites;
1174 sbp->f_syncreads = mbp->f_syncreads; 1174 sbp->f_syncreads = mbp->f_syncreads;
1175 sbp->f_asyncreads = mbp->f_asyncreads; 1175 sbp->f_asyncreads = mbp->f_asyncreads;
1176 (void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare)); 1176 (void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
1177 (void)memcpy(sbp->f_fstypename, mbp->f_fstypename, 1177 (void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
1178 sizeof(sbp->f_fstypename)); 1178 sizeof(sbp->f_fstypename));
1179 (void)memcpy(sbp->f_mntonname, mbp->f_mntonname, 1179 (void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
1180 sizeof(sbp->f_mntonname)); 1180 sizeof(sbp->f_mntonname));
1181 (void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, 1181 (void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
1182 sizeof(sbp->f_mntfromname)); 1182 sizeof(sbp->f_mntfromname));
1183 (void)memcpy(sbp->f_mntfromlabel, mp->mnt_stat.f_mntfromlabel, 1183 (void)memcpy(sbp->f_mntfromlabel, mp->mnt_stat.f_mntfromlabel,
1184 sizeof(sbp->f_mntfromlabel)); 1184 sizeof(sbp->f_mntfromlabel));
1185 sbp->f_namemax = mbp->f_namemax; 1185 sbp->f_namemax = mbp->f_namemax;
1186} 1186}
1187 1187
1188int 1188int
1189set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom, 1189set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
1190 const char *vfsname, struct mount *mp, struct lwp *l) 1190 const char *vfsname, struct mount *mp, struct lwp *l)
1191{ 1191{
1192 int error; 1192 int error;
1193 size_t size; 1193 size_t size;
1194 struct statvfs *sfs = &mp->mnt_stat; 1194 struct statvfs *sfs = &mp->mnt_stat;
1195 int (*fun)(const void *, void *, size_t, size_t *); 1195 int (*fun)(const void *, void *, size_t, size_t *);
1196 1196
1197 (void)strlcpy(mp->mnt_stat.f_fstypename, vfsname, 1197 (void)strlcpy(mp->mnt_stat.f_fstypename, vfsname,
1198 sizeof(mp->mnt_stat.f_fstypename)); 1198 sizeof(mp->mnt_stat.f_fstypename));
1199 1199
1200 if (onp) { 1200 if (onp) {
1201 struct cwdinfo *cwdi = l->l_proc->p_cwdi; 1201 struct cwdinfo *cwdi = l->l_proc->p_cwdi;
1202 fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr; 1202 fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
1203 if (cwdi->cwdi_rdir != NULL) { 1203 if (cwdi->cwdi_rdir != NULL) {
1204 size_t len; 1204 size_t len;
1205 char *bp; 1205 char *bp;
1206 char *path = PNBUF_GET(); 1206 char *path = PNBUF_GET();
1207 1207
1208 bp = path + MAXPATHLEN; 1208 bp = path + MAXPATHLEN;
1209 *--bp = '\0'; 1209 *--bp = '\0';
1210 rw_enter(&cwdi->cwdi_lock, RW_READER); 1210 rw_enter(&cwdi->cwdi_lock, RW_READER);
1211 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp, 1211 error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
1212 path, MAXPATHLEN / 2, 0, l); 1212 path, MAXPATHLEN / 2, 0, l);
1213 rw_exit(&cwdi->cwdi_lock); 1213 rw_exit(&cwdi->cwdi_lock);
1214 if (error) { 1214 if (error) {
1215 PNBUF_PUT(path); 1215 PNBUF_PUT(path);
1216 return error; 1216 return error;
1217 } 1217 }
1218 1218
1219 len = strlen(bp); 1219 len = strlen(bp);
1220 if (len > sizeof(sfs->f_mntonname) - 1) 1220 if (len > sizeof(sfs->f_mntonname) - 1)
1221 len = sizeof(sfs->f_mntonname) - 1; 1221 len = sizeof(sfs->f_mntonname) - 1;
1222 (void)strncpy(sfs->f_mntonname, bp, len); 1222 (void)strncpy(sfs->f_mntonname, bp, len);
1223 PNBUF_PUT(path); 1223 PNBUF_PUT(path);
1224 1224
1225 if (len < sizeof(sfs->f_mntonname) - 1) { 1225 if (len < sizeof(sfs->f_mntonname) - 1) {
1226 error = (*fun)(onp, &sfs->f_mntonname[len], 1226 error = (*fun)(onp, &sfs->f_mntonname[len],
1227 sizeof(sfs->f_mntonname) - len - 1, &size); 1227 sizeof(sfs->f_mntonname) - len - 1, &size);
1228 if (error) 1228 if (error)
1229 return error; 1229 return error;
1230 size += len; 1230 size += len;
1231 } else { 1231 } else {
1232 size = len; 1232 size = len;
1233 } 1233 }
1234 } else { 1234 } else {
1235 error = (*fun)(onp, &sfs->f_mntonname, 1235 error = (*fun)(onp, &sfs->f_mntonname,
1236 sizeof(sfs->f_mntonname) - 1, &size); 1236 sizeof(sfs->f_mntonname) - 1, &size);
1237 if (error) 1237 if (error)
1238 return error; 1238 return error;
1239 } 1239 }
1240 (void)memset(sfs->f_mntonname + size, 0, 1240 (void)memset(sfs->f_mntonname + size, 0,
1241 sizeof(sfs->f_mntonname) - size); 1241 sizeof(sfs->f_mntonname) - size);
1242 } 1242 }
1243 1243
1244 if (fromp) { 1244 if (fromp) {
1245 fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr; 1245 fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
1246 error = (*fun)(fromp, sfs->f_mntfromname, 1246 error = (*fun)(fromp, sfs->f_mntfromname,
1247 sizeof(sfs->f_mntfromname) - 1, &size); 1247 sizeof(sfs->f_mntfromname) - 1, &size);
1248 if (error) 1248 if (error)
1249 return error; 1249 return error;
1250 (void)memset(sfs->f_mntfromname + size, 0, 1250 (void)memset(sfs->f_mntfromname + size, 0,
1251 sizeof(sfs->f_mntfromname) - size); 1251 sizeof(sfs->f_mntfromname) - size);
1252 } 1252 }
1253 return 0; 1253 return 0;
1254} 1254}
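
For context, a sketch of how a file system's mount entry point typically
feeds this helper; both strings come from userland in the mount(2) path,
and every name here other than set_statvfs_info() itself is illustrative.

	#include <sys/param.h>
	#include <sys/mount.h>
	#include <sys/uio.h>

	/* Sketch: record mount-point and device names at the end of mount. */
	static int
	examplefs_set_names(struct mount *mp, const char *path,
	    const char *fspec, struct lwp *l)
	{
		return set_statvfs_info(path, UIO_USERSPACE, fspec,
		    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	}
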
1255 1255
1256void 1256void
1257vfs_timestamp(struct timespec *ts) 1257vfs_timestamp(struct timespec *ts)
1258{ 1258{
1259 1259
1260 nanotime(ts); 1260 nanotime(ts);
1261} 1261}
1262 1262
1263/* 1263/*
1264 * The purpose of this routine is to remove granularity from accmode_t, 1264 * The purpose of this routine is to remove granularity from accmode_t,
1265 * reducing it to the standard Unix access bits - VEXEC, VREAD, VWRITE, 1265 * reducing it to the standard Unix access bits - VEXEC, VREAD, VWRITE,
1266 * VADMIN and VAPPEND. 1266 * VADMIN and VAPPEND.
1267 * 1267 *
1268 * If it returns 0, the caller is supposed to continue with the usual 1268 * If it returns 0, the caller is supposed to continue with the usual
1269 * access checks using 'accmode' as modified by this routine. If it 1269 * access checks using 'accmode' as modified by this routine. If it
1270 * returns a nonzero value, the caller is supposed to return that value 1270 * returns a nonzero value, the caller is supposed to return that value
1271 * as errno. 1271 * as errno.
1272 * 1272 *
1273 * Note that after this routine runs, accmode may be zero. 1273 * Note that after this routine runs, accmode may be zero.
1274 */ 1274 */
1275int 1275int
1276vfs_unixify_accmode(accmode_t *accmode) 1276vfs_unixify_accmode(accmode_t *accmode)
1277{ 1277{
1278 /* 1278 /*
1279 * There is no way to specify explicit "deny" rule using 1279 * There is no way to specify explicit "deny" rule using
1280 * file mode or POSIX.1e ACLs. 1280 * file mode or POSIX.1e ACLs.
1281 */ 1281 */
1282 if (*accmode & VEXPLICIT_DENY) { 1282 if (*accmode & VEXPLICIT_DENY) {
1283 *accmode = 0; 1283 *accmode = 0;
1284 return (0); 1284 return (0);
1285 } 1285 }
1286 1286
1287 /* 1287 /*
1288 * None of these can be translated into usual access bits. 1288 * None of these can be translated into usual access bits.
1289 * Also, the common case for NFSv4 ACLs is to not contain 1289 * Also, the common case for NFSv4 ACLs is to not contain
1290 * either of these bits. Caller should check for VWRITE 1290 * either of these bits. Caller should check for VWRITE
1291 * on the containing directory instead. 1291 * on the containing directory instead.
1292 */ 1292 */
1293 if (*accmode & (VDELETE_CHILD | VDELETE)) 1293 if (*accmode & (VDELETE_CHILD | VDELETE))
1294 return (EPERM); 1294 return (EPERM);
1295 1295
1296 if (*accmode & VADMIN_PERMS) { 1296 if (*accmode & VADMIN_PERMS) {
1297 *accmode &= ~VADMIN_PERMS; 1297 *accmode &= ~VADMIN_PERMS;
1298 *accmode |= VADMIN; 1298 *accmode |= VADMIN;
1299 } 1299 }
1300 1300
1301 /* 1301 /*
1302 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 1302 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
1303 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 1303 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
1304 */ 1304 */
1305 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 1305 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
1306 1306
1307 return (0); 1307 return (0);
1308} 1308}
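
A sketch of the contract spelled out in the comment above: nonzero means
fail with that errno, zero with a cleared accmode means nothing is left to
check, and otherwise the usual checks proceed on the reduced bits.
example_check_mode() is a hypothetical stand-in for those checks.

	#include <sys/param.h>
	#include <sys/vnode.h>
	#include <sys/kauth.h>

	static int
	example_access(struct vnode *vp, accmode_t accmode, kauth_cred_t cred)
	{
		int error;

		error = vfs_unixify_accmode(&accmode);
		if (error != 0)
			return error;	/* e.g. EPERM for VDELETE */
		if (accmode == 0)
			return 0;	/* nothing left to check */
		/* Hypothetical: ordinary mode/ACL evaluation. */
		return example_check_mode(vp, accmode, cred);
	}
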
1309 1309
1310time_t rootfstime; /* recorded root fs time, if known */ 1310time_t rootfstime; /* recorded root fs time, if known */
1311void 1311void
1312setrootfstime(time_t t) 1312setrootfstime(time_t t)
1313{ 1313{
1314 rootfstime = t; 1314 rootfstime = t;
1315} 1315}
1316 1316
1317static const uint8_t vttodt_tab[ ] = { 1317static const uint8_t vttodt_tab[ ] = {
1318 [VNON] = DT_UNKNOWN, 1318 [VNON] = DT_UNKNOWN,
1319 [VREG] = DT_REG, 1319 [VREG] = DT_REG,
1320 [VDIR] = DT_DIR, 1320 [VDIR] = DT_DIR,
1321 [VBLK] = DT_BLK, 1321 [VBLK] = DT_BLK,
1322 [VCHR] = DT_CHR, 1322 [VCHR] = DT_CHR,
1323 [VLNK] = DT_LNK, 1323 [VLNK] = DT_LNK,
1324 [VSOCK] = DT_SOCK, 1324 [VSOCK] = DT_SOCK,
1325 [VFIFO] = DT_FIFO, 1325 [VFIFO] = DT_FIFO,
1326 [VBAD] = DT_UNKNOWN 1326 [VBAD] = DT_UNKNOWN
1327}; 1327};
1328 1328
1329uint8_t 1329uint8_t
1330vtype2dt(enum vtype vt) 1330vtype2dt(enum vtype vt)
1331{ 1331{
1332 1332
1333 CTASSERT(VBAD == __arraycount(vttodt_tab) - 1); 1333 CTASSERT(VBAD == __arraycount(vttodt_tab) - 1);
1334 return vttodt_tab[vt]; 1334 return vttodt_tab[vt];
1335} 1335}
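
The usual consumer is a readdir path converting a vnode type into the
d_type byte of a struct dirent; a minimal sketch:

	#include <sys/dirent.h>
	#include <sys/vnode.h>

	/* Sketch: map a known vnode type onto dirent's type byte. */
	static void
	example_fill_dtype(struct dirent *dp, enum vtype vt)
	{
		dp->d_type = vtype2dt(vt);	/* e.g. VREG -> DT_REG */
	}
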
1336 1336
1337int 1337int
1338VFS_MOUNT(struct mount *mp, const char *a, void *b, size_t *c) 1338VFS_MOUNT(struct mount *mp, const char *a, void *b, size_t *c)
1339{ 1339{
1340 int error; 1340 int error;
1341 1341
1342 KERNEL_LOCK(1, NULL); 1342 KERNEL_LOCK(1, NULL);
1343 error = (*(mp->mnt_op->vfs_mount))(mp, a, b, c); 1343 error = (*(mp->mnt_op->vfs_mount))(mp, a, b, c);
1344 KERNEL_UNLOCK_ONE(NULL); 1344 KERNEL_UNLOCK_ONE(NULL);
1345 1345
1346 return error; 1346 return error;
1347} 1347}
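
VFS_MOUNT always takes the big lock, while the wrappers that follow skip
it when IMNT_MPSAFE is set. A sketch of how an MP-safe file system opts
out, assuming it does so early in its own mount routine as current NetBSD
file systems do:

	#include <sys/param.h>
	#include <sys/mount.h>

	/* Sketch: an MP-safe file system marks itself at mount time. */
	static int
	examplefs_mount(struct mount *mp, const char *path, void *data,
	    size_t *data_len)
	{
		mp->mnt_iflag |= IMNT_MPSAFE;	/* wrappers skip KERNEL_LOCK */
		/* ...file-system specific setup elided... */
		return 0;
	}
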
1348  1348
1349int 1349int
1350VFS_START(struct mount *mp, int a) 1350VFS_START(struct mount *mp, int a)
1351{ 1351{
1352 int error; 1352 int error;
1353 1353
1354 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1354 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1355 KERNEL_LOCK(1, NULL); 1355 KERNEL_LOCK(1, NULL);
1356 } 1356 }
1357 error = (*(mp->mnt_op->vfs_start))(mp, a); 1357 error = (*(mp->mnt_op->vfs_start))(mp, a);
1358 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1358 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1359 KERNEL_UNLOCK_ONE(NULL); 1359 KERNEL_UNLOCK_ONE(NULL);
1360 } 1360 }
1361 1361
1362 return error; 1362 return error;
1363} 1363}
1364  1364
1365int 1365int
1366VFS_UNMOUNT(struct mount *mp, int a) 1366VFS_UNMOUNT(struct mount *mp, int a)
1367{ 1367{
1368 int error; 1368 int error;
1369 1369
1370 KERNEL_LOCK(1, NULL); 1370 KERNEL_LOCK(1, NULL);
1371 error = (*(mp->mnt_op->vfs_unmount))(mp, a); 1371 error = (*(mp->mnt_op->vfs_unmount))(mp, a);
1372 KERNEL_UNLOCK_ONE(NULL); 1372 KERNEL_UNLOCK_ONE(NULL);
1373 1373
1374 return error; 1374 return error;
1375} 1375}
1376 1376
1377int 1377int
1378VFS_ROOT(struct mount *mp, int lktype, struct vnode **a) 1378VFS_ROOT(struct mount *mp, int lktype, struct vnode **a)
1379{ 1379{
1380 int error; 1380 int error;
1381 1381
1382 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1382 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1383 KERNEL_LOCK(1, NULL); 1383 KERNEL_LOCK(1, NULL);
1384 } 1384 }
1385 error = (*(mp->mnt_op->vfs_root))(mp, lktype, a); 1385 error = (*(mp->mnt_op->vfs_root))(mp, lktype, a);
1386 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1386 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1387 KERNEL_UNLOCK_ONE(NULL); 1387 KERNEL_UNLOCK_ONE(NULL);
1388 } 1388 }
1389 1389
1390 return error; 1390 return error;
1391} 1391}
1392 1392
1393int 1393int
1394VFS_QUOTACTL(struct mount *mp, struct quotactl_args *args) 1394VFS_QUOTACTL(struct mount *mp, struct quotactl_args *args)
1395{ 1395{
1396 int error; 1396 int error;
1397 1397
1398 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1398 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1399 KERNEL_LOCK(1, NULL); 1399 KERNEL_LOCK(1, NULL);
1400 } 1400 }
1401 error = (*(mp->mnt_op->vfs_quotactl))(mp, args); 1401 error = (*(mp->mnt_op->vfs_quotactl))(mp, args);
1402 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1402 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1403 KERNEL_UNLOCK_ONE(NULL); 1403 KERNEL_UNLOCK_ONE(NULL);
1404 } 1404 }
1405 1405
1406 return error; 1406 return error;
1407} 1407}
1408 1408
1409int 1409int
1410VFS_STATVFS(struct mount *mp, struct statvfs *a) 1410VFS_STATVFS(struct mount *mp, struct statvfs *a)
1411{ 1411{
1412 int error; 1412 int error;
1413 1413
1414 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1414 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1415 KERNEL_LOCK(1, NULL); 1415 KERNEL_LOCK(1, NULL);
1416 } 1416 }
1417 error = (*(mp->mnt_op->vfs_statvfs))(mp, a); 1417 error = (*(mp->mnt_op->vfs_statvfs))(mp, a);
1418 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1418 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1419 KERNEL_UNLOCK_ONE(NULL); 1419 KERNEL_UNLOCK_ONE(NULL);
1420 } 1420 }
1421 1421
1422 return error; 1422 return error;
1423} 1423}
1424 1424
1425int 1425int
1426VFS_SYNC(struct mount *mp, int a, struct kauth_cred *b) 1426VFS_SYNC(struct mount *mp, int a, struct kauth_cred *b)
1427{ 1427{
1428 int error; 1428 int error;
1429 1429
1430 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1430 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1431 KERNEL_LOCK(1, NULL); 1431 KERNEL_LOCK(1, NULL);
1432 } 1432 }
1433 error = (*(mp->mnt_op->vfs_sync))(mp, a, b); 1433 error = (*(mp->mnt_op->vfs_sync))(mp, a, b);
1434 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1434 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1435 KERNEL_UNLOCK_ONE(NULL); 1435 KERNEL_UNLOCK_ONE(NULL);
1436 } 1436 }
1437 1437
1438 return error; 1438 return error;
1439} 1439}
1440 1440
1441int 1441int
1442VFS_FHTOVP(struct mount *mp, struct fid *a, int b, struct vnode **c) 1442VFS_FHTOVP(struct mount *mp, struct fid *a, int b, struct vnode **c)
1443{ 1443{
1444 int error; 1444 int error;
1445 1445
1446 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1446 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1447 KERNEL_LOCK(1, NULL); 1447 KERNEL_LOCK(1, NULL);
1448 } 1448 }
1449 error = (*(mp->mnt_op->vfs_fhtovp))(mp, a, b, c); 1449 error = (*(mp->mnt_op->vfs_fhtovp))(mp, a, b, c);
1450 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1450 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1451 KERNEL_UNLOCK_ONE(NULL); 1451 KERNEL_UNLOCK_ONE(NULL);
1452 } 1452 }
1453 1453
1454 return error; 1454 return error;
1455} 1455}
1456 1456
1457int 1457int
1458VFS_VPTOFH(struct vnode *vp, struct fid *a, size_t *b) 1458VFS_VPTOFH(struct vnode *vp, struct fid *a, size_t *b)
1459{ 1459{
1460 int error; 1460 int error;
1461 1461
1462 if ((vp->v_vflag & VV_MPSAFE) == 0) { 1462 if ((vp->v_vflag & VV_MPSAFE) == 0) {
1463 KERNEL_LOCK(1, NULL); 1463 KERNEL_LOCK(1, NULL);
1464 } 1464 }
1465 error = (*(vp->v_mount->mnt_op->vfs_vptofh))(vp, a, b); 1465 error = (*(vp->v_mount->mnt_op->vfs_vptofh))(vp, a, b);
1466 if ((vp->v_vflag & VV_MPSAFE) == 0) { 1466 if ((vp->v_vflag & VV_MPSAFE) == 0) {
1467 KERNEL_UNLOCK_ONE(NULL); 1467 KERNEL_UNLOCK_ONE(NULL);
1468 } 1468 }
1469 1469
1470 return error; 1470 return error;
1471} 1471}
1472 1472
1473int 1473int
1474VFS_SNAPSHOT(struct mount *mp, struct vnode *a, struct timespec *b) 1474VFS_SNAPSHOT(struct mount *mp, struct vnode *a, struct timespec *b)
1475{ 1475{
1476 int error; 1476 int error;
1477 1477
1478 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1478 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1479 KERNEL_LOCK(1, NULL); 1479 KERNEL_LOCK(1, NULL);
1480 } 1480 }
1481 error = (*(mp->mnt_op->vfs_snapshot))(mp, a, b); 1481 error = (*(mp->mnt_op->vfs_snapshot))(mp, a, b);
1482 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1482 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1483 KERNEL_UNLOCK_ONE(NULL); 1483 KERNEL_UNLOCK_ONE(NULL);
1484 } 1484 }
1485 1485
1486 return error; 1486 return error;
1487} 1487}
1488 1488
1489int 1489int
1490VFS_EXTATTRCTL(struct mount *mp, int a, struct vnode *b, int c, const char *d) 1490VFS_EXTATTRCTL(struct mount *mp, int a, struct vnode *b, int c, const char *d)
1491{ 1491{
1492 int error; 1492 int error;
1493 1493
1494 KERNEL_LOCK(1, NULL); /* XXXSMP check ffs */ 1494 KERNEL_LOCK(1, NULL); /* XXXSMP check ffs */
1495 error = (*(mp->mnt_op->vfs_extattrctl))(mp, a, b, c, d); 1495 error = (*(mp->mnt_op->vfs_extattrctl))(mp, a, b, c, d);
1496 KERNEL_UNLOCK_ONE(NULL); /* XXX */ 1496 KERNEL_UNLOCK_ONE(NULL); /* XXX */
1497 1497
1498 return error; 1498 return error;
1499} 1499}
1500 1500
1501int 1501int
1502VFS_SUSPENDCTL(struct mount *mp, int a) 1502VFS_SUSPENDCTL(struct mount *mp, int a)
1503{ 1503{
1504 int error; 1504 int error;
1505 1505
1506 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1506 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1507 KERNEL_LOCK(1, NULL); 1507 KERNEL_LOCK(1, NULL);
1508 } 1508 }
1509 error = (*(mp->mnt_op->vfs_suspendctl))(mp, a); 1509 error = (*(mp->mnt_op->vfs_suspendctl))(mp, a);
1510 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) { 1510 if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1511 KERNEL_UNLOCK_ONE(NULL); 1511 KERNEL_UNLOCK_ONE(NULL);
1512 } 1512 }
1513 1513
1514 return error; 1514 return error;
1515} 1515}
1516 1516
1517#if defined(DDB) || defined(DEBUGPRINT) 1517#if defined(DDB) || defined(DEBUGPRINT)
1518static const char buf_flagbits[] = BUF_FLAGBITS; 1518static const char buf_flagbits[] = BUF_FLAGBITS;
1519 1519
1520void 1520void
1521vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...)) 1521vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...))
1522{ 1522{
1523 char bf[1024]; 1523 char bf[1024];
1524 1524
1525 (*pr)(" vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" rawblkno 0x%" 1525 (*pr)(" vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" rawblkno 0x%"
1526 PRIx64 " dev 0x%x\n", 1526 PRIx64 " dev 0x%x\n",
1527 bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_rawblkno, bp->b_dev); 1527 bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_rawblkno, bp->b_dev);
1528 1528
1529 snprintb(bf, sizeof(bf), 1529 snprintb(bf, sizeof(bf),
1530 buf_flagbits, bp->b_flags | bp->b_oflags | bp->b_cflags); 1530 buf_flagbits, bp->b_flags | bp->b_oflags | bp->b_cflags);
1531 (*pr)(" error %d flags %s\n", bp->b_error, bf); 1531 (*pr)(" error %d flags %s\n", bp->b_error, bf);
1532 1532
1533 (*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n", 1533 (*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
1534 bp->b_bufsize, bp->b_bcount, bp->b_resid); 1534 bp->b_bufsize, bp->b_bcount, bp->b_resid);
1535 (*pr)(" data %p saveaddr %p\n", 1535 (*pr)(" data %p saveaddr %p\n",
1536 bp->b_data, bp->b_saveaddr); 1536 bp->b_data, bp->b_saveaddr);
1537 (*pr)(" iodone %p objlock %p\n", bp->b_iodone, bp->b_objlock); 1537 (*pr)(" iodone %p objlock %p\n", bp->b_iodone, bp->b_objlock);
1538} 1538}
1539 1539
1540void 1540void
1541vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...)) 1541vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...))
1542{ 1542{
1543 1543
1544 uvm_object_printit(&vp->v_uobj, full, pr); 1544 uvm_object_printit(&vp->v_uobj, full, pr);
1545 (*pr)("\n"); 1545 (*pr)("\n");
1546 vprint_common(vp, "", printf); 1546 vprint_common(vp, "", pr);
1547 if (full) { 1547 if (full) {
1548 struct buf *bp; 1548 struct buf *bp;
1549 1549
1550 (*pr)("clean bufs:\n"); 1550 (*pr)("clean bufs:\n");
1551 LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) { 1551 LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
1552 (*pr)(" bp %p\n", bp); 1552 (*pr)(" bp %p\n", bp);
1553 vfs_buf_print(bp, full, pr); 1553 vfs_buf_print(bp, full, pr);
1554 } 1554 }
1555 1555
1556 (*pr)("dirty bufs:\n"); 1556 (*pr)("dirty bufs:\n");
1557 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 1557 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
1558 (*pr)(" bp %p\n", bp); 1558 (*pr)(" bp %p\n", bp);
1559 vfs_buf_print(bp, full, pr); 1559 vfs_buf_print(bp, full, pr);
1560 } 1560 }
1561 } 1561 }
1562} 1562}
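
The function above contains the commit's fix: vprint_common() now receives
the caller-supplied pr rather than bare printf, so that when the dump runs
under DDB the whole of it goes through the debugger's printer instead of
partly leaking to the ordinary console path. A sketch of the distinction,
assuming db_printf from ddb as the alternate sink:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <ddb/db_output.h>	/* db_printf */

	/* Sketch: the same vnode dumped via two different printers. */
	static void
	example_dump(struct vnode *vp, bool from_ddb)
	{
		vfs_vnode_print(vp, 1, from_ddb ? db_printf : printf);
	}
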
1563 1563
1564void 1564void
1565vfs_vnode_lock_print(void *vlock, int full, void (*pr)(const char *, ...)) 1565vfs_vnode_lock_print(void *vlock, int full, void (*pr)(const char *, ...))
1566{ 1566{
1567 struct mount *mp; 1567 struct mount *mp;
1568 vnode_impl_t *vip; 1568 vnode_impl_t *vip;
1569 1569
1570 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) { 1570 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
1571 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) { 1571 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
1572 if (&vip->vi_lock == vlock || 1572 if (&vip->vi_lock == vlock ||
1573 VIMPL_TO_VNODE(vip)->v_interlock == vlock) 1573 VIMPL_TO_VNODE(vip)->v_interlock == vlock)
1574 vfs_vnode_print(VIMPL_TO_VNODE(vip), full, pr); 1574 vfs_vnode_print(VIMPL_TO_VNODE(vip), full, pr);
1575 } 1575 }
1576 } 1576 }
1577} 1577}
1578 1578
1579void 1579void
1580vfs_mount_print_all(int full, void (*pr)(const char *, ...)) 1580vfs_mount_print_all(int full, void (*pr)(const char *, ...))
1581{ 1581{
1582 struct mount *mp; 1582 struct mount *mp;
1583 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) 1583 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
1584 vfs_mount_print(mp, full, pr); 1584 vfs_mount_print(mp, full, pr);
1585} 1585}
1586 1586
1587void 1587void
1588vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...)) 1588vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...))
1589{ 1589{
1590 char sbuf[256]; 1590 char sbuf[256];
1591 1591
1592 (*pr)("vnodecovered = %p data = %p\n", 1592 (*pr)("vnodecovered = %p data = %p\n",
1593 mp->mnt_vnodecovered,mp->mnt_data); 1593 mp->mnt_vnodecovered,mp->mnt_data);
1594 1594
1595 (*pr)("fs_bshift %d dev_bshift = %d\n", 1595 (*pr)("fs_bshift %d dev_bshift = %d\n",
1596 mp->mnt_fs_bshift,mp->mnt_dev_bshift); 1596 mp->mnt_fs_bshift,mp->mnt_dev_bshift);
1597 1597
1598 snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_flag); 1598 snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_flag);
1599 (*pr)("flag = %s\n", sbuf); 1599 (*pr)("flag = %s\n", sbuf);
1600 1600
1601 snprintb(sbuf, sizeof(sbuf), __IMNT_FLAG_BITS, mp->mnt_iflag); 1601 snprintb(sbuf, sizeof(sbuf), __IMNT_FLAG_BITS, mp->mnt_iflag);
1602 (*pr)("iflag = %s\n", sbuf); 1602 (*pr)("iflag = %s\n", sbuf);
1603 1603
1604 (*pr)("refcnt = %d updating @ %p\n", mp->mnt_refcnt, mp->mnt_updating); 1604 (*pr)("refcnt = %d updating @ %p\n", mp->mnt_refcnt, mp->mnt_updating);
1605 1605
1606 (*pr)("statvfs cache:\n"); 1606 (*pr)("statvfs cache:\n");
1607 (*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize); 1607 (*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
1608 (*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize); 1608 (*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
1609 (*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize); 1609 (*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
1610 1610
1611 (*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks); 1611 (*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks);
1612 (*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree); 1612 (*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree);
1613 (*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail); 1613 (*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail);
1614 (*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd); 1614 (*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd);
1615 1615
1616 (*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files); 1616 (*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files);
1617 (*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree); 1617 (*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree);
1618 (*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail); 1618 (*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail);
1619 (*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd); 1619 (*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd);
1620 1620
1621 (*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n", 1621 (*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
1622 mp->mnt_stat.f_fsidx.__fsid_val[0], 1622 mp->mnt_stat.f_fsidx.__fsid_val[0],
1623 mp->mnt_stat.f_fsidx.__fsid_val[1]); 1623 mp->mnt_stat.f_fsidx.__fsid_val[1]);
1624 1624
1625 (*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner); 1625 (*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
1626 (*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax); 1626 (*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
1627 1627
1628 snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_stat.f_flag); 1628 snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_stat.f_flag);
1629 1629
1630 (*pr)("\tflag = %s\n",sbuf); 1630 (*pr)("\tflag = %s\n",sbuf);
1631 (*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites); 1631 (*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites);
1632 (*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites); 1632 (*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
1633 (*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads); 1633 (*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads);
1634 (*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads); 1634 (*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads);
1635 (*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename); 1635 (*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
1636 (*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname); 1636 (*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
1637 (*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname); 1637 (*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
1638 1638
1639 { 1639 {
1640 int cnt = 0; 1640 int cnt = 0;
1641 vnode_t *vp; 1641 vnode_t *vp;
1642 vnode_impl_t *vip; 1642 vnode_impl_t *vip;
1643 (*pr)("locked vnodes ="); 1643 (*pr)("locked vnodes =");
1644 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) { 1644 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
1645 vp = VIMPL_TO_VNODE(vip); 1645 vp = VIMPL_TO_VNODE(vip);
1646 if (VOP_ISLOCKED(vp)) { 1646 if (VOP_ISLOCKED(vp)) {
1647 if ((++cnt % 6) == 0) { 1647 if ((++cnt % 6) == 0) {
1648 (*pr)(" %p,\n\t", vp); 1648 (*pr)(" %p,\n\t", vp);
1649 } else { 1649 } else {
1650 (*pr)(" %p,", vp); 1650 (*pr)(" %p,", vp);
1651 } 1651 }
1652 } 1652 }
1653 } 1653 }
1654 (*pr)("\n"); 1654 (*pr)("\n");
1655 } 1655 }
1656 1656
1657 if (full) { 1657 if (full) {
1658 int cnt = 0; 1658 int cnt = 0;
1659 vnode_t *vp; 1659 vnode_t *vp;
1660 vnode_impl_t *vip; 1660 vnode_impl_t *vip;
1661 (*pr)("all vnodes ="); 1661 (*pr)("all vnodes =");
1662 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) { 1662 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
1663 vp = VIMPL_TO_VNODE(vip); 1663 vp = VIMPL_TO_VNODE(vip);
1664 if (!TAILQ_NEXT(vip, vi_mntvnodes)) { 1664 if (!TAILQ_NEXT(vip, vi_mntvnodes)) {
1665 (*pr)(" %p", vp); 1665 (*pr)(" %p", vp);
1666 } else if ((++cnt % 6) == 0) { 1666 } else if ((++cnt % 6) == 0) {
1667 (*pr)(" %p,\n\t", vp); 1667 (*pr)(" %p,\n\t", vp);
1668 } else { 1668 } else {
1669 (*pr)(" %p,", vp); 1669 (*pr)(" %p,", vp);
1670 } 1670 }
1671 } 1671 }
1672 (*pr)("\n"); 1672 (*pr)("\n");
1673 } 1673 }
1674} 1674}
1675 1675
1676/* 1676/*
1677 * List all of the locked vnodes in the system. 1677 * List all of the locked vnodes in the system.
1678 */ 1678 */
1679void printlockedvnodes(void); 1679void printlockedvnodes(void);
1680 1680
1681void 1681void
1682printlockedvnodes(void) 1682printlockedvnodes(void)
1683{ 1683{
1684 struct mount *mp; 1684 struct mount *mp;
1685 vnode_t *vp; 1685 vnode_t *vp;
1686 vnode_impl_t *vip; 1686 vnode_impl_t *vip;
1687 1687
1688 printf("Locked vnodes\n"); 1688 printf("Locked vnodes\n");
1689 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) { 1689 for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
1690 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) { 1690 TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
1691 vp = VIMPL_TO_VNODE(vip); 1691 vp = VIMPL_TO_VNODE(vip);
1692 if (VOP_ISLOCKED(vp)) 1692 if (VOP_ISLOCKED(vp))
1693 vprint(NULL, vp); 1693 vprint(NULL, vp);
1694 } 1694 }
1695 } 1695 }
1696} 1696}
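
Since this helper (and its standalone prototype above) exists only under
DDB or DEBUGPRINT, the natural way to reach it is the debugger's
function-call command, e.g.:

	db> call printlockedvnodes()
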
1697 1697
1698#endif /* DDB || DEBUGPRINT */ 1698#endif /* DDB || DEBUGPRINT */