Tue Sep 29 11:54:53 2009 UTC
regen: remove VNODE_LOCKDEBUG


(pooka)
diff -r1.80 -r1.81 src/sys/kern/vnode_if.c
diff -r1.2 -r1.3 src/sys/rump/include/rump/rumpvnode_if.h
diff -r1.1 -r1.2 src/sys/rump/librump/rumpvfs/rumpvnode_if.c
diff -r1.74 -r1.75 src/sys/sys/vnode_if.h
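
Note: the regen drops the compile-time VNODE_LOCKDEBUG lock assertions from every generated VOP_*() wrapper, along with the now-unneeded #include "opt_vnode_lockdebug.h"; the mpsafe/KERNEL_LOCK handling around VCALL() is unchanged. A condensed excerpt of the removed pattern, taken from the VOP_OPEN() hunk below (the same shape is deleted from each wrapper):

	#ifdef VNODE_LOCKDEBUG
		int islocked_vp;
	#endif
		a.a_desc = VDESC(vop_open);
		a.a_vp = vp;
	#ifdef VNODE_LOCKDEBUG
		/* vop_open expects vp locked (1); ops such as vop_ioctl expect 0 */
		islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ?
		    (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
		if (islocked_vp != 1)
			panic("vop_open: vp: locked %d, expected %d", islocked_vp, 1);
	#endif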

cvs diff -r1.80 -r1.81 src/sys/kern/vnode_if.c

--- src/sys/kern/vnode_if.c 2008/11/17 08:59:33 1.80
+++ src/sys/kern/vnode_if.c 2009/09/29 11:54:52 1.81
@@ -1,23 +1,23 @@ @@ -1,23 +1,23 @@
1/* $NetBSD: vnode_if.c,v 1.80 2008/11/17 08:59:33 pooka Exp $ */ 1/* $NetBSD: vnode_if.c,v 1.81 2009/09/29 11:54:52 pooka Exp $ */
2 2
3/* 3/*
4 * Warning: DO NOT EDIT! This file is automatically generated! 4 * Warning: DO NOT EDIT! This file is automatically generated!
5 * (Modifications made here may easily be lost!) 5 * (Modifications made here may easily be lost!)
6 * 6 *
7 * Created from the file: 7 * Created from the file:
8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp 8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp
9 * by the script: 9 * by the script:
10 * NetBSD: vnode_if.sh,v 1.50 2008/11/17 08:46:03 pooka Exp 10 * NetBSD: vnode_if.sh,v 1.52 2009/09/29 11:51:02 pooka Exp
11 */ 11 */
12 12
13/* 13/*
14 * Copyright (c) 1992, 1993, 1994, 1995 14 * Copyright (c) 1992, 1993, 1994, 1995
15 * The Regents of the University of California. All rights reserved. 15 * The Regents of the University of California. All rights reserved.
16 * 16 *
17 * Redistribution and use in source and binary forms, with or without 17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions 18 * modification, are permitted provided that the following conditions
19 * are met: 19 * are met:
20 * 1. Redistributions of source code must retain the above copyright 20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer. 21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright 22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the 23 * notice, this list of conditions and the following disclaimer in the
@@ -30,30 +30,27 @@ @@ -30,30 +30,27 @@
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE. 39 * SUCH DAMAGE.
40 */ 40 */
41 41
42#include <sys/cdefs.h> 42#include <sys/cdefs.h>
43__KERNEL_RCSID(0, "$NetBSD: vnode_if.c,v 1.80 2008/11/17 08:59:33 pooka Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: vnode_if.c,v 1.81 2009/09/29 11:54:52 pooka Exp $");
44 
45 
46#include "opt_vnode_lockdebug.h" 
47 44
48#include <sys/param.h> 45#include <sys/param.h>
49#include <sys/mount.h> 46#include <sys/mount.h>
50#include <sys/buf.h> 47#include <sys/buf.h>
51#include <sys/vnode.h> 48#include <sys/vnode.h>
52#include <sys/lock.h> 49#include <sys/lock.h>
53 50
54const struct vnodeop_desc vop_default_desc = { 51const struct vnodeop_desc vop_default_desc = {
55 0, 52 0,
56 "default", 53 "default",
57 0, 54 0,
58 NULL, 55 NULL,
59 VDESC_NO_OFFSET, 56 VDESC_NO_OFFSET,
@@ -74,28 +71,26 @@ const struct vnodeop_desc vop_bwrite_des @@ -74,28 +71,26 @@ const struct vnodeop_desc vop_bwrite_des
74 0, 71 0,
75 vop_bwrite_vp_offsets, 72 vop_bwrite_vp_offsets,
76 VDESC_NO_OFFSET, 73 VDESC_NO_OFFSET,
77 VDESC_NO_OFFSET, 74 VDESC_NO_OFFSET,
78 VDESC_NO_OFFSET, 75 VDESC_NO_OFFSET,
79 NULL, 76 NULL,
80}; 77};
81int 78int
82VOP_BWRITE(struct buf *bp) 79VOP_BWRITE(struct buf *bp)
83{ 80{
84 int error; 81 int error;
85 bool mpsafe; 82 bool mpsafe;
86 struct vop_bwrite_args a; 83 struct vop_bwrite_args a;
87#ifdef VNODE_LOCKDEBUG 
88#endif 
89 a.a_desc = VDESC(vop_bwrite); 84 a.a_desc = VDESC(vop_bwrite);
90 a.a_bp = bp; 85 a.a_bp = bp;
91 mpsafe = (bp->b_vp->v_vflag & VV_MPSAFE); 86 mpsafe = (bp->b_vp->v_vflag & VV_MPSAFE);
92 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 87 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
93 error = (VCALL(bp->b_vp, VOFFSET(vop_bwrite), &a)); 88 error = (VCALL(bp->b_vp, VOFFSET(vop_bwrite), &a));
94 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 89 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
95 return error; 90 return error;
96} 91}
97 92
98/* End of special cases */ 93/* End of special cases */
99 94
100const int vop_lookup_vp_offsets[] = { 95const int vop_lookup_vp_offsets[] = {
101 VOPARG_OFFSETOF(struct vop_lookup_args,a_dvp), 96 VOPARG_OFFSETOF(struct vop_lookup_args,a_dvp),
@@ -109,28 +104,26 @@ const struct vnodeop_desc vop_lookup_des @@ -109,28 +104,26 @@ const struct vnodeop_desc vop_lookup_des
109 VOPARG_OFFSETOF(struct vop_lookup_args, a_vpp), 104 VOPARG_OFFSETOF(struct vop_lookup_args, a_vpp),
110 VDESC_NO_OFFSET, 105 VDESC_NO_OFFSET,
111 VOPARG_OFFSETOF(struct vop_lookup_args, a_cnp), 106 VOPARG_OFFSETOF(struct vop_lookup_args, a_cnp),
112 NULL, 107 NULL,
113}; 108};
114int 109int
115VOP_LOOKUP(struct vnode *dvp, 110VOP_LOOKUP(struct vnode *dvp,
116 struct vnode **vpp, 111 struct vnode **vpp,
117 struct componentname *cnp) 112 struct componentname *cnp)
118{ 113{
119 int error; 114 int error;
120 bool mpsafe; 115 bool mpsafe;
121 struct vop_lookup_args a; 116 struct vop_lookup_args a;
122#ifdef VNODE_LOCKDEBUG 
123#endif 
124 a.a_desc = VDESC(vop_lookup); 117 a.a_desc = VDESC(vop_lookup);
125 a.a_dvp = dvp; 118 a.a_dvp = dvp;
126 a.a_vpp = vpp; 119 a.a_vpp = vpp;
127 a.a_cnp = cnp; 120 a.a_cnp = cnp;
128 mpsafe = (dvp->v_vflag & VV_MPSAFE); 121 mpsafe = (dvp->v_vflag & VV_MPSAFE);
129 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 122 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
130 error = (VCALL(dvp, VOFFSET(vop_lookup), &a)); 123 error = (VCALL(dvp, VOFFSET(vop_lookup), &a));
131 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 124 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
132#ifdef DIAGNOSTIC 125#ifdef DIAGNOSTIC
133 if (error == 0) 126 if (error == 0)
134 KASSERT((*vpp)->v_size != VSIZENOTSET 127 KASSERT((*vpp)->v_size != VSIZENOTSET
135 && (*vpp)->v_writesize != VSIZENOTSET); 128 && (*vpp)->v_writesize != VSIZENOTSET);
136#endif /* DIAGNOSTIC */ 129#endif /* DIAGNOSTIC */
@@ -150,36 +143,28 @@ const struct vnodeop_desc vop_create_des @@ -150,36 +143,28 @@ const struct vnodeop_desc vop_create_des
150 VDESC_NO_OFFSET, 143 VDESC_NO_OFFSET,
151 VOPARG_OFFSETOF(struct vop_create_args, a_cnp), 144 VOPARG_OFFSETOF(struct vop_create_args, a_cnp),
152 NULL, 145 NULL,
153}; 146};
154int 147int
155VOP_CREATE(struct vnode *dvp, 148VOP_CREATE(struct vnode *dvp,
156 struct vnode **vpp, 149 struct vnode **vpp,
157 struct componentname *cnp, 150 struct componentname *cnp,
158 struct vattr *vap) 151 struct vattr *vap)
159{ 152{
160 int error; 153 int error;
161 bool mpsafe; 154 bool mpsafe;
162 struct vop_create_args a; 155 struct vop_create_args a;
163#ifdef VNODE_LOCKDEBUG 
164 int islocked_dvp; 
165#endif 
166 a.a_desc = VDESC(vop_create); 156 a.a_desc = VDESC(vop_create);
167 a.a_dvp = dvp; 157 a.a_dvp = dvp;
168#ifdef VNODE_LOCKDEBUG 
169 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
170 if (islocked_dvp != 1) 
171 panic("vop_create: dvp: locked %d, expected %d", islocked_dvp, 1); 
172#endif 
173 a.a_vpp = vpp; 158 a.a_vpp = vpp;
174 a.a_cnp = cnp; 159 a.a_cnp = cnp;
175 a.a_vap = vap; 160 a.a_vap = vap;
176 mpsafe = (dvp->v_vflag & VV_MPSAFE); 161 mpsafe = (dvp->v_vflag & VV_MPSAFE);
177 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 162 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
178 error = (VCALL(dvp, VOFFSET(vop_create), &a)); 163 error = (VCALL(dvp, VOFFSET(vop_create), &a));
179 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 164 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
180#ifdef DIAGNOSTIC 165#ifdef DIAGNOSTIC
181 if (error == 0) 166 if (error == 0)
182 KASSERT((*vpp)->v_size != VSIZENOTSET 167 KASSERT((*vpp)->v_size != VSIZENOTSET
183 && (*vpp)->v_writesize != VSIZENOTSET); 168 && (*vpp)->v_writesize != VSIZENOTSET);
184#endif /* DIAGNOSTIC */ 169#endif /* DIAGNOSTIC */
185 return error; 170 return error;
@@ -198,36 +183,28 @@ const struct vnodeop_desc vop_mknod_desc @@ -198,36 +183,28 @@ const struct vnodeop_desc vop_mknod_desc
198 VDESC_NO_OFFSET, 183 VDESC_NO_OFFSET,
199 VOPARG_OFFSETOF(struct vop_mknod_args, a_cnp), 184 VOPARG_OFFSETOF(struct vop_mknod_args, a_cnp),
200 NULL, 185 NULL,
201}; 186};
202int 187int
203VOP_MKNOD(struct vnode *dvp, 188VOP_MKNOD(struct vnode *dvp,
204 struct vnode **vpp, 189 struct vnode **vpp,
205 struct componentname *cnp, 190 struct componentname *cnp,
206 struct vattr *vap) 191 struct vattr *vap)
207{ 192{
208 int error; 193 int error;
209 bool mpsafe; 194 bool mpsafe;
210 struct vop_mknod_args a; 195 struct vop_mknod_args a;
211#ifdef VNODE_LOCKDEBUG 
212 int islocked_dvp; 
213#endif 
214 a.a_desc = VDESC(vop_mknod); 196 a.a_desc = VDESC(vop_mknod);
215 a.a_dvp = dvp; 197 a.a_dvp = dvp;
216#ifdef VNODE_LOCKDEBUG 
217 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
218 if (islocked_dvp != 1) 
219 panic("vop_mknod: dvp: locked %d, expected %d", islocked_dvp, 1); 
220#endif 
221 a.a_vpp = vpp; 198 a.a_vpp = vpp;
222 a.a_cnp = cnp; 199 a.a_cnp = cnp;
223 a.a_vap = vap; 200 a.a_vap = vap;
224 mpsafe = (dvp->v_vflag & VV_MPSAFE); 201 mpsafe = (dvp->v_vflag & VV_MPSAFE);
225 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 202 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
226 error = (VCALL(dvp, VOFFSET(vop_mknod), &a)); 203 error = (VCALL(dvp, VOFFSET(vop_mknod), &a));
227 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 204 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
228#ifdef DIAGNOSTIC 205#ifdef DIAGNOSTIC
229 if (error == 0) 206 if (error == 0)
230 KASSERT((*vpp)->v_size != VSIZENOTSET 207 KASSERT((*vpp)->v_size != VSIZENOTSET
231 && (*vpp)->v_writesize != VSIZENOTSET); 208 && (*vpp)->v_writesize != VSIZENOTSET);
232#endif /* DIAGNOSTIC */ 209#endif /* DIAGNOSTIC */
233 return error; 210 return error;
@@ -245,36 +222,28 @@ const struct vnodeop_desc vop_open_desc  @@ -245,36 +222,28 @@ const struct vnodeop_desc vop_open_desc
245 VDESC_NO_OFFSET, 222 VDESC_NO_OFFSET,
246 VOPARG_OFFSETOF(struct vop_open_args, a_cred), 223 VOPARG_OFFSETOF(struct vop_open_args, a_cred),
247 VDESC_NO_OFFSET, 224 VDESC_NO_OFFSET,
248 NULL, 225 NULL,
249}; 226};
250int 227int
251VOP_OPEN(struct vnode *vp, 228VOP_OPEN(struct vnode *vp,
252 int mode, 229 int mode,
253 kauth_cred_t cred) 230 kauth_cred_t cred)
254{ 231{
255 int error; 232 int error;
256 bool mpsafe; 233 bool mpsafe;
257 struct vop_open_args a; 234 struct vop_open_args a;
258#ifdef VNODE_LOCKDEBUG 
259 int islocked_vp; 
260#endif 
261 a.a_desc = VDESC(vop_open); 235 a.a_desc = VDESC(vop_open);
262 a.a_vp = vp; 236 a.a_vp = vp;
263#ifdef VNODE_LOCKDEBUG 
264 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
265 if (islocked_vp != 1) 
266 panic("vop_open: vp: locked %d, expected %d", islocked_vp, 1); 
267#endif 
268 a.a_mode = mode; 237 a.a_mode = mode;
269 a.a_cred = cred; 238 a.a_cred = cred;
270 mpsafe = (vp->v_vflag & VV_MPSAFE); 239 mpsafe = (vp->v_vflag & VV_MPSAFE);
271 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 240 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
272 error = (VCALL(vp, VOFFSET(vop_open), &a)); 241 error = (VCALL(vp, VOFFSET(vop_open), &a));
273 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 242 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
274 return error; 243 return error;
275} 244}
276 245
277const int vop_close_vp_offsets[] = { 246const int vop_close_vp_offsets[] = {
278 VOPARG_OFFSETOF(struct vop_close_args,a_vp), 247 VOPARG_OFFSETOF(struct vop_close_args,a_vp),
279 VDESC_NO_OFFSET 248 VDESC_NO_OFFSET
280}; 249};
@@ -286,36 +255,28 @@ const struct vnodeop_desc vop_close_desc @@ -286,36 +255,28 @@ const struct vnodeop_desc vop_close_desc
286 VDESC_NO_OFFSET, 255 VDESC_NO_OFFSET,
287 VOPARG_OFFSETOF(struct vop_close_args, a_cred), 256 VOPARG_OFFSETOF(struct vop_close_args, a_cred),
288 VDESC_NO_OFFSET, 257 VDESC_NO_OFFSET,
289 NULL, 258 NULL,
290}; 259};
291int 260int
292VOP_CLOSE(struct vnode *vp, 261VOP_CLOSE(struct vnode *vp,
293 int fflag, 262 int fflag,
294 kauth_cred_t cred) 263 kauth_cred_t cred)
295{ 264{
296 int error; 265 int error;
297 bool mpsafe; 266 bool mpsafe;
298 struct vop_close_args a; 267 struct vop_close_args a;
299#ifdef VNODE_LOCKDEBUG 
300 int islocked_vp; 
301#endif 
302 a.a_desc = VDESC(vop_close); 268 a.a_desc = VDESC(vop_close);
303 a.a_vp = vp; 269 a.a_vp = vp;
304#ifdef VNODE_LOCKDEBUG 
305 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
306 if (islocked_vp != 1) 
307 panic("vop_close: vp: locked %d, expected %d", islocked_vp, 1); 
308#endif 
309 a.a_fflag = fflag; 270 a.a_fflag = fflag;
310 a.a_cred = cred; 271 a.a_cred = cred;
311 mpsafe = (vp->v_vflag & VV_MPSAFE); 272 mpsafe = (vp->v_vflag & VV_MPSAFE);
312 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 273 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
313 error = (VCALL(vp, VOFFSET(vop_close), &a)); 274 error = (VCALL(vp, VOFFSET(vop_close), &a));
314 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 275 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
315 return error; 276 return error;
316} 277}
317 278
318const int vop_access_vp_offsets[] = { 279const int vop_access_vp_offsets[] = {
319 VOPARG_OFFSETOF(struct vop_access_args,a_vp), 280 VOPARG_OFFSETOF(struct vop_access_args,a_vp),
320 VDESC_NO_OFFSET 281 VDESC_NO_OFFSET
321}; 282};
@@ -327,36 +288,28 @@ const struct vnodeop_desc vop_access_des @@ -327,36 +288,28 @@ const struct vnodeop_desc vop_access_des
327 VDESC_NO_OFFSET, 288 VDESC_NO_OFFSET,
328 VOPARG_OFFSETOF(struct vop_access_args, a_cred), 289 VOPARG_OFFSETOF(struct vop_access_args, a_cred),
329 VDESC_NO_OFFSET, 290 VDESC_NO_OFFSET,
330 NULL, 291 NULL,
331}; 292};
332int 293int
333VOP_ACCESS(struct vnode *vp, 294VOP_ACCESS(struct vnode *vp,
334 int mode, 295 int mode,
335 kauth_cred_t cred) 296 kauth_cred_t cred)
336{ 297{
337 int error; 298 int error;
338 bool mpsafe; 299 bool mpsafe;
339 struct vop_access_args a; 300 struct vop_access_args a;
340#ifdef VNODE_LOCKDEBUG 
341 int islocked_vp; 
342#endif 
343 a.a_desc = VDESC(vop_access); 301 a.a_desc = VDESC(vop_access);
344 a.a_vp = vp; 302 a.a_vp = vp;
345#ifdef VNODE_LOCKDEBUG 
346 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
347 if (islocked_vp != 1) 
348 panic("vop_access: vp: locked %d, expected %d", islocked_vp, 1); 
349#endif 
350 a.a_mode = mode; 303 a.a_mode = mode;
351 a.a_cred = cred; 304 a.a_cred = cred;
352 mpsafe = (vp->v_vflag & VV_MPSAFE); 305 mpsafe = (vp->v_vflag & VV_MPSAFE);
353 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 306 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
354 error = (VCALL(vp, VOFFSET(vop_access), &a)); 307 error = (VCALL(vp, VOFFSET(vop_access), &a));
355 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 308 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
356 return error; 309 return error;
357} 310}
358 311
359const int vop_getattr_vp_offsets[] = { 312const int vop_getattr_vp_offsets[] = {
360 VOPARG_OFFSETOF(struct vop_getattr_args,a_vp), 313 VOPARG_OFFSETOF(struct vop_getattr_args,a_vp),
361 VDESC_NO_OFFSET 314 VDESC_NO_OFFSET
362}; 315};
@@ -368,28 +321,26 @@ const struct vnodeop_desc vop_getattr_de @@ -368,28 +321,26 @@ const struct vnodeop_desc vop_getattr_de
368 VDESC_NO_OFFSET, 321 VDESC_NO_OFFSET,
369 VOPARG_OFFSETOF(struct vop_getattr_args, a_cred), 322 VOPARG_OFFSETOF(struct vop_getattr_args, a_cred),
370 VDESC_NO_OFFSET, 323 VDESC_NO_OFFSET,
371 NULL, 324 NULL,
372}; 325};
373int 326int
374VOP_GETATTR(struct vnode *vp, 327VOP_GETATTR(struct vnode *vp,
375 struct vattr *vap, 328 struct vattr *vap,
376 kauth_cred_t cred) 329 kauth_cred_t cred)
377{ 330{
378 int error; 331 int error;
379 bool mpsafe; 332 bool mpsafe;
380 struct vop_getattr_args a; 333 struct vop_getattr_args a;
381#ifdef VNODE_LOCKDEBUG 
382#endif 
383 a.a_desc = VDESC(vop_getattr); 334 a.a_desc = VDESC(vop_getattr);
384 a.a_vp = vp; 335 a.a_vp = vp;
385 a.a_vap = vap; 336 a.a_vap = vap;
386 a.a_cred = cred; 337 a.a_cred = cred;
387 mpsafe = (vp->v_vflag & VV_MPSAFE); 338 mpsafe = (vp->v_vflag & VV_MPSAFE);
388 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 339 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
389 error = (VCALL(vp, VOFFSET(vop_getattr), &a)); 340 error = (VCALL(vp, VOFFSET(vop_getattr), &a));
390 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 341 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
391 return error; 342 return error;
392} 343}
393 344
394const int vop_setattr_vp_offsets[] = { 345const int vop_setattr_vp_offsets[] = {
395 VOPARG_OFFSETOF(struct vop_setattr_args,a_vp), 346 VOPARG_OFFSETOF(struct vop_setattr_args,a_vp),
@@ -403,36 +354,28 @@ const struct vnodeop_desc vop_setattr_de @@ -403,36 +354,28 @@ const struct vnodeop_desc vop_setattr_de
403 VDESC_NO_OFFSET, 354 VDESC_NO_OFFSET,
404 VOPARG_OFFSETOF(struct vop_setattr_args, a_cred), 355 VOPARG_OFFSETOF(struct vop_setattr_args, a_cred),
405 VDESC_NO_OFFSET, 356 VDESC_NO_OFFSET,
406 NULL, 357 NULL,
407}; 358};
408int 359int
409VOP_SETATTR(struct vnode *vp, 360VOP_SETATTR(struct vnode *vp,
410 struct vattr *vap, 361 struct vattr *vap,
411 kauth_cred_t cred) 362 kauth_cred_t cred)
412{ 363{
413 int error; 364 int error;
414 bool mpsafe; 365 bool mpsafe;
415 struct vop_setattr_args a; 366 struct vop_setattr_args a;
416#ifdef VNODE_LOCKDEBUG 
417 int islocked_vp; 
418#endif 
419 a.a_desc = VDESC(vop_setattr); 367 a.a_desc = VDESC(vop_setattr);
420 a.a_vp = vp; 368 a.a_vp = vp;
421#ifdef VNODE_LOCKDEBUG 
422 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
423 if (islocked_vp != 1) 
424 panic("vop_setattr: vp: locked %d, expected %d", islocked_vp, 1); 
425#endif 
426 a.a_vap = vap; 369 a.a_vap = vap;
427 a.a_cred = cred; 370 a.a_cred = cred;
428 mpsafe = (vp->v_vflag & VV_MPSAFE); 371 mpsafe = (vp->v_vflag & VV_MPSAFE);
429 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 372 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
430 error = (VCALL(vp, VOFFSET(vop_setattr), &a)); 373 error = (VCALL(vp, VOFFSET(vop_setattr), &a));
431 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 374 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
432 return error; 375 return error;
433} 376}
434 377
435const int vop_read_vp_offsets[] = { 378const int vop_read_vp_offsets[] = {
436 VOPARG_OFFSETOF(struct vop_read_args,a_vp), 379 VOPARG_OFFSETOF(struct vop_read_args,a_vp),
437 VDESC_NO_OFFSET 380 VDESC_NO_OFFSET
438}; 381};
@@ -445,36 +388,28 @@ const struct vnodeop_desc vop_read_desc  @@ -445,36 +388,28 @@ const struct vnodeop_desc vop_read_desc
445 VOPARG_OFFSETOF(struct vop_read_args, a_cred), 388 VOPARG_OFFSETOF(struct vop_read_args, a_cred),
446 VDESC_NO_OFFSET, 389 VDESC_NO_OFFSET,
447 NULL, 390 NULL,
448}; 391};
449int 392int
450VOP_READ(struct vnode *vp, 393VOP_READ(struct vnode *vp,
451 struct uio *uio, 394 struct uio *uio,
452 int ioflag, 395 int ioflag,
453 kauth_cred_t cred) 396 kauth_cred_t cred)
454{ 397{
455 int error; 398 int error;
456 bool mpsafe; 399 bool mpsafe;
457 struct vop_read_args a; 400 struct vop_read_args a;
458#ifdef VNODE_LOCKDEBUG 
459 int islocked_vp; 
460#endif 
461 a.a_desc = VDESC(vop_read); 401 a.a_desc = VDESC(vop_read);
462 a.a_vp = vp; 402 a.a_vp = vp;
463#ifdef VNODE_LOCKDEBUG 
464 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
465 if (islocked_vp != 1) 
466 panic("vop_read: vp: locked %d, expected %d", islocked_vp, 1); 
467#endif 
468 a.a_uio = uio; 403 a.a_uio = uio;
469 a.a_ioflag = ioflag; 404 a.a_ioflag = ioflag;
470 a.a_cred = cred; 405 a.a_cred = cred;
471 mpsafe = (vp->v_vflag & VV_MPSAFE); 406 mpsafe = (vp->v_vflag & VV_MPSAFE);
472 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 407 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
473 error = (VCALL(vp, VOFFSET(vop_read), &a)); 408 error = (VCALL(vp, VOFFSET(vop_read), &a));
474 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 409 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
475 return error; 410 return error;
476} 411}
477 412
478const int vop_write_vp_offsets[] = { 413const int vop_write_vp_offsets[] = {
479 VOPARG_OFFSETOF(struct vop_write_args,a_vp), 414 VOPARG_OFFSETOF(struct vop_write_args,a_vp),
480 VDESC_NO_OFFSET 415 VDESC_NO_OFFSET
@@ -488,36 +423,28 @@ const struct vnodeop_desc vop_write_desc @@ -488,36 +423,28 @@ const struct vnodeop_desc vop_write_desc
488 VOPARG_OFFSETOF(struct vop_write_args, a_cred), 423 VOPARG_OFFSETOF(struct vop_write_args, a_cred),
489 VDESC_NO_OFFSET, 424 VDESC_NO_OFFSET,
490 NULL, 425 NULL,
491}; 426};
492int 427int
493VOP_WRITE(struct vnode *vp, 428VOP_WRITE(struct vnode *vp,
494 struct uio *uio, 429 struct uio *uio,
495 int ioflag, 430 int ioflag,
496 kauth_cred_t cred) 431 kauth_cred_t cred)
497{ 432{
498 int error; 433 int error;
499 bool mpsafe; 434 bool mpsafe;
500 struct vop_write_args a; 435 struct vop_write_args a;
501#ifdef VNODE_LOCKDEBUG 
502 int islocked_vp; 
503#endif 
504 a.a_desc = VDESC(vop_write); 436 a.a_desc = VDESC(vop_write);
505 a.a_vp = vp; 437 a.a_vp = vp;
506#ifdef VNODE_LOCKDEBUG 
507 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
508 if (islocked_vp != 1) 
509 panic("vop_write: vp: locked %d, expected %d", islocked_vp, 1); 
510#endif 
511 a.a_uio = uio; 438 a.a_uio = uio;
512 a.a_ioflag = ioflag; 439 a.a_ioflag = ioflag;
513 a.a_cred = cred; 440 a.a_cred = cred;
514 mpsafe = (vp->v_vflag & VV_MPSAFE); 441 mpsafe = (vp->v_vflag & VV_MPSAFE);
515 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 442 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
516 error = (VCALL(vp, VOFFSET(vop_write), &a)); 443 error = (VCALL(vp, VOFFSET(vop_write), &a));
517 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 444 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
518 return error; 445 return error;
519} 446}
520 447
521const int vop_ioctl_vp_offsets[] = { 448const int vop_ioctl_vp_offsets[] = {
522 VOPARG_OFFSETOF(struct vop_ioctl_args,a_vp), 449 VOPARG_OFFSETOF(struct vop_ioctl_args,a_vp),
523 VDESC_NO_OFFSET 450 VDESC_NO_OFFSET
@@ -532,36 +459,28 @@ const struct vnodeop_desc vop_ioctl_desc @@ -532,36 +459,28 @@ const struct vnodeop_desc vop_ioctl_desc
532 VDESC_NO_OFFSET, 459 VDESC_NO_OFFSET,
533 NULL, 460 NULL,
534}; 461};
535int 462int
536VOP_IOCTL(struct vnode *vp, 463VOP_IOCTL(struct vnode *vp,
537 u_long command, 464 u_long command,
538 void *data, 465 void *data,
539 int fflag, 466 int fflag,
540 kauth_cred_t cred) 467 kauth_cred_t cred)
541{ 468{
542 int error; 469 int error;
543 bool mpsafe; 470 bool mpsafe;
544 struct vop_ioctl_args a; 471 struct vop_ioctl_args a;
545#ifdef VNODE_LOCKDEBUG 
546 int islocked_vp; 
547#endif 
548 a.a_desc = VDESC(vop_ioctl); 472 a.a_desc = VDESC(vop_ioctl);
549 a.a_vp = vp; 473 a.a_vp = vp;
550#ifdef VNODE_LOCKDEBUG 
551 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
552 if (islocked_vp != 0) 
553 panic("vop_ioctl: vp: locked %d, expected %d", islocked_vp, 0); 
554#endif 
555 a.a_command = command; 474 a.a_command = command;
556 a.a_data = data; 475 a.a_data = data;
557 a.a_fflag = fflag; 476 a.a_fflag = fflag;
558 a.a_cred = cred; 477 a.a_cred = cred;
559 mpsafe = (vp->v_vflag & VV_MPSAFE); 478 mpsafe = (vp->v_vflag & VV_MPSAFE);
560 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 479 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
561 error = (VCALL(vp, VOFFSET(vop_ioctl), &a)); 480 error = (VCALL(vp, VOFFSET(vop_ioctl), &a));
562 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 481 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
563 return error; 482 return error;
564} 483}
565 484
566const int vop_fcntl_vp_offsets[] = { 485const int vop_fcntl_vp_offsets[] = {
567 VOPARG_OFFSETOF(struct vop_fcntl_args,a_vp), 486 VOPARG_OFFSETOF(struct vop_fcntl_args,a_vp),
@@ -577,36 +496,28 @@ const struct vnodeop_desc vop_fcntl_desc @@ -577,36 +496,28 @@ const struct vnodeop_desc vop_fcntl_desc
577 VDESC_NO_OFFSET, 496 VDESC_NO_OFFSET,
578 NULL, 497 NULL,
579}; 498};
580int 499int
581VOP_FCNTL(struct vnode *vp, 500VOP_FCNTL(struct vnode *vp,
582 u_int command, 501 u_int command,
583 void *data, 502 void *data,
584 int fflag, 503 int fflag,
585 kauth_cred_t cred) 504 kauth_cred_t cred)
586{ 505{
587 int error; 506 int error;
588 bool mpsafe; 507 bool mpsafe;
589 struct vop_fcntl_args a; 508 struct vop_fcntl_args a;
590#ifdef VNODE_LOCKDEBUG 
591 int islocked_vp; 
592#endif 
593 a.a_desc = VDESC(vop_fcntl); 509 a.a_desc = VDESC(vop_fcntl);
594 a.a_vp = vp; 510 a.a_vp = vp;
595#ifdef VNODE_LOCKDEBUG 
596 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
597 if (islocked_vp != 0) 
598 panic("vop_fcntl: vp: locked %d, expected %d", islocked_vp, 0); 
599#endif 
600 a.a_command = command; 511 a.a_command = command;
601 a.a_data = data; 512 a.a_data = data;
602 a.a_fflag = fflag; 513 a.a_fflag = fflag;
603 a.a_cred = cred; 514 a.a_cred = cred;
604 mpsafe = (vp->v_vflag & VV_MPSAFE); 515 mpsafe = (vp->v_vflag & VV_MPSAFE);
605 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 516 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
606 error = (VCALL(vp, VOFFSET(vop_fcntl), &a)); 517 error = (VCALL(vp, VOFFSET(vop_fcntl), &a));
607 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 518 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
608 return error; 519 return error;
609} 520}
610 521
611const int vop_poll_vp_offsets[] = { 522const int vop_poll_vp_offsets[] = {
612 VOPARG_OFFSETOF(struct vop_poll_args,a_vp), 523 VOPARG_OFFSETOF(struct vop_poll_args,a_vp),
@@ -619,36 +530,28 @@ const struct vnodeop_desc vop_poll_desc  @@ -619,36 +530,28 @@ const struct vnodeop_desc vop_poll_desc
619 vop_poll_vp_offsets, 530 vop_poll_vp_offsets,
620 VDESC_NO_OFFSET, 531 VDESC_NO_OFFSET,
621 VDESC_NO_OFFSET, 532 VDESC_NO_OFFSET,
622 VDESC_NO_OFFSET, 533 VDESC_NO_OFFSET,
623 NULL, 534 NULL,
624}; 535};
625int 536int
626VOP_POLL(struct vnode *vp, 537VOP_POLL(struct vnode *vp,
627 int events) 538 int events)
628{ 539{
629 int error; 540 int error;
630 bool mpsafe; 541 bool mpsafe;
631 struct vop_poll_args a; 542 struct vop_poll_args a;
632#ifdef VNODE_LOCKDEBUG 
633 int islocked_vp; 
634#endif 
635 a.a_desc = VDESC(vop_poll); 543 a.a_desc = VDESC(vop_poll);
636 a.a_vp = vp; 544 a.a_vp = vp;
637#ifdef VNODE_LOCKDEBUG 
638 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
639 if (islocked_vp != 0) 
640 panic("vop_poll: vp: locked %d, expected %d", islocked_vp, 0); 
641#endif 
642 a.a_events = events; 545 a.a_events = events;
643 mpsafe = (vp->v_vflag & VV_MPSAFE); 546 mpsafe = (vp->v_vflag & VV_MPSAFE);
644 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 547 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
645 error = (VCALL(vp, VOFFSET(vop_poll), &a)); 548 error = (VCALL(vp, VOFFSET(vop_poll), &a));
646 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 549 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
647 return error; 550 return error;
648} 551}
649 552
650const int vop_kqfilter_vp_offsets[] = { 553const int vop_kqfilter_vp_offsets[] = {
651 VOPARG_OFFSETOF(struct vop_kqfilter_args,a_vp), 554 VOPARG_OFFSETOF(struct vop_kqfilter_args,a_vp),
652 VDESC_NO_OFFSET 555 VDESC_NO_OFFSET
653}; 556};
654const struct vnodeop_desc vop_kqfilter_desc = { 557const struct vnodeop_desc vop_kqfilter_desc = {
@@ -658,36 +561,28 @@ const struct vnodeop_desc vop_kqfilter_d @@ -658,36 +561,28 @@ const struct vnodeop_desc vop_kqfilter_d
658 vop_kqfilter_vp_offsets, 561 vop_kqfilter_vp_offsets,
659 VDESC_NO_OFFSET, 562 VDESC_NO_OFFSET,
660 VDESC_NO_OFFSET, 563 VDESC_NO_OFFSET,
661 VDESC_NO_OFFSET, 564 VDESC_NO_OFFSET,
662 NULL, 565 NULL,
663}; 566};
664int 567int
665VOP_KQFILTER(struct vnode *vp, 568VOP_KQFILTER(struct vnode *vp,
666 struct knote *kn) 569 struct knote *kn)
667{ 570{
668 int error; 571 int error;
669 bool mpsafe; 572 bool mpsafe;
670 struct vop_kqfilter_args a; 573 struct vop_kqfilter_args a;
671#ifdef VNODE_LOCKDEBUG 
672 int islocked_vp; 
673#endif 
674 a.a_desc = VDESC(vop_kqfilter); 574 a.a_desc = VDESC(vop_kqfilter);
675 a.a_vp = vp; 575 a.a_vp = vp;
676#ifdef VNODE_LOCKDEBUG 
677 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
678 if (islocked_vp != 0) 
679 panic("vop_kqfilter: vp: locked %d, expected %d", islocked_vp, 0); 
680#endif 
681 a.a_kn = kn; 576 a.a_kn = kn;
682 mpsafe = (vp->v_vflag & VV_MPSAFE); 577 mpsafe = (vp->v_vflag & VV_MPSAFE);
683 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 578 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
684 error = (VCALL(vp, VOFFSET(vop_kqfilter), &a)); 579 error = (VCALL(vp, VOFFSET(vop_kqfilter), &a));
685 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 580 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
686 return error; 581 return error;
687} 582}
688 583
689const int vop_revoke_vp_offsets[] = { 584const int vop_revoke_vp_offsets[] = {
690 VOPARG_OFFSETOF(struct vop_revoke_args,a_vp), 585 VOPARG_OFFSETOF(struct vop_revoke_args,a_vp),
691 VDESC_NO_OFFSET 586 VDESC_NO_OFFSET
692}; 587};
693const struct vnodeop_desc vop_revoke_desc = { 588const struct vnodeop_desc vop_revoke_desc = {
@@ -697,36 +592,28 @@ const struct vnodeop_desc vop_revoke_des @@ -697,36 +592,28 @@ const struct vnodeop_desc vop_revoke_des
697 vop_revoke_vp_offsets, 592 vop_revoke_vp_offsets,
698 VDESC_NO_OFFSET, 593 VDESC_NO_OFFSET,
699 VDESC_NO_OFFSET, 594 VDESC_NO_OFFSET,
700 VDESC_NO_OFFSET, 595 VDESC_NO_OFFSET,
701 NULL, 596 NULL,
702}; 597};
703int 598int
704VOP_REVOKE(struct vnode *vp, 599VOP_REVOKE(struct vnode *vp,
705 int flags) 600 int flags)
706{ 601{
707 int error; 602 int error;
708 bool mpsafe; 603 bool mpsafe;
709 struct vop_revoke_args a; 604 struct vop_revoke_args a;
710#ifdef VNODE_LOCKDEBUG 
711 int islocked_vp; 
712#endif 
713 a.a_desc = VDESC(vop_revoke); 605 a.a_desc = VDESC(vop_revoke);
714 a.a_vp = vp; 606 a.a_vp = vp;
715#ifdef VNODE_LOCKDEBUG 
716 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
717 if (islocked_vp != 0) 
718 panic("vop_revoke: vp: locked %d, expected %d", islocked_vp, 0); 
719#endif 
720 a.a_flags = flags; 607 a.a_flags = flags;
721 mpsafe = (vp->v_vflag & VV_MPSAFE); 608 mpsafe = (vp->v_vflag & VV_MPSAFE);
722 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 609 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
723 error = (VCALL(vp, VOFFSET(vop_revoke), &a)); 610 error = (VCALL(vp, VOFFSET(vop_revoke), &a));
724 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 611 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
725 return error; 612 return error;
726} 613}
727 614
728const int vop_mmap_vp_offsets[] = { 615const int vop_mmap_vp_offsets[] = {
729 VOPARG_OFFSETOF(struct vop_mmap_args,a_vp), 616 VOPARG_OFFSETOF(struct vop_mmap_args,a_vp),
730 VDESC_NO_OFFSET 617 VDESC_NO_OFFSET
731}; 618};
732const struct vnodeop_desc vop_mmap_desc = { 619const struct vnodeop_desc vop_mmap_desc = {
@@ -737,28 +624,26 @@ const struct vnodeop_desc vop_mmap_desc  @@ -737,28 +624,26 @@ const struct vnodeop_desc vop_mmap_desc
737 VDESC_NO_OFFSET, 624 VDESC_NO_OFFSET,
738 VOPARG_OFFSETOF(struct vop_mmap_args, a_cred), 625 VOPARG_OFFSETOF(struct vop_mmap_args, a_cred),
739 VDESC_NO_OFFSET, 626 VDESC_NO_OFFSET,
740 NULL, 627 NULL,
741}; 628};
742int 629int
743VOP_MMAP(struct vnode *vp, 630VOP_MMAP(struct vnode *vp,
744 vm_prot_t prot, 631 vm_prot_t prot,
745 kauth_cred_t cred) 632 kauth_cred_t cred)
746{ 633{
747 int error; 634 int error;
748 bool mpsafe; 635 bool mpsafe;
749 struct vop_mmap_args a; 636 struct vop_mmap_args a;
750#ifdef VNODE_LOCKDEBUG 
751#endif 
752 a.a_desc = VDESC(vop_mmap); 637 a.a_desc = VDESC(vop_mmap);
753 a.a_vp = vp; 638 a.a_vp = vp;
754 a.a_prot = prot; 639 a.a_prot = prot;
755 a.a_cred = cred; 640 a.a_cred = cred;
756 mpsafe = (vp->v_vflag & VV_MPSAFE); 641 mpsafe = (vp->v_vflag & VV_MPSAFE);
757 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 642 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
758 error = (VCALL(vp, VOFFSET(vop_mmap), &a)); 643 error = (VCALL(vp, VOFFSET(vop_mmap), &a));
759 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 644 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
760 return error; 645 return error;
761} 646}
762 647
763const int vop_fsync_vp_offsets[] = { 648const int vop_fsync_vp_offsets[] = {
764 VOPARG_OFFSETOF(struct vop_fsync_args,a_vp), 649 VOPARG_OFFSETOF(struct vop_fsync_args,a_vp),
@@ -774,36 +659,28 @@ const struct vnodeop_desc vop_fsync_desc @@ -774,36 +659,28 @@ const struct vnodeop_desc vop_fsync_desc
774 VDESC_NO_OFFSET, 659 VDESC_NO_OFFSET,
775 NULL, 660 NULL,
776}; 661};
777int 662int
778VOP_FSYNC(struct vnode *vp, 663VOP_FSYNC(struct vnode *vp,
779 kauth_cred_t cred, 664 kauth_cred_t cred,
780 int flags, 665 int flags,
781 off_t offlo, 666 off_t offlo,
782 off_t offhi) 667 off_t offhi)
783{ 668{
784 int error; 669 int error;
785 bool mpsafe; 670 bool mpsafe;
786 struct vop_fsync_args a; 671 struct vop_fsync_args a;
787#ifdef VNODE_LOCKDEBUG 
788 int islocked_vp; 
789#endif 
790 a.a_desc = VDESC(vop_fsync); 672 a.a_desc = VDESC(vop_fsync);
791 a.a_vp = vp; 673 a.a_vp = vp;
792#ifdef VNODE_LOCKDEBUG 
793 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
794 if (islocked_vp != 1) 
795 panic("vop_fsync: vp: locked %d, expected %d", islocked_vp, 1); 
796#endif 
797 a.a_cred = cred; 674 a.a_cred = cred;
798 a.a_flags = flags; 675 a.a_flags = flags;
799 a.a_offlo = offlo; 676 a.a_offlo = offlo;
800 a.a_offhi = offhi; 677 a.a_offhi = offhi;
801 mpsafe = (vp->v_vflag & VV_MPSAFE); 678 mpsafe = (vp->v_vflag & VV_MPSAFE);
802 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 679 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
803 error = (VCALL(vp, VOFFSET(vop_fsync), &a)); 680 error = (VCALL(vp, VOFFSET(vop_fsync), &a));
804 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 681 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
805 return error; 682 return error;
806} 683}
807 684
808const int vop_seek_vp_offsets[] = { 685const int vop_seek_vp_offsets[] = {
809 VOPARG_OFFSETOF(struct vop_seek_args,a_vp), 686 VOPARG_OFFSETOF(struct vop_seek_args,a_vp),
@@ -818,28 +695,26 @@ const struct vnodeop_desc vop_seek_desc  @@ -818,28 +695,26 @@ const struct vnodeop_desc vop_seek_desc
818 VOPARG_OFFSETOF(struct vop_seek_args, a_cred), 695 VOPARG_OFFSETOF(struct vop_seek_args, a_cred),
819 VDESC_NO_OFFSET, 696 VDESC_NO_OFFSET,
820 NULL, 697 NULL,
821}; 698};
822int 699int
823VOP_SEEK(struct vnode *vp, 700VOP_SEEK(struct vnode *vp,
824 off_t oldoff, 701 off_t oldoff,
825 off_t newoff, 702 off_t newoff,
826 kauth_cred_t cred) 703 kauth_cred_t cred)
827{ 704{
828 int error; 705 int error;
829 bool mpsafe; 706 bool mpsafe;
830 struct vop_seek_args a; 707 struct vop_seek_args a;
831#ifdef VNODE_LOCKDEBUG 
832#endif 
833 a.a_desc = VDESC(vop_seek); 708 a.a_desc = VDESC(vop_seek);
834 a.a_vp = vp; 709 a.a_vp = vp;
835 a.a_oldoff = oldoff; 710 a.a_oldoff = oldoff;
836 a.a_newoff = newoff; 711 a.a_newoff = newoff;
837 a.a_cred = cred; 712 a.a_cred = cred;
838 mpsafe = (vp->v_vflag & VV_MPSAFE); 713 mpsafe = (vp->v_vflag & VV_MPSAFE);
839 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 714 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
840 error = (VCALL(vp, VOFFSET(vop_seek), &a)); 715 error = (VCALL(vp, VOFFSET(vop_seek), &a));
841 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 716 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
842 return error; 717 return error;
843} 718}
844 719
845const int vop_remove_vp_offsets[] = { 720const int vop_remove_vp_offsets[] = {
@@ -855,43 +730,29 @@ const struct vnodeop_desc vop_remove_des @@ -855,43 +730,29 @@ const struct vnodeop_desc vop_remove_des
855 VDESC_NO_OFFSET, 730 VDESC_NO_OFFSET,
856 VDESC_NO_OFFSET, 731 VDESC_NO_OFFSET,
857 VOPARG_OFFSETOF(struct vop_remove_args, a_cnp), 732 VOPARG_OFFSETOF(struct vop_remove_args, a_cnp),
858 NULL, 733 NULL,
859}; 734};
860int 735int
861VOP_REMOVE(struct vnode *dvp, 736VOP_REMOVE(struct vnode *dvp,
862 struct vnode *vp, 737 struct vnode *vp,
863 struct componentname *cnp) 738 struct componentname *cnp)
864{ 739{
865 int error; 740 int error;
866 bool mpsafe; 741 bool mpsafe;
867 struct vop_remove_args a; 742 struct vop_remove_args a;
868#ifdef VNODE_LOCKDEBUG 
869 int islocked_dvp; 
870 int islocked_vp; 
871#endif 
872 a.a_desc = VDESC(vop_remove); 743 a.a_desc = VDESC(vop_remove);
873 a.a_dvp = dvp; 744 a.a_dvp = dvp;
874#ifdef VNODE_LOCKDEBUG 745 a.a_vp = vp;
875 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
876 if (islocked_dvp != 1) 
877 panic("vop_remove: dvp: locked %d, expected %d", islocked_dvp, 1); 
878#endif 
879 a.a_vp = vp; 
880#ifdef VNODE_LOCKDEBUG 
881 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
882 if (islocked_vp != 1) 
883 panic("vop_remove: vp: locked %d, expected %d", islocked_vp, 1); 
884#endif 
885 a.a_cnp = cnp; 746 a.a_cnp = cnp;
886 mpsafe = (dvp->v_vflag & VV_MPSAFE); 747 mpsafe = (dvp->v_vflag & VV_MPSAFE);
887 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 748 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
888 error = (VCALL(dvp, VOFFSET(vop_remove), &a)); 749 error = (VCALL(dvp, VOFFSET(vop_remove), &a));
889 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 750 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
890 return error; 751 return error;
891} 752}
892 753
893const int vop_link_vp_offsets[] = { 754const int vop_link_vp_offsets[] = {
894 VOPARG_OFFSETOF(struct vop_link_args,a_dvp), 755 VOPARG_OFFSETOF(struct vop_link_args,a_dvp),
895 VOPARG_OFFSETOF(struct vop_link_args,a_vp), 756 VOPARG_OFFSETOF(struct vop_link_args,a_vp),
896 VDESC_NO_OFFSET 757 VDESC_NO_OFFSET
897}; 758};
@@ -903,43 +764,29 @@ const struct vnodeop_desc vop_link_desc  @@ -903,43 +764,29 @@ const struct vnodeop_desc vop_link_desc
903 VDESC_NO_OFFSET, 764 VDESC_NO_OFFSET,
904 VDESC_NO_OFFSET, 765 VDESC_NO_OFFSET,
905 VOPARG_OFFSETOF(struct vop_link_args, a_cnp), 766 VOPARG_OFFSETOF(struct vop_link_args, a_cnp),
906 NULL, 767 NULL,
907}; 768};
908int 769int
909VOP_LINK(struct vnode *dvp, 770VOP_LINK(struct vnode *dvp,
910 struct vnode *vp, 771 struct vnode *vp,
911 struct componentname *cnp) 772 struct componentname *cnp)
912{ 773{
913 int error; 774 int error;
914 bool mpsafe; 775 bool mpsafe;
915 struct vop_link_args a; 776 struct vop_link_args a;
916#ifdef VNODE_LOCKDEBUG 
917 int islocked_dvp; 
918 int islocked_vp; 
919#endif 
920 a.a_desc = VDESC(vop_link); 777 a.a_desc = VDESC(vop_link);
921 a.a_dvp = dvp; 778 a.a_dvp = dvp;
922#ifdef VNODE_LOCKDEBUG 779 a.a_vp = vp;
923 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
924 if (islocked_dvp != 1) 
925 panic("vop_link: dvp: locked %d, expected %d", islocked_dvp, 1); 
926#endif 
927 a.a_vp = vp; 
928#ifdef VNODE_LOCKDEBUG 
929 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
930 if (islocked_vp != 0) 
931 panic("vop_link: vp: locked %d, expected %d", islocked_vp, 0); 
932#endif 
933 a.a_cnp = cnp; 780 a.a_cnp = cnp;
934 mpsafe = (dvp->v_vflag & VV_MPSAFE); 781 mpsafe = (dvp->v_vflag & VV_MPSAFE);
935 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 782 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
936 error = (VCALL(dvp, VOFFSET(vop_link), &a)); 783 error = (VCALL(dvp, VOFFSET(vop_link), &a));
937 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 784 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
938 return error; 785 return error;
939} 786}
940 787
941const int vop_rename_vp_offsets[] = { 788const int vop_rename_vp_offsets[] = {
942 VOPARG_OFFSETOF(struct vop_rename_args,a_fdvp), 789 VOPARG_OFFSETOF(struct vop_rename_args,a_fdvp),
943 VOPARG_OFFSETOF(struct vop_rename_args,a_fvp), 790 VOPARG_OFFSETOF(struct vop_rename_args,a_fvp),
944 VOPARG_OFFSETOF(struct vop_rename_args,a_tdvp), 791 VOPARG_OFFSETOF(struct vop_rename_args,a_tdvp),
945 VOPARG_OFFSETOF(struct vop_rename_args,a_tvp), 792 VOPARG_OFFSETOF(struct vop_rename_args,a_tvp),
@@ -956,51 +803,31 @@ const struct vnodeop_desc vop_rename_des @@ -956,51 +803,31 @@ const struct vnodeop_desc vop_rename_des
956 NULL, 803 NULL,
957}; 804};
958int 805int
959VOP_RENAME(struct vnode *fdvp, 806VOP_RENAME(struct vnode *fdvp,
960 struct vnode *fvp, 807 struct vnode *fvp,
961 struct componentname *fcnp, 808 struct componentname *fcnp,
962 struct vnode *tdvp, 809 struct vnode *tdvp,
963 struct vnode *tvp, 810 struct vnode *tvp,
964 struct componentname *tcnp) 811 struct componentname *tcnp)
965{ 812{
966 int error; 813 int error;
967 bool mpsafe; 814 bool mpsafe;
968 struct vop_rename_args a; 815 struct vop_rename_args a;
969#ifdef VNODE_LOCKDEBUG 
970 int islocked_fdvp; 
971 int islocked_fvp; 
972 int islocked_tdvp; 
973#endif 
974 a.a_desc = VDESC(vop_rename); 816 a.a_desc = VDESC(vop_rename);
975 a.a_fdvp = fdvp; 817 a.a_fdvp = fdvp;
976#ifdef VNODE_LOCKDEBUG 
977 islocked_fdvp = (fdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fdvp) == LK_EXCLUSIVE) : 0; 
978 if (islocked_fdvp != 0) 
979 panic("vop_rename: fdvp: locked %d, expected %d", islocked_fdvp, 0); 
980#endif 
981 a.a_fvp = fvp; 818 a.a_fvp = fvp;
982#ifdef VNODE_LOCKDEBUG 
983 islocked_fvp = (fvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fvp) == LK_EXCLUSIVE) : 0; 
984 if (islocked_fvp != 0) 
985 panic("vop_rename: fvp: locked %d, expected %d", islocked_fvp, 0); 
986#endif 
987 a.a_fcnp = fcnp; 819 a.a_fcnp = fcnp;
988 a.a_tdvp = tdvp; 820 a.a_tdvp = tdvp;
989#ifdef VNODE_LOCKDEBUG 
990 islocked_tdvp = (tdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(tdvp) == LK_EXCLUSIVE) : 1; 
991 if (islocked_tdvp != 1) 
992 panic("vop_rename: tdvp: locked %d, expected %d", islocked_tdvp, 1); 
993#endif 
994 a.a_tvp = tvp; 821 a.a_tvp = tvp;
995 a.a_tcnp = tcnp; 822 a.a_tcnp = tcnp;
996 mpsafe = (fdvp->v_vflag & VV_MPSAFE); 823 mpsafe = (fdvp->v_vflag & VV_MPSAFE);
997 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 824 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
998 error = (VCALL(fdvp, VOFFSET(vop_rename), &a)); 825 error = (VCALL(fdvp, VOFFSET(vop_rename), &a));
999 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 826 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1000 return error; 827 return error;
1001} 828}
1002 829
1003const int vop_mkdir_vp_offsets[] = { 830const int vop_mkdir_vp_offsets[] = {
1004 VOPARG_OFFSETOF(struct vop_mkdir_args,a_dvp), 831 VOPARG_OFFSETOF(struct vop_mkdir_args,a_dvp),
1005 VDESC_NO_OFFSET 832 VDESC_NO_OFFSET
1006}; 833};
@@ -1013,36 +840,28 @@ const struct vnodeop_desc vop_mkdir_desc @@ -1013,36 +840,28 @@ const struct vnodeop_desc vop_mkdir_desc
1013 VDESC_NO_OFFSET, 840 VDESC_NO_OFFSET,
1014 VOPARG_OFFSETOF(struct vop_mkdir_args, a_cnp), 841 VOPARG_OFFSETOF(struct vop_mkdir_args, a_cnp),
1015 NULL, 842 NULL,
1016}; 843};
1017int 844int
1018VOP_MKDIR(struct vnode *dvp, 845VOP_MKDIR(struct vnode *dvp,
1019 struct vnode **vpp, 846 struct vnode **vpp,
1020 struct componentname *cnp, 847 struct componentname *cnp,
1021 struct vattr *vap) 848 struct vattr *vap)
1022{ 849{
1023 int error; 850 int error;
1024 bool mpsafe; 851 bool mpsafe;
1025 struct vop_mkdir_args a; 852 struct vop_mkdir_args a;
1026#ifdef VNODE_LOCKDEBUG 
1027 int islocked_dvp; 
1028#endif 
1029 a.a_desc = VDESC(vop_mkdir); 853 a.a_desc = VDESC(vop_mkdir);
1030 a.a_dvp = dvp; 854 a.a_dvp = dvp;
1031#ifdef VNODE_LOCKDEBUG 
1032 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1033 if (islocked_dvp != 1) 
1034 panic("vop_mkdir: dvp: locked %d, expected %d", islocked_dvp, 1); 
1035#endif 
1036 a.a_vpp = vpp; 855 a.a_vpp = vpp;
1037 a.a_cnp = cnp; 856 a.a_cnp = cnp;
1038 a.a_vap = vap; 857 a.a_vap = vap;
1039 mpsafe = (dvp->v_vflag & VV_MPSAFE); 858 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1040 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 859 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1041 error = (VCALL(dvp, VOFFSET(vop_mkdir), &a)); 860 error = (VCALL(dvp, VOFFSET(vop_mkdir), &a));
1042 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 861 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1043#ifdef DIAGNOSTIC 862#ifdef DIAGNOSTIC
1044 if (error == 0) 863 if (error == 0)
1045 KASSERT((*vpp)->v_size != VSIZENOTSET 864 KASSERT((*vpp)->v_size != VSIZENOTSET
1046 && (*vpp)->v_writesize != VSIZENOTSET); 865 && (*vpp)->v_writesize != VSIZENOTSET);
1047#endif /* DIAGNOSTIC */ 866#endif /* DIAGNOSTIC */
1048 return error; 867 return error;
@@ -1061,43 +880,29 @@ const struct vnodeop_desc vop_rmdir_desc @@ -1061,43 +880,29 @@ const struct vnodeop_desc vop_rmdir_desc
1061 VDESC_NO_OFFSET, 880 VDESC_NO_OFFSET,
1062 VDESC_NO_OFFSET, 881 VDESC_NO_OFFSET,
1063 VOPARG_OFFSETOF(struct vop_rmdir_args, a_cnp), 882 VOPARG_OFFSETOF(struct vop_rmdir_args, a_cnp),
1064 NULL, 883 NULL,
1065}; 884};
1066int 885int
1067VOP_RMDIR(struct vnode *dvp, 886VOP_RMDIR(struct vnode *dvp,
1068 struct vnode *vp, 887 struct vnode *vp,
1069 struct componentname *cnp) 888 struct componentname *cnp)
1070{ 889{
1071 int error; 890 int error;
1072 bool mpsafe; 891 bool mpsafe;
1073 struct vop_rmdir_args a; 892 struct vop_rmdir_args a;
1074#ifdef VNODE_LOCKDEBUG 
1075 int islocked_dvp; 
1076 int islocked_vp; 
1077#endif 
1078 a.a_desc = VDESC(vop_rmdir); 893 a.a_desc = VDESC(vop_rmdir);
1079 a.a_dvp = dvp; 894 a.a_dvp = dvp;
1080#ifdef VNODE_LOCKDEBUG 895 a.a_vp = vp;
1081 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1082 if (islocked_dvp != 1) 
1083 panic("vop_rmdir: dvp: locked %d, expected %d", islocked_dvp, 1); 
1084#endif 
1085 a.a_vp = vp; 
1086#ifdef VNODE_LOCKDEBUG 
1087 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1088 if (islocked_vp != 1) 
1089 panic("vop_rmdir: vp: locked %d, expected %d", islocked_vp, 1); 
1090#endif 
1091 a.a_cnp = cnp; 896 a.a_cnp = cnp;
1092 mpsafe = (dvp->v_vflag & VV_MPSAFE); 897 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1093 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 898 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1094 error = (VCALL(dvp, VOFFSET(vop_rmdir), &a)); 899 error = (VCALL(dvp, VOFFSET(vop_rmdir), &a));
1095 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 900 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1096 return error; 901 return error;
1097} 902}
1098 903
1099const int vop_symlink_vp_offsets[] = { 904const int vop_symlink_vp_offsets[] = {
1100 VOPARG_OFFSETOF(struct vop_symlink_args,a_dvp), 905 VOPARG_OFFSETOF(struct vop_symlink_args,a_dvp),
1101 VDESC_NO_OFFSET 906 VDESC_NO_OFFSET
1102}; 907};
1103const struct vnodeop_desc vop_symlink_desc = { 908const struct vnodeop_desc vop_symlink_desc = {
@@ -1110,36 +915,28 @@ const struct vnodeop_desc vop_symlink_de @@ -1110,36 +915,28 @@ const struct vnodeop_desc vop_symlink_de
1110 VOPARG_OFFSETOF(struct vop_symlink_args, a_cnp), 915 VOPARG_OFFSETOF(struct vop_symlink_args, a_cnp),
1111 NULL, 916 NULL,
1112}; 917};
1113int 918int
1114VOP_SYMLINK(struct vnode *dvp, 919VOP_SYMLINK(struct vnode *dvp,
1115 struct vnode **vpp, 920 struct vnode **vpp,
1116 struct componentname *cnp, 921 struct componentname *cnp,
1117 struct vattr *vap, 922 struct vattr *vap,
1118 char *target) 923 char *target)
1119{ 924{
1120 int error; 925 int error;
1121 bool mpsafe; 926 bool mpsafe;
1122 struct vop_symlink_args a; 927 struct vop_symlink_args a;
1123#ifdef VNODE_LOCKDEBUG 
1124 int islocked_dvp; 
1125#endif 
1126 a.a_desc = VDESC(vop_symlink); 928 a.a_desc = VDESC(vop_symlink);
1127 a.a_dvp = dvp; 929 a.a_dvp = dvp;
1128#ifdef VNODE_LOCKDEBUG 
1129 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1130 if (islocked_dvp != 1) 
1131 panic("vop_symlink: dvp: locked %d, expected %d", islocked_dvp, 1); 
1132#endif 
1133 a.a_vpp = vpp; 930 a.a_vpp = vpp;
1134 a.a_cnp = cnp; 931 a.a_cnp = cnp;
1135 a.a_vap = vap; 932 a.a_vap = vap;
1136 a.a_target = target; 933 a.a_target = target;
1137 mpsafe = (dvp->v_vflag & VV_MPSAFE); 934 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1138 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 935 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1139 error = (VCALL(dvp, VOFFSET(vop_symlink), &a)); 936 error = (VCALL(dvp, VOFFSET(vop_symlink), &a));
1140 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 937 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1141#ifdef DIAGNOSTIC 938#ifdef DIAGNOSTIC
1142 if (error == 0) 939 if (error == 0)
1143 KASSERT((*vpp)->v_size != VSIZENOTSET 940 KASSERT((*vpp)->v_size != VSIZENOTSET
1144 && (*vpp)->v_writesize != VSIZENOTSET); 941 && (*vpp)->v_writesize != VSIZENOTSET);
1145#endif /* DIAGNOSTIC */ 942#endif /* DIAGNOSTIC */
@@ -1161,36 +958,28 @@ const struct vnodeop_desc vop_readdir_de @@ -1161,36 +958,28 @@ const struct vnodeop_desc vop_readdir_de
1161 NULL, 958 NULL,
1162}; 959};
1163int 960int
1164VOP_READDIR(struct vnode *vp, 961VOP_READDIR(struct vnode *vp,
1165 struct uio *uio, 962 struct uio *uio,
1166 kauth_cred_t cred, 963 kauth_cred_t cred,
1167 int *eofflag, 964 int *eofflag,
1168 off_t **cookies, 965 off_t **cookies,
1169 int *ncookies) 966 int *ncookies)
1170{ 967{
1171 int error; 968 int error;
1172 bool mpsafe; 969 bool mpsafe;
1173 struct vop_readdir_args a; 970 struct vop_readdir_args a;
1174#ifdef VNODE_LOCKDEBUG 
1175 int islocked_vp; 
1176#endif 
1177 a.a_desc = VDESC(vop_readdir); 971 a.a_desc = VDESC(vop_readdir);
1178 a.a_vp = vp; 972 a.a_vp = vp;
1179#ifdef VNODE_LOCKDEBUG 
1180 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1181 if (islocked_vp != 1) 
1182 panic("vop_readdir: vp: locked %d, expected %d", islocked_vp, 1); 
1183#endif 
1184 a.a_uio = uio; 973 a.a_uio = uio;
1185 a.a_cred = cred; 974 a.a_cred = cred;
1186 a.a_eofflag = eofflag; 975 a.a_eofflag = eofflag;
1187 a.a_cookies = cookies; 976 a.a_cookies = cookies;
1188 a.a_ncookies = ncookies; 977 a.a_ncookies = ncookies;
1189 mpsafe = (vp->v_vflag & VV_MPSAFE); 978 mpsafe = (vp->v_vflag & VV_MPSAFE);
1190 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 979 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1191 error = (VCALL(vp, VOFFSET(vop_readdir), &a)); 980 error = (VCALL(vp, VOFFSET(vop_readdir), &a));
1192 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 981 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1193 return error; 982 return error;
1194} 983}
1195 984
1196const int vop_readlink_vp_offsets[] = { 985const int vop_readlink_vp_offsets[] = {
@@ -1205,36 +994,28 @@ const struct vnodeop_desc vop_readlink_d @@ -1205,36 +994,28 @@ const struct vnodeop_desc vop_readlink_d
1205 VDESC_NO_OFFSET, 994 VDESC_NO_OFFSET,
1206 VOPARG_OFFSETOF(struct vop_readlink_args, a_cred), 995 VOPARG_OFFSETOF(struct vop_readlink_args, a_cred),
1207 VDESC_NO_OFFSET, 996 VDESC_NO_OFFSET,
1208 NULL, 997 NULL,
1209}; 998};
1210int 999int
1211VOP_READLINK(struct vnode *vp, 1000VOP_READLINK(struct vnode *vp,
1212 struct uio *uio, 1001 struct uio *uio,
1213 kauth_cred_t cred) 1002 kauth_cred_t cred)
1214{ 1003{
1215 int error; 1004 int error;
1216 bool mpsafe; 1005 bool mpsafe;
1217 struct vop_readlink_args a; 1006 struct vop_readlink_args a;
1218#ifdef VNODE_LOCKDEBUG 
1219 int islocked_vp; 
1220#endif 
1221 a.a_desc = VDESC(vop_readlink); 1007 a.a_desc = VDESC(vop_readlink);
1222 a.a_vp = vp; 1008 a.a_vp = vp;
1223#ifdef VNODE_LOCKDEBUG 
1224 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1225 if (islocked_vp != 1) 
1226 panic("vop_readlink: vp: locked %d, expected %d", islocked_vp, 1); 
1227#endif 
1228 a.a_uio = uio; 1009 a.a_uio = uio;
1229 a.a_cred = cred; 1010 a.a_cred = cred;
1230 mpsafe = (vp->v_vflag & VV_MPSAFE); 1011 mpsafe = (vp->v_vflag & VV_MPSAFE);
1231 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1012 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1232 error = (VCALL(vp, VOFFSET(vop_readlink), &a)); 1013 error = (VCALL(vp, VOFFSET(vop_readlink), &a));
1233 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1014 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1234 return error; 1015 return error;
1235} 1016}
1236 1017
1237const int vop_abortop_vp_offsets[] = { 1018const int vop_abortop_vp_offsets[] = {
1238 VOPARG_OFFSETOF(struct vop_abortop_args,a_dvp), 1019 VOPARG_OFFSETOF(struct vop_abortop_args,a_dvp),
1239 VDESC_NO_OFFSET 1020 VDESC_NO_OFFSET
1240}; 1021};
@@ -1245,28 +1026,26 @@ const struct vnodeop_desc vop_abortop_de @@ -1245,28 +1026,26 @@ const struct vnodeop_desc vop_abortop_de
1245 vop_abortop_vp_offsets, 1026 vop_abortop_vp_offsets,
1246 VDESC_NO_OFFSET, 1027 VDESC_NO_OFFSET,
1247 VDESC_NO_OFFSET, 1028 VDESC_NO_OFFSET,
1248 VOPARG_OFFSETOF(struct vop_abortop_args, a_cnp), 1029 VOPARG_OFFSETOF(struct vop_abortop_args, a_cnp),
1249 NULL, 1030 NULL,
1250}; 1031};
1251int 1032int
1252VOP_ABORTOP(struct vnode *dvp, 1033VOP_ABORTOP(struct vnode *dvp,
1253 struct componentname *cnp) 1034 struct componentname *cnp)
1254{ 1035{
1255 int error; 1036 int error;
1256 bool mpsafe; 1037 bool mpsafe;
1257 struct vop_abortop_args a; 1038 struct vop_abortop_args a;
1258#ifdef VNODE_LOCKDEBUG 
1259#endif 
1260 a.a_desc = VDESC(vop_abortop); 1039 a.a_desc = VDESC(vop_abortop);
1261 a.a_dvp = dvp; 1040 a.a_dvp = dvp;
1262 a.a_cnp = cnp; 1041 a.a_cnp = cnp;
1263 mpsafe = (dvp->v_vflag & VV_MPSAFE); 1042 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1264 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1043 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1265 error = (VCALL(dvp, VOFFSET(vop_abortop), &a)); 1044 error = (VCALL(dvp, VOFFSET(vop_abortop), &a));
1266 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1045 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1267 return error; 1046 return error;
1268} 1047}
1269 1048
1270const int vop_inactive_vp_offsets[] = { 1049const int vop_inactive_vp_offsets[] = {
1271 VOPARG_OFFSETOF(struct vop_inactive_args,a_vp), 1050 VOPARG_OFFSETOF(struct vop_inactive_args,a_vp),
1272 VDESC_NO_OFFSET 1051 VDESC_NO_OFFSET
@@ -1278,36 +1057,28 @@ const struct vnodeop_desc vop_inactive_d @@ -1278,36 +1057,28 @@ const struct vnodeop_desc vop_inactive_d
1278 vop_inactive_vp_offsets, 1057 vop_inactive_vp_offsets,
1279 VDESC_NO_OFFSET, 1058 VDESC_NO_OFFSET,
1280 VDESC_NO_OFFSET, 1059 VDESC_NO_OFFSET,
1281 VDESC_NO_OFFSET, 1060 VDESC_NO_OFFSET,
1282 NULL, 1061 NULL,
1283}; 1062};
1284int 1063int
1285VOP_INACTIVE(struct vnode *vp, 1064VOP_INACTIVE(struct vnode *vp,
1286 bool *recycle) 1065 bool *recycle)
1287{ 1066{
1288 int error; 1067 int error;
1289 bool mpsafe; 1068 bool mpsafe;
1290 struct vop_inactive_args a; 1069 struct vop_inactive_args a;
1291#ifdef VNODE_LOCKDEBUG 
1292 int islocked_vp; 
1293#endif 
1294 a.a_desc = VDESC(vop_inactive); 1070 a.a_desc = VDESC(vop_inactive);
1295 a.a_vp = vp; 1071 a.a_vp = vp;
1296#ifdef VNODE_LOCKDEBUG 
1297 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1298 if (islocked_vp != 1) 
1299 panic("vop_inactive: vp: locked %d, expected %d", islocked_vp, 1); 
1300#endif 
1301 a.a_recycle = recycle; 1072 a.a_recycle = recycle;
1302 mpsafe = (vp->v_vflag & VV_MPSAFE); 1073 mpsafe = (vp->v_vflag & VV_MPSAFE);
1303 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1074 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1304 error = (VCALL(vp, VOFFSET(vop_inactive), &a)); 1075 error = (VCALL(vp, VOFFSET(vop_inactive), &a));
1305 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1076 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1306 return error; 1077 return error;
1307} 1078}
1308 1079
1309const int vop_reclaim_vp_offsets[] = { 1080const int vop_reclaim_vp_offsets[] = {
1310 VOPARG_OFFSETOF(struct vop_reclaim_args,a_vp), 1081 VOPARG_OFFSETOF(struct vop_reclaim_args,a_vp),
1311 VDESC_NO_OFFSET 1082 VDESC_NO_OFFSET
1312}; 1083};
1313const struct vnodeop_desc vop_reclaim_desc = { 1084const struct vnodeop_desc vop_reclaim_desc = {
@@ -1316,36 +1087,28 @@ const struct vnodeop_desc vop_reclaim_de @@ -1316,36 +1087,28 @@ const struct vnodeop_desc vop_reclaim_de
1316 0, 1087 0,
1317 vop_reclaim_vp_offsets, 1088 vop_reclaim_vp_offsets,
1318 VDESC_NO_OFFSET, 1089 VDESC_NO_OFFSET,
1319 VDESC_NO_OFFSET, 1090 VDESC_NO_OFFSET,
1320 VDESC_NO_OFFSET, 1091 VDESC_NO_OFFSET,
1321 NULL, 1092 NULL,
1322}; 1093};
1323int 1094int
1324VOP_RECLAIM(struct vnode *vp) 1095VOP_RECLAIM(struct vnode *vp)
1325{ 1096{
1326 int error; 1097 int error;
1327 bool mpsafe; 1098 bool mpsafe;
1328 struct vop_reclaim_args a; 1099 struct vop_reclaim_args a;
1329#ifdef VNODE_LOCKDEBUG 
1330 int islocked_vp; 
1331#endif 
1332 a.a_desc = VDESC(vop_reclaim); 1100 a.a_desc = VDESC(vop_reclaim);
1333 a.a_vp = vp; 1101 a.a_vp = vp;
1334#ifdef VNODE_LOCKDEBUG 
1335 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1336 if (islocked_vp != 0) 
1337 panic("vop_reclaim: vp: locked %d, expected %d", islocked_vp, 0); 
1338#endif 
1339 mpsafe = (vp->v_vflag & VV_MPSAFE); 1102 mpsafe = (vp->v_vflag & VV_MPSAFE);
1340 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1103 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1341 error = (VCALL(vp, VOFFSET(vop_reclaim), &a)); 1104 error = (VCALL(vp, VOFFSET(vop_reclaim), &a));
1342 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1105 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1343 return error; 1106 return error;
1344} 1107}
1345 1108
1346const int vop_lock_vp_offsets[] = { 1109const int vop_lock_vp_offsets[] = {
1347 VOPARG_OFFSETOF(struct vop_lock_args,a_vp), 1110 VOPARG_OFFSETOF(struct vop_lock_args,a_vp),
1348 VDESC_NO_OFFSET 1111 VDESC_NO_OFFSET
1349}; 1112};
1350const struct vnodeop_desc vop_lock_desc = { 1113const struct vnodeop_desc vop_lock_desc = {
1351 VOP_LOCK_DESCOFFSET, 1114 VOP_LOCK_DESCOFFSET,
@@ -1354,36 +1117,28 @@ const struct vnodeop_desc vop_lock_desc  @@ -1354,36 +1117,28 @@ const struct vnodeop_desc vop_lock_desc
1354 vop_lock_vp_offsets, 1117 vop_lock_vp_offsets,
1355 VDESC_NO_OFFSET, 1118 VDESC_NO_OFFSET,
1356 VDESC_NO_OFFSET, 1119 VDESC_NO_OFFSET,
1357 VDESC_NO_OFFSET, 1120 VDESC_NO_OFFSET,
1358 NULL, 1121 NULL,
1359}; 1122};
1360int 1123int
1361VOP_LOCK(struct vnode *vp, 1124VOP_LOCK(struct vnode *vp,
1362 int flags) 1125 int flags)
1363{ 1126{
1364 int error; 1127 int error;
1365 bool mpsafe; 1128 bool mpsafe;
1366 struct vop_lock_args a; 1129 struct vop_lock_args a;
1367#ifdef VNODE_LOCKDEBUG 
1368 int islocked_vp; 
1369#endif 
1370 a.a_desc = VDESC(vop_lock); 1130 a.a_desc = VDESC(vop_lock);
1371 a.a_vp = vp; 1131 a.a_vp = vp;
1372#ifdef VNODE_LOCKDEBUG 
1373 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1374 if (islocked_vp != 0) 
1375 panic("vop_lock: vp: locked %d, expected %d", islocked_vp, 0); 
1376#endif 
1377 a.a_flags = flags; 1132 a.a_flags = flags;
1378 mpsafe = (vp->v_vflag & VV_MPSAFE); 1133 mpsafe = (vp->v_vflag & VV_MPSAFE);
1379 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1134 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1380 error = (VCALL(vp, VOFFSET(vop_lock), &a)); 1135 error = (VCALL(vp, VOFFSET(vop_lock), &a));
1381 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1136 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1382 return error; 1137 return error;
1383} 1138}
1384 1139
1385const int vop_unlock_vp_offsets[] = { 1140const int vop_unlock_vp_offsets[] = {
1386 VOPARG_OFFSETOF(struct vop_unlock_args,a_vp), 1141 VOPARG_OFFSETOF(struct vop_unlock_args,a_vp),
1387 VDESC_NO_OFFSET 1142 VDESC_NO_OFFSET
1388}; 1143};
1389const struct vnodeop_desc vop_unlock_desc = { 1144const struct vnodeop_desc vop_unlock_desc = {
@@ -1393,36 +1148,28 @@ const struct vnodeop_desc vop_unlock_des @@ -1393,36 +1148,28 @@ const struct vnodeop_desc vop_unlock_des
1393 vop_unlock_vp_offsets, 1148 vop_unlock_vp_offsets,
1394 VDESC_NO_OFFSET, 1149 VDESC_NO_OFFSET,
1395 VDESC_NO_OFFSET, 1150 VDESC_NO_OFFSET,
1396 VDESC_NO_OFFSET, 1151 VDESC_NO_OFFSET,
1397 NULL, 1152 NULL,
1398}; 1153};
1399int 1154int
1400VOP_UNLOCK(struct vnode *vp, 1155VOP_UNLOCK(struct vnode *vp,
1401 int flags) 1156 int flags)
1402{ 1157{
1403 int error; 1158 int error;
1404 bool mpsafe; 1159 bool mpsafe;
1405 struct vop_unlock_args a; 1160 struct vop_unlock_args a;
1406#ifdef VNODE_LOCKDEBUG 
1407 int islocked_vp; 
1408#endif 
1409 a.a_desc = VDESC(vop_unlock); 1161 a.a_desc = VDESC(vop_unlock);
1410 a.a_vp = vp; 1162 a.a_vp = vp;
1411#ifdef VNODE_LOCKDEBUG 
1412 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1413 if (islocked_vp != 1) 
1414 panic("vop_unlock: vp: locked %d, expected %d", islocked_vp, 1); 
1415#endif 
1416 a.a_flags = flags; 1163 a.a_flags = flags;
1417 mpsafe = (vp->v_vflag & VV_MPSAFE); 1164 mpsafe = (vp->v_vflag & VV_MPSAFE);
1418 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1165 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1419 error = (VCALL(vp, VOFFSET(vop_unlock), &a)); 1166 error = (VCALL(vp, VOFFSET(vop_unlock), &a));
1420 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1167 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1421 return error; 1168 return error;
1422} 1169}
1423 1170
1424const int vop_bmap_vp_offsets[] = { 1171const int vop_bmap_vp_offsets[] = {
1425 VOPARG_OFFSETOF(struct vop_bmap_args,a_vp), 1172 VOPARG_OFFSETOF(struct vop_bmap_args,a_vp),
1426 VDESC_NO_OFFSET 1173 VDESC_NO_OFFSET
1427}; 1174};
1428const struct vnodeop_desc vop_bmap_desc = { 1175const struct vnodeop_desc vop_bmap_desc = {
@@ -1435,28 +1182,26 @@ const struct vnodeop_desc vop_bmap_desc  @@ -1435,28 +1182,26 @@ const struct vnodeop_desc vop_bmap_desc
1435 VDESC_NO_OFFSET, 1182 VDESC_NO_OFFSET,
1436 NULL, 1183 NULL,
1437}; 1184};
1438int 1185int
1439VOP_BMAP(struct vnode *vp, 1186VOP_BMAP(struct vnode *vp,
1440 daddr_t bn, 1187 daddr_t bn,
1441 struct vnode **vpp, 1188 struct vnode **vpp,
1442 daddr_t *bnp, 1189 daddr_t *bnp,
1443 int *runp) 1190 int *runp)
1444{ 1191{
1445 int error; 1192 int error;
1446 bool mpsafe; 1193 bool mpsafe;
1447 struct vop_bmap_args a; 1194 struct vop_bmap_args a;
1448#ifdef VNODE_LOCKDEBUG 
1449#endif 
1450 a.a_desc = VDESC(vop_bmap); 1195 a.a_desc = VDESC(vop_bmap);
1451 a.a_vp = vp; 1196 a.a_vp = vp;
1452 a.a_bn = bn; 1197 a.a_bn = bn;
1453 a.a_vpp = vpp; 1198 a.a_vpp = vpp;
1454 a.a_bnp = bnp; 1199 a.a_bnp = bnp;
1455 a.a_runp = runp; 1200 a.a_runp = runp;
1456 mpsafe = (vp->v_vflag & VV_MPSAFE); 1201 mpsafe = (vp->v_vflag & VV_MPSAFE);
1457 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1202 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1458 error = (VCALL(vp, VOFFSET(vop_bmap), &a)); 1203 error = (VCALL(vp, VOFFSET(vop_bmap), &a));
1459 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1204 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1460 return error; 1205 return error;
1461} 1206}
1462 1207
@@ -1471,28 +1216,26 @@ const struct vnodeop_desc vop_strategy_d @@ -1471,28 +1216,26 @@ const struct vnodeop_desc vop_strategy_d
1471 vop_strategy_vp_offsets, 1216 vop_strategy_vp_offsets,
1472 VDESC_NO_OFFSET, 1217 VDESC_NO_OFFSET,
1473 VDESC_NO_OFFSET, 1218 VDESC_NO_OFFSET,
1474 VDESC_NO_OFFSET, 1219 VDESC_NO_OFFSET,
1475 NULL, 1220 NULL,
1476}; 1221};
1477int 1222int
1478VOP_STRATEGY(struct vnode *vp, 1223VOP_STRATEGY(struct vnode *vp,
1479 struct buf *bp) 1224 struct buf *bp)
1480{ 1225{
1481 int error; 1226 int error;
1482 bool mpsafe; 1227 bool mpsafe;
1483 struct vop_strategy_args a; 1228 struct vop_strategy_args a;
1484#ifdef VNODE_LOCKDEBUG 
1485#endif 
1486 a.a_desc = VDESC(vop_strategy); 1229 a.a_desc = VDESC(vop_strategy);
1487 a.a_vp = vp; 1230 a.a_vp = vp;
1488 a.a_bp = bp; 1231 a.a_bp = bp;
1489 mpsafe = (vp->v_vflag & VV_MPSAFE); 1232 mpsafe = (vp->v_vflag & VV_MPSAFE);
1490 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1233 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1491 error = (VCALL(vp, VOFFSET(vop_strategy), &a)); 1234 error = (VCALL(vp, VOFFSET(vop_strategy), &a));
1492 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1235 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1493 return error; 1236 return error;
1494} 1237}
1495 1238
1496const int vop_print_vp_offsets[] = { 1239const int vop_print_vp_offsets[] = {
1497 VOPARG_OFFSETOF(struct vop_print_args,a_vp), 1240 VOPARG_OFFSETOF(struct vop_print_args,a_vp),
1498 VDESC_NO_OFFSET 1241 VDESC_NO_OFFSET
@@ -1503,28 +1246,26 @@ const struct vnodeop_desc vop_print_desc @@ -1503,28 +1246,26 @@ const struct vnodeop_desc vop_print_desc
1503 0, 1246 0,
1504 vop_print_vp_offsets, 1247 vop_print_vp_offsets,
1505 VDESC_NO_OFFSET, 1248 VDESC_NO_OFFSET,
1506 VDESC_NO_OFFSET, 1249 VDESC_NO_OFFSET,
1507 VDESC_NO_OFFSET, 1250 VDESC_NO_OFFSET,
1508 NULL, 1251 NULL,
1509}; 1252};
1510int 1253int
1511VOP_PRINT(struct vnode *vp) 1254VOP_PRINT(struct vnode *vp)
1512{ 1255{
1513 int error; 1256 int error;
1514 bool mpsafe; 1257 bool mpsafe;
1515 struct vop_print_args a; 1258 struct vop_print_args a;
1516#ifdef VNODE_LOCKDEBUG 
1517#endif 
1518 a.a_desc = VDESC(vop_print); 1259 a.a_desc = VDESC(vop_print);
1519 a.a_vp = vp; 1260 a.a_vp = vp;
1520 mpsafe = (vp->v_vflag & VV_MPSAFE); 1261 mpsafe = (vp->v_vflag & VV_MPSAFE);
1521 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1262 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1522 error = (VCALL(vp, VOFFSET(vop_print), &a)); 1263 error = (VCALL(vp, VOFFSET(vop_print), &a));
1523 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1264 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1524 return error; 1265 return error;
1525} 1266}
1526 1267
1527const int vop_islocked_vp_offsets[] = { 1268const int vop_islocked_vp_offsets[] = {
1528 VOPARG_OFFSETOF(struct vop_islocked_args,a_vp), 1269 VOPARG_OFFSETOF(struct vop_islocked_args,a_vp),
1529 VDESC_NO_OFFSET 1270 VDESC_NO_OFFSET
1530}; 1271};
@@ -1534,28 +1275,26 @@ const struct vnodeop_desc vop_islocked_d @@ -1534,28 +1275,26 @@ const struct vnodeop_desc vop_islocked_d
1534 0, 1275 0,
1535 vop_islocked_vp_offsets, 1276 vop_islocked_vp_offsets,
1536 VDESC_NO_OFFSET, 1277 VDESC_NO_OFFSET,
1537 VDESC_NO_OFFSET, 1278 VDESC_NO_OFFSET,
1538 VDESC_NO_OFFSET, 1279 VDESC_NO_OFFSET,
1539 NULL, 1280 NULL,
1540}; 1281};
1541int 1282int
1542VOP_ISLOCKED(struct vnode *vp) 1283VOP_ISLOCKED(struct vnode *vp)
1543{ 1284{
1544 int error; 1285 int error;
1545 bool mpsafe; 1286 bool mpsafe;
1546 struct vop_islocked_args a; 1287 struct vop_islocked_args a;
1547#ifdef VNODE_LOCKDEBUG 
1548#endif 
1549 a.a_desc = VDESC(vop_islocked); 1288 a.a_desc = VDESC(vop_islocked);
1550 a.a_vp = vp; 1289 a.a_vp = vp;
1551 mpsafe = (vp->v_vflag & VV_MPSAFE); 1290 mpsafe = (vp->v_vflag & VV_MPSAFE);
1552 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1291 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1553 error = (VCALL(vp, VOFFSET(vop_islocked), &a)); 1292 error = (VCALL(vp, VOFFSET(vop_islocked), &a));
1554 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1293 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1555 return error; 1294 return error;
1556} 1295}
1557 1296
1558const int vop_pathconf_vp_offsets[] = { 1297const int vop_pathconf_vp_offsets[] = {
1559 VOPARG_OFFSETOF(struct vop_pathconf_args,a_vp), 1298 VOPARG_OFFSETOF(struct vop_pathconf_args,a_vp),
1560 VDESC_NO_OFFSET 1299 VDESC_NO_OFFSET
1561}; 1300};
@@ -1567,36 +1306,28 @@ const struct vnodeop_desc vop_pathconf_d @@ -1567,36 +1306,28 @@ const struct vnodeop_desc vop_pathconf_d
1567 VDESC_NO_OFFSET, 1306 VDESC_NO_OFFSET,
1568 VDESC_NO_OFFSET, 1307 VDESC_NO_OFFSET,
1569 VDESC_NO_OFFSET, 1308 VDESC_NO_OFFSET,
1570 NULL, 1309 NULL,
1571}; 1310};
1572int 1311int
1573VOP_PATHCONF(struct vnode *vp, 1312VOP_PATHCONF(struct vnode *vp,
1574 int name, 1313 int name,
1575 register_t *retval) 1314 register_t *retval)
1576{ 1315{
1577 int error; 1316 int error;
1578 bool mpsafe; 1317 bool mpsafe;
1579 struct vop_pathconf_args a; 1318 struct vop_pathconf_args a;
1580#ifdef VNODE_LOCKDEBUG 
1581 int islocked_vp; 
1582#endif 
1583 a.a_desc = VDESC(vop_pathconf); 1319 a.a_desc = VDESC(vop_pathconf);
1584 a.a_vp = vp; 1320 a.a_vp = vp;
1585#ifdef VNODE_LOCKDEBUG 
1586 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1587 if (islocked_vp != 1) 
1588 panic("vop_pathconf: vp: locked %d, expected %d", islocked_vp, 1); 
1589#endif 
1590 a.a_name = name; 1321 a.a_name = name;
1591 a.a_retval = retval; 1322 a.a_retval = retval;
1592 mpsafe = (vp->v_vflag & VV_MPSAFE); 1323 mpsafe = (vp->v_vflag & VV_MPSAFE);
1593 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1324 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1594 error = (VCALL(vp, VOFFSET(vop_pathconf), &a)); 1325 error = (VCALL(vp, VOFFSET(vop_pathconf), &a));
1595 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1326 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1596 return error; 1327 return error;
1597} 1328}
1598 1329
1599const int vop_advlock_vp_offsets[] = { 1330const int vop_advlock_vp_offsets[] = {
1600 VOPARG_OFFSETOF(struct vop_advlock_args,a_vp), 1331 VOPARG_OFFSETOF(struct vop_advlock_args,a_vp),
1601 VDESC_NO_OFFSET 1332 VDESC_NO_OFFSET
1602}; 1333};
@@ -1610,36 +1341,28 @@ const struct vnodeop_desc vop_advlock_de @@ -1610,36 +1341,28 @@ const struct vnodeop_desc vop_advlock_de
1610 VDESC_NO_OFFSET, 1341 VDESC_NO_OFFSET,
1611 NULL, 1342 NULL,
1612}; 1343};
1613int 1344int
1614VOP_ADVLOCK(struct vnode *vp, 1345VOP_ADVLOCK(struct vnode *vp,
1615 void *id, 1346 void *id,
1616 int op, 1347 int op,
1617 struct flock *fl, 1348 struct flock *fl,
1618 int flags) 1349 int flags)
1619{ 1350{
1620 int error; 1351 int error;
1621 bool mpsafe; 1352 bool mpsafe;
1622 struct vop_advlock_args a; 1353 struct vop_advlock_args a;
1623#ifdef VNODE_LOCKDEBUG 
1624 int islocked_vp; 
1625#endif 
1626 a.a_desc = VDESC(vop_advlock); 1354 a.a_desc = VDESC(vop_advlock);
1627 a.a_vp = vp; 1355 a.a_vp = vp;
1628#ifdef VNODE_LOCKDEBUG 
1629 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1630 if (islocked_vp != 0) 
1631 panic("vop_advlock: vp: locked %d, expected %d", islocked_vp, 0); 
1632#endif 
1633 a.a_id = id; 1356 a.a_id = id;
1634 a.a_op = op; 1357 a.a_op = op;
1635 a.a_fl = fl; 1358 a.a_fl = fl;
1636 a.a_flags = flags; 1359 a.a_flags = flags;
1637 mpsafe = (vp->v_vflag & VV_MPSAFE); 1360 mpsafe = (vp->v_vflag & VV_MPSAFE);
1638 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1361 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1639 error = (VCALL(vp, VOFFSET(vop_advlock), &a)); 1362 error = (VCALL(vp, VOFFSET(vop_advlock), &a));
1640 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1363 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1641 return error; 1364 return error;
1642} 1365}
1643 1366
1644const int vop_whiteout_vp_offsets[] = { 1367const int vop_whiteout_vp_offsets[] = {
1645 VOPARG_OFFSETOF(struct vop_whiteout_args,a_dvp), 1368 VOPARG_OFFSETOF(struct vop_whiteout_args,a_dvp),
@@ -1653,36 +1376,28 @@ const struct vnodeop_desc vop_whiteout_d @@ -1653,36 +1376,28 @@ const struct vnodeop_desc vop_whiteout_d
1653 VDESC_NO_OFFSET, 1376 VDESC_NO_OFFSET,
1654 VDESC_NO_OFFSET, 1377 VDESC_NO_OFFSET,
1655 VOPARG_OFFSETOF(struct vop_whiteout_args, a_cnp), 1378 VOPARG_OFFSETOF(struct vop_whiteout_args, a_cnp),
1656 NULL, 1379 NULL,
1657}; 1380};
1658int 1381int
1659VOP_WHITEOUT(struct vnode *dvp, 1382VOP_WHITEOUT(struct vnode *dvp,
1660 struct componentname *cnp, 1383 struct componentname *cnp,
1661 int flags) 1384 int flags)
1662{ 1385{
1663 int error; 1386 int error;
1664 bool mpsafe; 1387 bool mpsafe;
1665 struct vop_whiteout_args a; 1388 struct vop_whiteout_args a;
1666#ifdef VNODE_LOCKDEBUG 
1667 int islocked_dvp; 
1668#endif 
1669 a.a_desc = VDESC(vop_whiteout); 1389 a.a_desc = VDESC(vop_whiteout);
1670 a.a_dvp = dvp; 1390 a.a_dvp = dvp;
1671#ifdef VNODE_LOCKDEBUG 
1672 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1673 if (islocked_dvp != 1) 
1674 panic("vop_whiteout: dvp: locked %d, expected %d", islocked_dvp, 1); 
1675#endif 
1676 a.a_cnp = cnp; 1391 a.a_cnp = cnp;
1677 a.a_flags = flags; 1392 a.a_flags = flags;
1678 mpsafe = (dvp->v_vflag & VV_MPSAFE); 1393 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1679 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1394 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1680 error = (VCALL(dvp, VOFFSET(vop_whiteout), &a)); 1395 error = (VCALL(dvp, VOFFSET(vop_whiteout), &a));
1681 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1396 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1682 return error; 1397 return error;
1683} 1398}
1684 1399
1685const int vop_getpages_vp_offsets[] = { 1400const int vop_getpages_vp_offsets[] = {
1686 VOPARG_OFFSETOF(struct vop_getpages_args,a_vp), 1401 VOPARG_OFFSETOF(struct vop_getpages_args,a_vp),
1687 VDESC_NO_OFFSET 1402 VDESC_NO_OFFSET
1688}; 1403};
@@ -1699,28 +1414,26 @@ const struct vnodeop_desc vop_getpages_d @@ -1699,28 +1414,26 @@ const struct vnodeop_desc vop_getpages_d
1699int 1414int
1700VOP_GETPAGES(struct vnode *vp, 1415VOP_GETPAGES(struct vnode *vp,
1701 voff_t offset, 1416 voff_t offset,
1702 struct vm_page **m, 1417 struct vm_page **m,
1703 int *count, 1418 int *count,
1704 int centeridx, 1419 int centeridx,
1705 vm_prot_t access_type, 1420 vm_prot_t access_type,
1706 int advice, 1421 int advice,
1707 int flags) 1422 int flags)
1708{ 1423{
1709 int error; 1424 int error;
1710 bool mpsafe; 1425 bool mpsafe;
1711 struct vop_getpages_args a; 1426 struct vop_getpages_args a;
1712#ifdef VNODE_LOCKDEBUG 
1713#endif 
1714 a.a_desc = VDESC(vop_getpages); 1427 a.a_desc = VDESC(vop_getpages);
1715 a.a_vp = vp; 1428 a.a_vp = vp;
1716 a.a_offset = offset; 1429 a.a_offset = offset;
1717 a.a_m = m; 1430 a.a_m = m;
1718 a.a_count = count; 1431 a.a_count = count;
1719 a.a_centeridx = centeridx; 1432 a.a_centeridx = centeridx;
1720 a.a_access_type = access_type; 1433 a.a_access_type = access_type;
1721 a.a_advice = advice; 1434 a.a_advice = advice;
1722 a.a_flags = flags; 1435 a.a_flags = flags;
1723 mpsafe = (vp->v_vflag & VV_MPSAFE); 1436 mpsafe = (vp->v_vflag & VV_MPSAFE);
1724 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1437 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1725 error = (VCALL(vp, VOFFSET(vop_getpages), &a)); 1438 error = (VCALL(vp, VOFFSET(vop_getpages), &a));
1726 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1439 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
@@ -1740,28 +1453,26 @@ const struct vnodeop_desc vop_putpages_d @@ -1740,28 +1453,26 @@ const struct vnodeop_desc vop_putpages_d
1740 VDESC_NO_OFFSET, 1453 VDESC_NO_OFFSET,
1741 VDESC_NO_OFFSET, 1454 VDESC_NO_OFFSET,
1742 NULL, 1455 NULL,
1743}; 1456};
1744int 1457int
1745VOP_PUTPAGES(struct vnode *vp, 1458VOP_PUTPAGES(struct vnode *vp,
1746 voff_t offlo, 1459 voff_t offlo,
1747 voff_t offhi, 1460 voff_t offhi,
1748 int flags) 1461 int flags)
1749{ 1462{
1750 int error; 1463 int error;
1751 bool mpsafe; 1464 bool mpsafe;
1752 struct vop_putpages_args a; 1465 struct vop_putpages_args a;
1753#ifdef VNODE_LOCKDEBUG 
1754#endif 
1755 a.a_desc = VDESC(vop_putpages); 1466 a.a_desc = VDESC(vop_putpages);
1756 a.a_vp = vp; 1467 a.a_vp = vp;
1757 a.a_offlo = offlo; 1468 a.a_offlo = offlo;
1758 a.a_offhi = offhi; 1469 a.a_offhi = offhi;
1759 a.a_flags = flags; 1470 a.a_flags = flags;
1760 mpsafe = (vp->v_vflag & VV_MPSAFE); 1471 mpsafe = (vp->v_vflag & VV_MPSAFE);
1761 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1472 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1762 error = (VCALL(vp, VOFFSET(vop_putpages), &a)); 1473 error = (VCALL(vp, VOFFSET(vop_putpages), &a));
1763 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1474 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1764 return error; 1475 return error;
1765} 1476}
1766 1477
1767const int vop_closeextattr_vp_offsets[] = { 1478const int vop_closeextattr_vp_offsets[] = {
@@ -1776,36 +1487,28 @@ const struct vnodeop_desc vop_closeextat @@ -1776,36 +1487,28 @@ const struct vnodeop_desc vop_closeextat
1776 VDESC_NO_OFFSET, 1487 VDESC_NO_OFFSET,
1777 VOPARG_OFFSETOF(struct vop_closeextattr_args, a_cred), 1488 VOPARG_OFFSETOF(struct vop_closeextattr_args, a_cred),
1778 VDESC_NO_OFFSET, 1489 VDESC_NO_OFFSET,
1779 NULL, 1490 NULL,
1780}; 1491};
1781int 1492int
1782VOP_CLOSEEXTATTR(struct vnode *vp, 1493VOP_CLOSEEXTATTR(struct vnode *vp,
1783 int commit, 1494 int commit,
1784 kauth_cred_t cred) 1495 kauth_cred_t cred)
1785{ 1496{
1786 int error; 1497 int error;
1787 bool mpsafe; 1498 bool mpsafe;
1788 struct vop_closeextattr_args a; 1499 struct vop_closeextattr_args a;
1789#ifdef VNODE_LOCKDEBUG 
1790 int islocked_vp; 
1791#endif 
1792 a.a_desc = VDESC(vop_closeextattr); 1500 a.a_desc = VDESC(vop_closeextattr);
1793 a.a_vp = vp; 1501 a.a_vp = vp;
1794#ifdef VNODE_LOCKDEBUG 
1795 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1796 if (islocked_vp != 1) 
1797 panic("vop_closeextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1798#endif 
1799 a.a_commit = commit; 1502 a.a_commit = commit;
1800 a.a_cred = cred; 1503 a.a_cred = cred;
1801 mpsafe = (vp->v_vflag & VV_MPSAFE); 1504 mpsafe = (vp->v_vflag & VV_MPSAFE);
1802 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1505 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1803 error = (VCALL(vp, VOFFSET(vop_closeextattr), &a)); 1506 error = (VCALL(vp, VOFFSET(vop_closeextattr), &a));
1804 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1507 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1805 return error; 1508 return error;
1806} 1509}
1807 1510
1808const int vop_getextattr_vp_offsets[] = { 1511const int vop_getextattr_vp_offsets[] = {
1809 VOPARG_OFFSETOF(struct vop_getextattr_args,a_vp), 1512 VOPARG_OFFSETOF(struct vop_getextattr_args,a_vp),
1810 VDESC_NO_OFFSET 1513 VDESC_NO_OFFSET
1811}; 1514};
@@ -1820,36 +1523,28 @@ const struct vnodeop_desc vop_getextattr @@ -1820,36 +1523,28 @@ const struct vnodeop_desc vop_getextattr
1820 NULL, 1523 NULL,
1821}; 1524};
1822int 1525int
1823VOP_GETEXTATTR(struct vnode *vp, 1526VOP_GETEXTATTR(struct vnode *vp,
1824 int attrnamespace, 1527 int attrnamespace,
1825 const char *name, 1528 const char *name,
1826 struct uio *uio, 1529 struct uio *uio,
1827 size_t *size, 1530 size_t *size,
1828 kauth_cred_t cred) 1531 kauth_cred_t cred)
1829{ 1532{
1830 int error; 1533 int error;
1831 bool mpsafe; 1534 bool mpsafe;
1832 struct vop_getextattr_args a; 1535 struct vop_getextattr_args a;
1833#ifdef VNODE_LOCKDEBUG 
1834 int islocked_vp; 
1835#endif 
1836 a.a_desc = VDESC(vop_getextattr); 1536 a.a_desc = VDESC(vop_getextattr);
1837 a.a_vp = vp; 1537 a.a_vp = vp;
1838#ifdef VNODE_LOCKDEBUG 
1839 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1840 if (islocked_vp != 1) 
1841 panic("vop_getextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1842#endif 
1843 a.a_attrnamespace = attrnamespace; 1538 a.a_attrnamespace = attrnamespace;
1844 a.a_name = name; 1539 a.a_name = name;
1845 a.a_uio = uio; 1540 a.a_uio = uio;
1846 a.a_size = size; 1541 a.a_size = size;
1847 a.a_cred = cred; 1542 a.a_cred = cred;
1848 mpsafe = (vp->v_vflag & VV_MPSAFE); 1543 mpsafe = (vp->v_vflag & VV_MPSAFE);
1849 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1544 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1850 error = (VCALL(vp, VOFFSET(vop_getextattr), &a)); 1545 error = (VCALL(vp, VOFFSET(vop_getextattr), &a));
1851 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1546 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1852 return error; 1547 return error;
1853} 1548}
1854 1549
1855const int vop_listextattr_vp_offsets[] = { 1550const int vop_listextattr_vp_offsets[] = {
@@ -1866,36 +1561,28 @@ const struct vnodeop_desc vop_listextatt @@ -1866,36 +1561,28 @@ const struct vnodeop_desc vop_listextatt
1866 VDESC_NO_OFFSET, 1561 VDESC_NO_OFFSET,
1867 NULL, 1562 NULL,
1868}; 1563};
1869int 1564int
1870VOP_LISTEXTATTR(struct vnode *vp, 1565VOP_LISTEXTATTR(struct vnode *vp,
1871 int attrnamespace, 1566 int attrnamespace,
1872 struct uio *uio, 1567 struct uio *uio,
1873 size_t *size, 1568 size_t *size,
1874 kauth_cred_t cred) 1569 kauth_cred_t cred)
1875{ 1570{
1876 int error; 1571 int error;
1877 bool mpsafe; 1572 bool mpsafe;
1878 struct vop_listextattr_args a; 1573 struct vop_listextattr_args a;
1879#ifdef VNODE_LOCKDEBUG 
1880 int islocked_vp; 
1881#endif 
1882 a.a_desc = VDESC(vop_listextattr); 1574 a.a_desc = VDESC(vop_listextattr);
1883 a.a_vp = vp; 1575 a.a_vp = vp;
1884#ifdef VNODE_LOCKDEBUG 
1885 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1886 if (islocked_vp != 1) 
1887 panic("vop_listextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1888#endif 
1889 a.a_attrnamespace = attrnamespace; 1576 a.a_attrnamespace = attrnamespace;
1890 a.a_uio = uio; 1577 a.a_uio = uio;
1891 a.a_size = size; 1578 a.a_size = size;
1892 a.a_cred = cred; 1579 a.a_cred = cred;
1893 mpsafe = (vp->v_vflag & VV_MPSAFE); 1580 mpsafe = (vp->v_vflag & VV_MPSAFE);
1894 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1581 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1895 error = (VCALL(vp, VOFFSET(vop_listextattr), &a)); 1582 error = (VCALL(vp, VOFFSET(vop_listextattr), &a));
1896 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1583 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1897 return error; 1584 return error;
1898} 1585}
1899 1586
1900const int vop_openextattr_vp_offsets[] = { 1587const int vop_openextattr_vp_offsets[] = {
1901 VOPARG_OFFSETOF(struct vop_openextattr_args,a_vp), 1588 VOPARG_OFFSETOF(struct vop_openextattr_args,a_vp),
@@ -1908,36 +1595,28 @@ const struct vnodeop_desc vop_openextatt @@ -1908,36 +1595,28 @@ const struct vnodeop_desc vop_openextatt
1908 vop_openextattr_vp_offsets, 1595 vop_openextattr_vp_offsets,
1909 VDESC_NO_OFFSET, 1596 VDESC_NO_OFFSET,
1910 VOPARG_OFFSETOF(struct vop_openextattr_args, a_cred), 1597 VOPARG_OFFSETOF(struct vop_openextattr_args, a_cred),
1911 VDESC_NO_OFFSET, 1598 VDESC_NO_OFFSET,
1912 NULL, 1599 NULL,
1913}; 1600};
1914int 1601int
1915VOP_OPENEXTATTR(struct vnode *vp, 1602VOP_OPENEXTATTR(struct vnode *vp,
1916 kauth_cred_t cred) 1603 kauth_cred_t cred)
1917{ 1604{
1918 int error; 1605 int error;
1919 bool mpsafe; 1606 bool mpsafe;
1920 struct vop_openextattr_args a; 1607 struct vop_openextattr_args a;
1921#ifdef VNODE_LOCKDEBUG 
1922 int islocked_vp; 
1923#endif 
1924 a.a_desc = VDESC(vop_openextattr); 1608 a.a_desc = VDESC(vop_openextattr);
1925 a.a_vp = vp; 1609 a.a_vp = vp;
1926#ifdef VNODE_LOCKDEBUG 
1927 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1928 if (islocked_vp != 1) 
1929 panic("vop_openextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1930#endif 
1931 a.a_cred = cred; 1610 a.a_cred = cred;
1932 mpsafe = (vp->v_vflag & VV_MPSAFE); 1611 mpsafe = (vp->v_vflag & VV_MPSAFE);
1933 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1612 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1934 error = (VCALL(vp, VOFFSET(vop_openextattr), &a)); 1613 error = (VCALL(vp, VOFFSET(vop_openextattr), &a));
1935 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1614 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1936 return error; 1615 return error;
1937} 1616}
1938 1617
1939const int vop_deleteextattr_vp_offsets[] = { 1618const int vop_deleteextattr_vp_offsets[] = {
1940 VOPARG_OFFSETOF(struct vop_deleteextattr_args,a_vp), 1619 VOPARG_OFFSETOF(struct vop_deleteextattr_args,a_vp),
1941 VDESC_NO_OFFSET 1620 VDESC_NO_OFFSET
1942}; 1621};
1943const struct vnodeop_desc vop_deleteextattr_desc = { 1622const struct vnodeop_desc vop_deleteextattr_desc = {
@@ -1949,36 +1628,28 @@ const struct vnodeop_desc vop_deleteexta @@ -1949,36 +1628,28 @@ const struct vnodeop_desc vop_deleteexta
1949 VOPARG_OFFSETOF(struct vop_deleteextattr_args, a_cred), 1628 VOPARG_OFFSETOF(struct vop_deleteextattr_args, a_cred),
1950 VDESC_NO_OFFSET, 1629 VDESC_NO_OFFSET,
1951 NULL, 1630 NULL,
1952}; 1631};
1953int 1632int
1954VOP_DELETEEXTATTR(struct vnode *vp, 1633VOP_DELETEEXTATTR(struct vnode *vp,
1955 int attrnamespace, 1634 int attrnamespace,
1956 const char *name, 1635 const char *name,
1957 kauth_cred_t cred) 1636 kauth_cred_t cred)
1958{ 1637{
1959 int error; 1638 int error;
1960 bool mpsafe; 1639 bool mpsafe;
1961 struct vop_deleteextattr_args a; 1640 struct vop_deleteextattr_args a;
1962#ifdef VNODE_LOCKDEBUG 
1963 int islocked_vp; 
1964#endif 
1965 a.a_desc = VDESC(vop_deleteextattr); 1641 a.a_desc = VDESC(vop_deleteextattr);
1966 a.a_vp = vp; 1642 a.a_vp = vp;
1967#ifdef VNODE_LOCKDEBUG 
1968 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1969 if (islocked_vp != 1) 
1970 panic("vop_deleteextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1971#endif 
1972 a.a_attrnamespace = attrnamespace; 1643 a.a_attrnamespace = attrnamespace;
1973 a.a_name = name; 1644 a.a_name = name;
1974 a.a_cred = cred; 1645 a.a_cred = cred;
1975 mpsafe = (vp->v_vflag & VV_MPSAFE); 1646 mpsafe = (vp->v_vflag & VV_MPSAFE);
1976 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1647 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1977 error = (VCALL(vp, VOFFSET(vop_deleteextattr), &a)); 1648 error = (VCALL(vp, VOFFSET(vop_deleteextattr), &a));
1978 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1649 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1979 return error; 1650 return error;
1980} 1651}
1981 1652
1982const int vop_setextattr_vp_offsets[] = { 1653const int vop_setextattr_vp_offsets[] = {
1983 VOPARG_OFFSETOF(struct vop_setextattr_args,a_vp), 1654 VOPARG_OFFSETOF(struct vop_setextattr_args,a_vp),
1984 VDESC_NO_OFFSET 1655 VDESC_NO_OFFSET
@@ -1993,36 +1664,28 @@ const struct vnodeop_desc vop_setextattr @@ -1993,36 +1664,28 @@ const struct vnodeop_desc vop_setextattr
1993 VDESC_NO_OFFSET, 1664 VDESC_NO_OFFSET,
1994 NULL, 1665 NULL,
1995}; 1666};
1996int 1667int
1997VOP_SETEXTATTR(struct vnode *vp, 1668VOP_SETEXTATTR(struct vnode *vp,
1998 int attrnamespace, 1669 int attrnamespace,
1999 const char *name, 1670 const char *name,
2000 struct uio *uio, 1671 struct uio *uio,
2001 kauth_cred_t cred) 1672 kauth_cred_t cred)
2002{ 1673{
2003 int error; 1674 int error;
2004 bool mpsafe; 1675 bool mpsafe;
2005 struct vop_setextattr_args a; 1676 struct vop_setextattr_args a;
2006#ifdef VNODE_LOCKDEBUG 
2007 int islocked_vp; 
2008#endif 
2009 a.a_desc = VDESC(vop_setextattr); 1677 a.a_desc = VDESC(vop_setextattr);
2010 a.a_vp = vp; 1678 a.a_vp = vp;
2011#ifdef VNODE_LOCKDEBUG 
2012 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
2013 if (islocked_vp != 1) 
2014 panic("vop_setextattr: vp: locked %d, expected %d", islocked_vp, 1); 
2015#endif 
2016 a.a_attrnamespace = attrnamespace; 1679 a.a_attrnamespace = attrnamespace;
2017 a.a_name = name; 1680 a.a_name = name;
2018 a.a_uio = uio; 1681 a.a_uio = uio;
2019 a.a_cred = cred; 1682 a.a_cred = cred;
2020 mpsafe = (vp->v_vflag & VV_MPSAFE); 1683 mpsafe = (vp->v_vflag & VV_MPSAFE);
2021 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1684 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
2022 error = (VCALL(vp, VOFFSET(vop_setextattr), &a)); 1685 error = (VCALL(vp, VOFFSET(vop_setextattr), &a));
2023 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1686 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
2024 return error; 1687 return error;
2025} 1688}
2026 1689
2027/* End of special cases. */ 1690/* End of special cases. */
2028 1691
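
Every hunk above deletes the same optional check: when the kernel was built with "options VNODE_LOCKDEBUG", each generated wrapper compared the vnode's actual lock state against the lock state the operation declares in vnode_if.src and panicked on a mismatch. Pieced together from the old (left-hand) column of the hunks above — a reconstruction for reference, not new code — the pre-regen VOP_INACTIVE wrapper read as follows; operations that expect an unlocked vnode, such as VOP_RECLAIM and VOP_LOCK, compared against 0 instead of 1, and the comments are added here for orientation:

int
VOP_INACTIVE(struct vnode *vp,
    bool *recycle)
{
        int error;
        bool mpsafe;
        struct vop_inactive_args a;
#ifdef VNODE_LOCKDEBUG
        int islocked_vp;
#endif
        a.a_desc = VDESC(vop_inactive);                 /* operation descriptor */
        a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
        /* The block this regen removes: assert the declared lock state. */
        islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ?
            (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
        if (islocked_vp != 1)
                panic("vop_inactive: vp: locked %d, expected %d", islocked_vp, 1);
#endif
        a.a_recycle = recycle;
        mpsafe = (vp->v_vflag & VV_MPSAFE);             /* file system marked MP-safe? */
        if (!mpsafe) { KERNEL_LOCK(1, curlwp); }        /* if not, serialize under the big lock */
        error = (VCALL(vp, VOFFSET(vop_inactive), &a)); /* dispatch via the vnode's op vector */
        if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
        return error;
}

The regen keeps everything except the two #ifdef VNODE_LOCKDEBUG blocks, which is why the hunks above shrink by eight lines where a check existed and by two lines where only an empty #ifdef/#endif pair was left.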

cvs diff -r1.2 -r1.3 src/sys/rump/include/rump/rumpvnode_if.h

--- src/sys/rump/include/rump/rumpvnode_if.h 2008/11/17 08:59:33 1.2
+++ src/sys/rump/include/rump/rumpvnode_if.h 2009/09/29 11:54:52 1.3
@@ -1,23 +1,23 @@ @@ -1,23 +1,23 @@
1/* $NetBSD: rumpvnode_if.h,v 1.2 2008/11/17 08:59:33 pooka Exp $ */ 1/* $NetBSD: rumpvnode_if.h,v 1.3 2009/09/29 11:54:52 pooka Exp $ */
2 2
3/* 3/*
4 * Warning: DO NOT EDIT! This file is automatically generated! 4 * Warning: DO NOT EDIT! This file is automatically generated!
5 * (Modifications made here may easily be lost!) 5 * (Modifications made here may easily be lost!)
6 * 6 *
7 * Created from the file: 7 * Created from the file:
8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp 8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp
9 * by the script: 9 * by the script:
10 * NetBSD: vnode_if.sh,v 1.50 2008/11/17 08:46:03 pooka Exp 10 * NetBSD: vnode_if.sh,v 1.52 2009/09/29 11:51:02 pooka Exp
11 */ 11 */
12 12
13/* 13/*
14 * Copyright (c) 1992, 1993, 1994, 1995 14 * Copyright (c) 1992, 1993, 1994, 1995
15 * The Regents of the University of California. All rights reserved. 15 * The Regents of the University of California. All rights reserved.
16 * 16 *
17 * Redistribution and use in source and binary forms, with or without 17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions 18 * modification, are permitted provided that the following conditions
19 * are met: 19 * are met:
20 * 1. Redistributions of source code must retain the above copyright 20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer. 21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright 22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the 23 * notice, this list of conditions and the following disclaimer in the
@@ -32,30 +32,26 @@ @@ -32,30 +32,26 @@
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE. 39 * SUCH DAMAGE.
40 */ 40 */
41 41
42#ifndef _RUMP_RUMPVNODE_IF_H_ 42#ifndef _RUMP_RUMPVNODE_IF_H_
43#define _RUMP_RUMPVNODE_IF_H_ 43#define _RUMP_RUMPVNODE_IF_H_
44 44
45#ifdef _KERNEL_OPT 
46#include "opt_vnode_lockdebug.h" 
47#endif /* _KERNEL_OPT */ 
48 
49extern const struct vnodeop_desc rump_vop_default_desc; 45extern const struct vnodeop_desc rump_vop_default_desc;
50 46
51 47
52/* Special cases: */ 48/* Special cases: */
53struct buf; 49struct buf;
54#ifndef _KERNEL 50#ifndef _KERNEL
55#include <stdbool.h> 51#include <stdbool.h>
56#endif 52#endif
57 53
58 54
59#define RUMP_VOP_BWRITE_DESCOFFSET 1 55#define RUMP_VOP_BWRITE_DESCOFFSET 1
60struct rump_vop_bwrite_args { 56struct rump_vop_bwrite_args {
61 const struct vnodeop_desc *a_desc; 57 const struct vnodeop_desc *a_desc;
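
The header change is the counterpart of the regen: with the VNODE_LOCKDEBUG option retired there is no opt_vnode_lockdebug.h for config(1) to generate, so the generated header no longer tries to include it. The removed guard (my reading: _KERNEL_OPT is only defined for builds that actually have the config(1)-generated option headers, so the guard kept the header usable outside such builds) was:

#ifdef _KERNEL_OPT
#include "opt_vnode_lockdebug.h"        /* option header generated by config(1) */
#endif /* _KERNEL_OPT */

The same include, unguarded, disappears from rumpvnode_if.c in the diff below, and the RUMP_VOP_*() wrappers there lose their VNODE_LOCKDEBUG blocks in exactly the same way as the VOP_*() wrappers above.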

cvs diff -r1.1 -r1.2 src/sys/rump/librump/rumpvfs/rumpvnode_if.c

--- src/sys/rump/librump/rumpvfs/rumpvnode_if.c 2008/11/19 14:10:49 1.1
+++ src/sys/rump/librump/rumpvfs/rumpvnode_if.c 2009/09/29 11:54:52 1.2
@@ -1,23 +1,23 @@ @@ -1,23 +1,23 @@
1/* $NetBSD: rumpvnode_if.c,v 1.1 2008/11/19 14:10:49 pooka Exp $ */ 1/* $NetBSD: rumpvnode_if.c,v 1.2 2009/09/29 11:54:52 pooka Exp $ */
2 2
3/* 3/*
4 * Warning: DO NOT EDIT! This file is automatically generated! 4 * Warning: DO NOT EDIT! This file is automatically generated!
5 * (Modifications made here may easily be lost!) 5 * (Modifications made here may easily be lost!)
6 * 6 *
7 * Created from the file: 7 * Created from the file:
8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp 8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp
9 * by the script: 9 * by the script:
10 * NetBSD: vnode_if.sh,v 1.50 2008/11/17 08:46:03 pooka Exp 10 * NetBSD: vnode_if.sh,v 1.52 2009/09/29 11:51:02 pooka Exp
11 */ 11 */
12 12
13/* 13/*
14 * Copyright (c) 1992, 1993, 1994, 1995 14 * Copyright (c) 1992, 1993, 1994, 1995
15 * The Regents of the University of California. All rights reserved. 15 * The Regents of the University of California. All rights reserved.
16 * 16 *
17 * Redistribution and use in source and binary forms, with or without 17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions 18 * modification, are permitted provided that the following conditions
19 * are met: 19 * are met:
20 * 1. Redistributions of source code must retain the above copyright 20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer. 21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright 22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the 23 * notice, this list of conditions and the following disclaimer in the
@@ -30,30 +30,27 @@ @@ -30,30 +30,27 @@
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE. 39 * SUCH DAMAGE.
40 */ 40 */
41 41
42#include <sys/cdefs.h> 42#include <sys/cdefs.h>
43__KERNEL_RCSID(0, "$NetBSD: rumpvnode_if.c,v 1.1 2008/11/19 14:10:49 pooka Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: rumpvnode_if.c,v 1.2 2009/09/29 11:54:52 pooka Exp $");
44 
45 
46#include "opt_vnode_lockdebug.h" 
47 44
48#include <sys/param.h> 45#include <sys/param.h>
49#include <sys/mount.h> 46#include <sys/mount.h>
50#include <sys/buf.h> 47#include <sys/buf.h>
51#include <sys/vnode.h> 48#include <sys/vnode.h>
52#include <sys/lock.h> 49#include <sys/lock.h>
53#include <rump/rumpvnode_if.h> 50#include <rump/rumpvnode_if.h>
54 51
55const struct vnodeop_desc rump_vop_default_desc = { 52const struct vnodeop_desc rump_vop_default_desc = {
56 0, 53 0,
57 "default", 54 "default",
58 0, 55 0,
59 NULL, 56 NULL,
@@ -75,28 +72,26 @@ const struct vnodeop_desc rump_vop_bwrit @@ -75,28 +72,26 @@ const struct vnodeop_desc rump_vop_bwrit
75 0, 72 0,
76 rump_vop_bwrite_vp_offsets, 73 rump_vop_bwrite_vp_offsets,
77 VDESC_NO_OFFSET, 74 VDESC_NO_OFFSET,
78 VDESC_NO_OFFSET, 75 VDESC_NO_OFFSET,
79 VDESC_NO_OFFSET, 76 VDESC_NO_OFFSET,
80 NULL, 77 NULL,
81}; 78};
82int 79int
83RUMP_VOP_BWRITE(struct buf *bp) 80RUMP_VOP_BWRITE(struct buf *bp)
84{ 81{
85 int error; 82 int error;
86 bool mpsafe; 83 bool mpsafe;
87 struct rump_vop_bwrite_args a; 84 struct rump_vop_bwrite_args a;
88#ifdef VNODE_LOCKDEBUG 
89#endif 
90 a.a_desc = VDESC(rump_vop_bwrite); 85 a.a_desc = VDESC(rump_vop_bwrite);
91 a.a_bp = bp; 86 a.a_bp = bp;
92 mpsafe = (bp->b_vp->v_vflag & VV_MPSAFE); 87 mpsafe = (bp->b_vp->v_vflag & VV_MPSAFE);
93 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 88 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
94 error = (VCALL(bp->b_vp, VOFFSET(rump_vop_bwrite), &a)); 89 error = (VCALL(bp->b_vp, VOFFSET(rump_vop_bwrite), &a));
95 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 90 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
96 return error; 91 return error;
97} 92}
98 93
99/* End of special cases */ 94/* End of special cases */
100 95
101const int rump_vop_lookup_vp_offsets[] = { 96const int rump_vop_lookup_vp_offsets[] = {
102 VOPARG_OFFSETOF(struct rump_vop_lookup_args,a_dvp), 97 VOPARG_OFFSETOF(struct rump_vop_lookup_args,a_dvp),
@@ -110,28 +105,26 @@ const struct vnodeop_desc rump_vop_looku @@ -110,28 +105,26 @@ const struct vnodeop_desc rump_vop_looku
110 VOPARG_OFFSETOF(struct rump_vop_lookup_args, a_vpp), 105 VOPARG_OFFSETOF(struct rump_vop_lookup_args, a_vpp),
111 VDESC_NO_OFFSET, 106 VDESC_NO_OFFSET,
112 VOPARG_OFFSETOF(struct rump_vop_lookup_args, a_cnp), 107 VOPARG_OFFSETOF(struct rump_vop_lookup_args, a_cnp),
113 NULL, 108 NULL,
114}; 109};
115int 110int
116RUMP_VOP_LOOKUP(struct vnode *dvp, 111RUMP_VOP_LOOKUP(struct vnode *dvp,
117 struct vnode **vpp, 112 struct vnode **vpp,
118 struct componentname *cnp) 113 struct componentname *cnp)
119{ 114{
120 int error; 115 int error;
121 bool mpsafe; 116 bool mpsafe;
122 struct rump_vop_lookup_args a; 117 struct rump_vop_lookup_args a;
123#ifdef VNODE_LOCKDEBUG 
124#endif 
125 a.a_desc = VDESC(rump_vop_lookup); 118 a.a_desc = VDESC(rump_vop_lookup);
126 a.a_dvp = dvp; 119 a.a_dvp = dvp;
127 a.a_vpp = vpp; 120 a.a_vpp = vpp;
128 a.a_cnp = cnp; 121 a.a_cnp = cnp;
129 mpsafe = (dvp->v_vflag & VV_MPSAFE); 122 mpsafe = (dvp->v_vflag & VV_MPSAFE);
130 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 123 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
131 error = (VCALL(dvp, VOFFSET(rump_vop_lookup), &a)); 124 error = (VCALL(dvp, VOFFSET(rump_vop_lookup), &a));
132 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 125 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
133#ifdef DIAGNOSTIC 126#ifdef DIAGNOSTIC
134 if (error == 0) 127 if (error == 0)
135 KASSERT((*vpp)->v_size != VSIZENOTSET 128 KASSERT((*vpp)->v_size != VSIZENOTSET
136 && (*vpp)->v_writesize != VSIZENOTSET); 129 && (*vpp)->v_writesize != VSIZENOTSET);
137#endif /* DIAGNOSTIC */ 130#endif /* DIAGNOSTIC */
@@ -151,36 +144,28 @@ const struct vnodeop_desc rump_vop_creat @@ -151,36 +144,28 @@ const struct vnodeop_desc rump_vop_creat
151 VDESC_NO_OFFSET, 144 VDESC_NO_OFFSET,
152 VOPARG_OFFSETOF(struct rump_vop_create_args, a_cnp), 145 VOPARG_OFFSETOF(struct rump_vop_create_args, a_cnp),
153 NULL, 146 NULL,
154}; 147};
155int 148int
156RUMP_VOP_CREATE(struct vnode *dvp, 149RUMP_VOP_CREATE(struct vnode *dvp,
157 struct vnode **vpp, 150 struct vnode **vpp,
158 struct componentname *cnp, 151 struct componentname *cnp,
159 struct vattr *vap) 152 struct vattr *vap)
160{ 153{
161 int error; 154 int error;
162 bool mpsafe; 155 bool mpsafe;
163 struct rump_vop_create_args a; 156 struct rump_vop_create_args a;
164#ifdef VNODE_LOCKDEBUG 
165 int islocked_dvp; 
166#endif 
167 a.a_desc = VDESC(rump_vop_create); 157 a.a_desc = VDESC(rump_vop_create);
168 a.a_dvp = dvp; 158 a.a_dvp = dvp;
169#ifdef VNODE_LOCKDEBUG 
170 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
171 if (islocked_dvp != 1) 
172 panic("rump_vop_create: dvp: locked %d, expected %d", islocked_dvp, 1); 
173#endif 
174 a.a_vpp = vpp; 159 a.a_vpp = vpp;
175 a.a_cnp = cnp; 160 a.a_cnp = cnp;
176 a.a_vap = vap; 161 a.a_vap = vap;
177 mpsafe = (dvp->v_vflag & VV_MPSAFE); 162 mpsafe = (dvp->v_vflag & VV_MPSAFE);
178 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 163 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
179 error = (VCALL(dvp, VOFFSET(rump_vop_create), &a)); 164 error = (VCALL(dvp, VOFFSET(rump_vop_create), &a));
180 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 165 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
181#ifdef DIAGNOSTIC 166#ifdef DIAGNOSTIC
182 if (error == 0) 167 if (error == 0)
183 KASSERT((*vpp)->v_size != VSIZENOTSET 168 KASSERT((*vpp)->v_size != VSIZENOTSET
184 && (*vpp)->v_writesize != VSIZENOTSET); 169 && (*vpp)->v_writesize != VSIZENOTSET);
185#endif /* DIAGNOSTIC */ 170#endif /* DIAGNOSTIC */
186 return error; 171 return error;
@@ -199,36 +184,28 @@ const struct vnodeop_desc rump_vop_mknod @@ -199,36 +184,28 @@ const struct vnodeop_desc rump_vop_mknod
199 VDESC_NO_OFFSET, 184 VDESC_NO_OFFSET,
200 VOPARG_OFFSETOF(struct rump_vop_mknod_args, a_cnp), 185 VOPARG_OFFSETOF(struct rump_vop_mknod_args, a_cnp),
201 NULL, 186 NULL,
202}; 187};
203int 188int
204RUMP_VOP_MKNOD(struct vnode *dvp, 189RUMP_VOP_MKNOD(struct vnode *dvp,
205 struct vnode **vpp, 190 struct vnode **vpp,
206 struct componentname *cnp, 191 struct componentname *cnp,
207 struct vattr *vap) 192 struct vattr *vap)
208{ 193{
209 int error; 194 int error;
210 bool mpsafe; 195 bool mpsafe;
211 struct rump_vop_mknod_args a; 196 struct rump_vop_mknod_args a;
212#ifdef VNODE_LOCKDEBUG 
213 int islocked_dvp; 
214#endif 
215 a.a_desc = VDESC(rump_vop_mknod); 197 a.a_desc = VDESC(rump_vop_mknod);
216 a.a_dvp = dvp; 198 a.a_dvp = dvp;
217#ifdef VNODE_LOCKDEBUG 
218 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
219 if (islocked_dvp != 1) 
220 panic("rump_vop_mknod: dvp: locked %d, expected %d", islocked_dvp, 1); 
221#endif 
222 a.a_vpp = vpp; 199 a.a_vpp = vpp;
223 a.a_cnp = cnp; 200 a.a_cnp = cnp;
224 a.a_vap = vap; 201 a.a_vap = vap;
225 mpsafe = (dvp->v_vflag & VV_MPSAFE); 202 mpsafe = (dvp->v_vflag & VV_MPSAFE);
226 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 203 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
227 error = (VCALL(dvp, VOFFSET(rump_vop_mknod), &a)); 204 error = (VCALL(dvp, VOFFSET(rump_vop_mknod), &a));
228 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 205 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
229#ifdef DIAGNOSTIC 206#ifdef DIAGNOSTIC
230 if (error == 0) 207 if (error == 0)
231 KASSERT((*vpp)->v_size != VSIZENOTSET 208 KASSERT((*vpp)->v_size != VSIZENOTSET
232 && (*vpp)->v_writesize != VSIZENOTSET); 209 && (*vpp)->v_writesize != VSIZENOTSET);
233#endif /* DIAGNOSTIC */ 210#endif /* DIAGNOSTIC */
234 return error; 211 return error;
@@ -246,36 +223,28 @@ const struct vnodeop_desc rump_vop_open_ @@ -246,36 +223,28 @@ const struct vnodeop_desc rump_vop_open_
246 VDESC_NO_OFFSET, 223 VDESC_NO_OFFSET,
247 VOPARG_OFFSETOF(struct rump_vop_open_args, a_cred), 224 VOPARG_OFFSETOF(struct rump_vop_open_args, a_cred),
248 VDESC_NO_OFFSET, 225 VDESC_NO_OFFSET,
249 NULL, 226 NULL,
250}; 227};
251int 228int
252RUMP_VOP_OPEN(struct vnode *vp, 229RUMP_VOP_OPEN(struct vnode *vp,
253 int mode, 230 int mode,
254 kauth_cred_t cred) 231 kauth_cred_t cred)
255{ 232{
256 int error; 233 int error;
257 bool mpsafe; 234 bool mpsafe;
258 struct rump_vop_open_args a; 235 struct rump_vop_open_args a;
259#ifdef VNODE_LOCKDEBUG 
260 int islocked_vp; 
261#endif 
262 a.a_desc = VDESC(rump_vop_open); 236 a.a_desc = VDESC(rump_vop_open);
263 a.a_vp = vp; 237 a.a_vp = vp;
264#ifdef VNODE_LOCKDEBUG 
265 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
266 if (islocked_vp != 1) 
267 panic("rump_vop_open: vp: locked %d, expected %d", islocked_vp, 1); 
268#endif 
269 a.a_mode = mode; 238 a.a_mode = mode;
270 a.a_cred = cred; 239 a.a_cred = cred;
271 mpsafe = (vp->v_vflag & VV_MPSAFE); 240 mpsafe = (vp->v_vflag & VV_MPSAFE);
272 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 241 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
273 error = (VCALL(vp, VOFFSET(rump_vop_open), &a)); 242 error = (VCALL(vp, VOFFSET(rump_vop_open), &a));
274 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 243 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
275 return error; 244 return error;
276} 245}
277 246
278const int rump_vop_close_vp_offsets[] = { 247const int rump_vop_close_vp_offsets[] = {
279 VOPARG_OFFSETOF(struct rump_vop_close_args,a_vp), 248 VOPARG_OFFSETOF(struct rump_vop_close_args,a_vp),
280 VDESC_NO_OFFSET 249 VDESC_NO_OFFSET
281}; 250};
@@ -287,36 +256,28 @@ const struct vnodeop_desc rump_vop_close @@ -287,36 +256,28 @@ const struct vnodeop_desc rump_vop_close
287 VDESC_NO_OFFSET, 256 VDESC_NO_OFFSET,
288 VOPARG_OFFSETOF(struct rump_vop_close_args, a_cred), 257 VOPARG_OFFSETOF(struct rump_vop_close_args, a_cred),
289 VDESC_NO_OFFSET, 258 VDESC_NO_OFFSET,
290 NULL, 259 NULL,
291}; 260};
292int 261int
293RUMP_VOP_CLOSE(struct vnode *vp, 262RUMP_VOP_CLOSE(struct vnode *vp,
294 int fflag, 263 int fflag,
295 kauth_cred_t cred) 264 kauth_cred_t cred)
296{ 265{
297 int error; 266 int error;
298 bool mpsafe; 267 bool mpsafe;
299 struct rump_vop_close_args a; 268 struct rump_vop_close_args a;
300#ifdef VNODE_LOCKDEBUG 
301 int islocked_vp; 
302#endif 
303 a.a_desc = VDESC(rump_vop_close); 269 a.a_desc = VDESC(rump_vop_close);
304 a.a_vp = vp; 270 a.a_vp = vp;
305#ifdef VNODE_LOCKDEBUG 
306 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
307 if (islocked_vp != 1) 
308 panic("rump_vop_close: vp: locked %d, expected %d", islocked_vp, 1); 
309#endif 
310 a.a_fflag = fflag; 271 a.a_fflag = fflag;
311 a.a_cred = cred; 272 a.a_cred = cred;
312 mpsafe = (vp->v_vflag & VV_MPSAFE); 273 mpsafe = (vp->v_vflag & VV_MPSAFE);
313 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 274 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
314 error = (VCALL(vp, VOFFSET(rump_vop_close), &a)); 275 error = (VCALL(vp, VOFFSET(rump_vop_close), &a));
315 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 276 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
316 return error; 277 return error;
317} 278}
318 279
319const int rump_vop_access_vp_offsets[] = { 280const int rump_vop_access_vp_offsets[] = {
320 VOPARG_OFFSETOF(struct rump_vop_access_args,a_vp), 281 VOPARG_OFFSETOF(struct rump_vop_access_args,a_vp),
321 VDESC_NO_OFFSET 282 VDESC_NO_OFFSET
322}; 283};
@@ -328,36 +289,28 @@ const struct vnodeop_desc rump_vop_acces @@ -328,36 +289,28 @@ const struct vnodeop_desc rump_vop_acces
328 VDESC_NO_OFFSET, 289 VDESC_NO_OFFSET,
329 VOPARG_OFFSETOF(struct rump_vop_access_args, a_cred), 290 VOPARG_OFFSETOF(struct rump_vop_access_args, a_cred),
330 VDESC_NO_OFFSET, 291 VDESC_NO_OFFSET,
331 NULL, 292 NULL,
332}; 293};
333int 294int
334RUMP_VOP_ACCESS(struct vnode *vp, 295RUMP_VOP_ACCESS(struct vnode *vp,
335 int mode, 296 int mode,
336 kauth_cred_t cred) 297 kauth_cred_t cred)
337{ 298{
338 int error; 299 int error;
339 bool mpsafe; 300 bool mpsafe;
340 struct rump_vop_access_args a; 301 struct rump_vop_access_args a;
341#ifdef VNODE_LOCKDEBUG 
342 int islocked_vp; 
343#endif 
344 a.a_desc = VDESC(rump_vop_access); 302 a.a_desc = VDESC(rump_vop_access);
345 a.a_vp = vp; 303 a.a_vp = vp;
346#ifdef VNODE_LOCKDEBUG 
347 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
348 if (islocked_vp != 1) 
349 panic("rump_vop_access: vp: locked %d, expected %d", islocked_vp, 1); 
350#endif 
351 a.a_mode = mode; 304 a.a_mode = mode;
352 a.a_cred = cred; 305 a.a_cred = cred;
353 mpsafe = (vp->v_vflag & VV_MPSAFE); 306 mpsafe = (vp->v_vflag & VV_MPSAFE);
354 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 307 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
355 error = (VCALL(vp, VOFFSET(rump_vop_access), &a)); 308 error = (VCALL(vp, VOFFSET(rump_vop_access), &a));
356 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 309 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
357 return error; 310 return error;
358} 311}
359 312
360const int rump_vop_getattr_vp_offsets[] = { 313const int rump_vop_getattr_vp_offsets[] = {
361 VOPARG_OFFSETOF(struct rump_vop_getattr_args,a_vp), 314 VOPARG_OFFSETOF(struct rump_vop_getattr_args,a_vp),
362 VDESC_NO_OFFSET 315 VDESC_NO_OFFSET
363}; 316};
@@ -369,28 +322,26 @@ const struct vnodeop_desc rump_vop_getat @@ -369,28 +322,26 @@ const struct vnodeop_desc rump_vop_getat
369 VDESC_NO_OFFSET, 322 VDESC_NO_OFFSET,
370 VOPARG_OFFSETOF(struct rump_vop_getattr_args, a_cred), 323 VOPARG_OFFSETOF(struct rump_vop_getattr_args, a_cred),
371 VDESC_NO_OFFSET, 324 VDESC_NO_OFFSET,
372 NULL, 325 NULL,
373}; 326};
374int 327int
375RUMP_VOP_GETATTR(struct vnode *vp, 328RUMP_VOP_GETATTR(struct vnode *vp,
376 struct vattr *vap, 329 struct vattr *vap,
377 kauth_cred_t cred) 330 kauth_cred_t cred)
378{ 331{
379 int error; 332 int error;
380 bool mpsafe; 333 bool mpsafe;
381 struct rump_vop_getattr_args a; 334 struct rump_vop_getattr_args a;
382#ifdef VNODE_LOCKDEBUG 
383#endif 
384 a.a_desc = VDESC(rump_vop_getattr); 335 a.a_desc = VDESC(rump_vop_getattr);
385 a.a_vp = vp; 336 a.a_vp = vp;
386 a.a_vap = vap; 337 a.a_vap = vap;
387 a.a_cred = cred; 338 a.a_cred = cred;
388 mpsafe = (vp->v_vflag & VV_MPSAFE); 339 mpsafe = (vp->v_vflag & VV_MPSAFE);
389 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 340 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
390 error = (VCALL(vp, VOFFSET(rump_vop_getattr), &a)); 341 error = (VCALL(vp, VOFFSET(rump_vop_getattr), &a));
391 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 342 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
392 return error; 343 return error;
393} 344}
394 345
395const int rump_vop_setattr_vp_offsets[] = { 346const int rump_vop_setattr_vp_offsets[] = {
396 VOPARG_OFFSETOF(struct rump_vop_setattr_args,a_vp), 347 VOPARG_OFFSETOF(struct rump_vop_setattr_args,a_vp),
@@ -404,36 +355,28 @@ const struct vnodeop_desc rump_vop_setat @@ -404,36 +355,28 @@ const struct vnodeop_desc rump_vop_setat
404 VDESC_NO_OFFSET, 355 VDESC_NO_OFFSET,
405 VOPARG_OFFSETOF(struct rump_vop_setattr_args, a_cred), 356 VOPARG_OFFSETOF(struct rump_vop_setattr_args, a_cred),
406 VDESC_NO_OFFSET, 357 VDESC_NO_OFFSET,
407 NULL, 358 NULL,
408}; 359};
409int 360int
410RUMP_VOP_SETATTR(struct vnode *vp, 361RUMP_VOP_SETATTR(struct vnode *vp,
411 struct vattr *vap, 362 struct vattr *vap,
412 kauth_cred_t cred) 363 kauth_cred_t cred)
413{ 364{
414 int error; 365 int error;
415 bool mpsafe; 366 bool mpsafe;
416 struct rump_vop_setattr_args a; 367 struct rump_vop_setattr_args a;
417#ifdef VNODE_LOCKDEBUG 
418 int islocked_vp; 
419#endif 
420 a.a_desc = VDESC(rump_vop_setattr); 368 a.a_desc = VDESC(rump_vop_setattr);
421 a.a_vp = vp; 369 a.a_vp = vp;
422#ifdef VNODE_LOCKDEBUG 
423 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
424 if (islocked_vp != 1) 
425 panic("rump_vop_setattr: vp: locked %d, expected %d", islocked_vp, 1); 
426#endif 
427 a.a_vap = vap; 370 a.a_vap = vap;
428 a.a_cred = cred; 371 a.a_cred = cred;
429 mpsafe = (vp->v_vflag & VV_MPSAFE); 372 mpsafe = (vp->v_vflag & VV_MPSAFE);
430 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 373 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
431 error = (VCALL(vp, VOFFSET(rump_vop_setattr), &a)); 374 error = (VCALL(vp, VOFFSET(rump_vop_setattr), &a));
432 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 375 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
433 return error; 376 return error;
434} 377}
435 378
436const int rump_vop_read_vp_offsets[] = { 379const int rump_vop_read_vp_offsets[] = {
437 VOPARG_OFFSETOF(struct rump_vop_read_args,a_vp), 380 VOPARG_OFFSETOF(struct rump_vop_read_args,a_vp),
438 VDESC_NO_OFFSET 381 VDESC_NO_OFFSET
439}; 382};
@@ -446,36 +389,28 @@ const struct vnodeop_desc rump_vop_read_ @@ -446,36 +389,28 @@ const struct vnodeop_desc rump_vop_read_
446 VOPARG_OFFSETOF(struct rump_vop_read_args, a_cred), 389 VOPARG_OFFSETOF(struct rump_vop_read_args, a_cred),
447 VDESC_NO_OFFSET, 390 VDESC_NO_OFFSET,
448 NULL, 391 NULL,
449}; 392};
450int 393int
451RUMP_VOP_READ(struct vnode *vp, 394RUMP_VOP_READ(struct vnode *vp,
452 struct uio *uio, 395 struct uio *uio,
453 int ioflag, 396 int ioflag,
454 kauth_cred_t cred) 397 kauth_cred_t cred)
455{ 398{
456 int error; 399 int error;
457 bool mpsafe; 400 bool mpsafe;
458 struct rump_vop_read_args a; 401 struct rump_vop_read_args a;
459#ifdef VNODE_LOCKDEBUG 
460 int islocked_vp; 
461#endif 
462 a.a_desc = VDESC(rump_vop_read); 402 a.a_desc = VDESC(rump_vop_read);
463 a.a_vp = vp; 403 a.a_vp = vp;
464#ifdef VNODE_LOCKDEBUG 
465 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
466 if (islocked_vp != 1) 
467 panic("rump_vop_read: vp: locked %d, expected %d", islocked_vp, 1); 
468#endif 
469 a.a_uio = uio; 404 a.a_uio = uio;
470 a.a_ioflag = ioflag; 405 a.a_ioflag = ioflag;
471 a.a_cred = cred; 406 a.a_cred = cred;
472 mpsafe = (vp->v_vflag & VV_MPSAFE); 407 mpsafe = (vp->v_vflag & VV_MPSAFE);
473 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 408 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
474 error = (VCALL(vp, VOFFSET(rump_vop_read), &a)); 409 error = (VCALL(vp, VOFFSET(rump_vop_read), &a));
475 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 410 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
476 return error; 411 return error;
477} 412}
478 413
479const int rump_vop_write_vp_offsets[] = { 414const int rump_vop_write_vp_offsets[] = {
480 VOPARG_OFFSETOF(struct rump_vop_write_args,a_vp), 415 VOPARG_OFFSETOF(struct rump_vop_write_args,a_vp),
481 VDESC_NO_OFFSET 416 VDESC_NO_OFFSET
@@ -489,36 +424,28 @@ const struct vnodeop_desc rump_vop_write @@ -489,36 +424,28 @@ const struct vnodeop_desc rump_vop_write
489 VOPARG_OFFSETOF(struct rump_vop_write_args, a_cred), 424 VOPARG_OFFSETOF(struct rump_vop_write_args, a_cred),
490 VDESC_NO_OFFSET, 425 VDESC_NO_OFFSET,
491 NULL, 426 NULL,
492}; 427};
493int 428int
494RUMP_VOP_WRITE(struct vnode *vp, 429RUMP_VOP_WRITE(struct vnode *vp,
495 struct uio *uio, 430 struct uio *uio,
496 int ioflag, 431 int ioflag,
497 kauth_cred_t cred) 432 kauth_cred_t cred)
498{ 433{
499 int error; 434 int error;
500 bool mpsafe; 435 bool mpsafe;
501 struct rump_vop_write_args a; 436 struct rump_vop_write_args a;
502#ifdef VNODE_LOCKDEBUG 
503 int islocked_vp; 
504#endif 
505 a.a_desc = VDESC(rump_vop_write); 437 a.a_desc = VDESC(rump_vop_write);
506 a.a_vp = vp; 438 a.a_vp = vp;
507#ifdef VNODE_LOCKDEBUG 
508 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
509 if (islocked_vp != 1) 
510 panic("rump_vop_write: vp: locked %d, expected %d", islocked_vp, 1); 
511#endif 
512 a.a_uio = uio; 439 a.a_uio = uio;
513 a.a_ioflag = ioflag; 440 a.a_ioflag = ioflag;
514 a.a_cred = cred; 441 a.a_cred = cred;
515 mpsafe = (vp->v_vflag & VV_MPSAFE); 442 mpsafe = (vp->v_vflag & VV_MPSAFE);
516 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 443 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
517 error = (VCALL(vp, VOFFSET(rump_vop_write), &a)); 444 error = (VCALL(vp, VOFFSET(rump_vop_write), &a));
518 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 445 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
519 return error; 446 return error;
520} 447}
521 448
522const int rump_vop_ioctl_vp_offsets[] = { 449const int rump_vop_ioctl_vp_offsets[] = {
523 VOPARG_OFFSETOF(struct rump_vop_ioctl_args,a_vp), 450 VOPARG_OFFSETOF(struct rump_vop_ioctl_args,a_vp),
524 VDESC_NO_OFFSET 451 VDESC_NO_OFFSET
@@ -533,36 +460,28 @@ const struct vnodeop_desc rump_vop_ioctl @@ -533,36 +460,28 @@ const struct vnodeop_desc rump_vop_ioctl
533 VDESC_NO_OFFSET, 460 VDESC_NO_OFFSET,
534 NULL, 461 NULL,
535}; 462};
536int 463int
537RUMP_VOP_IOCTL(struct vnode *vp, 464RUMP_VOP_IOCTL(struct vnode *vp,
538 u_long command, 465 u_long command,
539 void *data, 466 void *data,
540 int fflag, 467 int fflag,
541 kauth_cred_t cred) 468 kauth_cred_t cred)
542{ 469{
543 int error; 470 int error;
544 bool mpsafe; 471 bool mpsafe;
545 struct rump_vop_ioctl_args a; 472 struct rump_vop_ioctl_args a;
546#ifdef VNODE_LOCKDEBUG 
547 int islocked_vp; 
548#endif 
549 a.a_desc = VDESC(rump_vop_ioctl); 473 a.a_desc = VDESC(rump_vop_ioctl);
550 a.a_vp = vp; 474 a.a_vp = vp;
551#ifdef VNODE_LOCKDEBUG 
552 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
553 if (islocked_vp != 0) 
554 panic("rump_vop_ioctl: vp: locked %d, expected %d", islocked_vp, 0); 
555#endif 
556 a.a_command = command; 475 a.a_command = command;
557 a.a_data = data; 476 a.a_data = data;
558 a.a_fflag = fflag; 477 a.a_fflag = fflag;
559 a.a_cred = cred; 478 a.a_cred = cred;
560 mpsafe = (vp->v_vflag & VV_MPSAFE); 479 mpsafe = (vp->v_vflag & VV_MPSAFE);
561 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 480 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
562 error = (VCALL(vp, VOFFSET(rump_vop_ioctl), &a)); 481 error = (VCALL(vp, VOFFSET(rump_vop_ioctl), &a));
563 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 482 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
564 return error; 483 return error;
565} 484}
566 485
567const int rump_vop_fcntl_vp_offsets[] = { 486const int rump_vop_fcntl_vp_offsets[] = {
568 VOPARG_OFFSETOF(struct rump_vop_fcntl_args,a_vp), 487 VOPARG_OFFSETOF(struct rump_vop_fcntl_args,a_vp),
@@ -578,36 +497,28 @@ const struct vnodeop_desc rump_vop_fcntl @@ -578,36 +497,28 @@ const struct vnodeop_desc rump_vop_fcntl
578 VDESC_NO_OFFSET, 497 VDESC_NO_OFFSET,
579 NULL, 498 NULL,
580}; 499};
581int 500int
582RUMP_VOP_FCNTL(struct vnode *vp, 501RUMP_VOP_FCNTL(struct vnode *vp,
583 u_int command, 502 u_int command,
584 void *data, 503 void *data,
585 int fflag, 504 int fflag,
586 kauth_cred_t cred) 505 kauth_cred_t cred)
587{ 506{
588 int error; 507 int error;
589 bool mpsafe; 508 bool mpsafe;
590 struct rump_vop_fcntl_args a; 509 struct rump_vop_fcntl_args a;
591#ifdef VNODE_LOCKDEBUG 
592 int islocked_vp; 
593#endif 
594 a.a_desc = VDESC(rump_vop_fcntl); 510 a.a_desc = VDESC(rump_vop_fcntl);
595 a.a_vp = vp; 511 a.a_vp = vp;
596#ifdef VNODE_LOCKDEBUG 
597 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
598 if (islocked_vp != 0) 
599 panic("rump_vop_fcntl: vp: locked %d, expected %d", islocked_vp, 0); 
600#endif 
601 a.a_command = command; 512 a.a_command = command;
602 a.a_data = data; 513 a.a_data = data;
603 a.a_fflag = fflag; 514 a.a_fflag = fflag;
604 a.a_cred = cred; 515 a.a_cred = cred;
605 mpsafe = (vp->v_vflag & VV_MPSAFE); 516 mpsafe = (vp->v_vflag & VV_MPSAFE);
606 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 517 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
607 error = (VCALL(vp, VOFFSET(rump_vop_fcntl), &a)); 518 error = (VCALL(vp, VOFFSET(rump_vop_fcntl), &a));
608 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 519 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
609 return error; 520 return error;
610} 521}
611 522
612const int rump_vop_poll_vp_offsets[] = { 523const int rump_vop_poll_vp_offsets[] = {
613 VOPARG_OFFSETOF(struct rump_vop_poll_args,a_vp), 524 VOPARG_OFFSETOF(struct rump_vop_poll_args,a_vp),
@@ -620,36 +531,28 @@ const struct vnodeop_desc rump_vop_poll_ @@ -620,36 +531,28 @@ const struct vnodeop_desc rump_vop_poll_
620 rump_vop_poll_vp_offsets, 531 rump_vop_poll_vp_offsets,
621 VDESC_NO_OFFSET, 532 VDESC_NO_OFFSET,
622 VDESC_NO_OFFSET, 533 VDESC_NO_OFFSET,
623 VDESC_NO_OFFSET, 534 VDESC_NO_OFFSET,
624 NULL, 535 NULL,
625}; 536};
626int 537int
627RUMP_VOP_POLL(struct vnode *vp, 538RUMP_VOP_POLL(struct vnode *vp,
628 int events) 539 int events)
629{ 540{
630 int error; 541 int error;
631 bool mpsafe; 542 bool mpsafe;
632 struct rump_vop_poll_args a; 543 struct rump_vop_poll_args a;
633#ifdef VNODE_LOCKDEBUG 
634 int islocked_vp; 
635#endif 
636 a.a_desc = VDESC(rump_vop_poll); 544 a.a_desc = VDESC(rump_vop_poll);
637 a.a_vp = vp; 545 a.a_vp = vp;
638#ifdef VNODE_LOCKDEBUG 
639 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
640 if (islocked_vp != 0) 
641 panic("rump_vop_poll: vp: locked %d, expected %d", islocked_vp, 0); 
642#endif 
643 a.a_events = events; 546 a.a_events = events;
644 mpsafe = (vp->v_vflag & VV_MPSAFE); 547 mpsafe = (vp->v_vflag & VV_MPSAFE);
645 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 548 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
646 error = (VCALL(vp, VOFFSET(rump_vop_poll), &a)); 549 error = (VCALL(vp, VOFFSET(rump_vop_poll), &a));
647 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 550 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
648 return error; 551 return error;
649} 552}
650 553
651const int rump_vop_kqfilter_vp_offsets[] = { 554const int rump_vop_kqfilter_vp_offsets[] = {
652 VOPARG_OFFSETOF(struct rump_vop_kqfilter_args,a_vp), 555 VOPARG_OFFSETOF(struct rump_vop_kqfilter_args,a_vp),
653 VDESC_NO_OFFSET 556 VDESC_NO_OFFSET
654}; 557};
655const struct vnodeop_desc rump_vop_kqfilter_desc = { 558const struct vnodeop_desc rump_vop_kqfilter_desc = {
@@ -659,36 +562,28 @@ const struct vnodeop_desc rump_vop_kqfil @@ -659,36 +562,28 @@ const struct vnodeop_desc rump_vop_kqfil
659 rump_vop_kqfilter_vp_offsets, 562 rump_vop_kqfilter_vp_offsets,
660 VDESC_NO_OFFSET, 563 VDESC_NO_OFFSET,
661 VDESC_NO_OFFSET, 564 VDESC_NO_OFFSET,
662 VDESC_NO_OFFSET, 565 VDESC_NO_OFFSET,
663 NULL, 566 NULL,
664}; 567};
665int 568int
666RUMP_VOP_KQFILTER(struct vnode *vp, 569RUMP_VOP_KQFILTER(struct vnode *vp,
667 struct knote *kn) 570 struct knote *kn)
668{ 571{
669 int error; 572 int error;
670 bool mpsafe; 573 bool mpsafe;
671 struct rump_vop_kqfilter_args a; 574 struct rump_vop_kqfilter_args a;
672#ifdef VNODE_LOCKDEBUG 
673 int islocked_vp; 
674#endif 
675 a.a_desc = VDESC(rump_vop_kqfilter); 575 a.a_desc = VDESC(rump_vop_kqfilter);
676 a.a_vp = vp; 576 a.a_vp = vp;
677#ifdef VNODE_LOCKDEBUG 
678 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
679 if (islocked_vp != 0) 
680 panic("rump_vop_kqfilter: vp: locked %d, expected %d", islocked_vp, 0); 
681#endif 
682 a.a_kn = kn; 577 a.a_kn = kn;
683 mpsafe = (vp->v_vflag & VV_MPSAFE); 578 mpsafe = (vp->v_vflag & VV_MPSAFE);
684 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 579 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
685 error = (VCALL(vp, VOFFSET(rump_vop_kqfilter), &a)); 580 error = (VCALL(vp, VOFFSET(rump_vop_kqfilter), &a));
686 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 581 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
687 return error; 582 return error;
688} 583}
689 584
690const int rump_vop_revoke_vp_offsets[] = { 585const int rump_vop_revoke_vp_offsets[] = {
691 VOPARG_OFFSETOF(struct rump_vop_revoke_args,a_vp), 586 VOPARG_OFFSETOF(struct rump_vop_revoke_args,a_vp),
692 VDESC_NO_OFFSET 587 VDESC_NO_OFFSET
693}; 588};
694const struct vnodeop_desc rump_vop_revoke_desc = { 589const struct vnodeop_desc rump_vop_revoke_desc = {
@@ -698,36 +593,28 @@ const struct vnodeop_desc rump_vop_revok @@ -698,36 +593,28 @@ const struct vnodeop_desc rump_vop_revok
698 rump_vop_revoke_vp_offsets, 593 rump_vop_revoke_vp_offsets,
699 VDESC_NO_OFFSET, 594 VDESC_NO_OFFSET,
700 VDESC_NO_OFFSET, 595 VDESC_NO_OFFSET,
701 VDESC_NO_OFFSET, 596 VDESC_NO_OFFSET,
702 NULL, 597 NULL,
703}; 598};
704int 599int
705RUMP_VOP_REVOKE(struct vnode *vp, 600RUMP_VOP_REVOKE(struct vnode *vp,
706 int flags) 601 int flags)
707{ 602{
708 int error; 603 int error;
709 bool mpsafe; 604 bool mpsafe;
710 struct rump_vop_revoke_args a; 605 struct rump_vop_revoke_args a;
711#ifdef VNODE_LOCKDEBUG 
712 int islocked_vp; 
713#endif 
714 a.a_desc = VDESC(rump_vop_revoke); 606 a.a_desc = VDESC(rump_vop_revoke);
715 a.a_vp = vp; 607 a.a_vp = vp;
716#ifdef VNODE_LOCKDEBUG 
717 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
718 if (islocked_vp != 0) 
719 panic("rump_vop_revoke: vp: locked %d, expected %d", islocked_vp, 0); 
720#endif 
721 a.a_flags = flags; 608 a.a_flags = flags;
722 mpsafe = (vp->v_vflag & VV_MPSAFE); 609 mpsafe = (vp->v_vflag & VV_MPSAFE);
723 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 610 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
724 error = (VCALL(vp, VOFFSET(rump_vop_revoke), &a)); 611 error = (VCALL(vp, VOFFSET(rump_vop_revoke), &a));
725 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 612 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
726 return error; 613 return error;
727} 614}
728 615
729const int rump_vop_mmap_vp_offsets[] = { 616const int rump_vop_mmap_vp_offsets[] = {
730 VOPARG_OFFSETOF(struct rump_vop_mmap_args,a_vp), 617 VOPARG_OFFSETOF(struct rump_vop_mmap_args,a_vp),
731 VDESC_NO_OFFSET 618 VDESC_NO_OFFSET
732}; 619};
733const struct vnodeop_desc rump_vop_mmap_desc = { 620const struct vnodeop_desc rump_vop_mmap_desc = {
@@ -738,28 +625,26 @@ const struct vnodeop_desc rump_vop_mmap_ @@ -738,28 +625,26 @@ const struct vnodeop_desc rump_vop_mmap_
738 VDESC_NO_OFFSET, 625 VDESC_NO_OFFSET,
739 VOPARG_OFFSETOF(struct rump_vop_mmap_args, a_cred), 626 VOPARG_OFFSETOF(struct rump_vop_mmap_args, a_cred),
740 VDESC_NO_OFFSET, 627 VDESC_NO_OFFSET,
741 NULL, 628 NULL,
742}; 629};
743int 630int
744RUMP_VOP_MMAP(struct vnode *vp, 631RUMP_VOP_MMAP(struct vnode *vp,
745 int prot, 632 int prot,
746 kauth_cred_t cred) 633 kauth_cred_t cred)
747{ 634{
748 int error; 635 int error;
749 bool mpsafe; 636 bool mpsafe;
750 struct rump_vop_mmap_args a; 637 struct rump_vop_mmap_args a;
751#ifdef VNODE_LOCKDEBUG 
752#endif 
753 a.a_desc = VDESC(rump_vop_mmap); 638 a.a_desc = VDESC(rump_vop_mmap);
754 a.a_vp = vp; 639 a.a_vp = vp;
755 a.a_prot = prot; 640 a.a_prot = prot;
756 a.a_cred = cred; 641 a.a_cred = cred;
757 mpsafe = (vp->v_vflag & VV_MPSAFE); 642 mpsafe = (vp->v_vflag & VV_MPSAFE);
758 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 643 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
759 error = (VCALL(vp, VOFFSET(rump_vop_mmap), &a)); 644 error = (VCALL(vp, VOFFSET(rump_vop_mmap), &a));
760 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 645 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
761 return error; 646 return error;
762} 647}
763 648
764const int rump_vop_fsync_vp_offsets[] = { 649const int rump_vop_fsync_vp_offsets[] = {
765 VOPARG_OFFSETOF(struct rump_vop_fsync_args,a_vp), 650 VOPARG_OFFSETOF(struct rump_vop_fsync_args,a_vp),
@@ -775,36 +660,28 @@ const struct vnodeop_desc rump_vop_fsync @@ -775,36 +660,28 @@ const struct vnodeop_desc rump_vop_fsync
775 VDESC_NO_OFFSET, 660 VDESC_NO_OFFSET,
776 NULL, 661 NULL,
777}; 662};
778int 663int
779RUMP_VOP_FSYNC(struct vnode *vp, 664RUMP_VOP_FSYNC(struct vnode *vp,
780 kauth_cred_t cred, 665 kauth_cred_t cred,
781 int flags, 666 int flags,
782 off_t offlo, 667 off_t offlo,
783 off_t offhi) 668 off_t offhi)
784{ 669{
785 int error; 670 int error;
786 bool mpsafe; 671 bool mpsafe;
787 struct rump_vop_fsync_args a; 672 struct rump_vop_fsync_args a;
788#ifdef VNODE_LOCKDEBUG 
789 int islocked_vp; 
790#endif 
791 a.a_desc = VDESC(rump_vop_fsync); 673 a.a_desc = VDESC(rump_vop_fsync);
792 a.a_vp = vp; 674 a.a_vp = vp;
793#ifdef VNODE_LOCKDEBUG 
794 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
795 if (islocked_vp != 1) 
796 panic("rump_vop_fsync: vp: locked %d, expected %d", islocked_vp, 1); 
797#endif 
798 a.a_cred = cred; 675 a.a_cred = cred;
799 a.a_flags = flags; 676 a.a_flags = flags;
800 a.a_offlo = offlo; 677 a.a_offlo = offlo;
801 a.a_offhi = offhi; 678 a.a_offhi = offhi;
802 mpsafe = (vp->v_vflag & VV_MPSAFE); 679 mpsafe = (vp->v_vflag & VV_MPSAFE);
803 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 680 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
804 error = (VCALL(vp, VOFFSET(rump_vop_fsync), &a)); 681 error = (VCALL(vp, VOFFSET(rump_vop_fsync), &a));
805 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 682 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
806 return error; 683 return error;
807} 684}
808 685
809const int rump_vop_seek_vp_offsets[] = { 686const int rump_vop_seek_vp_offsets[] = {
810 VOPARG_OFFSETOF(struct rump_vop_seek_args,a_vp), 687 VOPARG_OFFSETOF(struct rump_vop_seek_args,a_vp),
@@ -819,28 +696,26 @@ const struct vnodeop_desc rump_vop_seek_ @@ -819,28 +696,26 @@ const struct vnodeop_desc rump_vop_seek_
819 VOPARG_OFFSETOF(struct rump_vop_seek_args, a_cred), 696 VOPARG_OFFSETOF(struct rump_vop_seek_args, a_cred),
820 VDESC_NO_OFFSET, 697 VDESC_NO_OFFSET,
821 NULL, 698 NULL,
822}; 699};
823int 700int
824RUMP_VOP_SEEK(struct vnode *vp, 701RUMP_VOP_SEEK(struct vnode *vp,
825 off_t oldoff, 702 off_t oldoff,
826 off_t newoff, 703 off_t newoff,
827 kauth_cred_t cred) 704 kauth_cred_t cred)
828{ 705{
829 int error; 706 int error;
830 bool mpsafe; 707 bool mpsafe;
831 struct rump_vop_seek_args a; 708 struct rump_vop_seek_args a;
832#ifdef VNODE_LOCKDEBUG 
833#endif 
834 a.a_desc = VDESC(rump_vop_seek); 709 a.a_desc = VDESC(rump_vop_seek);
835 a.a_vp = vp; 710 a.a_vp = vp;
836 a.a_oldoff = oldoff; 711 a.a_oldoff = oldoff;
837 a.a_newoff = newoff; 712 a.a_newoff = newoff;
838 a.a_cred = cred; 713 a.a_cred = cred;
839 mpsafe = (vp->v_vflag & VV_MPSAFE); 714 mpsafe = (vp->v_vflag & VV_MPSAFE);
840 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 715 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
841 error = (VCALL(vp, VOFFSET(rump_vop_seek), &a)); 716 error = (VCALL(vp, VOFFSET(rump_vop_seek), &a));
842 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 717 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
843 return error; 718 return error;
844} 719}
845 720
846const int rump_vop_remove_vp_offsets[] = { 721const int rump_vop_remove_vp_offsets[] = {
@@ -856,43 +731,29 @@ const struct vnodeop_desc rump_vop_remov @@ -856,43 +731,29 @@ const struct vnodeop_desc rump_vop_remov
856 VDESC_NO_OFFSET, 731 VDESC_NO_OFFSET,
857 VDESC_NO_OFFSET, 732 VDESC_NO_OFFSET,
858 VOPARG_OFFSETOF(struct rump_vop_remove_args, a_cnp), 733 VOPARG_OFFSETOF(struct rump_vop_remove_args, a_cnp),
859 NULL, 734 NULL,
860}; 735};
861int 736int
862RUMP_VOP_REMOVE(struct vnode *dvp, 737RUMP_VOP_REMOVE(struct vnode *dvp,
863 struct vnode *vp, 738 struct vnode *vp,
864 struct componentname *cnp) 739 struct componentname *cnp)
865{ 740{
866 int error; 741 int error;
867 bool mpsafe; 742 bool mpsafe;
868 struct rump_vop_remove_args a; 743 struct rump_vop_remove_args a;
869#ifdef VNODE_LOCKDEBUG 
870 int islocked_dvp; 
871 int islocked_vp; 
872#endif 
873 a.a_desc = VDESC(rump_vop_remove); 744 a.a_desc = VDESC(rump_vop_remove);
874 a.a_dvp = dvp; 745 a.a_dvp = dvp;
875#ifdef VNODE_LOCKDEBUG 746 a.a_vp = vp;
876 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
877 if (islocked_dvp != 1) 
878 panic("rump_vop_remove: dvp: locked %d, expected %d", islocked_dvp, 1); 
879#endif 
880 a.a_vp = vp; 
881#ifdef VNODE_LOCKDEBUG 
882 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
883 if (islocked_vp != 1) 
884 panic("rump_vop_remove: vp: locked %d, expected %d", islocked_vp, 1); 
885#endif 
886 a.a_cnp = cnp; 747 a.a_cnp = cnp;
887 mpsafe = (dvp->v_vflag & VV_MPSAFE); 748 mpsafe = (dvp->v_vflag & VV_MPSAFE);
888 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 749 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
889 error = (VCALL(dvp, VOFFSET(rump_vop_remove), &a)); 750 error = (VCALL(dvp, VOFFSET(rump_vop_remove), &a));
890 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 751 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
891 return error; 752 return error;
892} 753}
893 754
894const int rump_vop_link_vp_offsets[] = { 755const int rump_vop_link_vp_offsets[] = {
895 VOPARG_OFFSETOF(struct rump_vop_link_args,a_dvp), 756 VOPARG_OFFSETOF(struct rump_vop_link_args,a_dvp),
896 VOPARG_OFFSETOF(struct rump_vop_link_args,a_vp), 757 VOPARG_OFFSETOF(struct rump_vop_link_args,a_vp),
897 VDESC_NO_OFFSET 758 VDESC_NO_OFFSET
898}; 759};
@@ -904,43 +765,29 @@ const struct vnodeop_desc rump_vop_link_ @@ -904,43 +765,29 @@ const struct vnodeop_desc rump_vop_link_
904 VDESC_NO_OFFSET, 765 VDESC_NO_OFFSET,
905 VDESC_NO_OFFSET, 766 VDESC_NO_OFFSET,
906 VOPARG_OFFSETOF(struct rump_vop_link_args, a_cnp), 767 VOPARG_OFFSETOF(struct rump_vop_link_args, a_cnp),
907 NULL, 768 NULL,
908}; 769};
909int 770int
910RUMP_VOP_LINK(struct vnode *dvp, 771RUMP_VOP_LINK(struct vnode *dvp,
911 struct vnode *vp, 772 struct vnode *vp,
912 struct componentname *cnp) 773 struct componentname *cnp)
913{ 774{
914 int error; 775 int error;
915 bool mpsafe; 776 bool mpsafe;
916 struct rump_vop_link_args a; 777 struct rump_vop_link_args a;
917#ifdef VNODE_LOCKDEBUG 
918 int islocked_dvp; 
919 int islocked_vp; 
920#endif 
921 a.a_desc = VDESC(rump_vop_link); 778 a.a_desc = VDESC(rump_vop_link);
922 a.a_dvp = dvp; 779 a.a_dvp = dvp;
923#ifdef VNODE_LOCKDEBUG 780 a.a_vp = vp;
924 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
925 if (islocked_dvp != 1) 
926 panic("rump_vop_link: dvp: locked %d, expected %d", islocked_dvp, 1); 
927#endif 
928 a.a_vp = vp; 
929#ifdef VNODE_LOCKDEBUG 
930 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
931 if (islocked_vp != 0) 
932 panic("rump_vop_link: vp: locked %d, expected %d", islocked_vp, 0); 
933#endif 
934 a.a_cnp = cnp; 781 a.a_cnp = cnp;
935 mpsafe = (dvp->v_vflag & VV_MPSAFE); 782 mpsafe = (dvp->v_vflag & VV_MPSAFE);
936 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 783 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
937 error = (VCALL(dvp, VOFFSET(rump_vop_link), &a)); 784 error = (VCALL(dvp, VOFFSET(rump_vop_link), &a));
938 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 785 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
939 return error; 786 return error;
940} 787}
941 788
942const int rump_vop_rename_vp_offsets[] = { 789const int rump_vop_rename_vp_offsets[] = {
943 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_fdvp), 790 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_fdvp),
944 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_fvp), 791 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_fvp),
945 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_tdvp), 792 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_tdvp),
946 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_tvp), 793 VOPARG_OFFSETOF(struct rump_vop_rename_args,a_tvp),
@@ -957,51 +804,31 @@ const struct vnodeop_desc rump_vop_renam @@ -957,51 +804,31 @@ const struct vnodeop_desc rump_vop_renam
957 NULL, 804 NULL,
958}; 805};
959int 806int
960RUMP_VOP_RENAME(struct vnode *fdvp, 807RUMP_VOP_RENAME(struct vnode *fdvp,
961 struct vnode *fvp, 808 struct vnode *fvp,
962 struct componentname *fcnp, 809 struct componentname *fcnp,
963 struct vnode *tdvp, 810 struct vnode *tdvp,
964 struct vnode *tvp, 811 struct vnode *tvp,
965 struct componentname *tcnp) 812 struct componentname *tcnp)
966{ 813{
967 int error; 814 int error;
968 bool mpsafe; 815 bool mpsafe;
969 struct rump_vop_rename_args a; 816 struct rump_vop_rename_args a;
970#ifdef VNODE_LOCKDEBUG 
971 int islocked_fdvp; 
972 int islocked_fvp; 
973 int islocked_tdvp; 
974#endif 
975 a.a_desc = VDESC(rump_vop_rename); 817 a.a_desc = VDESC(rump_vop_rename);
976 a.a_fdvp = fdvp; 818 a.a_fdvp = fdvp;
977#ifdef VNODE_LOCKDEBUG 
978 islocked_fdvp = (fdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fdvp) == LK_EXCLUSIVE) : 0; 
979 if (islocked_fdvp != 0) 
980 panic("rump_vop_rename: fdvp: locked %d, expected %d", islocked_fdvp, 0); 
981#endif 
982 a.a_fvp = fvp; 819 a.a_fvp = fvp;
983#ifdef VNODE_LOCKDEBUG 
984 islocked_fvp = (fvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fvp) == LK_EXCLUSIVE) : 0; 
985 if (islocked_fvp != 0) 
986 panic("rump_vop_rename: fvp: locked %d, expected %d", islocked_fvp, 0); 
987#endif 
988 a.a_fcnp = fcnp; 820 a.a_fcnp = fcnp;
989 a.a_tdvp = tdvp; 821 a.a_tdvp = tdvp;
990#ifdef VNODE_LOCKDEBUG 
991 islocked_tdvp = (tdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(tdvp) == LK_EXCLUSIVE) : 1; 
992 if (islocked_tdvp != 1) 
993 panic("rump_vop_rename: tdvp: locked %d, expected %d", islocked_tdvp, 1); 
994#endif 
995 a.a_tvp = tvp; 822 a.a_tvp = tvp;
996 a.a_tcnp = tcnp; 823 a.a_tcnp = tcnp;
997 mpsafe = (fdvp->v_vflag & VV_MPSAFE); 824 mpsafe = (fdvp->v_vflag & VV_MPSAFE);
998 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 825 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
999 error = (VCALL(fdvp, VOFFSET(rump_vop_rename), &a)); 826 error = (VCALL(fdvp, VOFFSET(rump_vop_rename), &a));
1000 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 827 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1001 return error; 828 return error;
1002} 829}
1003 830
1004const int rump_vop_mkdir_vp_offsets[] = { 831const int rump_vop_mkdir_vp_offsets[] = {
1005 VOPARG_OFFSETOF(struct rump_vop_mkdir_args,a_dvp), 832 VOPARG_OFFSETOF(struct rump_vop_mkdir_args,a_dvp),
1006 VDESC_NO_OFFSET 833 VDESC_NO_OFFSET
1007}; 834};
@@ -1014,36 +841,28 @@ const struct vnodeop_desc rump_vop_mkdir @@ -1014,36 +841,28 @@ const struct vnodeop_desc rump_vop_mkdir
1014 VDESC_NO_OFFSET, 841 VDESC_NO_OFFSET,
1015 VOPARG_OFFSETOF(struct rump_vop_mkdir_args, a_cnp), 842 VOPARG_OFFSETOF(struct rump_vop_mkdir_args, a_cnp),
1016 NULL, 843 NULL,
1017}; 844};
1018int 845int
1019RUMP_VOP_MKDIR(struct vnode *dvp, 846RUMP_VOP_MKDIR(struct vnode *dvp,
1020 struct vnode **vpp, 847 struct vnode **vpp,
1021 struct componentname *cnp, 848 struct componentname *cnp,
1022 struct vattr *vap) 849 struct vattr *vap)
1023{ 850{
1024 int error; 851 int error;
1025 bool mpsafe; 852 bool mpsafe;
1026 struct rump_vop_mkdir_args a; 853 struct rump_vop_mkdir_args a;
1027#ifdef VNODE_LOCKDEBUG 
1028 int islocked_dvp; 
1029#endif 
1030 a.a_desc = VDESC(rump_vop_mkdir); 854 a.a_desc = VDESC(rump_vop_mkdir);
1031 a.a_dvp = dvp; 855 a.a_dvp = dvp;
1032#ifdef VNODE_LOCKDEBUG 
1033 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1034 if (islocked_dvp != 1) 
1035 panic("rump_vop_mkdir: dvp: locked %d, expected %d", islocked_dvp, 1); 
1036#endif 
1037 a.a_vpp = vpp; 856 a.a_vpp = vpp;
1038 a.a_cnp = cnp; 857 a.a_cnp = cnp;
1039 a.a_vap = vap; 858 a.a_vap = vap;
1040 mpsafe = (dvp->v_vflag & VV_MPSAFE); 859 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1041 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 860 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1042 error = (VCALL(dvp, VOFFSET(rump_vop_mkdir), &a)); 861 error = (VCALL(dvp, VOFFSET(rump_vop_mkdir), &a));
1043 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 862 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1044#ifdef DIAGNOSTIC 863#ifdef DIAGNOSTIC
1045 if (error == 0) 864 if (error == 0)
1046 KASSERT((*vpp)->v_size != VSIZENOTSET 865 KASSERT((*vpp)->v_size != VSIZENOTSET
1047 && (*vpp)->v_writesize != VSIZENOTSET); 866 && (*vpp)->v_writesize != VSIZENOTSET);
1048#endif /* DIAGNOSTIC */ 867#endif /* DIAGNOSTIC */
1049 return error; 868 return error;
@@ -1062,43 +881,29 @@ const struct vnodeop_desc rump_vop_rmdir @@ -1062,43 +881,29 @@ const struct vnodeop_desc rump_vop_rmdir
1062 VDESC_NO_OFFSET, 881 VDESC_NO_OFFSET,
1063 VDESC_NO_OFFSET, 882 VDESC_NO_OFFSET,
1064 VOPARG_OFFSETOF(struct rump_vop_rmdir_args, a_cnp), 883 VOPARG_OFFSETOF(struct rump_vop_rmdir_args, a_cnp),
1065 NULL, 884 NULL,
1066}; 885};
1067int 886int
1068RUMP_VOP_RMDIR(struct vnode *dvp, 887RUMP_VOP_RMDIR(struct vnode *dvp,
1069 struct vnode *vp, 888 struct vnode *vp,
1070 struct componentname *cnp) 889 struct componentname *cnp)
1071{ 890{
1072 int error; 891 int error;
1073 bool mpsafe; 892 bool mpsafe;
1074 struct rump_vop_rmdir_args a; 893 struct rump_vop_rmdir_args a;
1075#ifdef VNODE_LOCKDEBUG 
1076 int islocked_dvp; 
1077 int islocked_vp; 
1078#endif 
1079 a.a_desc = VDESC(rump_vop_rmdir); 894 a.a_desc = VDESC(rump_vop_rmdir);
1080 a.a_dvp = dvp; 895 a.a_dvp = dvp;
1081#ifdef VNODE_LOCKDEBUG 896 a.a_vp = vp;
1082 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1083 if (islocked_dvp != 1) 
1084 panic("rump_vop_rmdir: dvp: locked %d, expected %d", islocked_dvp, 1); 
1085#endif 
1086 a.a_vp = vp; 
1087#ifdef VNODE_LOCKDEBUG 
1088 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1089 if (islocked_vp != 1) 
1090 panic("rump_vop_rmdir: vp: locked %d, expected %d", islocked_vp, 1); 
1091#endif 
1092 a.a_cnp = cnp; 897 a.a_cnp = cnp;
1093 mpsafe = (dvp->v_vflag & VV_MPSAFE); 898 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1094 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 899 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1095 error = (VCALL(dvp, VOFFSET(rump_vop_rmdir), &a)); 900 error = (VCALL(dvp, VOFFSET(rump_vop_rmdir), &a));
1096 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 901 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1097 return error; 902 return error;
1098} 903}
1099 904
1100const int rump_vop_symlink_vp_offsets[] = { 905const int rump_vop_symlink_vp_offsets[] = {
1101 VOPARG_OFFSETOF(struct rump_vop_symlink_args,a_dvp), 906 VOPARG_OFFSETOF(struct rump_vop_symlink_args,a_dvp),
1102 VDESC_NO_OFFSET 907 VDESC_NO_OFFSET
1103}; 908};
1104const struct vnodeop_desc rump_vop_symlink_desc = { 909const struct vnodeop_desc rump_vop_symlink_desc = {
@@ -1111,36 +916,28 @@ const struct vnodeop_desc rump_vop_symli @@ -1111,36 +916,28 @@ const struct vnodeop_desc rump_vop_symli
1111 VOPARG_OFFSETOF(struct rump_vop_symlink_args, a_cnp), 916 VOPARG_OFFSETOF(struct rump_vop_symlink_args, a_cnp),
1112 NULL, 917 NULL,
1113}; 918};
1114int 919int
1115RUMP_VOP_SYMLINK(struct vnode *dvp, 920RUMP_VOP_SYMLINK(struct vnode *dvp,
1116 struct vnode **vpp, 921 struct vnode **vpp,
1117 struct componentname *cnp, 922 struct componentname *cnp,
1118 struct vattr *vap, 923 struct vattr *vap,
1119 char *target) 924 char *target)
1120{ 925{
1121 int error; 926 int error;
1122 bool mpsafe; 927 bool mpsafe;
1123 struct rump_vop_symlink_args a; 928 struct rump_vop_symlink_args a;
1124#ifdef VNODE_LOCKDEBUG 
1125 int islocked_dvp; 
1126#endif 
1127 a.a_desc = VDESC(rump_vop_symlink); 929 a.a_desc = VDESC(rump_vop_symlink);
1128 a.a_dvp = dvp; 930 a.a_dvp = dvp;
1129#ifdef VNODE_LOCKDEBUG 
1130 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1131 if (islocked_dvp != 1) 
1132 panic("rump_vop_symlink: dvp: locked %d, expected %d", islocked_dvp, 1); 
1133#endif 
1134 a.a_vpp = vpp; 931 a.a_vpp = vpp;
1135 a.a_cnp = cnp; 932 a.a_cnp = cnp;
1136 a.a_vap = vap; 933 a.a_vap = vap;
1137 a.a_target = target; 934 a.a_target = target;
1138 mpsafe = (dvp->v_vflag & VV_MPSAFE); 935 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1139 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 936 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1140 error = (VCALL(dvp, VOFFSET(rump_vop_symlink), &a)); 937 error = (VCALL(dvp, VOFFSET(rump_vop_symlink), &a));
1141 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 938 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1142#ifdef DIAGNOSTIC 939#ifdef DIAGNOSTIC
1143 if (error == 0) 940 if (error == 0)
1144 KASSERT((*vpp)->v_size != VSIZENOTSET 941 KASSERT((*vpp)->v_size != VSIZENOTSET
1145 && (*vpp)->v_writesize != VSIZENOTSET); 942 && (*vpp)->v_writesize != VSIZENOTSET);
1146#endif /* DIAGNOSTIC */ 943#endif /* DIAGNOSTIC */
@@ -1162,36 +959,28 @@ const struct vnodeop_desc rump_vop_readd @@ -1162,36 +959,28 @@ const struct vnodeop_desc rump_vop_readd
1162 NULL, 959 NULL,
1163}; 960};
1164int 961int
1165RUMP_VOP_READDIR(struct vnode *vp, 962RUMP_VOP_READDIR(struct vnode *vp,
1166 struct uio *uio, 963 struct uio *uio,
1167 kauth_cred_t cred, 964 kauth_cred_t cred,
1168 int *eofflag, 965 int *eofflag,
1169 off_t **cookies, 966 off_t **cookies,
1170 int *ncookies) 967 int *ncookies)
1171{ 968{
1172 int error; 969 int error;
1173 bool mpsafe; 970 bool mpsafe;
1174 struct rump_vop_readdir_args a; 971 struct rump_vop_readdir_args a;
1175#ifdef VNODE_LOCKDEBUG 
1176 int islocked_vp; 
1177#endif 
1178 a.a_desc = VDESC(rump_vop_readdir); 972 a.a_desc = VDESC(rump_vop_readdir);
1179 a.a_vp = vp; 973 a.a_vp = vp;
1180#ifdef VNODE_LOCKDEBUG 
1181 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1182 if (islocked_vp != 1) 
1183 panic("rump_vop_readdir: vp: locked %d, expected %d", islocked_vp, 1); 
1184#endif 
1185 a.a_uio = uio; 974 a.a_uio = uio;
1186 a.a_cred = cred; 975 a.a_cred = cred;
1187 a.a_eofflag = eofflag; 976 a.a_eofflag = eofflag;
1188 a.a_cookies = cookies; 977 a.a_cookies = cookies;
1189 a.a_ncookies = ncookies; 978 a.a_ncookies = ncookies;
1190 mpsafe = (vp->v_vflag & VV_MPSAFE); 979 mpsafe = (vp->v_vflag & VV_MPSAFE);
1191 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 980 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1192 error = (VCALL(vp, VOFFSET(rump_vop_readdir), &a)); 981 error = (VCALL(vp, VOFFSET(rump_vop_readdir), &a));
1193 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 982 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1194 return error; 983 return error;
1195} 984}
1196 985
1197const int rump_vop_readlink_vp_offsets[] = { 986const int rump_vop_readlink_vp_offsets[] = {
@@ -1206,36 +995,28 @@ const struct vnodeop_desc rump_vop_readl @@ -1206,36 +995,28 @@ const struct vnodeop_desc rump_vop_readl
1206 VDESC_NO_OFFSET, 995 VDESC_NO_OFFSET,
1207 VOPARG_OFFSETOF(struct rump_vop_readlink_args, a_cred), 996 VOPARG_OFFSETOF(struct rump_vop_readlink_args, a_cred),
1208 VDESC_NO_OFFSET, 997 VDESC_NO_OFFSET,
1209 NULL, 998 NULL,
1210}; 999};
1211int 1000int
1212RUMP_VOP_READLINK(struct vnode *vp, 1001RUMP_VOP_READLINK(struct vnode *vp,
1213 struct uio *uio, 1002 struct uio *uio,
1214 kauth_cred_t cred) 1003 kauth_cred_t cred)
1215{ 1004{
1216 int error; 1005 int error;
1217 bool mpsafe; 1006 bool mpsafe;
1218 struct rump_vop_readlink_args a; 1007 struct rump_vop_readlink_args a;
1219#ifdef VNODE_LOCKDEBUG 
1220 int islocked_vp; 
1221#endif 
1222 a.a_desc = VDESC(rump_vop_readlink); 1008 a.a_desc = VDESC(rump_vop_readlink);
1223 a.a_vp = vp; 1009 a.a_vp = vp;
1224#ifdef VNODE_LOCKDEBUG 
1225 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1226 if (islocked_vp != 1) 
1227 panic("rump_vop_readlink: vp: locked %d, expected %d", islocked_vp, 1); 
1228#endif 
1229 a.a_uio = uio; 1010 a.a_uio = uio;
1230 a.a_cred = cred; 1011 a.a_cred = cred;
1231 mpsafe = (vp->v_vflag & VV_MPSAFE); 1012 mpsafe = (vp->v_vflag & VV_MPSAFE);
1232 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1013 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1233 error = (VCALL(vp, VOFFSET(rump_vop_readlink), &a)); 1014 error = (VCALL(vp, VOFFSET(rump_vop_readlink), &a));
1234 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1015 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1235 return error; 1016 return error;
1236} 1017}
1237 1018
1238const int rump_vop_abortop_vp_offsets[] = { 1019const int rump_vop_abortop_vp_offsets[] = {
1239 VOPARG_OFFSETOF(struct rump_vop_abortop_args,a_dvp), 1020 VOPARG_OFFSETOF(struct rump_vop_abortop_args,a_dvp),
1240 VDESC_NO_OFFSET 1021 VDESC_NO_OFFSET
1241}; 1022};
@@ -1246,28 +1027,26 @@ const struct vnodeop_desc rump_vop_abort @@ -1246,28 +1027,26 @@ const struct vnodeop_desc rump_vop_abort
1246 rump_vop_abortop_vp_offsets, 1027 rump_vop_abortop_vp_offsets,
1247 VDESC_NO_OFFSET, 1028 VDESC_NO_OFFSET,
1248 VDESC_NO_OFFSET, 1029 VDESC_NO_OFFSET,
1249 VOPARG_OFFSETOF(struct rump_vop_abortop_args, a_cnp), 1030 VOPARG_OFFSETOF(struct rump_vop_abortop_args, a_cnp),
1250 NULL, 1031 NULL,
1251}; 1032};
1252int 1033int
1253RUMP_VOP_ABORTOP(struct vnode *dvp, 1034RUMP_VOP_ABORTOP(struct vnode *dvp,
1254 struct componentname *cnp) 1035 struct componentname *cnp)
1255{ 1036{
1256 int error; 1037 int error;
1257 bool mpsafe; 1038 bool mpsafe;
1258 struct rump_vop_abortop_args a; 1039 struct rump_vop_abortop_args a;
1259#ifdef VNODE_LOCKDEBUG 
1260#endif 
1261 a.a_desc = VDESC(rump_vop_abortop); 1040 a.a_desc = VDESC(rump_vop_abortop);
1262 a.a_dvp = dvp; 1041 a.a_dvp = dvp;
1263 a.a_cnp = cnp; 1042 a.a_cnp = cnp;
1264 mpsafe = (dvp->v_vflag & VV_MPSAFE); 1043 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1265 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1044 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1266 error = (VCALL(dvp, VOFFSET(rump_vop_abortop), &a)); 1045 error = (VCALL(dvp, VOFFSET(rump_vop_abortop), &a));
1267 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1046 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1268 return error; 1047 return error;
1269} 1048}
1270 1049
1271const int rump_vop_inactive_vp_offsets[] = { 1050const int rump_vop_inactive_vp_offsets[] = {
1272 VOPARG_OFFSETOF(struct rump_vop_inactive_args,a_vp), 1051 VOPARG_OFFSETOF(struct rump_vop_inactive_args,a_vp),
1273 VDESC_NO_OFFSET 1052 VDESC_NO_OFFSET
@@ -1279,36 +1058,28 @@ const struct vnodeop_desc rump_vop_inact @@ -1279,36 +1058,28 @@ const struct vnodeop_desc rump_vop_inact
1279 rump_vop_inactive_vp_offsets, 1058 rump_vop_inactive_vp_offsets,
1280 VDESC_NO_OFFSET, 1059 VDESC_NO_OFFSET,
1281 VDESC_NO_OFFSET, 1060 VDESC_NO_OFFSET,
1282 VDESC_NO_OFFSET, 1061 VDESC_NO_OFFSET,
1283 NULL, 1062 NULL,
1284}; 1063};
1285int 1064int
1286RUMP_VOP_INACTIVE(struct vnode *vp, 1065RUMP_VOP_INACTIVE(struct vnode *vp,
1287 bool *recycle) 1066 bool *recycle)
1288{ 1067{
1289 int error; 1068 int error;
1290 bool mpsafe; 1069 bool mpsafe;
1291 struct rump_vop_inactive_args a; 1070 struct rump_vop_inactive_args a;
1292#ifdef VNODE_LOCKDEBUG 
1293 int islocked_vp; 
1294#endif 
1295 a.a_desc = VDESC(rump_vop_inactive); 1071 a.a_desc = VDESC(rump_vop_inactive);
1296 a.a_vp = vp; 1072 a.a_vp = vp;
1297#ifdef VNODE_LOCKDEBUG 
1298 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1299 if (islocked_vp != 1) 
1300 panic("rump_vop_inactive: vp: locked %d, expected %d", islocked_vp, 1); 
1301#endif 
1302 a.a_recycle = recycle; 1073 a.a_recycle = recycle;
1303 mpsafe = (vp->v_vflag & VV_MPSAFE); 1074 mpsafe = (vp->v_vflag & VV_MPSAFE);
1304 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1075 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1305 error = (VCALL(vp, VOFFSET(rump_vop_inactive), &a)); 1076 error = (VCALL(vp, VOFFSET(rump_vop_inactive), &a));
1306 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1077 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1307 return error; 1078 return error;
1308} 1079}
1309 1080
1310const int rump_vop_reclaim_vp_offsets[] = { 1081const int rump_vop_reclaim_vp_offsets[] = {
1311 VOPARG_OFFSETOF(struct rump_vop_reclaim_args,a_vp), 1082 VOPARG_OFFSETOF(struct rump_vop_reclaim_args,a_vp),
1312 VDESC_NO_OFFSET 1083 VDESC_NO_OFFSET
1313}; 1084};
1314const struct vnodeop_desc rump_vop_reclaim_desc = { 1085const struct vnodeop_desc rump_vop_reclaim_desc = {
@@ -1317,36 +1088,28 @@ const struct vnodeop_desc rump_vop_recla @@ -1317,36 +1088,28 @@ const struct vnodeop_desc rump_vop_recla
1317 0, 1088 0,
1318 rump_vop_reclaim_vp_offsets, 1089 rump_vop_reclaim_vp_offsets,
1319 VDESC_NO_OFFSET, 1090 VDESC_NO_OFFSET,
1320 VDESC_NO_OFFSET, 1091 VDESC_NO_OFFSET,
1321 VDESC_NO_OFFSET, 1092 VDESC_NO_OFFSET,
1322 NULL, 1093 NULL,
1323}; 1094};
1324int 1095int
1325RUMP_VOP_RECLAIM(struct vnode *vp) 1096RUMP_VOP_RECLAIM(struct vnode *vp)
1326{ 1097{
1327 int error; 1098 int error;
1328 bool mpsafe; 1099 bool mpsafe;
1329 struct rump_vop_reclaim_args a; 1100 struct rump_vop_reclaim_args a;
1330#ifdef VNODE_LOCKDEBUG 
1331 int islocked_vp; 
1332#endif 
1333 a.a_desc = VDESC(rump_vop_reclaim); 1101 a.a_desc = VDESC(rump_vop_reclaim);
1334 a.a_vp = vp; 1102 a.a_vp = vp;
1335#ifdef VNODE_LOCKDEBUG 
1336 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1337 if (islocked_vp != 0) 
1338 panic("rump_vop_reclaim: vp: locked %d, expected %d", islocked_vp, 0); 
1339#endif 
1340 mpsafe = (vp->v_vflag & VV_MPSAFE); 1103 mpsafe = (vp->v_vflag & VV_MPSAFE);
1341 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1104 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1342 error = (VCALL(vp, VOFFSET(rump_vop_reclaim), &a)); 1105 error = (VCALL(vp, VOFFSET(rump_vop_reclaim), &a));
1343 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1106 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1344 return error; 1107 return error;
1345} 1108}
1346 1109
1347const int rump_vop_lock_vp_offsets[] = { 1110const int rump_vop_lock_vp_offsets[] = {
1348 VOPARG_OFFSETOF(struct rump_vop_lock_args,a_vp), 1111 VOPARG_OFFSETOF(struct rump_vop_lock_args,a_vp),
1349 VDESC_NO_OFFSET 1112 VDESC_NO_OFFSET
1350}; 1113};
1351const struct vnodeop_desc rump_vop_lock_desc = { 1114const struct vnodeop_desc rump_vop_lock_desc = {
1352 RUMP_VOP_LOCK_DESCOFFSET, 1115 RUMP_VOP_LOCK_DESCOFFSET,
@@ -1355,36 +1118,28 @@ const struct vnodeop_desc rump_vop_lock_ @@ -1355,36 +1118,28 @@ const struct vnodeop_desc rump_vop_lock_
1355 rump_vop_lock_vp_offsets, 1118 rump_vop_lock_vp_offsets,
1356 VDESC_NO_OFFSET, 1119 VDESC_NO_OFFSET,
1357 VDESC_NO_OFFSET, 1120 VDESC_NO_OFFSET,
1358 VDESC_NO_OFFSET, 1121 VDESC_NO_OFFSET,
1359 NULL, 1122 NULL,
1360}; 1123};
1361int 1124int
1362RUMP_VOP_LOCK(struct vnode *vp, 1125RUMP_VOP_LOCK(struct vnode *vp,
1363 int flags) 1126 int flags)
1364{ 1127{
1365 int error; 1128 int error;
1366 bool mpsafe; 1129 bool mpsafe;
1367 struct rump_vop_lock_args a; 1130 struct rump_vop_lock_args a;
1368#ifdef VNODE_LOCKDEBUG 
1369 int islocked_vp; 
1370#endif 
1371 a.a_desc = VDESC(rump_vop_lock); 1131 a.a_desc = VDESC(rump_vop_lock);
1372 a.a_vp = vp; 1132 a.a_vp = vp;
1373#ifdef VNODE_LOCKDEBUG 
1374 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1375 if (islocked_vp != 0) 
1376 panic("rump_vop_lock: vp: locked %d, expected %d", islocked_vp, 0); 
1377#endif 
1378 a.a_flags = flags; 1133 a.a_flags = flags;
1379 mpsafe = (vp->v_vflag & VV_MPSAFE); 1134 mpsafe = (vp->v_vflag & VV_MPSAFE);
1380 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1135 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1381 error = (VCALL(vp, VOFFSET(rump_vop_lock), &a)); 1136 error = (VCALL(vp, VOFFSET(rump_vop_lock), &a));
1382 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1137 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1383 return error; 1138 return error;
1384} 1139}
1385 1140
1386const int rump_vop_unlock_vp_offsets[] = { 1141const int rump_vop_unlock_vp_offsets[] = {
1387 VOPARG_OFFSETOF(struct rump_vop_unlock_args,a_vp), 1142 VOPARG_OFFSETOF(struct rump_vop_unlock_args,a_vp),
1388 VDESC_NO_OFFSET 1143 VDESC_NO_OFFSET
1389}; 1144};
1390const struct vnodeop_desc rump_vop_unlock_desc = { 1145const struct vnodeop_desc rump_vop_unlock_desc = {
@@ -1394,36 +1149,28 @@ const struct vnodeop_desc rump_vop_unloc @@ -1394,36 +1149,28 @@ const struct vnodeop_desc rump_vop_unloc
1394 rump_vop_unlock_vp_offsets, 1149 rump_vop_unlock_vp_offsets,
1395 VDESC_NO_OFFSET, 1150 VDESC_NO_OFFSET,
1396 VDESC_NO_OFFSET, 1151 VDESC_NO_OFFSET,
1397 VDESC_NO_OFFSET, 1152 VDESC_NO_OFFSET,
1398 NULL, 1153 NULL,
1399}; 1154};
1400int 1155int
1401RUMP_VOP_UNLOCK(struct vnode *vp, 1156RUMP_VOP_UNLOCK(struct vnode *vp,
1402 int flags) 1157 int flags)
1403{ 1158{
1404 int error; 1159 int error;
1405 bool mpsafe; 1160 bool mpsafe;
1406 struct rump_vop_unlock_args a; 1161 struct rump_vop_unlock_args a;
1407#ifdef VNODE_LOCKDEBUG 
1408 int islocked_vp; 
1409#endif 
1410 a.a_desc = VDESC(rump_vop_unlock); 1162 a.a_desc = VDESC(rump_vop_unlock);
1411 a.a_vp = vp; 1163 a.a_vp = vp;
1412#ifdef VNODE_LOCKDEBUG 
1413 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1414 if (islocked_vp != 1) 
1415 panic("rump_vop_unlock: vp: locked %d, expected %d", islocked_vp, 1); 
1416#endif 
1417 a.a_flags = flags; 1164 a.a_flags = flags;
1418 mpsafe = (vp->v_vflag & VV_MPSAFE); 1165 mpsafe = (vp->v_vflag & VV_MPSAFE);
1419 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1166 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1420 error = (VCALL(vp, VOFFSET(rump_vop_unlock), &a)); 1167 error = (VCALL(vp, VOFFSET(rump_vop_unlock), &a));
1421 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1168 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1422 return error; 1169 return error;
1423} 1170}
1424 1171
1425const int rump_vop_bmap_vp_offsets[] = { 1172const int rump_vop_bmap_vp_offsets[] = {
1426 VOPARG_OFFSETOF(struct rump_vop_bmap_args,a_vp), 1173 VOPARG_OFFSETOF(struct rump_vop_bmap_args,a_vp),
1427 VDESC_NO_OFFSET 1174 VDESC_NO_OFFSET
1428}; 1175};
1429const struct vnodeop_desc rump_vop_bmap_desc = { 1176const struct vnodeop_desc rump_vop_bmap_desc = {
@@ -1436,28 +1183,26 @@ const struct vnodeop_desc rump_vop_bmap_ @@ -1436,28 +1183,26 @@ const struct vnodeop_desc rump_vop_bmap_
1436 VDESC_NO_OFFSET, 1183 VDESC_NO_OFFSET,
1437 NULL, 1184 NULL,
1438}; 1185};
1439int 1186int
1440RUMP_VOP_BMAP(struct vnode *vp, 1187RUMP_VOP_BMAP(struct vnode *vp,
1441 daddr_t bn, 1188 daddr_t bn,
1442 struct vnode **vpp, 1189 struct vnode **vpp,
1443 daddr_t *bnp, 1190 daddr_t *bnp,
1444 int *runp) 1191 int *runp)
1445{ 1192{
1446 int error; 1193 int error;
1447 bool mpsafe; 1194 bool mpsafe;
1448 struct rump_vop_bmap_args a; 1195 struct rump_vop_bmap_args a;
1449#ifdef VNODE_LOCKDEBUG 
1450#endif 
1451 a.a_desc = VDESC(rump_vop_bmap); 1196 a.a_desc = VDESC(rump_vop_bmap);
1452 a.a_vp = vp; 1197 a.a_vp = vp;
1453 a.a_bn = bn; 1198 a.a_bn = bn;
1454 a.a_vpp = vpp; 1199 a.a_vpp = vpp;
1455 a.a_bnp = bnp; 1200 a.a_bnp = bnp;
1456 a.a_runp = runp; 1201 a.a_runp = runp;
1457 mpsafe = (vp->v_vflag & VV_MPSAFE); 1202 mpsafe = (vp->v_vflag & VV_MPSAFE);
1458 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1203 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1459 error = (VCALL(vp, VOFFSET(rump_vop_bmap), &a)); 1204 error = (VCALL(vp, VOFFSET(rump_vop_bmap), &a));
1460 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1205 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1461 return error; 1206 return error;
1462} 1207}
1463 1208
@@ -1472,28 +1217,26 @@ const struct vnodeop_desc rump_vop_strat @@ -1472,28 +1217,26 @@ const struct vnodeop_desc rump_vop_strat
1472 rump_vop_strategy_vp_offsets, 1217 rump_vop_strategy_vp_offsets,
1473 VDESC_NO_OFFSET, 1218 VDESC_NO_OFFSET,
1474 VDESC_NO_OFFSET, 1219 VDESC_NO_OFFSET,
1475 VDESC_NO_OFFSET, 1220 VDESC_NO_OFFSET,
1476 NULL, 1221 NULL,
1477}; 1222};
1478int 1223int
1479RUMP_VOP_STRATEGY(struct vnode *vp, 1224RUMP_VOP_STRATEGY(struct vnode *vp,
1480 struct buf *bp) 1225 struct buf *bp)
1481{ 1226{
1482 int error; 1227 int error;
1483 bool mpsafe; 1228 bool mpsafe;
1484 struct rump_vop_strategy_args a; 1229 struct rump_vop_strategy_args a;
1485#ifdef VNODE_LOCKDEBUG 
1486#endif 
1487 a.a_desc = VDESC(rump_vop_strategy); 1230 a.a_desc = VDESC(rump_vop_strategy);
1488 a.a_vp = vp; 1231 a.a_vp = vp;
1489 a.a_bp = bp; 1232 a.a_bp = bp;
1490 mpsafe = (vp->v_vflag & VV_MPSAFE); 1233 mpsafe = (vp->v_vflag & VV_MPSAFE);
1491 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1234 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1492 error = (VCALL(vp, VOFFSET(rump_vop_strategy), &a)); 1235 error = (VCALL(vp, VOFFSET(rump_vop_strategy), &a));
1493 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1236 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1494 return error; 1237 return error;
1495} 1238}
1496 1239
1497const int rump_vop_print_vp_offsets[] = { 1240const int rump_vop_print_vp_offsets[] = {
1498 VOPARG_OFFSETOF(struct rump_vop_print_args,a_vp), 1241 VOPARG_OFFSETOF(struct rump_vop_print_args,a_vp),
1499 VDESC_NO_OFFSET 1242 VDESC_NO_OFFSET
@@ -1504,28 +1247,26 @@ const struct vnodeop_desc rump_vop_print @@ -1504,28 +1247,26 @@ const struct vnodeop_desc rump_vop_print
1504 0, 1247 0,
1505 rump_vop_print_vp_offsets, 1248 rump_vop_print_vp_offsets,
1506 VDESC_NO_OFFSET, 1249 VDESC_NO_OFFSET,
1507 VDESC_NO_OFFSET, 1250 VDESC_NO_OFFSET,
1508 VDESC_NO_OFFSET, 1251 VDESC_NO_OFFSET,
1509 NULL, 1252 NULL,
1510}; 1253};
1511int 1254int
1512RUMP_VOP_PRINT(struct vnode *vp) 1255RUMP_VOP_PRINT(struct vnode *vp)
1513{ 1256{
1514 int error; 1257 int error;
1515 bool mpsafe; 1258 bool mpsafe;
1516 struct rump_vop_print_args a; 1259 struct rump_vop_print_args a;
1517#ifdef VNODE_LOCKDEBUG 
1518#endif 
1519 a.a_desc = VDESC(rump_vop_print); 1260 a.a_desc = VDESC(rump_vop_print);
1520 a.a_vp = vp; 1261 a.a_vp = vp;
1521 mpsafe = (vp->v_vflag & VV_MPSAFE); 1262 mpsafe = (vp->v_vflag & VV_MPSAFE);
1522 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1263 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1523 error = (VCALL(vp, VOFFSET(rump_vop_print), &a)); 1264 error = (VCALL(vp, VOFFSET(rump_vop_print), &a));
1524 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1265 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1525 return error; 1266 return error;
1526} 1267}
1527 1268
1528const int rump_vop_islocked_vp_offsets[] = { 1269const int rump_vop_islocked_vp_offsets[] = {
1529 VOPARG_OFFSETOF(struct rump_vop_islocked_args,a_vp), 1270 VOPARG_OFFSETOF(struct rump_vop_islocked_args,a_vp),
1530 VDESC_NO_OFFSET 1271 VDESC_NO_OFFSET
1531}; 1272};
@@ -1535,28 +1276,26 @@ const struct vnodeop_desc rump_vop_isloc @@ -1535,28 +1276,26 @@ const struct vnodeop_desc rump_vop_isloc
1535 0, 1276 0,
1536 rump_vop_islocked_vp_offsets, 1277 rump_vop_islocked_vp_offsets,
1537 VDESC_NO_OFFSET, 1278 VDESC_NO_OFFSET,
1538 VDESC_NO_OFFSET, 1279 VDESC_NO_OFFSET,
1539 VDESC_NO_OFFSET, 1280 VDESC_NO_OFFSET,
1540 NULL, 1281 NULL,
1541}; 1282};
1542int 1283int
1543RUMP_VOP_ISLOCKED(struct vnode *vp) 1284RUMP_VOP_ISLOCKED(struct vnode *vp)
1544{ 1285{
1545 int error; 1286 int error;
1546 bool mpsafe; 1287 bool mpsafe;
1547 struct rump_vop_islocked_args a; 1288 struct rump_vop_islocked_args a;
1548#ifdef VNODE_LOCKDEBUG 
1549#endif 
1550 a.a_desc = VDESC(rump_vop_islocked); 1289 a.a_desc = VDESC(rump_vop_islocked);
1551 a.a_vp = vp; 1290 a.a_vp = vp;
1552 mpsafe = (vp->v_vflag & VV_MPSAFE); 1291 mpsafe = (vp->v_vflag & VV_MPSAFE);
1553 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1292 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1554 error = (VCALL(vp, VOFFSET(rump_vop_islocked), &a)); 1293 error = (VCALL(vp, VOFFSET(rump_vop_islocked), &a));
1555 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1294 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1556 return error; 1295 return error;
1557} 1296}
1558 1297
1559const int rump_vop_pathconf_vp_offsets[] = { 1298const int rump_vop_pathconf_vp_offsets[] = {
1560 VOPARG_OFFSETOF(struct rump_vop_pathconf_args,a_vp), 1299 VOPARG_OFFSETOF(struct rump_vop_pathconf_args,a_vp),
1561 VDESC_NO_OFFSET 1300 VDESC_NO_OFFSET
1562}; 1301};
@@ -1568,36 +1307,28 @@ const struct vnodeop_desc rump_vop_pathc @@ -1568,36 +1307,28 @@ const struct vnodeop_desc rump_vop_pathc
1568 VDESC_NO_OFFSET, 1307 VDESC_NO_OFFSET,
1569 VDESC_NO_OFFSET, 1308 VDESC_NO_OFFSET,
1570 VDESC_NO_OFFSET, 1309 VDESC_NO_OFFSET,
1571 NULL, 1310 NULL,
1572}; 1311};
1573int 1312int
1574RUMP_VOP_PATHCONF(struct vnode *vp, 1313RUMP_VOP_PATHCONF(struct vnode *vp,
1575 int name, 1314 int name,
1576 register_t *retval) 1315 register_t *retval)
1577{ 1316{
1578 int error; 1317 int error;
1579 bool mpsafe; 1318 bool mpsafe;
1580 struct rump_vop_pathconf_args a; 1319 struct rump_vop_pathconf_args a;
1581#ifdef VNODE_LOCKDEBUG 
1582 int islocked_vp; 
1583#endif 
1584 a.a_desc = VDESC(rump_vop_pathconf); 1320 a.a_desc = VDESC(rump_vop_pathconf);
1585 a.a_vp = vp; 1321 a.a_vp = vp;
1586#ifdef VNODE_LOCKDEBUG 
1587 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1588 if (islocked_vp != 1) 
1589 panic("rump_vop_pathconf: vp: locked %d, expected %d", islocked_vp, 1); 
1590#endif 
1591 a.a_name = name; 1322 a.a_name = name;
1592 a.a_retval = retval; 1323 a.a_retval = retval;
1593 mpsafe = (vp->v_vflag & VV_MPSAFE); 1324 mpsafe = (vp->v_vflag & VV_MPSAFE);
1594 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1325 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1595 error = (VCALL(vp, VOFFSET(rump_vop_pathconf), &a)); 1326 error = (VCALL(vp, VOFFSET(rump_vop_pathconf), &a));
1596 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1327 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1597 return error; 1328 return error;
1598} 1329}
1599 1330
1600const int rump_vop_advlock_vp_offsets[] = { 1331const int rump_vop_advlock_vp_offsets[] = {
1601 VOPARG_OFFSETOF(struct rump_vop_advlock_args,a_vp), 1332 VOPARG_OFFSETOF(struct rump_vop_advlock_args,a_vp),
1602 VDESC_NO_OFFSET 1333 VDESC_NO_OFFSET
1603}; 1334};
@@ -1611,36 +1342,28 @@ const struct vnodeop_desc rump_vop_advlo @@ -1611,36 +1342,28 @@ const struct vnodeop_desc rump_vop_advlo
1611 VDESC_NO_OFFSET, 1342 VDESC_NO_OFFSET,
1612 NULL, 1343 NULL,
1613}; 1344};
1614int 1345int
1615RUMP_VOP_ADVLOCK(struct vnode *vp, 1346RUMP_VOP_ADVLOCK(struct vnode *vp,
1616 void *id, 1347 void *id,
1617 int op, 1348 int op,
1618 struct flock *fl, 1349 struct flock *fl,
1619 int flags) 1350 int flags)
1620{ 1351{
1621 int error; 1352 int error;
1622 bool mpsafe; 1353 bool mpsafe;
1623 struct rump_vop_advlock_args a; 1354 struct rump_vop_advlock_args a;
1624#ifdef VNODE_LOCKDEBUG 
1625 int islocked_vp; 
1626#endif 
1627 a.a_desc = VDESC(rump_vop_advlock); 1355 a.a_desc = VDESC(rump_vop_advlock);
1628 a.a_vp = vp; 1356 a.a_vp = vp;
1629#ifdef VNODE_LOCKDEBUG 
1630 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0; 
1631 if (islocked_vp != 0) 
1632 panic("rump_vop_advlock: vp: locked %d, expected %d", islocked_vp, 0); 
1633#endif 
1634 a.a_id = id; 1357 a.a_id = id;
1635 a.a_op = op; 1358 a.a_op = op;
1636 a.a_fl = fl; 1359 a.a_fl = fl;
1637 a.a_flags = flags; 1360 a.a_flags = flags;
1638 mpsafe = (vp->v_vflag & VV_MPSAFE); 1361 mpsafe = (vp->v_vflag & VV_MPSAFE);
1639 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1362 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1640 error = (VCALL(vp, VOFFSET(rump_vop_advlock), &a)); 1363 error = (VCALL(vp, VOFFSET(rump_vop_advlock), &a));
1641 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1364 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1642 return error; 1365 return error;
1643} 1366}
1644 1367
1645const int rump_vop_whiteout_vp_offsets[] = { 1368const int rump_vop_whiteout_vp_offsets[] = {
1646 VOPARG_OFFSETOF(struct rump_vop_whiteout_args,a_dvp), 1369 VOPARG_OFFSETOF(struct rump_vop_whiteout_args,a_dvp),
@@ -1654,36 +1377,28 @@ const struct vnodeop_desc rump_vop_white @@ -1654,36 +1377,28 @@ const struct vnodeop_desc rump_vop_white
1654 VDESC_NO_OFFSET, 1377 VDESC_NO_OFFSET,
1655 VDESC_NO_OFFSET, 1378 VDESC_NO_OFFSET,
1656 VOPARG_OFFSETOF(struct rump_vop_whiteout_args, a_cnp), 1379 VOPARG_OFFSETOF(struct rump_vop_whiteout_args, a_cnp),
1657 NULL, 1380 NULL,
1658}; 1381};
1659int 1382int
1660RUMP_VOP_WHITEOUT(struct vnode *dvp, 1383RUMP_VOP_WHITEOUT(struct vnode *dvp,
1661 struct componentname *cnp, 1384 struct componentname *cnp,
1662 int flags) 1385 int flags)
1663{ 1386{
1664 int error; 1387 int error;
1665 bool mpsafe; 1388 bool mpsafe;
1666 struct rump_vop_whiteout_args a; 1389 struct rump_vop_whiteout_args a;
1667#ifdef VNODE_LOCKDEBUG 
1668 int islocked_dvp; 
1669#endif 
1670 a.a_desc = VDESC(rump_vop_whiteout); 1390 a.a_desc = VDESC(rump_vop_whiteout);
1671 a.a_dvp = dvp; 1391 a.a_dvp = dvp;
1672#ifdef VNODE_LOCKDEBUG 
1673 islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1; 
1674 if (islocked_dvp != 1) 
1675 panic("rump_vop_whiteout: dvp: locked %d, expected %d", islocked_dvp, 1); 
1676#endif 
1677 a.a_cnp = cnp; 1392 a.a_cnp = cnp;
1678 a.a_flags = flags; 1393 a.a_flags = flags;
1679 mpsafe = (dvp->v_vflag & VV_MPSAFE); 1394 mpsafe = (dvp->v_vflag & VV_MPSAFE);
1680 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1395 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1681 error = (VCALL(dvp, VOFFSET(rump_vop_whiteout), &a)); 1396 error = (VCALL(dvp, VOFFSET(rump_vop_whiteout), &a));
1682 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1397 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1683 return error; 1398 return error;
1684} 1399}
1685 1400
1686const int rump_vop_getpages_vp_offsets[] = { 1401const int rump_vop_getpages_vp_offsets[] = {
1687 VOPARG_OFFSETOF(struct rump_vop_getpages_args,a_vp), 1402 VOPARG_OFFSETOF(struct rump_vop_getpages_args,a_vp),
1688 VDESC_NO_OFFSET 1403 VDESC_NO_OFFSET
1689}; 1404};
@@ -1700,28 +1415,26 @@ const struct vnodeop_desc rump_vop_getpa @@ -1700,28 +1415,26 @@ const struct vnodeop_desc rump_vop_getpa
1700int 1415int
1701RUMP_VOP_GETPAGES(struct vnode *vp, 1416RUMP_VOP_GETPAGES(struct vnode *vp,
1702 off_t offset, 1417 off_t offset,
1703 struct vm_page **m, 1418 struct vm_page **m,
1704 int *count, 1419 int *count,
1705 int centeridx, 1420 int centeridx,
1706 int access_type, 1421 int access_type,
1707 int advice, 1422 int advice,
1708 int flags) 1423 int flags)
1709{ 1424{
1710 int error; 1425 int error;
1711 bool mpsafe; 1426 bool mpsafe;
1712 struct rump_vop_getpages_args a; 1427 struct rump_vop_getpages_args a;
1713#ifdef VNODE_LOCKDEBUG 
1714#endif 
1715 a.a_desc = VDESC(rump_vop_getpages); 1428 a.a_desc = VDESC(rump_vop_getpages);
1716 a.a_vp = vp; 1429 a.a_vp = vp;
1717 a.a_offset = offset; 1430 a.a_offset = offset;
1718 a.a_m = m; 1431 a.a_m = m;
1719 a.a_count = count; 1432 a.a_count = count;
1720 a.a_centeridx = centeridx; 1433 a.a_centeridx = centeridx;
1721 a.a_access_type = access_type; 1434 a.a_access_type = access_type;
1722 a.a_advice = advice; 1435 a.a_advice = advice;
1723 a.a_flags = flags; 1436 a.a_flags = flags;
1724 mpsafe = (vp->v_vflag & VV_MPSAFE); 1437 mpsafe = (vp->v_vflag & VV_MPSAFE);
1725 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1438 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1726 error = (VCALL(vp, VOFFSET(rump_vop_getpages), &a)); 1439 error = (VCALL(vp, VOFFSET(rump_vop_getpages), &a));
1727 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1440 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
@@ -1741,28 +1454,26 @@ const struct vnodeop_desc rump_vop_putpa @@ -1741,28 +1454,26 @@ const struct vnodeop_desc rump_vop_putpa
1741 VDESC_NO_OFFSET, 1454 VDESC_NO_OFFSET,
1742 VDESC_NO_OFFSET, 1455 VDESC_NO_OFFSET,
1743 NULL, 1456 NULL,
1744}; 1457};
1745int 1458int
1746RUMP_VOP_PUTPAGES(struct vnode *vp, 1459RUMP_VOP_PUTPAGES(struct vnode *vp,
1747 off_t offlo, 1460 off_t offlo,
1748 off_t offhi, 1461 off_t offhi,
1749 int flags) 1462 int flags)
1750{ 1463{
1751 int error; 1464 int error;
1752 bool mpsafe; 1465 bool mpsafe;
1753 struct rump_vop_putpages_args a; 1466 struct rump_vop_putpages_args a;
1754#ifdef VNODE_LOCKDEBUG 
1755#endif 
1756 a.a_desc = VDESC(rump_vop_putpages); 1467 a.a_desc = VDESC(rump_vop_putpages);
1757 a.a_vp = vp; 1468 a.a_vp = vp;
1758 a.a_offlo = offlo; 1469 a.a_offlo = offlo;
1759 a.a_offhi = offhi; 1470 a.a_offhi = offhi;
1760 a.a_flags = flags; 1471 a.a_flags = flags;
1761 mpsafe = (vp->v_vflag & VV_MPSAFE); 1472 mpsafe = (vp->v_vflag & VV_MPSAFE);
1762 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1473 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1763 error = (VCALL(vp, VOFFSET(rump_vop_putpages), &a)); 1474 error = (VCALL(vp, VOFFSET(rump_vop_putpages), &a));
1764 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1475 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1765 return error; 1476 return error;
1766} 1477}
1767 1478
1768const int rump_vop_closeextattr_vp_offsets[] = { 1479const int rump_vop_closeextattr_vp_offsets[] = {
@@ -1777,36 +1488,28 @@ const struct vnodeop_desc rump_vop_close @@ -1777,36 +1488,28 @@ const struct vnodeop_desc rump_vop_close
1777 VDESC_NO_OFFSET, 1488 VDESC_NO_OFFSET,
1778 VOPARG_OFFSETOF(struct rump_vop_closeextattr_args, a_cred), 1489 VOPARG_OFFSETOF(struct rump_vop_closeextattr_args, a_cred),
1779 VDESC_NO_OFFSET, 1490 VDESC_NO_OFFSET,
1780 NULL, 1491 NULL,
1781}; 1492};
1782int 1493int
1783RUMP_VOP_CLOSEEXTATTR(struct vnode *vp, 1494RUMP_VOP_CLOSEEXTATTR(struct vnode *vp,
1784 int commit, 1495 int commit,
1785 kauth_cred_t cred) 1496 kauth_cred_t cred)
1786{ 1497{
1787 int error; 1498 int error;
1788 bool mpsafe; 1499 bool mpsafe;
1789 struct rump_vop_closeextattr_args a; 1500 struct rump_vop_closeextattr_args a;
1790#ifdef VNODE_LOCKDEBUG 
1791 int islocked_vp; 
1792#endif 
1793 a.a_desc = VDESC(rump_vop_closeextattr); 1501 a.a_desc = VDESC(rump_vop_closeextattr);
1794 a.a_vp = vp; 1502 a.a_vp = vp;
1795#ifdef VNODE_LOCKDEBUG 
1796 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1797 if (islocked_vp != 1) 
1798 panic("rump_vop_closeextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1799#endif 
1800 a.a_commit = commit; 1503 a.a_commit = commit;
1801 a.a_cred = cred; 1504 a.a_cred = cred;
1802 mpsafe = (vp->v_vflag & VV_MPSAFE); 1505 mpsafe = (vp->v_vflag & VV_MPSAFE);
1803 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1506 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1804 error = (VCALL(vp, VOFFSET(rump_vop_closeextattr), &a)); 1507 error = (VCALL(vp, VOFFSET(rump_vop_closeextattr), &a));
1805 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1508 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1806 return error; 1509 return error;
1807} 1510}
1808 1511
1809const int rump_vop_getextattr_vp_offsets[] = { 1512const int rump_vop_getextattr_vp_offsets[] = {
1810 VOPARG_OFFSETOF(struct rump_vop_getextattr_args,a_vp), 1513 VOPARG_OFFSETOF(struct rump_vop_getextattr_args,a_vp),
1811 VDESC_NO_OFFSET 1514 VDESC_NO_OFFSET
1812}; 1515};
@@ -1821,36 +1524,28 @@ const struct vnodeop_desc rump_vop_getex
1821 NULL, 1524 NULL,
1822}; 1525};
1823int 1526int
1824RUMP_VOP_GETEXTATTR(struct vnode *vp, 1527RUMP_VOP_GETEXTATTR(struct vnode *vp,
1825 int attrnamespace, 1528 int attrnamespace,
1826 const char *name, 1529 const char *name,
1827 struct uio *uio, 1530 struct uio *uio,
1828 size_t *size, 1531 size_t *size,
1829 kauth_cred_t cred) 1532 kauth_cred_t cred)
1830{ 1533{
1831 int error; 1534 int error;
1832 bool mpsafe; 1535 bool mpsafe;
1833 struct rump_vop_getextattr_args a; 1536 struct rump_vop_getextattr_args a;
1834#ifdef VNODE_LOCKDEBUG 
1835 int islocked_vp; 
1836#endif 
1837 a.a_desc = VDESC(rump_vop_getextattr); 1537 a.a_desc = VDESC(rump_vop_getextattr);
1838 a.a_vp = vp; 1538 a.a_vp = vp;
1839#ifdef VNODE_LOCKDEBUG 
1840 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1841 if (islocked_vp != 1) 
1842 panic("rump_vop_getextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1843#endif 
1844 a.a_attrnamespace = attrnamespace; 1539 a.a_attrnamespace = attrnamespace;
1845 a.a_name = name; 1540 a.a_name = name;
1846 a.a_uio = uio; 1541 a.a_uio = uio;
1847 a.a_size = size; 1542 a.a_size = size;
1848 a.a_cred = cred; 1543 a.a_cred = cred;
1849 mpsafe = (vp->v_vflag & VV_MPSAFE); 1544 mpsafe = (vp->v_vflag & VV_MPSAFE);
1850 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1545 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1851 error = (VCALL(vp, VOFFSET(rump_vop_getextattr), &a)); 1546 error = (VCALL(vp, VOFFSET(rump_vop_getextattr), &a));
1852 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1547 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1853 return error; 1548 return error;
1854} 1549}
1855 1550
1856const int rump_vop_listextattr_vp_offsets[] = { 1551const int rump_vop_listextattr_vp_offsets[] = {
@@ -1867,36 +1562,28 @@ const struct vnodeop_desc rump_vop_liste
1867 VDESC_NO_OFFSET, 1562 VDESC_NO_OFFSET,
1868 NULL, 1563 NULL,
1869}; 1564};
1870int 1565int
1871RUMP_VOP_LISTEXTATTR(struct vnode *vp, 1566RUMP_VOP_LISTEXTATTR(struct vnode *vp,
1872 int attrnamespace, 1567 int attrnamespace,
1873 struct uio *uio, 1568 struct uio *uio,
1874 size_t *size, 1569 size_t *size,
1875 kauth_cred_t cred) 1570 kauth_cred_t cred)
1876{ 1571{
1877 int error; 1572 int error;
1878 bool mpsafe; 1573 bool mpsafe;
1879 struct rump_vop_listextattr_args a; 1574 struct rump_vop_listextattr_args a;
1880#ifdef VNODE_LOCKDEBUG 
1881 int islocked_vp; 
1882#endif 
1883 a.a_desc = VDESC(rump_vop_listextattr); 1575 a.a_desc = VDESC(rump_vop_listextattr);
1884 a.a_vp = vp; 1576 a.a_vp = vp;
1885#ifdef VNODE_LOCKDEBUG 
1886 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1887 if (islocked_vp != 1) 
1888 panic("rump_vop_listextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1889#endif 
1890 a.a_attrnamespace = attrnamespace; 1577 a.a_attrnamespace = attrnamespace;
1891 a.a_uio = uio; 1578 a.a_uio = uio;
1892 a.a_size = size; 1579 a.a_size = size;
1893 a.a_cred = cred; 1580 a.a_cred = cred;
1894 mpsafe = (vp->v_vflag & VV_MPSAFE); 1581 mpsafe = (vp->v_vflag & VV_MPSAFE);
1895 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1582 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1896 error = (VCALL(vp, VOFFSET(rump_vop_listextattr), &a)); 1583 error = (VCALL(vp, VOFFSET(rump_vop_listextattr), &a));
1897 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1584 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1898 return error; 1585 return error;
1899} 1586}
1900 1587
1901const int rump_vop_openextattr_vp_offsets[] = { 1588const int rump_vop_openextattr_vp_offsets[] = {
1902 VOPARG_OFFSETOF(struct rump_vop_openextattr_args,a_vp), 1589 VOPARG_OFFSETOF(struct rump_vop_openextattr_args,a_vp),
@@ -1909,36 +1596,28 @@ const struct vnodeop_desc rump_vop_opene
1909 rump_vop_openextattr_vp_offsets, 1596 rump_vop_openextattr_vp_offsets,
1910 VDESC_NO_OFFSET, 1597 VDESC_NO_OFFSET,
1911 VOPARG_OFFSETOF(struct rump_vop_openextattr_args, a_cred), 1598 VOPARG_OFFSETOF(struct rump_vop_openextattr_args, a_cred),
1912 VDESC_NO_OFFSET, 1599 VDESC_NO_OFFSET,
1913 NULL, 1600 NULL,
1914}; 1601};
1915int 1602int
1916RUMP_VOP_OPENEXTATTR(struct vnode *vp, 1603RUMP_VOP_OPENEXTATTR(struct vnode *vp,
1917 kauth_cred_t cred) 1604 kauth_cred_t cred)
1918{ 1605{
1919 int error; 1606 int error;
1920 bool mpsafe; 1607 bool mpsafe;
1921 struct rump_vop_openextattr_args a; 1608 struct rump_vop_openextattr_args a;
1922#ifdef VNODE_LOCKDEBUG 
1923 int islocked_vp; 
1924#endif 
1925 a.a_desc = VDESC(rump_vop_openextattr); 1609 a.a_desc = VDESC(rump_vop_openextattr);
1926 a.a_vp = vp; 1610 a.a_vp = vp;
1927#ifdef VNODE_LOCKDEBUG 
1928 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1929 if (islocked_vp != 1) 
1930 panic("rump_vop_openextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1931#endif 
1932 a.a_cred = cred; 1611 a.a_cred = cred;
1933 mpsafe = (vp->v_vflag & VV_MPSAFE); 1612 mpsafe = (vp->v_vflag & VV_MPSAFE);
1934 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1613 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1935 error = (VCALL(vp, VOFFSET(rump_vop_openextattr), &a)); 1614 error = (VCALL(vp, VOFFSET(rump_vop_openextattr), &a));
1936 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1615 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1937 return error; 1616 return error;
1938} 1617}
1939 1618
1940const int rump_vop_deleteextattr_vp_offsets[] = { 1619const int rump_vop_deleteextattr_vp_offsets[] = {
1941 VOPARG_OFFSETOF(struct rump_vop_deleteextattr_args,a_vp), 1620 VOPARG_OFFSETOF(struct rump_vop_deleteextattr_args,a_vp),
1942 VDESC_NO_OFFSET 1621 VDESC_NO_OFFSET
1943}; 1622};
1944const struct vnodeop_desc rump_vop_deleteextattr_desc = { 1623const struct vnodeop_desc rump_vop_deleteextattr_desc = {
@@ -1950,36 +1629,28 @@ const struct vnodeop_desc rump_vop_delet
1950 VOPARG_OFFSETOF(struct rump_vop_deleteextattr_args, a_cred), 1629 VOPARG_OFFSETOF(struct rump_vop_deleteextattr_args, a_cred),
1951 VDESC_NO_OFFSET, 1630 VDESC_NO_OFFSET,
1952 NULL, 1631 NULL,
1953}; 1632};
1954int 1633int
1955RUMP_VOP_DELETEEXTATTR(struct vnode *vp, 1634RUMP_VOP_DELETEEXTATTR(struct vnode *vp,
1956 int attrnamespace, 1635 int attrnamespace,
1957 const char *name, 1636 const char *name,
1958 kauth_cred_t cred) 1637 kauth_cred_t cred)
1959{ 1638{
1960 int error; 1639 int error;
1961 bool mpsafe; 1640 bool mpsafe;
1962 struct rump_vop_deleteextattr_args a; 1641 struct rump_vop_deleteextattr_args a;
1963#ifdef VNODE_LOCKDEBUG 
1964 int islocked_vp; 
1965#endif 
1966 a.a_desc = VDESC(rump_vop_deleteextattr); 1642 a.a_desc = VDESC(rump_vop_deleteextattr);
1967 a.a_vp = vp; 1643 a.a_vp = vp;
1968#ifdef VNODE_LOCKDEBUG 
1969 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
1970 if (islocked_vp != 1) 
1971 panic("rump_vop_deleteextattr: vp: locked %d, expected %d", islocked_vp, 1); 
1972#endif 
1973 a.a_attrnamespace = attrnamespace; 1644 a.a_attrnamespace = attrnamespace;
1974 a.a_name = name; 1645 a.a_name = name;
1975 a.a_cred = cred; 1646 a.a_cred = cred;
1976 mpsafe = (vp->v_vflag & VV_MPSAFE); 1647 mpsafe = (vp->v_vflag & VV_MPSAFE);
1977 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1648 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
1978 error = (VCALL(vp, VOFFSET(rump_vop_deleteextattr), &a)); 1649 error = (VCALL(vp, VOFFSET(rump_vop_deleteextattr), &a));
1979 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1650 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
1980 return error; 1651 return error;
1981} 1652}
1982 1653
1983const int rump_vop_setextattr_vp_offsets[] = { 1654const int rump_vop_setextattr_vp_offsets[] = {
1984 VOPARG_OFFSETOF(struct rump_vop_setextattr_args,a_vp), 1655 VOPARG_OFFSETOF(struct rump_vop_setextattr_args,a_vp),
1985 VDESC_NO_OFFSET 1656 VDESC_NO_OFFSET
@@ -1994,36 +1665,28 @@ const struct vnodeop_desc rump_vop_setex
1994 VDESC_NO_OFFSET, 1665 VDESC_NO_OFFSET,
1995 NULL, 1666 NULL,
1996}; 1667};
1997int 1668int
1998RUMP_VOP_SETEXTATTR(struct vnode *vp, 1669RUMP_VOP_SETEXTATTR(struct vnode *vp,
1999 int attrnamespace, 1670 int attrnamespace,
2000 const char *name, 1671 const char *name,
2001 struct uio *uio, 1672 struct uio *uio,
2002 kauth_cred_t cred) 1673 kauth_cred_t cred)
2003{ 1674{
2004 int error; 1675 int error;
2005 bool mpsafe; 1676 bool mpsafe;
2006 struct rump_vop_setextattr_args a; 1677 struct rump_vop_setextattr_args a;
2007#ifdef VNODE_LOCKDEBUG 
2008 int islocked_vp; 
2009#endif 
2010 a.a_desc = VDESC(rump_vop_setextattr); 1678 a.a_desc = VDESC(rump_vop_setextattr);
2011 a.a_vp = vp; 1679 a.a_vp = vp;
2012#ifdef VNODE_LOCKDEBUG 
2013 islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1; 
2014 if (islocked_vp != 1) 
2015 panic("rump_vop_setextattr: vp: locked %d, expected %d", islocked_vp, 1); 
2016#endif 
2017 a.a_attrnamespace = attrnamespace; 1680 a.a_attrnamespace = attrnamespace;
2018 a.a_name = name; 1681 a.a_name = name;
2019 a.a_uio = uio; 1682 a.a_uio = uio;
2020 a.a_cred = cred; 1683 a.a_cred = cred;
2021 mpsafe = (vp->v_vflag & VV_MPSAFE); 1684 mpsafe = (vp->v_vflag & VV_MPSAFE);
2022 if (!mpsafe) { KERNEL_LOCK(1, curlwp); } 1685 if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
2023 error = (VCALL(vp, VOFFSET(rump_vop_setextattr), &a)); 1686 error = (VCALL(vp, VOFFSET(rump_vop_setextattr), &a));
2024 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); } 1687 if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
2025 return error; 1688 return error;
2026} 1689}
2027 1690
2028/* End of special cases. */ 1691/* End of special cases. */
2029 1692
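
For readers skimming the regenerated output: every wrapper in rumpvnode_if.c now has the same shape. The VNODE_LOCKDEBUG assertion block is no longer emitted, while the KERNEL_LOCK bracketing for non-MPSAFE file systems is unchanged. Below is a condensed restatement of RUMP_VOP_OPENEXTATTR from the diff above, with explanatory comments added; the comments are editorial, not part of the output of vnode_if.sh.

int
RUMP_VOP_OPENEXTATTR(struct vnode *vp,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct rump_vop_openextattr_args a;

	/*
	 * The #ifdef VNODE_LOCKDEBUG block that declared islocked_vp,
	 * compared VOP_ISLOCKED(vp) against the expected lock state and
	 * panicked on a mismatch is no longer generated at this point.
	 */
	a.a_desc = VDESC(rump_vop_openextattr);
	a.a_vp = vp;
	a.a_cred = cred;

	/* Non-MPSAFE file systems are still bracketed with the big lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(rump_vop_openextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}
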

cvs diff -r1.74 -r1.75 src/sys/sys/vnode_if.h

--- src/sys/sys/vnode_if.h 2008/11/17 08:59:33 1.74
+++ src/sys/sys/vnode_if.h 2009/09/29 11:54:52 1.75
@@ -1,23 +1,23 @@
1/* $NetBSD: vnode_if.h,v 1.74 2008/11/17 08:59:33 pooka Exp $ */ 1/* $NetBSD: vnode_if.h,v 1.75 2009/09/29 11:54:52 pooka Exp $ */
2 2
3/* 3/*
4 * Warning: DO NOT EDIT! This file is automatically generated! 4 * Warning: DO NOT EDIT! This file is automatically generated!
5 * (Modifications made here may easily be lost!) 5 * (Modifications made here may easily be lost!)
6 * 6 *
7 * Created from the file: 7 * Created from the file:
8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp 8 * NetBSD: vnode_if.src,v 1.58 2008/11/15 19:08:12 pooka Exp
9 * by the script: 9 * by the script:
10 * NetBSD: vnode_if.sh,v 1.50 2008/11/17 08:46:03 pooka Exp 10 * NetBSD: vnode_if.sh,v 1.52 2009/09/29 11:51:02 pooka Exp
11 */ 11 */
12 12
13/* 13/*
14 * Copyright (c) 1992, 1993, 1994, 1995 14 * Copyright (c) 1992, 1993, 1994, 1995
15 * The Regents of the University of California. All rights reserved. 15 * The Regents of the University of California. All rights reserved.
16 * 16 *
17 * Redistribution and use in source and binary forms, with or without 17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions 18 * modification, are permitted provided that the following conditions
19 * are met: 19 * are met:
20 * 1. Redistributions of source code must retain the above copyright 20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer. 21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright 22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the 23 * notice, this list of conditions and the following disclaimer in the
@@ -32,30 +32,26 @@
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE. 39 * SUCH DAMAGE.
40 */ 40 */
41 41
42#ifndef _SYS_VNODE_IF_H_ 42#ifndef _SYS_VNODE_IF_H_
43#define _SYS_VNODE_IF_H_ 43#define _SYS_VNODE_IF_H_
44 44
45#ifdef _KERNEL_OPT 
46#include "opt_vnode_lockdebug.h" 
47#endif /* _KERNEL_OPT */ 
48 
49extern const struct vnodeop_desc vop_default_desc; 45extern const struct vnodeop_desc vop_default_desc;
50 46
51 47
52/* Special cases: */ 48/* Special cases: */
53struct buf; 49struct buf;
54#ifndef _KERNEL 50#ifndef _KERNEL
55#include <stdbool.h> 51#include <stdbool.h>
56#endif 52#endif
57 53
58 54
59#define VOP_BWRITE_DESCOFFSET 1 55#define VOP_BWRITE_DESCOFFSET 1
60struct vop_bwrite_args { 56struct vop_bwrite_args {
61 const struct vnodeop_desc *a_desc; 57 const struct vnodeop_desc *a_desc;