| @@ -1,1285 +1,1285 @@ | | | @@ -1,1285 +1,1285 @@ |
1 | /* $NetBSD: nfs_clbio.c,v 1.6 2020/09/29 03:02:19 msaitoh Exp $ */ | | 1 | /* $NetBSD: nfs_clbio.c,v 1.7 2021/03/29 02:13:37 simonb Exp $ */ |
2 | /*- | | 2 | /*- |
3 | * Copyright (c) 1989, 1993 | | 3 | * Copyright (c) 1989, 1993 |
4 | * The Regents of the University of California. All rights reserved. | | 4 | * The Regents of the University of California. All rights reserved. |
5 | * | | 5 | * |
6 | * This code is derived from software contributed to Berkeley by | | 6 | * This code is derived from software contributed to Berkeley by |
7 | * Rick Macklem at The University of Guelph. | | 7 | * Rick Macklem at The University of Guelph. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 4. Neither the name of the University nor the names of its contributors | | 17 | * 4. Neither the name of the University nor the names of its contributors |
18 | * may be used to endorse or promote products derived from this software | | 18 | * may be used to endorse or promote products derived from this software |
19 | * without specific prior written permission. | | 19 | * without specific prior written permission. |
20 | * | | 20 | * |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
31 | * SUCH DAMAGE. | | 31 | * SUCH DAMAGE. |
32 | * | | 32 | * |
33 | * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 | | 33 | * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 |
34 | */ | | 34 | */ |
35 | | | 35 | |
36 | #include <sys/cdefs.h> | | 36 | #include <sys/cdefs.h> |
37 | /* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 304026 2016-08-12 22:44:59Z rmacklem "); */ | | 37 | /* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 304026 2016-08-12 22:44:59Z rmacklem "); */ |
38 | __RCSID("$NetBSD: nfs_clbio.c,v 1.6 2020/09/29 03:02:19 msaitoh Exp $"); | | 38 | __RCSID("$NetBSD: nfs_clbio.c,v 1.7 2021/03/29 02:13:37 simonb Exp $"); |
39 | | | 39 | |
40 | #include <sys/param.h> | | 40 | #include <sys/param.h> |
41 | #include <sys/systm.h> | | 41 | #include <sys/systm.h> |
42 | #include <sys/buf.h> | | 42 | #include <sys/buf.h> |
43 | #include <sys/kernel.h> | | 43 | #include <sys/kernel.h> |
44 | #include <sys/mount.h> | | 44 | #include <sys/mount.h> |
45 | #include <sys/rwlock.h> | | 45 | #include <sys/rwlock.h> |
46 | #include <sys/vmmeter.h> | | 46 | #include <sys/vmmeter.h> |
47 | #include <sys/vnode.h> | | 47 | #include <sys/vnode.h> |
48 | | | 48 | |
49 | #include <fs/nfs/common/nfsport.h> | | 49 | #include <fs/nfs/common/nfsport.h> |
50 | #include <fs/nfs/client/nfsmount.h> | | 50 | #include <fs/nfs/client/nfsmount.h> |
51 | #include <fs/nfs/client/nfs.h> | | 51 | #include <fs/nfs/client/nfs.h> |
52 | #include <fs/nfs/client/nfsnode.h> | | 52 | #include <fs/nfs/client/nfsnode.h> |
53 | #include <fs/nfs/client/nfs_kdtrace.h> | | 53 | #include <fs/nfs/client/nfs_kdtrace.h> |
54 | | | 54 | |
55 | extern int newnfs_directio_allow_mmap; | | 55 | extern int newnfs_directio_allow_mmap; |
56 | extern struct nfsstatsv1 nfsstatsv1; | | 56 | extern struct nfsstatsv1 nfsstatsv1; |
57 | extern struct mtx ncl_iod_mutex; | | 57 | extern struct mtx ncl_iod_mutex; |
58 | extern int ncl_numasync; | | 58 | extern int ncl_numasync; |
59 | extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; | | 59 | extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; |
60 | extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; | | 60 | extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; |
61 | extern int newnfs_directio_enable; | | 61 | extern int newnfs_directio_enable; |
62 | extern int nfs_keep_dirty_on_error; | | 62 | extern int nfs_keep_dirty_on_error; |
63 | | | 63 | |
64 | int ncl_pbuf_freecnt = -1; /* start out unlimited */ | | 64 | int ncl_pbuf_freecnt = -1; /* start out unlimited */ |
65 | | | 65 | |
66 | static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, | | 66 | static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, |
67 | struct thread *td); | | 67 | struct thread *td); |
68 | static int nfs_directio_write(struct vnode *vp, struct uio *uiop, | | 68 | static int nfs_directio_write(struct vnode *vp, struct uio *uiop, |
69 | struct ucred *cred, int ioflag); | | 69 | struct ucred *cred, int ioflag); |
70 | | | 70 | |
71 | /* | | 71 | /* |
72 | * Vnode op for VM getpages. | | 72 | * Vnode op for VM getpages. |
73 | */ | | 73 | */ |
74 | int | | 74 | int |
75 | ncl_getpages(struct vop_getpages_args *ap) | | 75 | ncl_getpages(struct vop_getpages_args *ap) |
76 | { | | 76 | { |
77 | int i, error, nextoff, size, toff, count, npages; | | 77 | int i, error, nextoff, size, toff, count, npages; |
78 | struct uio uio; | | 78 | struct uio uio; |
79 | struct iovec iov; | | 79 | struct iovec iov; |
80 | vm_offset_t kva; | | 80 | vaddr_t kva; |
81 | struct buf *bp; | | 81 | struct buf *bp; |
82 | struct vnode *vp; | | 82 | struct vnode *vp; |
83 | struct thread *td; | | 83 | struct thread *td; |
84 | struct ucred *cred; | | 84 | struct ucred *cred; |
85 | struct nfsmount *nmp; | | 85 | struct nfsmount *nmp; |
86 | vm_object_t object; | | 86 | vm_object_t object; |
87 | vm_page_t *pages; | | 87 | vm_page_t *pages; |
88 | struct nfsnode *np; | | 88 | struct nfsnode *np; |
89 | | | 89 | |
90 | vp = ap->a_vp; | | 90 | vp = ap->a_vp; |
91 | np = VTONFS(vp); | | 91 | np = VTONFS(vp); |
92 | td = curthread; /* XXX */ | | 92 | td = curthread; /* XXX */ |
93 | cred = curthread->td_ucred; /* XXX */ | | 93 | cred = curthread->td_ucred; /* XXX */ |
94 | nmp = VFSTONFS(vp->v_mount); | | 94 | nmp = VFSTONFS(vp->v_mount); |
95 | pages = ap->a_m; | | 95 | pages = ap->a_m; |
96 | npages = ap->a_count; | | 96 | npages = ap->a_count; |
97 | | | 97 | |
98 | if ((object = vp->v_object) == NULL) { | | 98 | if ((object = vp->v_object) == NULL) { |
99 | printf("ncl_getpages: called with non-merged cache vnode\n"); | | 99 | printf("ncl_getpages: called with non-merged cache vnode\n"); |
100 | return (VM_PAGER_ERROR); | | 100 | return (VM_PAGER_ERROR); |
101 | } | | 101 | } |
102 | | | 102 | |
103 | if (newnfs_directio_enable && !newnfs_directio_allow_mmap) { | | 103 | if (newnfs_directio_enable && !newnfs_directio_allow_mmap) { |
104 | mtx_lock(&np->n_mtx); | | 104 | mtx_lock(&np->n_mtx); |
105 | if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { | | 105 | if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { |
106 | mtx_unlock(&np->n_mtx); | | 106 | mtx_unlock(&np->n_mtx); |
107 | printf("ncl_getpages: called on non-cacheable vnode\n"); | | 107 | printf("ncl_getpages: called on non-cacheable vnode\n"); |
108 | return (VM_PAGER_ERROR); | | 108 | return (VM_PAGER_ERROR); |
109 | } else | | 109 | } else |
110 | mtx_unlock(&np->n_mtx); | | 110 | mtx_unlock(&np->n_mtx); |
111 | } | | 111 | } |
112 | | | 112 | |
113 | mtx_lock(&nmp->nm_mtx); | | 113 | mtx_lock(&nmp->nm_mtx); |
114 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && | | 114 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && |
115 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { | | 115 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { |
116 | mtx_unlock(&nmp->nm_mtx); | | 116 | mtx_unlock(&nmp->nm_mtx); |
117 | /* We'll never get here for v4, because we always have fsinfo */ | | 117 | /* We'll never get here for v4, because we always have fsinfo */ |
118 | (void)ncl_fsinfo(nmp, vp, cred, td); | | 118 | (void)ncl_fsinfo(nmp, vp, cred, td); |
119 | } else | | 119 | } else |
120 | mtx_unlock(&nmp->nm_mtx); | | 120 | mtx_unlock(&nmp->nm_mtx); |
121 | | | 121 | |
122 | /* | | 122 | /* |
123 | * If the requested page is partially valid, just return it and | | 123 | * If the requested page is partially valid, just return it and |
124 | * allow the pager to zero-out the blanks. Partially valid pages | | 124 | * allow the pager to zero-out the blanks. Partially valid pages |
125 | * can only occur at the file EOF. | | 125 | * can only occur at the file EOF. |
126 | * | | 126 | * |
127 | * XXXGL: is that true for NFS, where short read can occur??? | | 127 | * XXXGL: is that true for NFS, where short read can occur??? |
128 | */ | | 128 | */ |
129 | VM_OBJECT_WLOCK(object); | | 129 | VM_OBJECT_WLOCK(object); |
130 | if (pages[npages - 1]->valid != 0 && --npages == 0) | | 130 | if (pages[npages - 1]->valid != 0 && --npages == 0) |
131 | goto out; | | 131 | goto out; |
132 | VM_OBJECT_WUNLOCK(object); | | 132 | VM_OBJECT_WUNLOCK(object); |
133 | | | 133 | |
134 | /* | | 134 | /* |
135 | * We use only the kva address for the buffer, but this is extremely | | 135 | * We use only the kva address for the buffer, but this is extremely |
136 | * convenient and fast. | | 136 | * convenient and fast. |
137 | */ | | 137 | */ |
138 | bp = getpbuf(&ncl_pbuf_freecnt); | | 138 | bp = getpbuf(&ncl_pbuf_freecnt); |
139 | | | 139 | |
140 | kva = (vm_offset_t) bp->b_data; | | 140 | kva = (vaddr_t) bp->b_data; |
141 | pmap_qenter(kva, pages, npages); | | 141 | pmap_qenter(kva, pages, npages); |
142 | PCPU_INC(cnt.v_vnodein); | | 142 | PCPU_INC(cnt.v_vnodein); |
143 | PCPU_ADD(cnt.v_vnodepgsin, npages); | | 143 | PCPU_ADD(cnt.v_vnodepgsin, npages); |
144 | | | 144 | |
145 | count = npages << PAGE_SHIFT; | | 145 | count = npages << PAGE_SHIFT; |
146 | iov.iov_base = (caddr_t) kva; | | 146 | iov.iov_base = (caddr_t) kva; |
147 | iov.iov_len = count; | | 147 | iov.iov_len = count; |
148 | uio.uio_iov = &iov; | | 148 | uio.uio_iov = &iov; |
149 | uio.uio_iovcnt = 1; | | 149 | uio.uio_iovcnt = 1; |
150 | uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); | | 150 | uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); |
151 | uio.uio_resid = count; | | 151 | uio.uio_resid = count; |
152 | uio.uio_segflg = UIO_SYSSPACE; | | 152 | uio.uio_segflg = UIO_SYSSPACE; |
153 | uio.uio_rw = UIO_READ; | | 153 | uio.uio_rw = UIO_READ; |
154 | uio.uio_td = td; | | 154 | uio.uio_td = td; |
155 | | | 155 | |
156 | error = ncl_readrpc(vp, &uio, cred); | | 156 | error = ncl_readrpc(vp, &uio, cred); |
157 | pmap_qremove(kva, npages); | | 157 | pmap_qremove(kva, npages); |
158 | | | 158 | |
159 | relpbuf(bp, &ncl_pbuf_freecnt); | | 159 | relpbuf(bp, &ncl_pbuf_freecnt); |
160 | | | 160 | |
161 | if (error && (uio.uio_resid == count)) { | | 161 | if (error && (uio.uio_resid == count)) { |
162 | printf("ncl_getpages: error %d\n", error); | | 162 | printf("ncl_getpages: error %d\n", error); |
163 | return (VM_PAGER_ERROR); | | 163 | return (VM_PAGER_ERROR); |
164 | } | | 164 | } |
165 | | | 165 | |
166 | /* | | 166 | /* |
167 | * Calculate the number of bytes read and validate only that number | | 167 | * Calculate the number of bytes read and validate only that number |
168 | * of bytes. Note that due to pending writes, size may be 0. This | | 168 | * of bytes. Note that due to pending writes, size may be 0. This |
169 | * does not mean that the remaining data is invalid! | | 169 | * does not mean that the remaining data is invalid! |
170 | */ | | 170 | */ |
171 | | | 171 | |
172 | size = count - uio.uio_resid; | | 172 | size = count - uio.uio_resid; |
173 | VM_OBJECT_WLOCK(object); | | 173 | VM_OBJECT_WLOCK(object); |
174 | for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { | | 174 | for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { |
175 | vm_page_t m; | | 175 | vm_page_t m; |
176 | nextoff = toff + PAGE_SIZE; | | 176 | nextoff = toff + PAGE_SIZE; |
177 | m = pages[i]; | | 177 | m = pages[i]; |
178 | | | 178 | |
179 | if (nextoff <= size) { | | 179 | if (nextoff <= size) { |
180 | /* | | 180 | /* |
181 | * Read operation filled an entire page | | 181 | * Read operation filled an entire page |
182 | */ | | 182 | */ |
183 | m->valid = VM_PAGE_BITS_ALL; | | 183 | m->valid = VM_PAGE_BITS_ALL; |
184 | KASSERT(m->dirty == 0, | | 184 | KASSERT(m->dirty == 0, |
185 | ("nfs_getpages: page %p is dirty", m)); | | 185 | ("nfs_getpages: page %p is dirty", m)); |
186 | } else if (size > toff) { | | 186 | } else if (size > toff) { |
187 | /* | | 187 | /* |
188 | * Read operation filled a partial page. | | 188 | * Read operation filled a partial page. |
189 | */ | | 189 | */ |
190 | m->valid = 0; | | 190 | m->valid = 0; |
191 | vm_page_set_valid_range(m, 0, size - toff); | | 191 | vm_page_set_valid_range(m, 0, size - toff); |
192 | KASSERT(m->dirty == 0, | | 192 | KASSERT(m->dirty == 0, |
193 | ("nfs_getpages: page %p is dirty", m)); | | 193 | ("nfs_getpages: page %p is dirty", m)); |
194 | } else { | | 194 | } else { |
195 | /* | | 195 | /* |
196 | * Read operation was short. If no error | | 196 | * Read operation was short. If no error |
197 | * occurred we may have hit a zero-fill | | 197 | * occurred we may have hit a zero-fill |
198 | * section. We leave valid set to 0, and page | | 198 | * section. We leave valid set to 0, and page |
199 | * is freed by vm_page_readahead_finish() if | | 199 | * is freed by vm_page_readahead_finish() if |
200 | * its index is not equal to requested, or | | 200 | * its index is not equal to requested, or |
201 | * page is zeroed and set valid by | | 201 | * page is zeroed and set valid by |
202 | * vm_pager_get_pages() for requested page. | | 202 | * vm_pager_get_pages() for requested page. |
203 | */ | | 203 | */ |
204 | ; | | 204 | ; |
205 | } | | 205 | } |
206 | } | | 206 | } |
207 | out: | | 207 | out: |
208 | VM_OBJECT_WUNLOCK(object); | | 208 | VM_OBJECT_WUNLOCK(object); |
209 | if (ap->a_rbehind) | | 209 | if (ap->a_rbehind) |
210 | *ap->a_rbehind = 0; | | 210 | *ap->a_rbehind = 0; |
211 | if (ap->a_rahead) | | 211 | if (ap->a_rahead) |
212 | *ap->a_rahead = 0; | | 212 | *ap->a_rahead = 0; |
213 | return (VM_PAGER_OK); | | 213 | return (VM_PAGER_OK); |
214 | } | | 214 | } |
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * Vnode op for VM putpages. | | 217 | * Vnode op for VM putpages. |
218 | */ | | 218 | */ |
219 | int | | 219 | int |
220 | ncl_putpages(struct vop_putpages_args *ap) | | 220 | ncl_putpages(struct vop_putpages_args *ap) |
221 | { | | 221 | { |
222 | struct uio uio; | | 222 | struct uio uio; |
223 | struct iovec iov; | | 223 | struct iovec iov; |
224 | vm_offset_t kva; | | 224 | vaddr_t kva; |
225 | struct buf *bp; | | 225 | struct buf *bp; |
226 | int iomode, must_commit, i, error, npages, count; | | 226 | int iomode, must_commit, i, error, npages, count; |
227 | off_t offset; | | 227 | off_t offset; |
228 | int *rtvals; | | 228 | int *rtvals; |
229 | struct vnode *vp; | | 229 | struct vnode *vp; |
230 | struct thread *td; | | 230 | struct thread *td; |
231 | struct ucred *cred; | | 231 | struct ucred *cred; |
232 | struct nfsmount *nmp; | | 232 | struct nfsmount *nmp; |
233 | struct nfsnode *np; | | 233 | struct nfsnode *np; |
234 | vm_page_t *pages; | | 234 | vm_page_t *pages; |
235 | | | 235 | |
236 | vp = ap->a_vp; | | 236 | vp = ap->a_vp; |
237 | np = VTONFS(vp); | | 237 | np = VTONFS(vp); |
238 | td = curthread; /* XXX */ | | 238 | td = curthread; /* XXX */ |
239 | /* Set the cred to n_writecred for the write rpcs. */ | | 239 | /* Set the cred to n_writecred for the write rpcs. */ |
240 | if (np->n_writecred != NULL) | | 240 | if (np->n_writecred != NULL) |
241 | cred = crhold(np->n_writecred); | | 241 | cred = crhold(np->n_writecred); |
242 | else | | 242 | else |
243 | cred = crhold(curthread->td_ucred); /* XXX */ | | 243 | cred = crhold(curthread->td_ucred); /* XXX */ |
244 | nmp = VFSTONFS(vp->v_mount); | | 244 | nmp = VFSTONFS(vp->v_mount); |
245 | pages = ap->a_m; | | 245 | pages = ap->a_m; |
246 | count = ap->a_count; | | 246 | count = ap->a_count; |
247 | rtvals = ap->a_rtvals; | | 247 | rtvals = ap->a_rtvals; |
248 | npages = btoc(count); | | 248 | npages = btoc(count); |
249 | offset = IDX_TO_OFF(pages[0]->pindex); | | 249 | offset = IDX_TO_OFF(pages[0]->pindex); |
250 | | | 250 | |
251 | mtx_lock(&nmp->nm_mtx); | | 251 | mtx_lock(&nmp->nm_mtx); |
252 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && | | 252 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && |
253 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { | | 253 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { |
254 | mtx_unlock(&nmp->nm_mtx); | | 254 | mtx_unlock(&nmp->nm_mtx); |
255 | (void)ncl_fsinfo(nmp, vp, cred, td); | | 255 | (void)ncl_fsinfo(nmp, vp, cred, td); |
256 | } else | | 256 | } else |
257 | mtx_unlock(&nmp->nm_mtx); | | 257 | mtx_unlock(&nmp->nm_mtx); |
258 | | | 258 | |
259 | mtx_lock(&np->n_mtx); | | 259 | mtx_lock(&np->n_mtx); |
260 | if (newnfs_directio_enable && !newnfs_directio_allow_mmap && | | 260 | if (newnfs_directio_enable && !newnfs_directio_allow_mmap && |
261 | (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { | | 261 | (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { |
262 | mtx_unlock(&np->n_mtx); | | 262 | mtx_unlock(&np->n_mtx); |
263 | printf("ncl_putpages: called on noncache-able vnode\n"); | | 263 | printf("ncl_putpages: called on noncache-able vnode\n"); |
264 | mtx_lock(&np->n_mtx); | | 264 | mtx_lock(&np->n_mtx); |
265 | } | | 265 | } |
266 | | | 266 | |
267 | for (i = 0; i < npages; i++) | | 267 | for (i = 0; i < npages; i++) |
268 | rtvals[i] = VM_PAGER_ERROR; | | 268 | rtvals[i] = VM_PAGER_ERROR; |
269 | | | 269 | |
270 | /* | | 270 | /* |
271 | * When putting pages, do not extend file past EOF. | | 271 | * When putting pages, do not extend file past EOF. |
272 | */ | | 272 | */ |
273 | if (offset + count > np->n_size) { | | 273 | if (offset + count > np->n_size) { |
274 | count = np->n_size - offset; | | 274 | count = np->n_size - offset; |
275 | if (count < 0) | | 275 | if (count < 0) |
276 | count = 0; | | 276 | count = 0; |
277 | } | | 277 | } |
278 | mtx_unlock(&np->n_mtx); | | 278 | mtx_unlock(&np->n_mtx); |
279 | | | 279 | |
280 | /* | | 280 | /* |
281 | * We use only the kva address for the buffer, but this is extremely | | 281 | * We use only the kva address for the buffer, but this is extremely |
282 | * convenient and fast. | | 282 | * convenient and fast. |
283 | */ | | 283 | */ |
284 | bp = getpbuf(&ncl_pbuf_freecnt); | | 284 | bp = getpbuf(&ncl_pbuf_freecnt); |
285 | | | 285 | |
286 | kva = (vm_offset_t) bp->b_data; | | 286 | kva = (vaddr_t) bp->b_data; |
287 | pmap_qenter(kva, pages, npages); | | 287 | pmap_qenter(kva, pages, npages); |
288 | PCPU_INC(cnt.v_vnodeout); | | 288 | PCPU_INC(cnt.v_vnodeout); |
289 | PCPU_ADD(cnt.v_vnodepgsout, count); | | 289 | PCPU_ADD(cnt.v_vnodepgsout, count); |
290 | | | 290 | |
291 | iov.iov_base = (caddr_t) kva; | | 291 | iov.iov_base = (caddr_t) kva; |
292 | iov.iov_len = count; | | 292 | iov.iov_len = count; |
293 | uio.uio_iov = &iov; | | 293 | uio.uio_iov = &iov; |
294 | uio.uio_iovcnt = 1; | | 294 | uio.uio_iovcnt = 1; |
295 | uio.uio_offset = offset; | | 295 | uio.uio_offset = offset; |
296 | uio.uio_resid = count; | | 296 | uio.uio_resid = count; |
297 | uio.uio_segflg = UIO_SYSSPACE; | | 297 | uio.uio_segflg = UIO_SYSSPACE; |
298 | uio.uio_rw = UIO_WRITE; | | 298 | uio.uio_rw = UIO_WRITE; |
299 | uio.uio_td = td; | | 299 | uio.uio_td = td; |
300 | | | 300 | |
301 | if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) | | 301 | if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) |
302 | iomode = NFSWRITE_UNSTABLE; | | 302 | iomode = NFSWRITE_UNSTABLE; |
303 | else | | 303 | else |
304 | iomode = NFSWRITE_FILESYNC; | | 304 | iomode = NFSWRITE_FILESYNC; |
305 | | | 305 | |
306 | error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0); | | 306 | error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0); |
307 | crfree(cred); | | 307 | crfree(cred); |
308 | | | 308 | |
309 | pmap_qremove(kva, npages); | | 309 | pmap_qremove(kva, npages); |
310 | relpbuf(bp, &ncl_pbuf_freecnt); | | 310 | relpbuf(bp, &ncl_pbuf_freecnt); |
311 | | | 311 | |
312 | if (error == 0 || !nfs_keep_dirty_on_error) { | | 312 | if (error == 0 || !nfs_keep_dirty_on_error) { |
313 | vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid); | | 313 | vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid); |
314 | if (must_commit) | | 314 | if (must_commit) |
315 | ncl_clearcommit(vp->v_mount); | | 315 | ncl_clearcommit(vp->v_mount); |
316 | } | | 316 | } |
317 | return rtvals[0]; | | 317 | return rtvals[0]; |
318 | } | | 318 | } |
319 | | | 319 | |
320 | /* | | 320 | /* |
321 | * For nfs, cache consistency can only be maintained approximately. | | 321 | * For nfs, cache consistency can only be maintained approximately. |
322 | * Although RFC1094 does not specify the criteria, the following is | | 322 | * Although RFC1094 does not specify the criteria, the following is |
323 | * believed to be compatible with the reference port. | | 323 | * believed to be compatible with the reference port. |
324 | * For nfs: | | 324 | * For nfs: |
325 | * If the file's modify time on the server has changed since the | | 325 | * If the file's modify time on the server has changed since the |
326 | * last read rpc or you have written to the file, | | 326 | * last read rpc or you have written to the file, |
327 | * you may have lost data cache consistency with the | | 327 | * you may have lost data cache consistency with the |
328 | * server, so flush all of the file's data out of the cache. | | 328 | * server, so flush all of the file's data out of the cache. |
329 | * Then force a getattr rpc to ensure that you have up to date | | 329 | * Then force a getattr rpc to ensure that you have up to date |
330 | * attributes. | | 330 | * attributes. |
331 | * NB: This implies that cache data can be read when up to | | 331 | * NB: This implies that cache data can be read when up to |
332 | * NFS_ATTRTIMEO seconds out of date. If you find that you need current | | 332 | * NFS_ATTRTIMEO seconds out of date. If you find that you need current |
333 | * attributes this could be forced by setting n_attrstamp to 0 before | | 333 | * attributes this could be forced by setting n_attrstamp to 0 before |
334 | * the VOP_GETATTR() call. | | 334 | * the VOP_GETATTR() call. |
335 | */ | | 335 | */ |
336 | static inline int | | 336 | static inline int |
337 | nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) | | 337 | nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) |
338 | { | | 338 | { |
339 | int error = 0; | | 339 | int error = 0; |
340 | struct vattr vattr; | | 340 | struct vattr vattr; |
341 | struct nfsnode *np = VTONFS(vp); | | 341 | struct nfsnode *np = VTONFS(vp); |
342 | int old_lock; | | 342 | int old_lock; |
343 | | | 343 | |
344 | /* | | 344 | /* |
345 | * Grab the exclusive lock before checking whether the cache is | | 345 | * Grab the exclusive lock before checking whether the cache is |
346 | * consistent. | | 346 | * consistent. |
347 | * XXX - We can make this cheaper later (by acquiring cheaper locks). | | 347 | * XXX - We can make this cheaper later (by acquiring cheaper locks). |
348 | * But for now, this suffices. | | 348 | * But for now, this suffices. |
349 | */ | | 349 | */ |
350 | old_lock = ncl_upgrade_vnlock(vp); | | 350 | old_lock = ncl_upgrade_vnlock(vp); |
351 | if (vp->v_iflag & VI_DOOMED) { | | 351 | if (vp->v_iflag & VI_DOOMED) { |
352 | ncl_downgrade_vnlock(vp, old_lock); | | 352 | ncl_downgrade_vnlock(vp, old_lock); |
353 | return (EBADF); | | 353 | return (EBADF); |
354 | } | | 354 | } |
355 | | | 355 | |
356 | mtx_lock(&np->n_mtx); | | 356 | mtx_lock(&np->n_mtx); |
357 | if (np->n_flag & NMODIFIED) { | | 357 | if (np->n_flag & NMODIFIED) { |
358 | mtx_unlock(&np->n_mtx); | | 358 | mtx_unlock(&np->n_mtx); |
359 | if (vp->v_type != VREG) { | | 359 | if (vp->v_type != VREG) { |
360 | if (vp->v_type != VDIR) | | 360 | if (vp->v_type != VDIR) |
361 | panic("nfs: bioread, not dir"); | | 361 | panic("nfs: bioread, not dir"); |
362 | ncl_invaldir(vp); | | 362 | ncl_invaldir(vp); |
363 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); | | 363 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); |
364 | if (error) | | 364 | if (error) |
365 | goto out; | | 365 | goto out; |
366 | } | | 366 | } |
367 | np->n_attrstamp = 0; | | 367 | np->n_attrstamp = 0; |
368 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); | | 368 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); |
369 | error = VOP_GETATTR(vp, &vattr, cred); | | 369 | error = VOP_GETATTR(vp, &vattr, cred); |
370 | if (error) | | 370 | if (error) |
371 | goto out; | | 371 | goto out; |
372 | mtx_lock(&np->n_mtx); | | 372 | mtx_lock(&np->n_mtx); |
373 | np->n_mtime = vattr.va_mtime; | | 373 | np->n_mtime = vattr.va_mtime; |
374 | mtx_unlock(&np->n_mtx); | | 374 | mtx_unlock(&np->n_mtx); |
375 | } else { | | 375 | } else { |
376 | mtx_unlock(&np->n_mtx); | | 376 | mtx_unlock(&np->n_mtx); |
377 | error = VOP_GETATTR(vp, &vattr, cred); | | 377 | error = VOP_GETATTR(vp, &vattr, cred); |
378 | if (error) | | 378 | if (error) |
379 | return (error); | | 379 | return (error); |
380 | mtx_lock(&np->n_mtx); | | 380 | mtx_lock(&np->n_mtx); |
381 | if ((np->n_flag & NSIZECHANGED) | | 381 | if ((np->n_flag & NSIZECHANGED) |
382 | || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) { | | 382 | || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) { |
383 | mtx_unlock(&np->n_mtx); | | 383 | mtx_unlock(&np->n_mtx); |
384 | if (vp->v_type == VDIR) | | 384 | if (vp->v_type == VDIR) |
385 | ncl_invaldir(vp); | | 385 | ncl_invaldir(vp); |
386 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); | | 386 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); |
387 | if (error) | | 387 | if (error) |
388 | goto out; | | 388 | goto out; |
389 | mtx_lock(&np->n_mtx); | | 389 | mtx_lock(&np->n_mtx); |
390 | np->n_mtime = vattr.va_mtime; | | 390 | np->n_mtime = vattr.va_mtime; |
391 | np->n_flag &= ~NSIZECHANGED; | | 391 | np->n_flag &= ~NSIZECHANGED; |
392 | } | | 392 | } |
393 | mtx_unlock(&np->n_mtx); | | 393 | mtx_unlock(&np->n_mtx); |
394 | } | | 394 | } |
395 | out: | | 395 | out: |
396 | ncl_downgrade_vnlock(vp, old_lock); | | 396 | ncl_downgrade_vnlock(vp, old_lock); |
397 | return error; | | 397 | return error; |
398 | } | | 398 | } |
399 | | | 399 | |
400 | /* | | 400 | /* |
401 | * Vnode op for read using bio | | 401 | * Vnode op for read using bio |
402 | */ | | 402 | */ |
403 | int | | 403 | int |
404 | ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) | | 404 | ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) |
405 | { | | 405 | { |
406 | struct nfsnode *np = VTONFS(vp); | | 406 | struct nfsnode *np = VTONFS(vp); |
407 | int biosize, i; | | 407 | int biosize, i; |
408 | struct buf *bp, *rabp; | | 408 | struct buf *bp, *rabp; |
409 | struct thread *td; | | 409 | struct thread *td; |
410 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); | | 410 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); |
411 | daddr_t lbn, rabn; | | 411 | daddr_t lbn, rabn; |
412 | int bcount; | | 412 | int bcount; |
413 | int seqcount; | | 413 | int seqcount; |
414 | int nra, error = 0, n = 0, on = 0; | | 414 | int nra, error = 0, n = 0, on = 0; |
415 | off_t tmp_off; | | 415 | off_t tmp_off; |
416 | | | 416 | |
417 | KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode")); | | 417 | KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode")); |
418 | if (uio->uio_resid == 0) | | 418 | if (uio->uio_resid == 0) |
419 | return (0); | | 419 | return (0); |
420 | if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ | | 420 | if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ |
421 | return (EINVAL); | | 421 | return (EINVAL); |
422 | td = uio->uio_td; | | 422 | td = uio->uio_td; |
423 | | | 423 | |
424 | mtx_lock(&nmp->nm_mtx); | | 424 | mtx_lock(&nmp->nm_mtx); |
425 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && | | 425 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && |
426 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { | | 426 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { |
427 | mtx_unlock(&nmp->nm_mtx); | | 427 | mtx_unlock(&nmp->nm_mtx); |
428 | (void)ncl_fsinfo(nmp, vp, cred, td); | | 428 | (void)ncl_fsinfo(nmp, vp, cred, td); |
429 | mtx_lock(&nmp->nm_mtx); | | 429 | mtx_lock(&nmp->nm_mtx); |
430 | } | | 430 | } |
431 | if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0) | | 431 | if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0) |
432 | (void) newnfs_iosize(nmp); | | 432 | (void) newnfs_iosize(nmp); |
433 | | | 433 | |
434 | tmp_off = uio->uio_offset + uio->uio_resid; | | 434 | tmp_off = uio->uio_offset + uio->uio_resid; |
435 | if (vp->v_type != VDIR && | | 435 | if (vp->v_type != VDIR && |
436 | (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) { | | 436 | (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) { |
437 | mtx_unlock(&nmp->nm_mtx); | | 437 | mtx_unlock(&nmp->nm_mtx); |
438 | return (EFBIG); | | 438 | return (EFBIG); |
439 | } | | 439 | } |
440 | mtx_unlock(&nmp->nm_mtx); | | 440 | mtx_unlock(&nmp->nm_mtx); |
441 | | | 441 | |
442 | if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) | | 442 | if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) |
443 | /* No caching/ no readaheads. Just read data into the user buffer */ | | 443 | /* No caching/ no readaheads. Just read data into the user buffer */ |
444 | return ncl_readrpc(vp, uio, cred); | | 444 | return ncl_readrpc(vp, uio, cred); |
445 | | | 445 | |
446 | biosize = vp->v_bufobj.bo_bsize; | | 446 | biosize = vp->v_bufobj.bo_bsize; |
447 | seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); | | 447 | seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); |
448 | | | 448 | |
449 | error = nfs_bioread_check_cons(vp, td, cred); | | 449 | error = nfs_bioread_check_cons(vp, td, cred); |
450 | if (error) | | 450 | if (error) |
451 | return error; | | 451 | return error; |
452 | | | 452 | |
453 | do { | | 453 | do { |
454 | u_quad_t nsize; | | 454 | u_quad_t nsize; |
455 | | | 455 | |
456 | mtx_lock(&np->n_mtx); | | 456 | mtx_lock(&np->n_mtx); |
457 | nsize = np->n_size; | | 457 | nsize = np->n_size; |
458 | mtx_unlock(&np->n_mtx); | | 458 | mtx_unlock(&np->n_mtx); |
459 | | | 459 | |
460 | switch (vp->v_type) { | | 460 | switch (vp->v_type) { |
461 | case VREG: | | 461 | case VREG: |
462 | NFSINCRGLOBAL(nfsstatsv1.biocache_reads); | | 462 | NFSINCRGLOBAL(nfsstatsv1.biocache_reads); |
463 | lbn = uio->uio_offset / biosize; | | 463 | lbn = uio->uio_offset / biosize; |
464 | on = uio->uio_offset - (lbn * biosize); | | 464 | on = uio->uio_offset - (lbn * biosize); |
465 | | | 465 | |
466 | /* | | 466 | /* |
467 | * Start the read ahead(s), as required. | | 467 | * Start the read ahead(s), as required. |
468 | */ | | 468 | */ |
469 | if (nmp->nm_readahead > 0) { | | 469 | if (nmp->nm_readahead > 0) { |
470 | for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && | | 470 | for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && |
471 | (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { | | 471 | (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { |
472 | rabn = lbn + 1 + nra; | | 472 | rabn = lbn + 1 + nra; |
473 | if (incore(&vp->v_bufobj, rabn) == NULL) { | | 473 | if (incore(&vp->v_bufobj, rabn) == NULL) { |
474 | rabp = nfs_getcacheblk(vp, rabn, biosize, td); | | 474 | rabp = nfs_getcacheblk(vp, rabn, biosize, td); |
475 | if (!rabp) { | | 475 | if (!rabp) { |
476 | error = newnfs_sigintr(nmp, td); | | 476 | error = newnfs_sigintr(nmp, td); |
477 | return (error ? error : EINTR); | | 477 | return (error ? error : EINTR); |
478 | } | | 478 | } |
479 | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | | 479 | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { |
480 | rabp->b_flags |= B_ASYNC; | | 480 | rabp->b_flags |= B_ASYNC; |
481 | rabp->b_iocmd = BIO_READ; | | 481 | rabp->b_iocmd = BIO_READ; |
482 | vfs_busy_pages(rabp, 0); | | 482 | vfs_busy_pages(rabp, 0); |
483 | if (ncl_asyncio(nmp, rabp, cred, td)) { | | 483 | if (ncl_asyncio(nmp, rabp, cred, td)) { |
484 | rabp->b_flags |= B_INVAL; | | 484 | rabp->b_flags |= B_INVAL; |
485 | rabp->b_ioflags |= BIO_ERROR; | | 485 | rabp->b_ioflags |= BIO_ERROR; |
486 | vfs_unbusy_pages(rabp); | | 486 | vfs_unbusy_pages(rabp); |
487 | brelse(rabp); | | 487 | brelse(rabp); |
488 | break; | | 488 | break; |
489 | } | | 489 | } |
490 | } else { | | 490 | } else { |
491 | brelse(rabp); | | 491 | brelse(rabp); |
492 | } | | 492 | } |
493 | } | | 493 | } |
494 | } | | 494 | } |
495 | } | | 495 | } |
496 | | | 496 | |
497 | /* Note that bcount is *not* DEV_BSIZE aligned. */ | | 497 | /* Note that bcount is *not* DEV_BSIZE aligned. */ |
498 | bcount = biosize; | | 498 | bcount = biosize; |
499 | if ((off_t)lbn * biosize >= nsize) { | | 499 | if ((off_t)lbn * biosize >= nsize) { |
500 | bcount = 0; | | 500 | bcount = 0; |
501 | } else if ((off_t)(lbn + 1) * biosize > nsize) { | | 501 | } else if ((off_t)(lbn + 1) * biosize > nsize) { |
502 | bcount = nsize - (off_t)lbn * biosize; | | 502 | bcount = nsize - (off_t)lbn * biosize; |
503 | } | | 503 | } |
504 | bp = nfs_getcacheblk(vp, lbn, bcount, td); | | 504 | bp = nfs_getcacheblk(vp, lbn, bcount, td); |
505 | | | 505 | |
506 | if (!bp) { | | 506 | if (!bp) { |
507 | error = newnfs_sigintr(nmp, td); | | 507 | error = newnfs_sigintr(nmp, td); |
508 | return (error ? error : EINTR); | | 508 | return (error ? error : EINTR); |
509 | } | | 509 | } |
510 | | | 510 | |
511 | /* | | 511 | /* |
512 | * If B_CACHE is not set, we must issue the read. If this | | 512 | * If B_CACHE is not set, we must issue the read. If this |
513 | * fails, we return an error. | | 513 | * fails, we return an error. |
514 | */ | | 514 | */ |
515 | | | 515 | |
516 | if ((bp->b_flags & B_CACHE) == 0) { | | 516 | if ((bp->b_flags & B_CACHE) == 0) { |
517 | bp->b_iocmd = BIO_READ; | | 517 | bp->b_iocmd = BIO_READ; |
518 | vfs_busy_pages(bp, 0); | | 518 | vfs_busy_pages(bp, 0); |
519 | error = ncl_doio(vp, bp, cred, td, 0); | | 519 | error = ncl_doio(vp, bp, cred, td, 0); |
520 | if (error) { | | 520 | if (error) { |
521 | brelse(bp); | | 521 | brelse(bp); |
522 | return (error); | | 522 | return (error); |
523 | } | | 523 | } |
524 | } | | 524 | } |
525 | | | 525 | |
526 | /* | | 526 | /* |
527 | * on is the offset into the current bp. Figure out how many | | 527 | * on is the offset into the current bp. Figure out how many |
528 | * bytes we can copy out of the bp. Note that bcount is | | 528 | * bytes we can copy out of the bp. Note that bcount is |
529 | * NOT DEV_BSIZE aligned. | | 529 | * NOT DEV_BSIZE aligned. |
530 | * | | 530 | * |
531 | * Then figure out how many bytes we can copy into the uio. | | 531 | * Then figure out how many bytes we can copy into the uio. |
532 | */ | | 532 | */ |
533 | | | 533 | |
534 | n = 0; | | 534 | n = 0; |
535 | if (on < bcount) | | 535 | if (on < bcount) |
536 | n = MIN((unsigned)(bcount - on), uio->uio_resid); | | 536 | n = MIN((unsigned)(bcount - on), uio->uio_resid); |
537 | break; | | 537 | break; |
538 | case VLNK: | | 538 | case VLNK: |
539 | NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks); | | 539 | NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks); |
540 | bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td); | | 540 | bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td); |
541 | if (!bp) { | | 541 | if (!bp) { |
542 | error = newnfs_sigintr(nmp, td); | | 542 | error = newnfs_sigintr(nmp, td); |
543 | return (error ? error : EINTR); | | 543 | return (error ? error : EINTR); |
544 | } | | 544 | } |
545 | if ((bp->b_flags & B_CACHE) == 0) { | | 545 | if ((bp->b_flags & B_CACHE) == 0) { |
546 | bp->b_iocmd = BIO_READ; | | 546 | bp->b_iocmd = BIO_READ; |
547 | vfs_busy_pages(bp, 0); | | 547 | vfs_busy_pages(bp, 0); |
548 | error = ncl_doio(vp, bp, cred, td, 0); | | 548 | error = ncl_doio(vp, bp, cred, td, 0); |
549 | if (error) { | | 549 | if (error) { |
550 | bp->b_ioflags |= BIO_ERROR; | | 550 | bp->b_ioflags |= BIO_ERROR; |
551 | brelse(bp); | | 551 | brelse(bp); |
552 | return (error); | | 552 | return (error); |
553 | } | | 553 | } |
554 | } | | 554 | } |
555 | n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); | | 555 | n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); |
556 | on = 0; | | 556 | on = 0; |
557 | break; | | 557 | break; |
558 | case VDIR: | | 558 | case VDIR: |
559 | NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs); | | 559 | NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs); |
560 | if (np->n_direofoffset | | 560 | if (np->n_direofoffset |
561 | && uio->uio_offset >= np->n_direofoffset) { | | 561 | && uio->uio_offset >= np->n_direofoffset) { |
562 | return (0); | | 562 | return (0); |
563 | } | | 563 | } |
564 | lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; | | 564 | lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; |
565 | on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); | | 565 | on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); |
566 | bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td); | | 566 | bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td); |
567 | if (!bp) { | | 567 | if (!bp) { |
568 | error = newnfs_sigintr(nmp, td); | | 568 | error = newnfs_sigintr(nmp, td); |
569 | return (error ? error : EINTR); | | 569 | return (error ? error : EINTR); |
570 | } | | 570 | } |
571 | if ((bp->b_flags & B_CACHE) == 0) { | | 571 | if ((bp->b_flags & B_CACHE) == 0) { |
572 | bp->b_iocmd = BIO_READ; | | 572 | bp->b_iocmd = BIO_READ; |
573 | vfs_busy_pages(bp, 0); | | 573 | vfs_busy_pages(bp, 0); |
574 | error = ncl_doio(vp, bp, cred, td, 0); | | 574 | error = ncl_doio(vp, bp, cred, td, 0); |
575 | if (error) { | | 575 | if (error) { |
576 | brelse(bp); | | 576 | brelse(bp); |
577 | } | | 577 | } |
578 | while (error == NFSERR_BAD_COOKIE) { | | 578 | while (error == NFSERR_BAD_COOKIE) { |
579 | ncl_invaldir(vp); | | 579 | ncl_invaldir(vp); |
580 | error = ncl_vinvalbuf(vp, 0, td, 1); | | 580 | error = ncl_vinvalbuf(vp, 0, td, 1); |
581 | /* | | 581 | /* |
582 | * Yuck! The directory has been modified on the | | 582 | * Yuck! The directory has been modified on the |
583 | * server. The only way to get the block is by | | 583 | * server. The only way to get the block is by |
584 | * reading from the beginning to get all the | | 584 | * reading from the beginning to get all the |
585 | * offset cookies. | | 585 | * offset cookies. |
586 | * | | 586 | * |
587 | * Leave the last bp intact unless there is an error. | | 587 | * Leave the last bp intact unless there is an error. |
588 | * Loop back up to the while if the error is another | | 588 | * Loop back up to the while if the error is another |
589 | * NFSERR_BAD_COOKIE (double yuch!). | | 589 | * NFSERR_BAD_COOKIE (double yuch!). |
590 | */ | | 590 | */ |
591 | for (i = 0; i <= lbn && !error; i++) { | | 591 | for (i = 0; i <= lbn && !error; i++) { |
592 | if (np->n_direofoffset | | 592 | if (np->n_direofoffset |
593 | && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) | | 593 | && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) |
594 | return (0); | | 594 | return (0); |
595 | bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td); | | 595 | bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td); |
596 | if (!bp) { | | 596 | if (!bp) { |
597 | error = newnfs_sigintr(nmp, td); | | 597 | error = newnfs_sigintr(nmp, td); |
598 | return (error ? error : EINTR); | | 598 | return (error ? error : EINTR); |
599 | } | | 599 | } |
600 | if ((bp->b_flags & B_CACHE) == 0) { | | 600 | if ((bp->b_flags & B_CACHE) == 0) { |
601 | bp->b_iocmd = BIO_READ; | | 601 | bp->b_iocmd = BIO_READ; |
602 | vfs_busy_pages(bp, 0); | | 602 | vfs_busy_pages(bp, 0); |
603 | error = ncl_doio(vp, bp, cred, td, 0); | | 603 | error = ncl_doio(vp, bp, cred, td, 0); |
604 | /* | | 604 | /* |
605 | * no error + B_INVAL == directory EOF, | | 605 | * no error + B_INVAL == directory EOF, |
606 | * use the block. | | 606 | * use the block. |
607 | */ | | 607 | */ |
608 | if (error == 0 && (bp->b_flags & B_INVAL)) | | 608 | if (error == 0 && (bp->b_flags & B_INVAL)) |
609 | break; | | 609 | break; |
610 | } | | 610 | } |
611 | /* | | 611 | /* |
612 | * An error will throw away the block and the | | 612 | * An error will throw away the block and the |
613 | * for loop will break out. If no error and this | | 613 | * for loop will break out. If no error and this |
614 | * is not the block we want, we throw away the | | 614 | * is not the block we want, we throw away the |
615 | * block and go for the next one via the for loop. | | 615 | * block and go for the next one via the for loop. |
616 | */ | | 616 | */ |
617 | if (error || i < lbn) | | 617 | if (error || i < lbn) |
618 | brelse(bp); | | 618 | brelse(bp); |
619 | } | | 619 | } |
620 | } | | 620 | } |
621 | /* | | 621 | /* |
622 | * The above while is repeated if we hit another cookie | | 622 | * The above while is repeated if we hit another cookie |
623 | * error. If we hit an error and it wasn't a cookie error, | | 623 | * error. If we hit an error and it wasn't a cookie error, |
624 | * we give up. | | 624 | * we give up. |
625 | */ | | 625 | */ |
626 | if (error) | | 626 | if (error) |
627 | return (error); | | 627 | return (error); |
628 | } | | 628 | } |
629 | | | 629 | |
630 | /* | | 630 | /* |
631 | * If not eof and read aheads are enabled, start one. | | 631 | * If not eof and read aheads are enabled, start one. |
632 | * (You need the current block first, so that you have the | | 632 | * (You need the current block first, so that you have the |
633 | * directory offset cookie of the next block.) | | 633 | * directory offset cookie of the next block.) |
634 | */ | | 634 | */ |
635 | if (nmp->nm_readahead > 0 && | | 635 | if (nmp->nm_readahead > 0 && |
636 | (bp->b_flags & B_INVAL) == 0 && | | 636 | (bp->b_flags & B_INVAL) == 0 && |
637 | (np->n_direofoffset == 0 || | | 637 | (np->n_direofoffset == 0 || |
638 | (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && | | 638 | (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && |
639 | incore(&vp->v_bufobj, lbn + 1) == NULL) { | | 639 | incore(&vp->v_bufobj, lbn + 1) == NULL) { |
640 | rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); | | 640 | rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); |
641 | if (rabp) { | | 641 | if (rabp) { |
642 | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | | 642 | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { |
643 | rabp->b_flags |= B_ASYNC; | | 643 | rabp->b_flags |= B_ASYNC; |
644 | rabp->b_iocmd = BIO_READ; | | 644 | rabp->b_iocmd = BIO_READ; |
645 | vfs_busy_pages(rabp, 0); | | 645 | vfs_busy_pages(rabp, 0); |
646 | if (ncl_asyncio(nmp, rabp, cred, td)) { | | 646 | if (ncl_asyncio(nmp, rabp, cred, td)) { |
647 | rabp->b_flags |= B_INVAL; | | 647 | rabp->b_flags |= B_INVAL; |
648 | rabp->b_ioflags |= BIO_ERROR; | | 648 | rabp->b_ioflags |= BIO_ERROR; |
649 | vfs_unbusy_pages(rabp); | | 649 | vfs_unbusy_pages(rabp); |
650 | brelse(rabp); | | 650 | brelse(rabp); |
651 | } | | 651 | } |
652 | } else { | | 652 | } else { |
653 | brelse(rabp); | | 653 | brelse(rabp); |
654 | } | | 654 | } |
655 | } | | 655 | } |
656 | } | | 656 | } |
657 | /* | | 657 | /* |
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
659 | * chopped for the EOF condition, we cannot tell how large | | 659 | * chopped for the EOF condition, we cannot tell how large |
660 | * NFS directories are going to be until we hit EOF. So | | 660 | * NFS directories are going to be until we hit EOF. So |
661 | * an NFS directory buffer is *not* chopped to its EOF. Now, | | 661 | * an NFS directory buffer is *not* chopped to its EOF. Now, |
662 | * it just so happens that b_resid will effectively chop it | | 662 | * it just so happens that b_resid will effectively chop it |
663 | * to EOF. *BUT* this information is lost if the buffer goes | | 663 | * to EOF. *BUT* this information is lost if the buffer goes |
664 | * away and is reconstituted into a B_CACHE state ( due to | | 664 | * away and is reconstituted into a B_CACHE state ( due to |
665 | * being VMIO ) later. So we keep track of the directory eof | | 665 | * being VMIO ) later. So we keep track of the directory eof |
666 | * in np->n_direofoffset and chop it off as an extra step | | 666 | * in np->n_direofoffset and chop it off as an extra step |
667 | * right here. | | 667 | * right here. |
668 | */ | | 668 | */ |
669 | n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); | | 669 | n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); |
670 | if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) | | 670 | if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) |
671 | n = np->n_direofoffset - uio->uio_offset; | | 671 | n = np->n_direofoffset - uio->uio_offset; |
672 | break; | | 672 | break; |
673 | default: | | 673 | default: |
674 | printf(" ncl_bioread: type %x unexpected\n", vp->v_type); | | 674 | printf(" ncl_bioread: type %x unexpected\n", vp->v_type); |
675 | bp = NULL; | | 675 | bp = NULL; |
676 | break; | | 676 | break; |
677 | } | | 677 | } |
678 | | | 678 | |
679 | if (n > 0) { | | 679 | if (n > 0) { |
680 | error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio); | | 680 | error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio); |
681 | } | | 681 | } |
682 | if (vp->v_type == VLNK) | | 682 | if (vp->v_type == VLNK) |
683 | n = 0; | | 683 | n = 0; |
684 | if (bp != NULL) | | 684 | if (bp != NULL) |
685 | brelse(bp); | | 685 | brelse(bp); |
686 | } while (error == 0 && uio->uio_resid > 0 && n > 0); | | 686 | } while (error == 0 && uio->uio_resid > 0 && n > 0); |
687 | return (error); | | 687 | return (error); |
688 | } | | 688 | } |
689 | | | 689 | |
690 | /* | | 690 | /* |
691 | * The NFS write path cannot handle iovecs with len > 1. So we need to | | 691 | * The NFS write path cannot handle iovecs with len > 1. So we need to |
692 | * break up iovecs accordingly (restricting them to wsize). | | 692 | * break up iovecs accordingly (restricting them to wsize). |
693 | * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf). | | 693 | * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf). |
694 | * For the ASYNC case, 2 copies are needed. The first a copy from the | | 694 | * For the ASYNC case, 2 copies are needed. The first a copy from the |
695 | * user buffer to a staging buffer and then a second copy from the staging | | 695 | * user buffer to a staging buffer and then a second copy from the staging |
696 | * buffer to mbufs. This can be optimized by copying from the user buffer | | 696 | * buffer to mbufs. This can be optimized by copying from the user buffer |
697 | * directly into mbufs and passing the chain down, but that requires a | | 697 | * directly into mbufs and passing the chain down, but that requires a |
698 | * fair amount of re-working of the relevant codepaths (and can be done | | 698 | * fair amount of re-working of the relevant codepaths (and can be done |
699 | * later). | | 699 | * later). |
700 | */ | | 700 | */ |
701 | static int | | 701 | static int |
702 | nfs_directio_write(vp, uiop, cred, ioflag) | | 702 | nfs_directio_write(vp, uiop, cred, ioflag) |
703 | struct vnode *vp; | | 703 | struct vnode *vp; |
704 | struct uio *uiop; | | 704 | struct uio *uiop; |
705 | struct ucred *cred; | | 705 | struct ucred *cred; |
706 | int ioflag; | | 706 | int ioflag; |
707 | { | | 707 | { |
708 | int error; | | 708 | int error; |
709 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); | | 709 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); |
710 | struct thread *td = uiop->uio_td; | | 710 | struct thread *td = uiop->uio_td; |
711 | int size; | | 711 | int size; |
712 | int wsize; | | 712 | int wsize; |
713 | | | 713 | |
714 | mtx_lock(&nmp->nm_mtx); | | 714 | mtx_lock(&nmp->nm_mtx); |
715 | wsize = nmp->nm_wsize; | | 715 | wsize = nmp->nm_wsize; |
716 | mtx_unlock(&nmp->nm_mtx); | | 716 | mtx_unlock(&nmp->nm_mtx); |
717 | if (ioflag & IO_SYNC) { | | 717 | if (ioflag & IO_SYNC) { |
718 | int iomode, must_commit; | | 718 | int iomode, must_commit; |
719 | struct uio uio; | | 719 | struct uio uio; |
720 | struct iovec iov; | | 720 | struct iovec iov; |
721 | do_sync: | | 721 | do_sync: |
722 | while (uiop->uio_resid > 0) { | | 722 | while (uiop->uio_resid > 0) { |
723 | size = MIN(uiop->uio_resid, wsize); | | 723 | size = MIN(uiop->uio_resid, wsize); |
724 | size = MIN(uiop->uio_iov->iov_len, size); | | 724 | size = MIN(uiop->uio_iov->iov_len, size); |
725 | iov.iov_base = uiop->uio_iov->iov_base; | | 725 | iov.iov_base = uiop->uio_iov->iov_base; |
726 | iov.iov_len = size; | | 726 | iov.iov_len = size; |
727 | uio.uio_iov = &iov; | | 727 | uio.uio_iov = &iov; |
728 | uio.uio_iovcnt = 1; | | 728 | uio.uio_iovcnt = 1; |
729 | uio.uio_offset = uiop->uio_offset; | | 729 | uio.uio_offset = uiop->uio_offset; |
730 | uio.uio_resid = size; | | 730 | uio.uio_resid = size; |
731 | uio.uio_segflg = UIO_USERSPACE; | | 731 | uio.uio_segflg = UIO_USERSPACE; |
732 | uio.uio_rw = UIO_WRITE; | | 732 | uio.uio_rw = UIO_WRITE; |
733 | uio.uio_td = td; | | 733 | uio.uio_td = td; |
734 | iomode = NFSWRITE_FILESYNC; | | 734 | iomode = NFSWRITE_FILESYNC; |
735 | error = ncl_writerpc(vp, &uio, cred, &iomode, | | 735 | error = ncl_writerpc(vp, &uio, cred, &iomode, |
736 | &must_commit, 0); | | 736 | &must_commit, 0); |
737 | KASSERT((must_commit == 0), | | 737 | KASSERT((must_commit == 0), |
738 | ("ncl_directio_write: Did not commit write")); | | 738 | ("ncl_directio_write: Did not commit write")); |
739 | if (error) | | 739 | if (error) |
740 | return (error); | | 740 | return (error); |
741 | uiop->uio_offset += size; | | 741 | uiop->uio_offset += size; |
742 | uiop->uio_resid -= size; | | 742 | uiop->uio_resid -= size; |
743 | if (uiop->uio_iov->iov_len <= size) { | | 743 | if (uiop->uio_iov->iov_len <= size) { |
744 | uiop->uio_iovcnt--; | | 744 | uiop->uio_iovcnt--; |
745 | uiop->uio_iov++; | | 745 | uiop->uio_iov++; |
746 | } else { | | 746 | } else { |
747 | uiop->uio_iov->iov_base = | | 747 | uiop->uio_iov->iov_base = |
748 | (char *)uiop->uio_iov->iov_base + size; | | 748 | (char *)uiop->uio_iov->iov_base + size; |
749 | uiop->uio_iov->iov_len -= size; | | 749 | uiop->uio_iov->iov_len -= size; |
750 | } | | 750 | } |
751 | } | | 751 | } |
752 | } else { | | 752 | } else { |
753 | struct uio *t_uio; | | 753 | struct uio *t_uio; |
754 | struct iovec *t_iov; | | 754 | struct iovec *t_iov; |
755 | struct buf *bp; | | 755 | struct buf *bp; |
756 | | | 756 | |
757 | /* | | 757 | /* |
758 | * Break up the write into blocksize chunks and hand these | | 758 | * Break up the write into blocksize chunks and hand these |
759 | * over to nfsiod's for write back. | | 759 | * over to nfsiod's for write back. |
760 | * Unfortunately, this incurs a copy of the data. Since | | 760 | * Unfortunately, this incurs a copy of the data. Since |
761 | * the user could modify the buffer before the write is | | 761 | * the user could modify the buffer before the write is |
762 | * initiated. | | 762 | * initiated. |
763 | * | | 763 | * |
764 | * The obvious optimization here is that one of the 2 copies | | 764 | * The obvious optimization here is that one of the 2 copies |
765 | * in the async write path can be eliminated by copying the | | 765 | * in the async write path can be eliminated by copying the |
766 | * data here directly into mbufs and passing the mbuf chain | | 766 | * data here directly into mbufs and passing the mbuf chain |
767 | * down. But that will require a fair amount of re-working | | 767 | * down. But that will require a fair amount of re-working |
768 | * of the code and can be done if there's enough interest | | 768 | * of the code and can be done if there's enough interest |
769 | * in NFS directio access. | | 769 | * in NFS directio access. |
770 | */ | | 770 | */ |
771 | while (uiop->uio_resid > 0) { | | 771 | while (uiop->uio_resid > 0) { |
772 | size = MIN(uiop->uio_resid, wsize); | | 772 | size = MIN(uiop->uio_resid, wsize); |
773 | size = MIN(uiop->uio_iov->iov_len, size); | | 773 | size = MIN(uiop->uio_iov->iov_len, size); |
774 | bp = getpbuf(&ncl_pbuf_freecnt); | | 774 | bp = getpbuf(&ncl_pbuf_freecnt); |
775 | t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK); | | 775 | t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK); |
776 | t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK); | | 776 | t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK); |
777 | t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK); | | 777 | t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK); |
778 | t_iov->iov_len = size; | | 778 | t_iov->iov_len = size; |
779 | t_uio->uio_iov = t_iov; | | 779 | t_uio->uio_iov = t_iov; |
780 | t_uio->uio_iovcnt = 1; | | 780 | t_uio->uio_iovcnt = 1; |
781 | t_uio->uio_offset = uiop->uio_offset; | | 781 | t_uio->uio_offset = uiop->uio_offset; |
782 | t_uio->uio_resid = size; | | 782 | t_uio->uio_resid = size; |
783 | t_uio->uio_segflg = UIO_SYSSPACE; | | 783 | t_uio->uio_segflg = UIO_SYSSPACE; |
784 | t_uio->uio_rw = UIO_WRITE; | | 784 | t_uio->uio_rw = UIO_WRITE; |
785 | t_uio->uio_td = td; | | 785 | t_uio->uio_td = td; |
786 | KASSERT(uiop->uio_segflg == UIO_USERSPACE || | | 786 | KASSERT(uiop->uio_segflg == UIO_USERSPACE || |
787 | uiop->uio_segflg == UIO_SYSSPACE, | | 787 | uiop->uio_segflg == UIO_SYSSPACE, |
788 | ("nfs_directio_write: Bad uio_segflg")); | | 788 | ("nfs_directio_write: Bad uio_segflg")); |
789 | if (uiop->uio_segflg == UIO_USERSPACE) { | | 789 | if (uiop->uio_segflg == UIO_USERSPACE) { |
790 | error = copyin(uiop->uio_iov->iov_base, | | 790 | error = copyin(uiop->uio_iov->iov_base, |
791 | t_iov->iov_base, size); | | 791 | t_iov->iov_base, size); |
792 | if (error != 0) | | 792 | if (error != 0) |
793 | goto err_free; | | 793 | goto err_free; |
794 | } else | | 794 | } else |
795 | /* | | 795 | /* |
796 | * UIO_SYSSPACE may never happen, but handle | | 796 | * UIO_SYSSPACE may never happen, but handle |
797 | * it just in case it does. | | 797 | * it just in case it does. |
798 | */ | | 798 | */ |
799 | bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, | | 799 | bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, |
800 | size); | | 800 | size); |
801 | bp->b_flags |= B_DIRECT; | | 801 | bp->b_flags |= B_DIRECT; |
802 | bp->b_iocmd = BIO_WRITE; | | 802 | bp->b_iocmd = BIO_WRITE; |
803 | if (cred != NOCRED) { | | 803 | if (cred != NOCRED) { |
804 | crhold(cred); | | 804 | crhold(cred); |
805 | bp->b_wcred = cred; | | 805 | bp->b_wcred = cred; |
806 | } else | | 806 | } else |
807 | bp->b_wcred = NOCRED; | | 807 | bp->b_wcred = NOCRED; |
808 | bp->b_caller1 = (void *)t_uio; | | 808 | bp->b_caller1 = (void *)t_uio; |
809 | bp->b_vp = vp; | | 809 | bp->b_vp = vp; |
810 | error = ncl_asyncio(nmp, bp, NOCRED, td); | | 810 | error = ncl_asyncio(nmp, bp, NOCRED, td); |
811 | err_free: | | 811 | err_free: |
812 | if (error) { | | 812 | if (error) { |
813 | free(t_iov->iov_base, M_NFSDIRECTIO); | | 813 | free(t_iov->iov_base, M_NFSDIRECTIO); |
814 | free(t_iov, M_NFSDIRECTIO); | | 814 | free(t_iov, M_NFSDIRECTIO); |
815 | free(t_uio, M_NFSDIRECTIO); | | 815 | free(t_uio, M_NFSDIRECTIO); |
816 | bp->b_vp = NULL; | | 816 | bp->b_vp = NULL; |
817 | relpbuf(bp, &ncl_pbuf_freecnt); | | 817 | relpbuf(bp, &ncl_pbuf_freecnt); |
818 | if (error == EINTR) | | 818 | if (error == EINTR) |
819 | return (error); | | 819 | return (error); |
820 | goto do_sync; | | 820 | goto do_sync; |
821 | } | | 821 | } |
822 | uiop->uio_offset += size; | | 822 | uiop->uio_offset += size; |
823 | uiop->uio_resid -= size; | | 823 | uiop->uio_resid -= size; |
824 | if (uiop->uio_iov->iov_len <= size) { | | 824 | if (uiop->uio_iov->iov_len <= size) { |
825 | uiop->uio_iovcnt--; | | 825 | uiop->uio_iovcnt--; |
826 | uiop->uio_iov++; | | 826 | uiop->uio_iov++; |
827 | } else { | | 827 | } else { |
828 | uiop->uio_iov->iov_base = | | 828 | uiop->uio_iov->iov_base = |
829 | (char *)uiop->uio_iov->iov_base + size; | | 829 | (char *)uiop->uio_iov->iov_base + size; |
830 | uiop->uio_iov->iov_len -= size; | | 830 | uiop->uio_iov->iov_len -= size; |
831 | } | | 831 | } |
832 | } | | 832 | } |
833 | } | | 833 | } |
834 | return (0); | | 834 | return (0); |
835 | } | | 835 | } |
836 | | | 836 | |
837 | /* | | 837 | /* |
838 | * Vnode op for write using bio | | 838 | * Vnode op for write using bio |
839 | */ | | 839 | */ |
840 | int | | 840 | int |
841 | ncl_write(struct vop_write_args *ap) | | 841 | ncl_write(struct vop_write_args *ap) |
842 | { | | 842 | { |
843 | int biosize; | | 843 | int biosize; |
844 | struct uio *uio = ap->a_uio; | | 844 | struct uio *uio = ap->a_uio; |
845 | struct thread *td = uio->uio_td; | | 845 | struct thread *td = uio->uio_td; |
846 | struct vnode *vp = ap->a_vp; | | 846 | struct vnode *vp = ap->a_vp; |
847 | struct nfsnode *np = VTONFS(vp); | | 847 | struct nfsnode *np = VTONFS(vp); |
848 | struct ucred *cred = ap->a_cred; | | 848 | struct ucred *cred = ap->a_cred; |
849 | int ioflag = ap->a_ioflag; | | 849 | int ioflag = ap->a_ioflag; |
850 | struct buf *bp; | | 850 | struct buf *bp; |
851 | struct vattr vattr; | | 851 | struct vattr vattr; |
852 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); | | 852 | struct nfsmount *nmp = VFSTONFS(vp->v_mount); |
853 | daddr_t lbn; | | 853 | daddr_t lbn; |
854 | int bcount, noncontig_write, obcount; | | 854 | int bcount, noncontig_write, obcount; |
855 | int bp_cached, n, on, error = 0, error1, wouldcommit; | | 855 | int bp_cached, n, on, error = 0, error1, wouldcommit; |
856 | size_t orig_resid, local_resid; | | 856 | size_t orig_resid, local_resid; |
857 | off_t orig_size, tmp_off; | | 857 | off_t orig_size, tmp_off; |
858 | | | 858 | |
859 | KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode")); | | 859 | KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode")); |
860 | KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, | | 860 | KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, |
861 | ("ncl_write proc")); | | 861 | ("ncl_write proc")); |
862 | if (vp->v_type != VREG) | | 862 | if (vp->v_type != VREG) |
863 | return (EIO); | | 863 | return (EIO); |
864 | mtx_lock(&np->n_mtx); | | 864 | mtx_lock(&np->n_mtx); |
865 | if (np->n_flag & NWRITEERR) { | | 865 | if (np->n_flag & NWRITEERR) { |
866 | np->n_flag &= ~NWRITEERR; | | 866 | np->n_flag &= ~NWRITEERR; |
867 | mtx_unlock(&np->n_mtx); | | 867 | mtx_unlock(&np->n_mtx); |
868 | return (np->n_error); | | 868 | return (np->n_error); |
869 | } else | | 869 | } else |
870 | mtx_unlock(&np->n_mtx); | | 870 | mtx_unlock(&np->n_mtx); |
871 | mtx_lock(&nmp->nm_mtx); | | 871 | mtx_lock(&nmp->nm_mtx); |
872 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && | | 872 | if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && |
873 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { | | 873 | (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { |
874 | mtx_unlock(&nmp->nm_mtx); | | 874 | mtx_unlock(&nmp->nm_mtx); |
875 | (void)ncl_fsinfo(nmp, vp, cred, td); | | 875 | (void)ncl_fsinfo(nmp, vp, cred, td); |
876 | mtx_lock(&nmp->nm_mtx); | | 876 | mtx_lock(&nmp->nm_mtx); |
877 | } | | 877 | } |
878 | if (nmp->nm_wsize == 0) | | 878 | if (nmp->nm_wsize == 0) |
879 | (void) newnfs_iosize(nmp); | | 879 | (void) newnfs_iosize(nmp); |
880 | mtx_unlock(&nmp->nm_mtx); | | 880 | mtx_unlock(&nmp->nm_mtx); |
881 | | | 881 | |
882 | /* | | 882 | /* |
883 | * Synchronously flush pending buffers if we are in synchronous | | 883 | * Synchronously flush pending buffers if we are in synchronous |
884 | * mode or if we are appending. | | 884 | * mode or if we are appending. |
885 | */ | | 885 | */ |
886 | if (ioflag & (IO_APPEND | IO_SYNC)) { | | 886 | if (ioflag & (IO_APPEND | IO_SYNC)) { |
887 | mtx_lock(&np->n_mtx); | | 887 | mtx_lock(&np->n_mtx); |
888 | if (np->n_flag & NMODIFIED) { | | 888 | if (np->n_flag & NMODIFIED) { |
889 | mtx_unlock(&np->n_mtx); | | 889 | mtx_unlock(&np->n_mtx); |
890 | #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */ | | 890 | #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */ |
891 | /* | | 891 | /* |
892 | * Require non-blocking, synchronous writes to | | 892 | * Require non-blocking, synchronous writes to |
893 | * dirty files to inform the program it needs | | 893 | * dirty files to inform the program it needs |
894 | * to fsync(2) explicitly. | | 894 | * to fsync(2) explicitly. |
895 | */ | | 895 | */ |
896 | if (ioflag & IO_NDELAY) | | 896 | if (ioflag & IO_NDELAY) |
897 | return (EAGAIN); | | 897 | return (EAGAIN); |
898 | #endif | | 898 | #endif |
899 | np->n_attrstamp = 0; | | 899 | np->n_attrstamp = 0; |
900 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); | | 900 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); |
901 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); | | 901 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); |
902 | if (error) | | 902 | if (error) |
903 | return (error); | | 903 | return (error); |
904 | } else | | 904 | } else |
905 | mtx_unlock(&np->n_mtx); | | 905 | mtx_unlock(&np->n_mtx); |
906 | } | | 906 | } |
907 | | | 907 | |
908 | orig_resid = uio->uio_resid; | | 908 | orig_resid = uio->uio_resid; |
909 | mtx_lock(&np->n_mtx); | | 909 | mtx_lock(&np->n_mtx); |
910 | orig_size = np->n_size; | | 910 | orig_size = np->n_size; |
911 | mtx_unlock(&np->n_mtx); | | 911 | mtx_unlock(&np->n_mtx); |
912 | | | 912 | |
913 | /* | | 913 | /* |
914 | * If IO_APPEND then load uio_offset. We restart here if we cannot | | 914 | * If IO_APPEND then load uio_offset. We restart here if we cannot |
915 | * get the append lock. | | 915 | * get the append lock. |
916 | */ | | 916 | */ |
917 | if (ioflag & IO_APPEND) { | | 917 | if (ioflag & IO_APPEND) { |
918 | np->n_attrstamp = 0; | | 918 | np->n_attrstamp = 0; |
919 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); | | 919 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); |
920 | error = VOP_GETATTR(vp, &vattr, cred); | | 920 | error = VOP_GETATTR(vp, &vattr, cred); |
921 | if (error) | | 921 | if (error) |
922 | return (error); | | 922 | return (error); |
923 | mtx_lock(&np->n_mtx); | | 923 | mtx_lock(&np->n_mtx); |
924 | uio->uio_offset = np->n_size; | | 924 | uio->uio_offset = np->n_size; |
925 | mtx_unlock(&np->n_mtx); | | 925 | mtx_unlock(&np->n_mtx); |
926 | } | | 926 | } |
927 | | | 927 | |
928 | if (uio->uio_offset < 0) | | 928 | if (uio->uio_offset < 0) |
929 | return (EINVAL); | | 929 | return (EINVAL); |
930 | tmp_off = uio->uio_offset + uio->uio_resid; | | 930 | tmp_off = uio->uio_offset + uio->uio_resid; |
931 | if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset) | | 931 | if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset) |
932 | return (EFBIG); | | 932 | return (EFBIG); |
933 | if (uio->uio_resid == 0) | | 933 | if (uio->uio_resid == 0) |
934 | return (0); | | 934 | return (0); |
935 | | | 935 | |
936 | if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG) | | 936 | if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG) |
937 | return nfs_directio_write(vp, uio, cred, ioflag); | | 937 | return nfs_directio_write(vp, uio, cred, ioflag); |
938 | | | 938 | |
939 | /* | | 939 | /* |
940 | * Maybe this should be above the vnode op call, but so long as | | 940 | * Maybe this should be above the vnode op call, but so long as |
941 | * file servers have no limits, i don't think it matters | | 941 | * file servers have no limits, i don't think it matters |
942 | */ | | 942 | */ |
943 | if (vn_rlimit_fsize(vp, uio, td)) | | 943 | if (vn_rlimit_fsize(vp, uio, td)) |
944 | return (EFBIG); | | 944 | return (EFBIG); |
945 | | | 945 | |
946 | biosize = vp->v_bufobj.bo_bsize; | | 946 | biosize = vp->v_bufobj.bo_bsize; |
947 | /* | | 947 | /* |
948 | * Find all of this file's B_NEEDCOMMIT buffers. If our writes | | 948 | * Find all of this file's B_NEEDCOMMIT buffers. If our writes |
949 | * would exceed the local maximum per-file write commit size when | | 949 | * would exceed the local maximum per-file write commit size when |
950 | * combined with those, we must decide whether to flush, | | 950 | * combined with those, we must decide whether to flush, |
951 | * go synchronous, or return error. We don't bother checking | | 951 | * go synchronous, or return error. We don't bother checking |
952 | * IO_UNIT -- we just make all writes atomic anyway, as there's | | 952 | * IO_UNIT -- we just make all writes atomic anyway, as there's |
953 | * no point optimizing for something that really won't ever happen. | | 953 | * no point optimizing for something that really won't ever happen. |
954 | */ | | 954 | */ |
955 | wouldcommit = 0; | | 955 | wouldcommit = 0; |
956 | if (!(ioflag & IO_SYNC)) { | | 956 | if (!(ioflag & IO_SYNC)) { |
957 | int nflag; | | 957 | int nflag; |
958 | | | 958 | |
959 | mtx_lock(&np->n_mtx); | | 959 | mtx_lock(&np->n_mtx); |
960 | nflag = np->n_flag; | | 960 | nflag = np->n_flag; |
961 | mtx_unlock(&np->n_mtx); | | 961 | mtx_unlock(&np->n_mtx); |
962 | if (nflag & NMODIFIED) { | | 962 | if (nflag & NMODIFIED) { |
963 | BO_LOCK(&vp->v_bufobj); | | 963 | BO_LOCK(&vp->v_bufobj); |
964 | if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { | | 964 | if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { |
965 | TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, | | 965 | TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, |
966 | b_bobufs) { | | 966 | b_bobufs) { |
967 | if (bp->b_flags & B_NEEDCOMMIT) | | 967 | if (bp->b_flags & B_NEEDCOMMIT) |
968 | wouldcommit += bp->b_bcount; | | 968 | wouldcommit += bp->b_bcount; |
969 | } | | 969 | } |
970 | } | | 970 | } |
971 | BO_UNLOCK(&vp->v_bufobj); | | 971 | BO_UNLOCK(&vp->v_bufobj); |
972 | } | | 972 | } |
973 | } | | 973 | } |
974 | | | 974 | |
975 | do { | | 975 | do { |
976 | if (!(ioflag & IO_SYNC)) { | | 976 | if (!(ioflag & IO_SYNC)) { |
977 | wouldcommit += biosize; | | 977 | wouldcommit += biosize; |
978 | if (wouldcommit > nmp->nm_wcommitsize) { | | 978 | if (wouldcommit > nmp->nm_wcommitsize) { |
979 | np->n_attrstamp = 0; | | 979 | np->n_attrstamp = 0; |
980 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); | | 980 | KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); |
981 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); | | 981 | error = ncl_vinvalbuf(vp, V_SAVE, td, 1); |
982 | if (error) | | 982 | if (error) |
983 | return (error); | | 983 | return (error); |
984 | wouldcommit = biosize; | | 984 | wouldcommit = biosize; |
985 | } | | 985 | } |
986 | } | | 986 | } |
987 | | | 987 | |
988 | NFSINCRGLOBAL(nfsstatsv1.biocache_writes); | | 988 | NFSINCRGLOBAL(nfsstatsv1.biocache_writes); |
989 | lbn = uio->uio_offset / biosize; | | 989 | lbn = uio->uio_offset / biosize; |
990 | on = uio->uio_offset - (lbn * biosize); | | 990 | on = uio->uio_offset - (lbn * biosize); |
991 | n = MIN((unsigned)(biosize - on), uio->uio_resid); | | 991 | n = MIN((unsigned)(biosize - on), uio->uio_resid); |
992 | again: | | 992 | again: |
993 | /* | | 993 | /* |
994 | * Handle direct append and file extension cases, calculate | | 994 | * Handle direct append and file extension cases, calculate |
995 | * unaligned buffer size. | | 995 | * unaligned buffer size. |
996 | */ | | 996 | */ |
997 | mtx_lock(&np->n_mtx); | | 997 | mtx_lock(&np->n_mtx); |
998 | if ((np->n_flag & NHASBEENLOCKED) == 0 && | | 998 | if ((np->n_flag & NHASBEENLOCKED) == 0 && |
999 | (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0) | | 999 | (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0) |
1000 | noncontig_write = 1; | | 1000 | noncontig_write = 1; |
1001 | else | | 1001 | else |
1002 | noncontig_write = 0; | | 1002 | noncontig_write = 0; |
1003 | if ((uio->uio_offset == np->n_size || | | 1003 | if ((uio->uio_offset == np->n_size || |
1004 | (noncontig_write != 0 && | | 1004 | (noncontig_write != 0 && |
1005 | lbn == (np->n_size / biosize) && | | 1005 | lbn == (np->n_size / biosize) && |
1006 | uio->uio_offset + n > np->n_size)) && n) { | | 1006 | uio->uio_offset + n > np->n_size)) && n) { |
1007 | mtx_unlock(&np->n_mtx); | | 1007 | mtx_unlock(&np->n_mtx); |
1008 | /* | | 1008 | /* |
1009 | * Get the buffer (in its pre-append state to maintain | | 1009 | * Get the buffer (in its pre-append state to maintain |
1010 | * B_CACHE if it was previously set). Resize the | | 1010 | * B_CACHE if it was previously set). Resize the |
1011 | * nfsnode after we have locked the buffer to prevent | | 1011 | * nfsnode after we have locked the buffer to prevent |
1012 | * readers from reading garbage. | | 1012 | * readers from reading garbage. |
1013 | */ | | 1013 | */ |
1014 | obcount = np->n_size - (lbn * biosize); | | 1014 | obcount = np->n_size - (lbn * biosize); |
1015 | bp = nfs_getcacheblk(vp, lbn, obcount, td); | | 1015 | bp = nfs_getcacheblk(vp, lbn, obcount, td); |
1016 | | | 1016 | |
1017 | if (bp != NULL) { | | 1017 | if (bp != NULL) { |
1018 | long save; | | 1018 | long save; |
1019 | | | 1019 | |
1020 | mtx_lock(&np->n_mtx); | | 1020 | mtx_lock(&np->n_mtx); |
1021 | np->n_size = uio->uio_offset + n; | | 1021 | np->n_size = uio->uio_offset + n; |
1022 | np->n_flag |= NMODIFIED; | | 1022 | np->n_flag |= NMODIFIED; |
1023 | vnode_pager_setsize(vp, np->n_size); | | 1023 | vnode_pager_setsize(vp, np->n_size); |
1024 | mtx_unlock(&np->n_mtx); | | 1024 | mtx_unlock(&np->n_mtx); |
1025 | | | 1025 | |
1026 | save = bp->b_flags & B_CACHE; | | 1026 | save = bp->b_flags & B_CACHE; |
1027 | bcount = on + n; | | 1027 | bcount = on + n; |
1028 | allocbuf(bp, bcount); | | 1028 | allocbuf(bp, bcount); |
1029 | bp->b_flags |= save; | | 1029 | bp->b_flags |= save; |
1030 | if (noncontig_write != 0 && on > obcount) | | 1030 | if (noncontig_write != 0 && on > obcount) |
1031 | vfs_bio_bzero_buf(bp, obcount, on - | | 1031 | vfs_bio_bzero_buf(bp, obcount, on - |
1032 | obcount); | | 1032 | obcount); |
1033 | } | | 1033 | } |
1034 | } else { | | 1034 | } else { |
1035 | /* | | 1035 | /* |
1036 | * Obtain the locked cache block first, and then | | 1036 | * Obtain the locked cache block first, and then |
1037 | * adjust the file's size as appropriate. | | 1037 | * adjust the file's size as appropriate. |
1038 | */ | | 1038 | */ |
1039 | bcount = on + n; | | 1039 | bcount = on + n; |
1040 | if ((off_t)lbn * biosize + bcount < np->n_size) { | | 1040 | if ((off_t)lbn * biosize + bcount < np->n_size) { |
1041 | if ((off_t)(lbn + 1) * biosize < np->n_size) | | 1041 | if ((off_t)(lbn + 1) * biosize < np->n_size) |
1042 | bcount = biosize; | | 1042 | bcount = biosize; |
1043 | else | | 1043 | else |
1044 | bcount = np->n_size - (off_t)lbn * biosize; | | 1044 | bcount = np->n_size - (off_t)lbn * biosize; |
1045 | } | | 1045 | } |
1046 | mtx_unlock(&np->n_mtx); | | 1046 | mtx_unlock(&np->n_mtx); |
1047 | bp = nfs_getcacheblk(vp, lbn, bcount, td); | | 1047 | bp = nfs_getcacheblk(vp, lbn, bcount, td); |
1048 | mtx_lock(&np->n_mtx); | | 1048 | mtx_lock(&np->n_mtx); |
1049 | if (uio->uio_offset + n > np->n_size) { | | 1049 | if (uio->uio_offset + n > np->n_size) { |
1050 | np->n_size = uio->uio_offset + n; | | 1050 | np->n_size = uio->uio_offset + n; |
1051 | np->n_flag |= NMODIFIED; | | 1051 | np->n_flag |= NMODIFIED; |
1052 | vnode_pager_setsize(vp, np->n_size); | | 1052 | vnode_pager_setsize(vp, np->n_size); |
1053 | } | | 1053 | } |
1054 | mtx_unlock(&np->n_mtx); | | 1054 | mtx_unlock(&np->n_mtx); |
1055 | } | | 1055 | } |
1056 | | | 1056 | |
1057 | if (!bp) { | | 1057 | if (!bp) { |
1058 | error = newnfs_sigintr(nmp, td); | | 1058 | error = newnfs_sigintr(nmp, td); |
1059 | if (!error) | | 1059 | if (!error) |
1060 | error = EINTR; | | 1060 | error = EINTR; |
1061 | break; | | 1061 | break; |
1062 | } | | 1062 | } |
1063 | | | 1063 | |
1064 | /* | | 1064 | /* |
1065 | * Issue a READ if B_CACHE is not set. In special-append | | 1065 | * Issue a READ if B_CACHE is not set. In special-append |
1066 | * mode, B_CACHE is based on the buffer prior to the write | | 1066 | * mode, B_CACHE is based on the buffer prior to the write |
1067 | * op and is typically set, avoiding the read. If a read | | 1067 | * op and is typically set, avoiding the read. If a read |
1068 | * is required in special append mode, the server will | | 1068 | * is required in special append mode, the server will |
1069 | * probably send us a short-read since we extended the file | | 1069 | * probably send us a short-read since we extended the file |
1070 | * on our end, resulting in b_resid == 0 and, thusly, | | 1070 | * on our end, resulting in b_resid == 0 and, thusly, |
1071 | * B_CACHE getting set. | | 1071 | * B_CACHE getting set. |
1072 | * | | 1072 | * |
1073 | * We can also avoid issuing the read if the write covers | | 1073 | * We can also avoid issuing the read if the write covers |
1074 | * the entire buffer. We have to make sure the buffer state | | 1074 | * the entire buffer. We have to make sure the buffer state |
1075 | * is reasonable in this case since we will not be initiating | | 1075 | * is reasonable in this case since we will not be initiating |
1076 | * I/O. See the comments in kern/vfs_bio.c's getblk() for | | 1076 | * I/O. See the comments in kern/vfs_bio.c's getblk() for |
1077 | * more information. | | 1077 | * more information. |
1078 | * | | 1078 | * |
1079 | * B_CACHE may also be set due to the buffer being cached | | 1079 | * B_CACHE may also be set due to the buffer being cached |
1080 | * normally. | | 1080 | * normally. |
1081 | */ | | 1081 | */ |
1082 | | | 1082 | |
1083 | bp_cached = 1; | | 1083 | bp_cached = 1; |
1084 | if (on == 0 && n == bcount) { | | 1084 | if (on == 0 && n == bcount) { |
1085 | if ((bp->b_flags & B_CACHE) == 0) | | 1085 | if ((bp->b_flags & B_CACHE) == 0) |
1086 | bp_cached = 0; | | 1086 | bp_cached = 0; |
1087 | bp->b_flags |= B_CACHE; | | 1087 | bp->b_flags |= B_CACHE; |
1088 | bp->b_flags &= ~B_INVAL; | | 1088 | bp->b_flags &= ~B_INVAL; |
1089 | bp->b_ioflags &= ~BIO_ERROR; | | 1089 | bp->b_ioflags &= ~BIO_ERROR; |
1090 | } | | 1090 | } |
1091 | | | 1091 | |
1092 | if ((bp->b_flags & B_CACHE) == 0) { | | 1092 | if ((bp->b_flags & B_CACHE) == 0) { |
1093 | bp->b_iocmd = BIO_READ; | | 1093 | bp->b_iocmd = BIO_READ; |
1094 | vfs_busy_pages(bp, 0); | | 1094 | vfs_busy_pages(bp, 0); |
1095 | error = ncl_doio(vp, bp, cred, td, 0); | | 1095 | error = ncl_doio(vp, bp, cred, td, 0); |
1096 | if (error) { | | 1096 | if (error) { |
1097 | brelse(bp); | | 1097 | brelse(bp); |
1098 | break; | | 1098 | break; |
1099 | } | | 1099 | } |
1100 | } | | 1100 | } |
1101 | if (bp->b_wcred == NOCRED) | | 1101 | if (bp->b_wcred == NOCRED) |
1102 | bp->b_wcred = crhold(cred); | | 1102 | bp->b_wcred = crhold(cred); |
1103 | mtx_lock(&np->n_mtx); | | 1103 | mtx_lock(&np->n_mtx); |
1104 | np->n_flag |= NMODIFIED; | | 1104 | np->n_flag |= NMODIFIED; |
1105 | mtx_unlock(&np->n_mtx); | | 1105 | mtx_unlock(&np->n_mtx); |
1106 | | | 1106 | |
1107 | /* | | 1107 | /* |
1108 | * If dirtyend exceeds file size, chop it down. This should | | 1108 | * If dirtyend exceeds file size, chop it down. This should |
1109 | * not normally occur but there is an append race where it | | 1109 | * not normally occur but there is an append race where it |
1110 | * might occur XXX, so we log it. | | 1110 | * might occur XXX, so we log it. |
1111 | * | | 1111 | * |
1112 | * If the chopping creates a reverse-indexed or degenerate | | 1112 | * If the chopping creates a reverse-indexed or degenerate |
1113 | * situation with dirtyoff/end, we 0 both of them. | | 1113 | * situation with dirtyoff/end, we 0 both of them. |
1114 | */ | | 1114 | */ |
1115 | | | 1115 | |
1116 | if (bp->b_dirtyend > bcount) { | | 1116 | if (bp->b_dirtyend > bcount) { |
1117 | printf("NFS append race @%lx:%d\n", | | 1117 | printf("NFS append race @%lx:%d\n", |
1118 | (long)bp->b_blkno * DEV_BSIZE, | | 1118 | (long)bp->b_blkno * DEV_BSIZE, |
1119 | bp->b_dirtyend - bcount); | | 1119 | bp->b_dirtyend - bcount); |
1120 | bp->b_dirtyend = bcount; | | 1120 | bp->b_dirtyend = bcount; |
1121 | } | | 1121 | } |
1122 | | | 1122 | |
1123 | if (bp->b_dirtyoff >= bp->b_dirtyend) | | 1123 | if (bp->b_dirtyoff >= bp->b_dirtyend) |
1124 | bp->b_dirtyoff = bp->b_dirtyend = 0; | | 1124 | bp->b_dirtyoff = bp->b_dirtyend = 0; |
1125 | | | 1125 | |
1126 | /* | | 1126 | /* |
1127 | * If the new write will leave a contiguous dirty | | 1127 | * If the new write will leave a contiguous dirty |
1128 | * area, just update the b_dirtyoff and b_dirtyend, | | 1128 | * area, just update the b_dirtyoff and b_dirtyend, |
1129 | * otherwise force a write rpc of the old dirty area. | | 1129 | * otherwise force a write rpc of the old dirty area. |
1130 | * | | 1130 | * |
1131 | * If there has been a file lock applied to this file | | 1131 | * If there has been a file lock applied to this file |
1132 | * or vfs.nfs.old_noncontig_writing is set, do the following: | | 1132 | * or vfs.nfs.old_noncontig_writing is set, do the following: |
1133 | * While it is possible to merge discontiguous writes due to | | 1133 | * While it is possible to merge discontiguous writes due to |
1134 | * our having a B_CACHE buffer ( and thus valid read data | | 1134 | * our having a B_CACHE buffer ( and thus valid read data |
1135 | * for the hole), we don't because it could lead to | | 1135 | * for the hole), we don't because it could lead to |
1136 | * significant cache coherency problems with multiple clients, | | 1136 | * significant cache coherency problems with multiple clients, |
1137 | * especially if locking is implemented later on. | | 1137 | * especially if locking is implemented later on. |
1138 | * | | 1138 | * |
1139 | * If vfs.nfs.old_noncontig_writing is not set and there has | | 1139 | * If vfs.nfs.old_noncontig_writing is not set and there has |
1140 | * not been file locking done on this file: | | 1140 | * not been file locking done on this file: |
1141 | * Relax coherency a bit for the sake of performance and | | 1141 | * Relax coherency a bit for the sake of performance and |
1142 | * expand the current dirty region to contain the new | | 1142 | * expand the current dirty region to contain the new |
1143 | * write even if it means we mark some non-dirty data as | | 1143 | * write even if it means we mark some non-dirty data as |
1144 | * dirty. | | 1144 | * dirty. |
1145 | */ | | 1145 | */ |
1146 | | | 1146 | |
1147 | if (noncontig_write == 0 && bp->b_dirtyend > 0 && | | 1147 | if (noncontig_write == 0 && bp->b_dirtyend > 0 && |
1148 | (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { | | 1148 | (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { |
1149 | if (bwrite(bp) == EINTR) { | | 1149 | if (bwrite(bp) == EINTR) { |
1150 | error = EINTR; | | 1150 | error = EINTR; |
1151 | break; | | 1151 | break; |
1152 | } | | 1152 | } |
1153 | goto again; | | 1153 | goto again; |
1154 | } | | 1154 | } |
1155 | | | 1155 | |
1156 | local_resid = uio->uio_resid; | | 1156 | local_resid = uio->uio_resid; |
1157 | error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio); | | 1157 | error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio); |
1158 | | | 1158 | |
1159 | if (error != 0 && !bp_cached) { | | 1159 | if (error != 0 && !bp_cached) { |
1160 | /* | | 1160 | /* |
1161 | * This block has no other content than what | | 1161 | * This block has no other content than what |
1162 | * possibly was written by the faulty uiomove. | | 1162 | * possibly was written by the faulty uiomove. |
1163 | * Release it, forgetting the data pages, to | | 1163 | * Release it, forgetting the data pages, to |
1164 | * prevent the leak of uninitialized data to | | 1164 | * prevent the leak of uninitialized data to |
1165 | * usermode. | | 1165 | * usermode. |
1166 | */ | | 1166 | */ |
1167 | bp->b_ioflags |= BIO_ERROR; | | 1167 | bp->b_ioflags |= BIO_ERROR; |
1168 | brelse(bp); | | 1168 | brelse(bp); |
1169 | uio->uio_offset -= local_resid - uio->uio_resid; | | 1169 | uio->uio_offset -= local_resid - uio->uio_resid; |
1170 | uio->uio_resid = local_resid; | | 1170 | uio->uio_resid = local_resid; |
1171 | break; | | 1171 | break; |
1172 | } | | 1172 | } |
1173 | | | 1173 | |
1174 | /* | | 1174 | /* |
1175 | * Since this block is being modified, it must be written | | 1175 | * Since this block is being modified, it must be written |
1176 | * again and not just committed. Since write clustering does | | 1176 | * again and not just committed. Since write clustering does |
1177 | * not work for the stage 1 data write, only the stage 2 | | 1177 | * not work for the stage 1 data write, only the stage 2 |
1178 | * commit rpc, we have to clear B_CLUSTEROK as well. | | 1178 | * commit rpc, we have to clear B_CLUSTEROK as well. |
1179 | */ | | 1179 | */ |
1180 | bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); | | 1180 | bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); |
1181 | | | 1181 | |
1182 | /* | | 1182 | /* |
1183 | * Get the partial update on the progress made from | | 1183 | * Get the partial update on the progress made from |
1184 | * uiomove, if an error occurred. | | 1184 | * uiomove, if an error occurred. |
1185 | */ | | 1185 | */ |
1186 | if (error != 0) | | 1186 | if (error != 0) |
1187 | n = local_resid - uio->uio_resid; | | 1187 | n = local_resid - uio->uio_resid; |
1188 | | | 1188 | |
1189 | /* | | 1189 | /* |
1190 | * Only update dirtyoff/dirtyend if not a degenerate | | 1190 | * Only update dirtyoff/dirtyend if not a degenerate |
1191 | * condition. | | 1191 | * condition. |
1192 | */ | | 1192 | */ |
1193 | if (n > 0) { | | 1193 | if (n > 0) { |
1194 | if (bp->b_dirtyend > 0) { | | 1194 | if (bp->b_dirtyend > 0) { |
1195 | bp->b_dirtyoff = uimin(on, bp->b_dirtyoff); | | 1195 | bp->b_dirtyoff = uimin(on, bp->b_dirtyoff); |
1196 | bp->b_dirtyend = uimax((on + n), bp->b_dirtyend); | | 1196 | bp->b_dirtyend = uimax((on + n), bp->b_dirtyend); |
1197 | } else { | | 1197 | } else { |
1198 | bp->b_dirtyoff = on; | | 1198 | bp->b_dirtyoff = on; |
1199 | bp->b_dirtyend = on + n; | | 1199 | bp->b_dirtyend = on + n; |
1200 | } | | 1200 | } |
1201 | vfs_bio_set_valid(bp, on, n); | | 1201 | vfs_bio_set_valid(bp, on, n); |
1202 | } | | 1202 | } |
1203 | | | 1203 | |
1204 | /* | | 1204 | /* |
1205 | * If IO_SYNC do bwrite(). | | 1205 | * If IO_SYNC do bwrite(). |
1206 | * | | 1206 | * |
1207 | * IO_INVAL appears to be unused. The idea appears to be | | 1207 | * IO_INVAL appears to be unused. The idea appears to be |
1208 | * to turn off caching in this case. Very odd. XXX | | 1208 | * to turn off caching in this case. Very odd. XXX |
1209 | */ | | 1209 | */ |
1210 | if ((ioflag & IO_SYNC)) { | | 1210 | if ((ioflag & IO_SYNC)) { |
1211 | if (ioflag & IO_INVAL) | | 1211 | if (ioflag & IO_INVAL) |
1212 | bp->b_flags |= B_NOCACHE; | | 1212 | bp->b_flags |= B_NOCACHE; |
1213 | error1 = bwrite(bp); | | 1213 | error1 = bwrite(bp); |
1214 | if (error1 != 0) { | | 1214 | if (error1 != 0) { |
1215 | if (error == 0) | | 1215 | if (error == 0) |
1216 | error = error1; | | 1216 | error = error1; |
1217 | break; | | 1217 | break; |
1218 | } | | 1218 | } |
1219 | } else if ((n + on) == biosize) { | | 1219 | } else if ((n + on) == biosize) { |
1220 | bp->b_flags |= B_ASYNC; | | 1220 | bp->b_flags |= B_ASYNC; |
1221 | (void) ncl_writebp(bp, 0, NULL); | | 1221 | (void) ncl_writebp(bp, 0, NULL); |
1222 | } else { | | 1222 | } else { |
1223 | bdwrite(bp); | | 1223 | bdwrite(bp); |
1224 | } | | 1224 | } |
1225 | | | 1225 | |
1226 | if (error != 0) | | 1226 | if (error != 0) |
1227 | break; | | 1227 | break; |
1228 | } while (uio->uio_resid > 0 && n > 0); | | 1228 | } while (uio->uio_resid > 0 && n > 0); |
1229 | | | 1229 | |
1230 | if (error != 0) { | | 1230 | if (error != 0) { |
1231 | if (ioflag & IO_UNIT) { | | 1231 | if (ioflag & IO_UNIT) { |
1232 | VATTR_NULL(&vattr); | | 1232 | VATTR_NULL(&vattr); |
1233 | vattr.va_size = orig_size; | | 1233 | vattr.va_size = orig_size; |
1234 | /* IO_SYNC is handled implicitly */ | | 1234 | /* IO_SYNC is handled implicitly */ |
1235 | (void)VOP_SETATTR(vp, &vattr, cred); | | 1235 | (void)VOP_SETATTR(vp, &vattr, cred); |
1236 | uio->uio_offset -= orig_resid - uio->uio_resid; | | 1236 | uio->uio_offset -= orig_resid - uio->uio_resid; |
1237 | uio->uio_resid = orig_resid; | | 1237 | uio->uio_resid = orig_resid; |
1238 | } | | 1238 | } |
1239 | } | | 1239 | } |
1240 | | | 1240 | |
1241 | return (error); | | 1241 | return (error); |
1242 | } | | 1242 | } |
1243 | | | 1243 | |
1244 | /* | | 1244 | /* |
1245 | * Get an nfs cache block. | | 1245 | * Get an nfs cache block. |
1246 | * | | 1246 | * |
1247 | * Allocate a new one if the block isn't currently in the cache | | 1247 | * Allocate a new one if the block isn't currently in the cache |
1248 | * and return the block marked busy. If the calling process is | | 1248 | * and return the block marked busy. If the calling process is |
1249 | * interrupted by a signal for an interruptible mount point, return | | 1249 | * interrupted by a signal for an interruptible mount point, return |
1250 | * NULL. | | 1250 | * NULL. |
1251 | * | | 1251 | * |
1252 | * The caller must carefully deal with the possible B_INVAL state of | | 1252 | * The caller must carefully deal with the possible B_INVAL state of |
1253 | * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it | | 1253 | * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it |
1254 | * indirectly), so synchronous reads can be issued without worrying about | | 1254 | * indirectly), so synchronous reads can be issued without worrying about |
1255 | * the B_INVAL state. We have to be a little more careful when dealing | | 1255 | * the B_INVAL state. We have to be a little more careful when dealing |
1256 | * with writes (see comments in nfs_write()) when extending a file past | | 1256 | * with writes (see comments in nfs_write()) when extending a file past |
1257 | * its EOF. | | 1257 | * its EOF. |
1258 | */ | | 1258 | */ |
1259 | static struct buf * | | 1259 | static struct buf * |
1260 | nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) | | 1260 | nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) |
1261 | { | | 1261 | { |
1262 | struct buf *bp; | | 1262 | struct buf *bp; |
1263 | struct mount *mp; | | 1263 | struct mount *mp; |
1264 | struct nfsmount *nmp; | | 1264 | struct nfsmount *nmp; |
1265 | | | 1265 | |
1266 | mp = vp->v_mount; | | 1266 | mp = vp->v_mount; |
1267 | nmp = VFSTONFS(mp); | | 1267 | nmp = VFSTONFS(mp); |
1268 | | | 1268 | |
1269 | if (nmp->nm_flag & NFSMNT_INT) { | | 1269 | if (nmp->nm_flag & NFSMNT_INT) { |
1270 | sigset_t oldset; | | 1270 | sigset_t oldset; |
1271 | | | 1271 | |
1272 | newnfs_set_sigmask(td, &oldset); | | 1272 | newnfs_set_sigmask(td, &oldset); |
1273 | bp = getblk(vp, bn, size, PCATCH, 0, 0); | | 1273 | bp = getblk(vp, bn, size, PCATCH, 0, 0); |
1274 | newnfs_restore_sigmask(td, &oldset); | | 1274 | newnfs_restore_sigmask(td, &oldset); |
1275 | while (bp == NULL) { | | 1275 | while (bp == NULL) { |
1276 | if (newnfs_sigintr(nmp, td)) | | 1276 | if (newnfs_sigintr(nmp, td)) |
1277 | return (NULL); | | 1277 | return (NULL); |
1278 | bp = getblk(vp, bn, size, 0, 2 * hz, 0); | | 1278 | bp = getblk(vp, bn, size, 0, 2 * hz, 0); |
1279 | } | | 1279 | } |
1280 | } else { | | 1280 | } else { |
1281 | bp = getblk(vp, bn, size, 0, 0, 0); | | 1281 | bp = getblk(vp, bn, size, 0, 0, 0); |
1282 | } | | 1282 | } |
1283 | | | 1283 | |
1284 | if (vp->v_type == VREG) | | 1284 | if (vp->v_type == VREG) |
1285 | bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE); | | 1285 | bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE); |