Thu Jan 28 08:02:12 2010 UTC
genfs_getpages: Constify orignpages.  Don't overwrite it with the value
recalculated from GOP_SIZE(GOP_SIZE_MEM); assign that result to a separate
variable (orignmempages) instead.


(uebayasi)
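
The change at a glance: a single mutable local that carried two meanings
becomes two single-assignment constants.  A distilled sketch of the pattern,
with identifiers taken from the diff below and the surrounding logic elided:

	/* before: one variable, silently repurposed partway through */
	int orignpages = *ap->a_count;		/* pages the caller asked for */
	...
	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);

	/* after: each value is computed once and never changes */
	const int orignpages = *ap->a_count;	/* the caller's request, kept intact */
	const int orignmempages = MIN(orignpages,	/* clamped to the in-memory EOF */
	    round_page(memeof - origoffset) >> PAGE_SHIFT);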
diff -r1.28 -r1.29 src/sys/miscfs/genfs/genfs_io.c


--- src/sys/miscfs/genfs/genfs_io.c 2010/01/28 07:49:08 1.28
+++ src/sys/miscfs/genfs/genfs_io.c 2010/01/28 08:02:12 1.29
@@ -1,14 +1,14 @@
-/*	$NetBSD: genfs_io.c,v 1.28 2010/01/28 07:49:08 uebayasi Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.29 2010/01/28 08:02:12 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -21,27 +21,27 @@
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.28 2010/01/28 07:49:08 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.29 2010/01/28 08:02:12 uebayasi Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/mount.h>
 #include <sys/namei.h>
 #include <sys/vnode.h>
 #include <sys/fcntl.h>
 #include <sys/kmem.h>
 #include <sys/poll.h>
 #include <sys/mman.h>
 #include <sys/file.h>
@@ -97,27 +97,27 @@ genfs_getpages(void *v)
 	struct vop_getpages_args /* {
 		struct vnode *a_vp;
 		voff_t a_offset;
 		struct vm_page **a_m;
 		int *a_count;
 		int a_centeridx;
 		vm_prot_t a_access_type;
 		int a_advice;
 		int a_flags;
 	} */ * const ap = v;
 
 	off_t diskeof, memeof;
 	off_t startoffset, endoffset;
-	int i, error, npages, orignpages, npgs, run, ridx;
+	int i, error, npages, npgs, run, ridx;
 	const int flags = ap->a_flags;
 	struct vnode * const vp = ap->a_vp;
 	struct genfs_node * const gp = VTOG(vp);
 	struct uvm_object * const uobj = &vp->v_uobj;
 	struct vm_page *pg, **pgs, *pgs_onstack[UBC_MAX_PAGES];
 	int pgs_size;
 	kauth_cred_t cred = curlwp->l_cred;		/* XXXUBC curlwp */
 	const bool async = (flags & PGO_SYNCIO) == 0;
 	const bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
 	bool has_trans = false;
 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
 	const bool blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
@@ -125,27 +125,27 @@ genfs_getpages(void *v)
 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
 
 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
 	    vp->v_type == VLNK || vp->v_type == VBLK);
 
 	pgs = NULL;
 	pgs_size = 0;
 
 startover:
 	error = 0;
 	const voff_t origvsize = vp->v_size;
 	const off_t origoffset = ap->a_offset;
-	orignpages = *ap->a_count;
+	const int orignpages = *ap->a_count;
 	GOP_SIZE(vp, origvsize, &diskeof, 0);
 	if (flags & PGO_PASTEOF) {
 		off_t newsize;
 #if defined(DIAGNOSTIC)
 		off_t writeeof;
 #endif /* defined(DIAGNOSTIC) */
 
 		newsize = MAX(origvsize,
 		    origoffset + (orignpages << PAGE_SHIFT));
 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
 #if defined(DIAGNOSTIC)
 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
 		if (newsize > round_page(writeeof)) {
@@ -245,29 +245,29 @@ startover:
 	mutex_exit(&uobj->vmobjlock);
 
 	/*
 	 * find the requested pages and make some simple checks.
 	 * leave space in the page array for a whole block.
 	 */
 
 	const int fs_bshift = (vp->v_type != VBLK) ?
 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
 	const int dev_bshift = (vp->v_type != VBLK) ?
 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
 	const int fs_bsize = 1 << fs_bshift;
 
-	orignpages = MIN(orignpages,
+	const int orignmempages = MIN(orignpages,
 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
-	npages = orignpages;
+	npages = orignmempages;
 	startoffset = origoffset & ~(fs_bsize - 1);
 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
 	    fs_bsize - 1) & ~(fs_bsize - 1));
 	endoffset = MIN(endoffset, round_page(memeof));
 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
 
 	pgs_size = sizeof(struct vm_page *) *
 	    ((endoffset - startoffset) >> PAGE_SHIFT);
 	if (pgs_size > sizeof(pgs_onstack)) {
 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
 		if (pgs == NULL) {
 			pgs = pgs_onstack;
 			error = ENOMEM;
@@ -297,30 +297,30 @@ startover:
 		rw_enter(&gp->g_glock, RW_WRITER);
 	} else {
 		rw_enter(&gp->g_glock, RW_READER);
 	}
 	mutex_enter(&uobj->vmobjlock);
 	if (vp->v_size < origvsize) {
 		genfs_node_unlock(vp);
 		if (pgs != pgs_onstack)
 			kmem_free(pgs, pgs_size);
 		goto startover;
 	}
 
 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
-	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
+	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
 		genfs_node_unlock(vp);
 		KASSERT(async != 0);
-		genfs_rel_pages(&pgs[ridx], orignpages);
+		genfs_rel_pages(&pgs[ridx], orignmempages);
 		mutex_exit(&uobj->vmobjlock);
 		error = EBUSY;
 		goto out_err;
 	}
 
 	/*
 	 * if the pages are already resident, just return them.
 	 */
 
 	for (i = 0; i < npages; i++) {
 		struct vm_page *pg1 = pgs[ridx + i];
 
 		if ((pg1->flags & PG_FAKE) ||
@@ -355,35 +355,35 @@ startover:
 	{
 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
 	vaddr_t kva;
 	struct buf *bp, *mbp;
 	bool sawhole = false;
 
 	/*
 	 * the page wasn't resident and we're not overwriting,
 	 * so we're going to have to do some i/o.
 	 * find any additional pages needed to cover the expanded range.
 	 */
 
 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
-	if (startoffset != origoffset || npages != orignpages) {
+	if (startoffset != origoffset || npages != orignmempages) {
 
 		/*
 		 * we need to avoid deadlocks caused by locking
 		 * additional pages at lower offsets than pages we
 		 * already have locked.  unlock them all and start over.
 		 */
 
-		genfs_rel_pages(&pgs[ridx], orignpages);
+		genfs_rel_pages(&pgs[ridx], orignmempages);
 		memset(pgs, 0, pgs_size);
 
 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
 		    startoffset, endoffset, 0,0);
 		npgs = npages;
 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
 			genfs_node_unlock(vp);
 			KASSERT(async != 0);
 			genfs_rel_pages(pgs, npages);
 			mutex_exit(&uobj->vmobjlock);
 			error = EBUSY;
 			goto out_err;
@@ -656,50 +656,50 @@ out:
 	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		pg = pgs[i];
 		if (pg == NULL) {
 			continue;
 		}
 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
 		    pg, pg->flags, 0,0);
 		if (pg->flags & PG_FAKE && !overwrite) {
 			pg->flags &= ~(PG_FAKE);
 			pmap_clear_modify(pgs[i]);
 		}
 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
-		if (i < ridx || i >= ridx + orignpages || async) {
+		if (i < ridx || i >= ridx + orignmempages || async) {
 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
 			    pg, pg->offset,0,0);
 			if (pg->flags & PG_WANTED) {
 				wakeup(pg);
 			}
 			if (pg->flags & PG_FAKE) {
 				KASSERT(overwrite);
 				uvm_pagezero(pg);
 			}
 			if (pg->flags & PG_RELEASED) {
 				uvm_pagefree(pg);
 				continue;
 			}
 			uvm_pageenqueue(pg);
 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
 			UVM_PAGE_OWN(pg, NULL);
 		}
 	}
 	mutex_exit(&uvm_pageqlock);
 	mutex_exit(&uobj->vmobjlock);
 	if (ap->a_m != NULL) {
 		memcpy(ap->a_m, &pgs[ridx],
-		    orignpages * sizeof(struct vm_page *));
+		    orignmempages * sizeof(struct vm_page *));
 	}
 
 out_err:
 	if (pgs != NULL && pgs != pgs_onstack)
 		kmem_free(pgs, pgs_size);
 	if (has_trans)
 		fstrans_done(vp->v_mount);
 	return (error);
 }
 
 /*
  * generic VM putpages routine.
  * Write the given range of pages to backing store.
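
The two counts differ exactly when a request extends past the in-memory end
of file (memeof): only the pages the object can back are found, returned,
and released.  A minimal standalone sketch of the clamp at lines 258-259 of
the new revision, assuming 4 KiB pages and redefining MIN/round_page locally
rather than pulling in the kernel headers:

	#include <sys/types.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
	#define PAGE_SIZE	(1 << PAGE_SHIFT)
	#define PAGE_MASK	(PAGE_SIZE - 1)
	#define round_page(x)	(((x) + PAGE_MASK) & ~(off_t)PAGE_MASK)
	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		const off_t origoffset = 0x10000;	/* request starts at 64 KiB */
		const int orignpages = 16;		/* caller asks for 16 pages */
		const off_t memeof = 0x18000;		/* in-memory EOF at 96 KiB */

		/* the same computation genfs_getpages performs */
		const int orignmempages = MIN(orignpages,
		    (int)(round_page(memeof - origoffset) >> PAGE_SHIFT));

		/* prints: requested 16 pages, 8 backed by memory */
		printf("requested %d pages, %d backed by memory\n",
		    orignpages, orignmempages);
		return 0;
	}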