Add an optional pglist argument to uvm_obj_wirepages, to be filled with the list of pages that were wired.

diff -r1.121 -r1.122 src/sys/kern/sysv_shm.c
(christos)
--- src/sys/kern/sysv_shm.c 2011/07/30 06:19:02 1.121
+++ src/sys/kern/sysv_shm.c 2011/08/27 09:11:52 1.122
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: sysv_shm.c,v 1.121 2011/07/30 06:19:02 uebayasi Exp $ */ | 1 | /* $NetBSD: sysv_shm.c,v 1.122 2011/08/27 09:11:52 christos Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | |
9 | * NASA Ames Research Center, and by Mindaugas Rasiukevicius. | 9 | * NASA Ames Research Center, and by Mindaugas Rasiukevicius. | |
10 | * | 10 | * | |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without | |
12 | * modification, are permitted provided that the following conditions | 12 | * modification, are permitted provided that the following conditions | |
13 | * are met: | 13 | * are met: | |
14 | * 1. Redistributions of source code must retain the above copyright | 14 | * 1. Redistributions of source code must retain the above copyright | |
@@ -51,27 +51,27 @@ | @@ -51,27 +51,27 @@ | |||
51 | * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR | 51 | * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR | |
52 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | 52 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
53 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | 53 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
54 | * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | 54 | * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
55 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 55 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
56 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 56 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
60 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 60 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
61 | */ | 61 | */ | |
62 | 62 | |||
63 | #include <sys/cdefs.h> | 63 | #include <sys/cdefs.h> | |
64 | __KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.121 2011/07/30 06:19:02 uebayasi Exp $"); | 64 | __KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.122 2011/08/27 09:11:52 christos Exp $"); | |
65 | 65 | |||
66 | #define SYSVSHM | 66 | #define SYSVSHM | |
67 | 67 | |||
68 | #include <sys/param.h> | 68 | #include <sys/param.h> | |
69 | #include <sys/kernel.h> | 69 | #include <sys/kernel.h> | |
70 | #include <sys/kmem.h> | 70 | #include <sys/kmem.h> | |
71 | #include <sys/shm.h> | 71 | #include <sys/shm.h> | |
72 | #include <sys/mutex.h> | 72 | #include <sys/mutex.h> | |
73 | #include <sys/mman.h> | 73 | #include <sys/mman.h> | |
74 | #include <sys/stat.h> | 74 | #include <sys/stat.h> | |
75 | #include <sys/sysctl.h> | 75 | #include <sys/sysctl.h> | |
76 | #include <sys/mount.h> /* XXX for <sys/syscallargs.h> */ | 76 | #include <sys/mount.h> /* XXX for <sys/syscallargs.h> */ | |
77 | #include <sys/syscallargs.h> | 77 | #include <sys/syscallargs.h> | |
@@ -258,27 +258,27 @@ shm_memlock(struct lwp *l, struct shmid_ | @@ -258,27 +258,27 @@ shm_memlock(struct lwp *l, struct shmid_ | |||
258 | shmmap_s = shmmap_getprivate(p); | 258 | shmmap_s = shmmap_getprivate(p); | |
259 | 259 | |||
260 | /* Find our shared memory address by shmid */ | 260 | /* Find our shared memory address by shmid */ | |
261 | SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) { | 261 | SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) { | |
262 | if (shmmap_se->shmid != shmid) | 262 | if (shmmap_se->shmid != shmid) | |
263 | continue; | 263 | continue; | |
264 | 264 | |||
265 | size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET; | 265 | size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET; | |
266 | 266 | |||
267 | if (cmd == SHM_LOCK && | 267 | if (cmd == SHM_LOCK && | |
268 | (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) { | 268 | (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) { | |
269 | /* Wire the object and map, then tag it */ | 269 | /* Wire the object and map, then tag it */ | |
270 | error = uvm_obj_wirepages(shmseg->_shm_internal, | 270 | error = uvm_obj_wirepages(shmseg->_shm_internal, | |
271 | 0, size); | 271 | 0, size, NULL); | |
272 | if (error) | 272 | if (error) | |
273 | return EIO; | 273 | return EIO; | |
274 | error = uvm_map_pageable(&p->p_vmspace->vm_map, | 274 | error = uvm_map_pageable(&p->p_vmspace->vm_map, | |
275 | shmmap_se->va, shmmap_se->va + size, false, 0); | 275 | shmmap_se->va, shmmap_se->va + size, false, 0); | |
276 | if (error) { | 276 | if (error) { | |
277 | uvm_obj_unwirepages(shmseg->_shm_internal, | 277 | uvm_obj_unwirepages(shmseg->_shm_internal, | |
278 | 0, size); | 278 | 0, size); | |
279 | if (error == EFAULT) | 279 | if (error == EFAULT) | |
280 | error = ENOMEM; | 280 | error = ENOMEM; | |
281 | return error; | 281 | return error; | |
282 | } | 282 | } | |
283 | shmseg->shm_perm.mode |= SHMSEG_WIRED; | 283 | shmseg->shm_perm.mode |= SHMSEG_WIRED; | |
284 | 284 | |||
@@ -724,27 +724,27 @@ sys_shmget(struct lwp *l, const struct s | @@ -724,27 +724,27 @@ sys_shmget(struct lwp *l, const struct s | |||
724 | */ | 724 | */ | |
725 | shmseg = &shmsegs[segnum]; | 725 | shmseg = &shmsegs[segnum]; | |
726 | shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; | 726 | shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; | |
727 | shm_committed += btoc(size); | 727 | shm_committed += btoc(size); | |
728 | shm_nused++; | 728 | shm_nused++; | |
729 | lockmem = shm_use_phys; | 729 | lockmem = shm_use_phys; | |
730 | shm_realloc_disable++; | 730 | shm_realloc_disable++; | |
731 | mutex_exit(&shm_lock); | 731 | mutex_exit(&shm_lock); | |
732 | 732 | |||
733 | /* Allocate the memory object and lock it if needed */ | 733 | /* Allocate the memory object and lock it if needed */ | |
734 | shmseg->_shm_internal = uao_create(size, 0); | 734 | shmseg->_shm_internal = uao_create(size, 0); | |
735 | if (lockmem) { | 735 | if (lockmem) { | |
736 | /* Wire the pages and tag it */ | 736 | /* Wire the pages and tag it */ | |
737 | error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size); | 737 | error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size, NULL); | |
738 | if (error) { | 738 | if (error) { | |
739 | uao_detach(shmseg->_shm_internal); | 739 | uao_detach(shmseg->_shm_internal); | |
740 | mutex_enter(&shm_lock); | 740 | mutex_enter(&shm_lock); | |
741 | shm_free_segment(segnum); | 741 | shm_free_segment(segnum); | |
742 | shm_realloc_disable--; | 742 | shm_realloc_disable--; | |
743 | mutex_exit(&shm_lock); | 743 | mutex_exit(&shm_lock); | |
744 | return error; | 744 | return error; | |
745 | } | 745 | } | |
746 | } | 746 | } | |
747 | 747 | |||
748 | /* | 748 | /* | |
749 | * Please note, while segment is marked, there are no need to hold the | 749 | * Please note, while segment is marked, there are no need to hold the | |
750 | * lock, while setting it (except shm_perm.mode). | 750 | * lock, while setting it (except shm_perm.mode). |
--- src/sys/uvm/uvm_extern.h 2011/06/16 09:21:03 1.174
+++ src/sys/uvm/uvm_extern.h 2011/08/27 09:11:53 1.175
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: uvm_extern.h,v 1.174 2011/06/16 09:21:03 hannken Exp $ */ | 1 | /* $NetBSD: uvm_extern.h,v 1.175 2011/08/27 09:11:53 christos Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. | 4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | 7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | 8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | 9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | 10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer. | 11 | * notice, this list of conditions and the following disclaimer. | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | 12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in the | 13 | * notice, this list of conditions and the following disclaimer in the | |
14 | * documentation and/or other materials provided with the distribution. | 14 | * documentation and/or other materials provided with the distribution. | |
@@ -696,27 +696,28 @@ int uvm_mmap(struct vm_map *, vaddr_t | @@ -696,27 +696,28 @@ int uvm_mmap(struct vm_map *, vaddr_t | |||
696 | void *, voff_t, vsize_t); | 696 | void *, voff_t, vsize_t); | |
697 | vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); | 697 | vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); | |
698 | 698 | |||
699 | /* uvm_mremap.c */ | 699 | /* uvm_mremap.c */ | |
700 | int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, | 700 | int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, | |
701 | struct vm_map *, vaddr_t *, vsize_t, | 701 | struct vm_map *, vaddr_t *, vsize_t, | |
702 | struct proc *, int); | 702 | struct proc *, int); | |
703 | 703 | |||
704 | /* uvm_object.c */ | 704 | /* uvm_object.c */ | |
705 | void uvm_obj_init(struct uvm_object *, | 705 | void uvm_obj_init(struct uvm_object *, | |
706 | const struct uvm_pagerops *, bool, u_int); | 706 | const struct uvm_pagerops *, bool, u_int); | |
707 | void uvm_obj_setlock(struct uvm_object *, kmutex_t *); | 707 | void uvm_obj_setlock(struct uvm_object *, kmutex_t *); | |
708 | void uvm_obj_destroy(struct uvm_object *, bool); | 708 | void uvm_obj_destroy(struct uvm_object *, bool); | |
709 | int uvm_obj_wirepages(struct uvm_object *, off_t, off_t); | 709 | int uvm_obj_wirepages(struct uvm_object *, off_t, off_t, | |
710 | struct pglist *); | |||
710 | void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); | 711 | void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); | |
711 | 712 | |||
712 | /* uvm_page.c */ | 713 | /* uvm_page.c */ | |
713 | struct vm_page *uvm_pagealloc_strat(struct uvm_object *, | 714 | struct vm_page *uvm_pagealloc_strat(struct uvm_object *, | |
714 | voff_t, struct vm_anon *, int, int, int); | 715 | voff_t, struct vm_anon *, int, int, int); | |
715 | #define uvm_pagealloc(obj, off, anon, flags) \ | 716 | #define uvm_pagealloc(obj, off, anon, flags) \ | |
716 | uvm_pagealloc_strat((obj), (off), (anon), (flags), \ | 717 | uvm_pagealloc_strat((obj), (off), (anon), (flags), \ | |
717 | UVM_PGA_STRAT_NORMAL, 0) | 718 | UVM_PGA_STRAT_NORMAL, 0) | |
718 | void uvm_pagereplace(struct vm_page *, | 719 | void uvm_pagereplace(struct vm_page *, | |
719 | struct vm_page *); | 720 | struct vm_page *); | |
720 | void uvm_pagerealloc(struct vm_page *, | 721 | void uvm_pagerealloc(struct vm_page *, | |
721 | struct uvm_object *, voff_t); | 722 | struct uvm_object *, voff_t); | |
722 | /* Actually, uvm_page_physload takes PF#s which need their own type */ | 723 | /* Actually, uvm_page_physload takes PF#s which need their own type */ |
--- src/sys/uvm/uvm_object.c 2011/06/18 21:14:43 1.10
+++ src/sys/uvm/uvm_object.c 2011/08/27 09:11:53 1.11
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: uvm_object.c,v 1.10 2011/06/18 21:14:43 rmind Exp $ */ | 1 | /* $NetBSD: uvm_object.c,v 1.11 2011/08/27 09:11:53 christos Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Mindaugas Rasiukevicius. | 8 | * by Mindaugas Rasiukevicius. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
@@ -27,27 +27,27 @@ | @@ -27,27 +27,27 @@ | |||
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * uvm_object.c: operate with memory objects | 33 | * uvm_object.c: operate with memory objects | |
34 | * | 34 | * | |
35 | * TODO: | 35 | * TODO: | |
36 | * 1. Support PG_RELEASED-using objects | 36 | * 1. Support PG_RELEASED-using objects | |
37 | */ | 37 | */ | |
38 | 38 | |||
39 | #include <sys/cdefs.h> | 39 | #include <sys/cdefs.h> | |
40 | __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.10 2011/06/18 21:14:43 rmind Exp $"); | 40 | __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11 2011/08/27 09:11:53 christos Exp $"); | |
41 | 41 | |||
42 | #include "opt_ddb.h" | 42 | #include "opt_ddb.h" | |
43 | 43 | |||
44 | #include <sys/param.h> | 44 | #include <sys/param.h> | |
45 | #include <sys/mutex.h> | 45 | #include <sys/mutex.h> | |
46 | #include <sys/queue.h> | 46 | #include <sys/queue.h> | |
47 | #include <sys/rbtree.h> | 47 | #include <sys/rbtree.h> | |
48 | 48 | |||
49 | #include <uvm/uvm.h> | 49 | #include <uvm/uvm.h> | |
50 | #include <uvm/uvm_ddb.h> | 50 | #include <uvm/uvm_ddb.h> | |
51 | 51 | |||
52 | /* Page count to fetch per single step. */ | 52 | /* Page count to fetch per single step. */ | |
53 | #define FETCH_PAGECOUNT 16 | 53 | #define FETCH_PAGECOUNT 16 | |
@@ -113,27 +113,28 @@ uvm_obj_setlock(struct uvm_object *uo, k | @@ -113,27 +113,28 @@ uvm_obj_setlock(struct uvm_object *uo, k | |||
113 | lockptr = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); | 113 | lockptr = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); | |
114 | } | 114 | } | |
115 | uo->vmobjlock = lockptr; | 115 | uo->vmobjlock = lockptr; | |
116 | } | 116 | } | |
117 | 117 | |||
118 | /* | 118 | /* | |
119 | * uvm_obj_wirepages: wire the pages of entire UVM object. | 119 | * uvm_obj_wirepages: wire the pages of entire UVM object. | |
120 | * | 120 | * | |
121 | * => NOTE: this function should only be used for types of objects | 121 | * => NOTE: this function should only be used for types of objects | |
122 | * where PG_RELEASED flag is never set (aobj objects) | 122 | * where PG_RELEASED flag is never set (aobj objects) | |
123 | * => caller must pass page-aligned start and end values | 123 | * => caller must pass page-aligned start and end values | |
124 | */ | 124 | */ | |
125 | int | 125 | int | |
126 | uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end) | 126 | uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end, | |
127 | struct pglist *list) | |||
127 | { | 128 | { | |
128 | int i, npages, error; | 129 | int i, npages, error; | |
129 | struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; | 130 | struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; | |
130 | off_t offset = start, left; | 131 | off_t offset = start, left; | |
131 | 132 | |||
132 | left = (end - start) >> PAGE_SHIFT; | 133 | left = (end - start) >> PAGE_SHIFT; | |
133 | 134 | |||
134 | mutex_enter(uobj->vmobjlock); | 135 | mutex_enter(uobj->vmobjlock); | |
135 | while (left) { | 136 | while (left) { | |
136 | 137 | |||
137 | npages = MIN(FETCH_PAGECOUNT, left); | 138 | npages = MIN(FETCH_PAGECOUNT, left); | |
138 | 139 | |||
139 | /* Get the pages */ | 140 | /* Get the pages */ | |
@@ -167,26 +168,28 @@ uvm_obj_wirepages(struct uvm_object *uob | @@ -167,26 +168,28 @@ uvm_obj_wirepages(struct uvm_object *uob | |||
167 | pgs[i] = pg; | 168 | pgs[i] = pg; | |
168 | } | 169 | } | |
169 | 170 | |||
170 | if (pgs[i]->pqflags & PQ_AOBJ) { | 171 | if (pgs[i]->pqflags & PQ_AOBJ) { | |
171 | pgs[i]->flags &= ~(PG_CLEAN); | 172 | pgs[i]->flags &= ~(PG_CLEAN); | |
172 | uao_dropswap(uobj, i); | 173 | uao_dropswap(uobj, i); | |
173 | } | 174 | } | |
174 | } | 175 | } | |
175 | 176 | |||
176 | /* Wire the pages */ | 177 | /* Wire the pages */ | |
177 | mutex_enter(&uvm_pageqlock); | 178 | mutex_enter(&uvm_pageqlock); | |
178 | for (i = 0; i < npages; i++) { | 179 | for (i = 0; i < npages; i++) { | |
179 | uvm_pagewire(pgs[i]); | 180 | uvm_pagewire(pgs[i]); | |
181 | if (list != NULL) | |||
182 | TAILQ_INSERT_TAIL(list, pgs[i], pageq.queue); | |||
180 | } | 183 | } | |
181 | mutex_exit(&uvm_pageqlock); | 184 | mutex_exit(&uvm_pageqlock); | |
182 | 185 | |||
183 | /* Unbusy the pages */ | 186 | /* Unbusy the pages */ | |
184 | uvm_page_unbusy(pgs, npages); | 187 | uvm_page_unbusy(pgs, npages); | |
185 | 188 | |||
186 | left -= npages; | 189 | left -= npages; | |
187 | offset += npages << PAGE_SHIFT; | 190 | offset += npages << PAGE_SHIFT; | |
188 | } | 191 | } | |
189 | mutex_exit(uobj->vmobjlock); | 192 | mutex_exit(uobj->vmobjlock); | |
190 | 193 | |||
191 | return 0; | 194 | return 0; | |
192 | 195 |