Fri Apr 23 21:18:00 2010 UTC (rmind)

Use consistent naming - uvm_obj_*().
diff -r1.117 -r1.117.4.1 src/sys/kern/sysv_shm.c
diff -r1.162.2.2 -r1.162.2.3 src/sys/uvm/uvm_extern.h
diff -r1.7.4.1 -r1.7.4.2 src/sys/uvm/uvm_object.c
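
The rename only touches the identifiers; the wire/unwire semantics are
unchanged. A minimal sketch of a caller under the new naming follows; the
wrapper function, its name, and its arguments are hypothetical, but the
uvm_obj_wirepages()/uvm_obj_unwirepages() signatures are those declared in
the uvm_extern.h diff below:

	#include <sys/param.h>
	#include <uvm/uvm_extern.h>

	/* Wire all pages of a UVM object, work on them, then unwire. */
	static int
	example_wire_span(struct uvm_object *uobj, off_t size)
	{
		int error;

		/* Both offsets must be page-aligned (see uvm_object.c). */
		error = uvm_obj_wirepages(uobj, 0, size);	/* was uobj_wirepages() */
		if (error)
			return error;

		/* ... pages [0, size) are wired here ... */

		uvm_obj_unwirepages(uobj, 0, size);	/* was uobj_unwirepages() */
		return 0;
	}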

cvs diff -r1.117 -r1.117.4.1 src/sys/kern/sysv_shm.c

--- src/sys/kern/sysv_shm.c 2009/10/05 23:47:04 1.117
+++ src/sys/kern/sysv_shm.c 2010/04/23 21:18:00 1.117.4.1
@@ -1,14 +1,14 @@
-/*	$NetBSD: sysv_shm.c,v 1.117 2009/10/05 23:47:04 rmind Exp $	*/
+/*	$NetBSD: sysv_shm.c,v 1.117.4.1 2010/04/23 21:18:00 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -51,27 +51,27 @@
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.117 2009/10/05 23:47:04 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.117.4.1 2010/04/23 21:18:00 rmind Exp $");
 
 #define	SYSVSHM
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/shm.h>
 #include <sys/mutex.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/sysctl.h>
 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
 #include <sys/syscallargs.h>
@@ -258,43 +258,45 @@ shm_memlock(struct lwp *l, struct shmid_
 	KASSERT(mutex_owned(&shm_lock));
 	shmmap_s = shmmap_getprivate(p);
 
 	/* Find our shared memory address by shmid */
 	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
 		if (shmmap_se->shmid != shmid)
 			continue;
 
 		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
 
 		if (cmd == SHM_LOCK &&
 		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
 			/* Wire the object and map, then tag it */
-			error = uobj_wirepages(shmseg->_shm_internal, 0, size);
+			error = uvm_obj_wirepages(shmseg->_shm_internal,
+			    0, size);
 			if (error)
 				return EIO;
 			error = uvm_map_pageable(&p->p_vmspace->vm_map,
 			    shmmap_se->va, shmmap_se->va + size, false, 0);
 			if (error) {
-				uobj_unwirepages(shmseg->_shm_internal, 0, size);
+				uvm_obj_unwirepages(shmseg->_shm_internal,
+				    0, size);
 				if (error == EFAULT)
 					error = ENOMEM;
 				return error;
 			}
 			shmseg->shm_perm.mode |= SHMSEG_WIRED;
 
 		} else if (cmd == SHM_UNLOCK &&
 		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
 			/* Unwire the object and map, then untag it */
-			uobj_unwirepages(shmseg->_shm_internal, 0, size);
+			uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
 			error = uvm_map_pageable(&p->p_vmspace->vm_map,
 			    shmmap_se->va, shmmap_se->va + size, true, 0);
 			if (error)
 				return EIO;
 			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
 		}
 	}
 
 	return 0;
 }
 
 /*
  * Unmap shared memory.
@@ -720,27 +722,27 @@ sys_shmget(struct lwp *l, const struct s
 	 */
 	shmseg = &shmsegs[segnum];
 	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
 	shm_committed += btoc(size);
 	shm_nused++;
 	lockmem = shm_use_phys;
 	shm_realloc_disable++;
 	mutex_exit(&shm_lock);
 
 	/* Allocate the memory object and lock it if needed */
 	shmseg->_shm_internal = uao_create(size, 0);
 	if (lockmem) {
 		/* Wire the pages and tag it */
-		error = uobj_wirepages(shmseg->_shm_internal, 0, size);
+		error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size);
 		if (error) {
 			uao_detach(shmseg->_shm_internal);
 			mutex_enter(&shm_lock);
 			shm_free_segment(segnum);
 			shm_realloc_disable--;
 			mutex_exit(&shm_lock);
 			return error;
 		}
 	}
 
 	/*
 	 * Please note, while segment is marked, there are no need to hold the
 	 * lock, while setting it (except shm_perm.mode).
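
As the shm_memlock() and sys_shmget() hunks above show, callers round the
segment size up to a page boundary before wiring, since both functions
require page-aligned offsets. A short sketch of that idiom, taken from the
diff itself (PGOFSET is PAGE_SIZE - 1):

	/* Round the byte count up to a whole number of pages. */
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size);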

cvs diff -r1.162.2.2 -r1.162.2.3 src/sys/uvm/uvm_extern.h

--- src/sys/uvm/uvm_extern.h 2010/03/18 04:36:54 1.162.2.2
+++ src/sys/uvm/uvm_extern.h 2010/04/23 21:18:00 1.162.2.3
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_extern.h,v 1.162.2.2 2010/03/18 04:36:54 rmind Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.162.2.3 2010/04/23 21:18:00 rmind Exp $	*/
 
 /*
  *
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -614,27 +614,27 @@ int uvm_fault_internal(struct vm_map *,
 #if defined(KGDB)
 void			uvm_chgkprot(void *, size_t, int);
 #endif
 void			uvm_proc_fork(struct proc *, struct proc *, bool);
 void			uvm_lwp_fork(struct lwp *, struct lwp *,
 			    void *, size_t, void (*)(void *), void *);
 int			uvm_coredump_walkmap(struct proc *,
 			    void *,
 			    int (*)(struct proc *, void *,
 			    struct uvm_coredump_state *), void *);
 void			uvm_proc_exit(struct proc *);
 void			uvm_lwp_exit(struct lwp *);
 void			uvm_init_limits(struct proc *);
-bool			uvm_kernacc(void *, vm_prot_t);
+bool			uvm_kernacc(void *, size_t, vm_prot_t);
 __dead void		uvm_scheduler(void);
 vaddr_t			uvm_uarea_alloc(void);
 void			uvm_uarea_free(vaddr_t);
 vaddr_t			uvm_lwp_getuarea(lwp_t *);
 void			uvm_lwp_setuarea(lwp_t *, vaddr_t);
 int			uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t);
 void			uvm_vsunlock(struct vmspace *, void *, size_t);
 void			uvm_cpu_attach(struct cpu_info *);
 
 
 /* uvm_init.c */
 void			uvm_init(void);
 
@@ -697,30 +697,28 @@ int uvm_mmap(struct vm_map *, vaddr_t
 			    vm_prot_t, vm_prot_t, int,
 			    void *, voff_t, vsize_t);
 vaddr_t			uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t);
 
 /* uvm_mremap.c */
 int			uvm_mremap(struct vm_map *, vaddr_t, vsize_t,
 			    struct vm_map *, vaddr_t *, vsize_t,
 			    struct proc *, int);
 
 /* uvm_object.c */
 void			uvm_obj_init(struct uvm_object *,
 			    const struct uvm_pagerops *, kmutex_t *, u_int);
 void			uvm_obj_destroy(struct uvm_object *, kmutex_t *);
-int			uobj_wirepages(struct uvm_object *uobj, off_t start,
-			    off_t end);
-void			uobj_unwirepages(struct uvm_object *uobj, off_t start,
-			    off_t end);
+int			uvm_obj_wirepages(struct uvm_object *, off_t, off_t);
+void			uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
 
 /* uvm_page.c */
 struct vm_page		*uvm_pagealloc_strat(struct uvm_object *,
 			    voff_t, struct vm_anon *, int, int, int);
 #define	uvm_pagealloc(obj, off, anon, flags) \
 	    uvm_pagealloc_strat((obj), (off), (anon), (flags), \
 				UVM_PGA_STRAT_NORMAL, 0)
 void			uvm_pagereplace(struct vm_page *,
 			    struct vm_page *);
 void			uvm_pagerealloc(struct vm_page *,
 			    struct uvm_object *, voff_t);
 /* Actually, uvm_page_physload takes PF#s which need their own type */
 void			uvm_page_physload(paddr_t, paddr_t, paddr_t,

cvs diff -r1.7.4.1 -r1.7.4.2 src/sys/uvm/uvm_object.c

--- src/sys/uvm/uvm_object.c 2010/03/16 15:38:18 1.7.4.1
+++ src/sys/uvm/uvm_object.c 2010/04/23 21:18:00 1.7.4.2
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_object.c,v 1.7.4.1 2010/03/16 15:38:18 rmind Exp $	*/
+/*	$NetBSD: uvm_object.c,v 1.7.4.2 2010/04/23 21:18:00 rmind Exp $	*/
 
 /*
  * Copyright (c) 2006 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Mindaugas Rasiukevicius.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -27,27 +27,27 @@
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * uvm_object.c: operate with memory objects
  *
  * TODO:
  *  1. Support PG_RELEASED-using objects
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.1 2010/03/16 15:38:18 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.2 2010/04/23 21:18:00 rmind Exp $");
 
 #include "opt_ddb.h"
 
 #include <sys/param.h>
 #include <sys/mutex.h>
 #include <sys/queue.h>
 #include <sys/rb.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_ddb.h>
 
 /* We will fetch this page count per step */
 #define	FETCH_PAGECOUNT			16
@@ -90,27 +90,27 @@ uvm_obj_destroy(struct uvm_object *uo, k
 		mutex_obj_free(uo->vmobjlock);
 	}
 }
 
 /*
  * uobj_wirepages: wire the pages of entire uobj
  *
  * => NOTE: this function should only be used for types of objects
  *  where PG_RELEASED flag is never set (aobj objects)
  * => caller must pass page-aligned start and end values
  */
 
 int
-uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
+uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
 {
 	int i, npages, error;
 	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
 	off_t offset = start, left;
 
 	left = (end - start) >> PAGE_SHIFT;
 
 	mutex_enter(uobj->vmobjlock);
 	while (left) {
 
 		npages = MIN(FETCH_PAGECOUNT, left);
 
 		/* Get the pages */
@@ -173,27 +173,27 @@ error:
 
 	return error;
 }
 
 /*
  * uobj_unwirepages: unwire the pages of entire uobj
  *
  * => NOTE: this function should only be used for types of objects
  *  where PG_RELEASED flag is never set
  * => caller must pass page-aligned start and end values
  */
 
 void
-uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
+uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
 {
 	struct vm_page *pg;
 	off_t offset;
 
 	mutex_enter(uobj->vmobjlock);
 	mutex_enter(&uvm_pageqlock);
 	for (offset = start; offset < end; offset += PAGE_SIZE) {
 		pg = uvm_pagelookup(uobj, offset);
 
 		KASSERT(pg != NULL);
 		KASSERT(!(pg->flags & PG_RELEASED));
 
 		uvm_pageunwire(pg);