Sun Nov 20 10:52:35 2011 UTC
- fix page loaning  (XXX: O->A loaning needs further work)
- add some statistics for page loaning


(yamt)
diff -r1.436.2.1 -r1.436.2.2 src/sys/kern/init_main.c
diff -r1.62.4.2 -r1.62.4.3 src/sys/uvm/uvm.h
diff -r1.176.2.3 -r1.176.2.4 src/sys/uvm/uvm_extern.h
diff -r1.41 -r1.41.4.1 src/sys/uvm/uvm_init.c
diff -r1.81.2.3 -r1.81.2.4 src/sys/uvm/uvm_loan.c
diff -r1.56.4.4 -r1.56.4.5 src/sys/uvm/uvm_meter.c
diff -r1.178.2.6 -r1.178.2.7 src/sys/uvm/uvm_page.c
diff -r1.73.2.6 -r1.73.2.7 src/sys/uvm/uvm_page.h
diff -r1.1.2.4 -r1.1.2.5 src/sys/uvm/uvm_page_status.c
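
A note on the accounting pattern used throughout the diffs below: the new
statistics are kept per CPU and accessed through uvm_cpu_get()/uvm_cpu_put(),
which are not part of this change set.  A minimal sketch consistent with the
open-coded kpreempt_disable()/curcpu() sequences they replace (names and
placement assumed, not taken from the branch sources):

/*
 * Assumed sketch of the per-CPU accessor pair used by the diffs below;
 * it only pins the current CPU while its uvm_cpu counters are updated.
 */
static inline struct uvm_cpu *
uvm_cpu_get(void)
{

	kpreempt_disable();
	return curcpu()->ci_data.cpu_uvm;
}

static inline void
uvm_cpu_put(struct uvm_cpu *ucpu)
{

	KASSERT(ucpu == curcpu()->ci_data.cpu_uvm);
	kpreempt_enable();
}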

cvs diff -r1.436.2.1 -r1.436.2.2 src/sys/kern/init_main.c

--- src/sys/kern/init_main.c 2011/11/02 21:53:59 1.436.2.1
+++ src/sys/kern/init_main.c 2011/11/20 10:52:33 1.436.2.2
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: init_main.c,v 1.436.2.1 2011/11/02 21:53:59 yamt Exp $ */ 1/* $NetBSD: init_main.c,v 1.436.2.2 2011/11/20 10:52:33 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -87,27 +87,27 @@ @@ -87,27 +87,27 @@
87 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 87 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
88 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 88 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
89 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 89 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
90 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 90 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
94 * SUCH DAMAGE. 94 * SUCH DAMAGE.
95 * 95 *
96 * @(#)init_main.c 8.16 (Berkeley) 5/14/95 96 * @(#)init_main.c 8.16 (Berkeley) 5/14/95
97 */ 97 */
98 98
99#include <sys/cdefs.h> 99#include <sys/cdefs.h>
100__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.436.2.1 2011/11/02 21:53:59 yamt Exp $"); 100__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.436.2.2 2011/11/20 10:52:33 yamt Exp $");
101 101
102#include "opt_ddb.h" 102#include "opt_ddb.h"
103#include "opt_ipsec.h" 103#include "opt_ipsec.h"
104#include "opt_modular.h" 104#include "opt_modular.h"
105#include "opt_ntp.h" 105#include "opt_ntp.h"
106#include "opt_pipe.h" 106#include "opt_pipe.h"
107#include "opt_sa.h" 107#include "opt_sa.h"
108#include "opt_syscall_debug.h" 108#include "opt_syscall_debug.h"
109#include "opt_sysv.h" 109#include "opt_sysv.h"
110#include "opt_fileassoc.h" 110#include "opt_fileassoc.h"
111#include "opt_ktrace.h" 111#include "opt_ktrace.h"
112#include "opt_pax.h" 112#include "opt_pax.h"
113#include "opt_compat_netbsd.h" 113#include "opt_compat_netbsd.h"
@@ -314,30 +314,26 @@ main(void) @@ -314,30 +314,26 @@ main(void)
314 314
315 uvm_init(); 315 uvm_init();
316 kcpuset_sysinit(); 316 kcpuset_sysinit();
317 317
318 prop_kern_init(); 318 prop_kern_init();
319 319
320#if ((NKSYMS > 0) || (NDDB > 0) || (NMODULAR > 0)) 320#if ((NKSYMS > 0) || (NDDB > 0) || (NMODULAR > 0))
321 ksyms_init(); 321 ksyms_init();
322#endif 322#endif
323 kprintf_init(); 323 kprintf_init();
324 324
325 percpu_init(); 325 percpu_init();
326 326
327 /* Initialize lock caches. */ 
328 mutex_obj_init(); 
329 rw_obj_init(); 
330 
331 /* Passive serialization. */ 327 /* Passive serialization. */
332 pserialize_init(); 328 pserialize_init();
333 329
334 /* Initialize the extent manager. */ 330 /* Initialize the extent manager. */
335 extent_init(); 331 extent_init();
336 332
337 /* Do machine-dependent initialization. */ 333 /* Do machine-dependent initialization. */
338 cpu_startup(); 334 cpu_startup();
339 335
340 /* Initialize the sysctl subsystem. */ 336 /* Initialize the sysctl subsystem. */
341 sysctl_init(); 337 sysctl_init();
342 338
343 /* Initialize callouts, part 1. */ 339 /* Initialize callouts, part 1. */

cvs diff -r1.62.4.2 -r1.62.4.3 src/sys/uvm/uvm.h

--- src/sys/uvm/uvm.h 2011/11/12 02:54:04 1.62.4.2
+++ src/sys/uvm/uvm.h 2011/11/20 10:52:33 1.62.4.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm.h,v 1.62.4.2 2011/11/12 02:54:04 yamt Exp $ */ 1/* $NetBSD: uvm.h,v 1.62.4.3 2011/11/20 10:52:33 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -78,26 +78,39 @@ struct uvm_cpu { @@ -78,26 +78,39 @@ struct uvm_cpu {
78 int page_free_nextcolor; /* next color to allocate from */ 78 int page_free_nextcolor; /* next color to allocate from */
79 int page_idlezero_next; /* which color to zero next */ 79 int page_idlezero_next; /* which color to zero next */
80 bool page_idle_zero; /* TRUE if we should try to zero 80 bool page_idle_zero; /* TRUE if we should try to zero
81 pages in the idle loop */ 81 pages in the idle loop */
82 int pages[PGFL_NQUEUES]; /* total of pages in page_free */ 82 int pages[PGFL_NQUEUES]; /* total of pages in page_free */
83 u_int emap_gen; /* emap generation number */ 83 u_int emap_gen; /* emap generation number */
84 84
85 /* 85 /*
86 * pagestate 86 * pagestate
87 * [0] non-anonymous 87 * [0] non-anonymous
88 * [1] anonymous (PQ_SWAPBACKED) 88 * [1] anonymous (PQ_SWAPBACKED)
89 */ 89 */
90 int64_t pagestate[2][UVM_PAGE_NUM_STATUS]; 90 int64_t pagestate[2][UVM_PAGE_NUM_STATUS];
 91
 92 int64_t loan_obj; /* O->K loan */
 93 int64_t unloan_obj; /* O->K unloan */
 94 int64_t loanbreak_obj; /* O->K loan resolved on write */
 95 int64_t loanfree_obj; /* O->K loan resolved on free */
 96
 97 int64_t loan_anon; /* A->K loan */
 98 int64_t unloan_anon; /* A->K unloan */
 99 int64_t loanbreak_anon; /* A->K loan resolved on write */
 100 int64_t loanfree_anon; /* A->K loan resolved on free */
 101
 102 int64_t loan_zero; /* O->K loan (zero) */
 103 int64_t unloan_zero; /* O->K unloan (zero) */
91}; 104};
92 105
93/* 106/*
94 * uvm structure (vm global state: collected in one structure for ease 107 * uvm structure (vm global state: collected in one structure for ease
95 * of reference...) 108 * of reference...)
96 */ 109 */
97 110
98struct uvm { 111struct uvm {
99 /* vm_page related parameters */ 112 /* vm_page related parameters */
100 113
101 /* vm_page queues */ 114 /* vm_page queues */
102 struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */ 115 struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
103 bool page_init_done; /* TRUE if uvm_page_init() finished */ 116 bool page_init_done; /* TRUE if uvm_page_init() finished */

cvs diff -r1.176.2.3 -r1.176.2.4 src/sys/uvm/uvm_extern.h

--- src/sys/uvm/uvm_extern.h 2011/11/14 14:24:54 1.176.2.3
+++ src/sys/uvm/uvm_extern.h 2011/11/20 10:52:33 1.176.2.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_extern.h,v 1.176.2.3 2011/11/14 14:24:54 yamt Exp $ */ 1/* $NetBSD: uvm_extern.h,v 1.176.2.4 2011/11/20 10:52:33 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -446,32 +446,46 @@ struct uvmexp_sysctl { @@ -446,32 +446,46 @@ struct uvmexp_sysctl {
446 int64_t pdanscan; 446 int64_t pdanscan;
447 int64_t pdobscan; 447 int64_t pdobscan;
448 int64_t pdreact; 448 int64_t pdreact;
449 int64_t pdbusy; 449 int64_t pdbusy;
450 int64_t pdpageouts; 450 int64_t pdpageouts;
451 int64_t pdpending; 451 int64_t pdpending;
452 int64_t pddeact; 452 int64_t pddeact;
453 int64_t anonpages; 453 int64_t anonpages;
454 int64_t filepages; 454 int64_t filepages;
455 int64_t execpages; 455 int64_t execpages;
456 int64_t colorhit; 456 int64_t colorhit;
457 int64_t colormiss; 457 int64_t colormiss;
458 int64_t ncolors; 458 int64_t ncolors;
 459
459 int64_t possiblydirtypages; 460 int64_t possiblydirtypages;
460 int64_t cleanpages; 461 int64_t cleanpages;
461 int64_t dirtypages; 462 int64_t dirtypages;
462 int64_t possiblydirtyanonpages; 463 int64_t possiblydirtyanonpages;
463 int64_t cleananonpages; 464 int64_t cleananonpages;
464 int64_t dirtyanonpages; 465 int64_t dirtyanonpages;
 466
 467 int64_t loan_obj; /* O->K loan */
 468 int64_t unloan_obj; /* O->K unloan */
 469 int64_t loanbreak_obj; /* O->K loan resolved on write */
 470 int64_t loanfree_obj; /* O->K loan resolved on free */
 471
 472 int64_t loan_anon; /* A->K loan */
 473 int64_t unloan_anon; /* A->K unloan */
 474 int64_t loanbreak_anon; /* A->K loan resolved on write */
 475 int64_t loanfree_anon; /* A->K loan resolved on free */
 476
 477 int64_t loan_zero; /* O->K loan (zero) */
 478 int64_t unloan_zero; /* O->K unloan (zero) */
465}; 479};
466 480
467#ifdef _KERNEL 481#ifdef _KERNEL
468/* we need this before including uvm_page.h on some platforms */ 482/* we need this before including uvm_page.h on some platforms */
469extern struct uvmexp uvmexp; 483extern struct uvmexp uvmexp;
470/* MD code needs this without including <uvm/uvm.h> */ 484/* MD code needs this without including <uvm/uvm.h> */
471extern bool vm_page_zero_enable; 485extern bool vm_page_zero_enable;
472#endif 486#endif
473 487
474/* 488/*
475 * Finally, bring in standard UVM headers. 489 * Finally, bring in standard UVM headers.
476 */ 490 */
477#include <sys/vmmeter.h> 491#include <sys/vmmeter.h>

cvs diff -r1.41 -r1.41.4.1 src/sys/uvm/uvm_init.c

--- src/sys/uvm/uvm_init.c 2011/04/24 03:56:50 1.41
+++ src/sys/uvm/uvm_init.c 2011/11/20 10:52:33 1.41.4.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_init.c,v 1.41 2011/04/24 03:56:50 rmind Exp $ */ 1/* $NetBSD: uvm_init.c,v 1.41.4.1 2011/11/20 10:52:33 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -22,36 +22,38 @@ @@ -22,36 +22,38 @@
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * 26 *
27 * from: Id: uvm_init.c,v 1.1.2.3 1998/02/06 05:15:27 chs Exp 27 * from: Id: uvm_init.c,v 1.1.2.3 1998/02/06 05:15:27 chs Exp
28 */ 28 */
29 29
30/* 30/*
31 * uvm_init.c: init the vm system. 31 * uvm_init.c: init the vm system.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.41 2011/04/24 03:56:50 rmind Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.41.4.1 2011/11/20 10:52:33 yamt Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/debug.h> 39#include <sys/debug.h>
40#include <sys/file.h> 40#include <sys/file.h>
41#include <sys/filedesc.h> 41#include <sys/filedesc.h>
42#include <sys/resourcevar.h> 42#include <sys/resourcevar.h>
43#include <sys/kmem.h> 43#include <sys/kmem.h>
44#include <sys/mman.h> 44#include <sys/mman.h>
 45#include <sys/mutex.h>
 46#include <sys/rwlock.h>
45#include <sys/vnode.h> 47#include <sys/vnode.h>
46 48
47#include <uvm/uvm.h> 49#include <uvm/uvm.h>
48#include <uvm/uvm_pdpolicy.h> 50#include <uvm/uvm_pdpolicy.h>
49#include <uvm/uvm_readahead.h> 51#include <uvm/uvm_readahead.h>
50 52
51/* 53/*
52 * struct uvm: we store most global vars in this structure to make them 54 * struct uvm: we store most global vars in this structure to make them
53 * easier to spot... 55 * easier to spot...
54 */ 56 */
55 57
56struct uvm uvm; /* decl */ 58struct uvm uvm; /* decl */
57struct uvmexp uvmexp; /* decl */ 59struct uvmexp uvmexp; /* decl */
@@ -147,27 +149,35 @@ uvm_init(void) @@ -147,27 +149,35 @@ uvm_init(void)
147 * Initialize pools. This must be done before anyone manipulates 149 * Initialize pools. This must be done before anyone manipulates
148 * any vm_maps because we use a pool for some map entry structures. 150 * any vm_maps because we use a pool for some map entry structures.
149 */ 151 */
150 152
151 pool_subsystem_init(); 153 pool_subsystem_init();
152 154
153 /* 155 /*
154 * init slab memory allocator kmem(9). 156 * init slab memory allocator kmem(9).
155 */ 157 */
156 158
157 kmem_init(); 159 kmem_init();
158 160
159 /* 161 /*
 162 * Initialize lock caches.
 163 */
 164
 165 mutex_obj_init();
 166 rw_obj_init();
 167
 168 /*
160 * Initialize the uvm_loan() facility. 169 * Initialize the uvm_loan() facility.
 170 * REQUIRE: mutex_obj_init
161 */ 171 */
162 172
163 uvm_loan_init(); 173 uvm_loan_init();
164 174
165 /* 175 /*
166 * init emap subsystem. 176 * init emap subsystem.
167 */ 177 */
168 178
169 uvm_emap_sysinit(); 179 uvm_emap_sysinit();
170 180
171 /* 181 /*
172 * the VM system is now up! now that kmem is up we can resize the 182 * the VM system is now up! now that kmem is up we can resize the
173 * <obj,off> => <page> hash table for general use and enable paging 183 * <obj,off> => <page> hash table for general use and enable paging

cvs diff -r1.81.2.3 -r1.81.2.4 src/sys/uvm/uvm_loan.c

--- src/sys/uvm/uvm_loan.c 2011/11/18 00:57:33 1.81.2.3
+++ src/sys/uvm/uvm_loan.c 2011/11/20 10:52:33 1.81.2.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $ */ 1/* $NetBSD: uvm_loan.c,v 1.81.2.4 2011/11/20 10:52:33 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -22,27 +22,27 @@ @@ -22,27 +22,27 @@
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * 26 *
27 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp 27 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
28 */ 28 */
29 29
30/* 30/*
31 * uvm_loan.c: page loanout handler 31 * uvm_loan.c: page loanout handler
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.4 2011/11/20 10:52:33 yamt Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/mman.h> 40#include <sys/mman.h>
41 41
42#include <uvm/uvm.h> 42#include <uvm/uvm.h>
43 43
44/* 44/*
45 * "loaned" pages are pages which are (read-only, copy-on-write) loaned 45 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
46 * from the VM system to other parts of the kernel. this allows page 46 * from the VM system to other parts of the kernel. this allows page
47 * copying to be avoided (e.g. you can loan pages from objs/anons to 47 * copying to be avoided (e.g. you can loan pages from objs/anons to
48 * the mbuf system). 48 * the mbuf system).
@@ -330,26 +330,27 @@ fail: @@ -330,26 +330,27 @@ fail:
330 * 330 *
331 * => called with map, amap, uobj locked 331 * => called with map, amap, uobj locked
332 * => return value: 332 * => return value:
333 * -1 = fatal error, everything is unlocked, abort. 333 * -1 = fatal error, everything is unlocked, abort.
334 * 0 = lookup in ufi went stale, everything unlocked, relookup and 334 * 0 = lookup in ufi went stale, everything unlocked, relookup and
335 * try again 335 * try again
336 * 1 = got it, everything still locked 336 * 1 = got it, everything still locked
337 */ 337 */
338 338
339int 339int
340uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags, 340uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
341 struct vm_anon *anon) 341 struct vm_anon *anon)
342{ 342{
 343 struct uvm_cpu *ucpu;
343 struct vm_page *pg; 344 struct vm_page *pg;
344 int error; 345 int error;
345 346
346 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist); 347 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
347 348
348 /* 349 /*
349 * if we are loaning to "another" anon then it is easy, we just 350 * if we are loaning to "another" anon then it is easy, we just
350 * bump the reference count on the current anon and return a 351 * bump the reference count on the current anon and return a
351 * pointer to it (it becomes copy-on-write shared). 352 * pointer to it (it becomes copy-on-write shared).
352 */ 353 */
353 354
354 if (flags & UVM_LOAN_TOANON) { 355 if (flags & UVM_LOAN_TOANON) {
355 KASSERT(mutex_owned(anon->an_lock)); 356 KASSERT(mutex_owned(anon->an_lock));
@@ -418,42 +419,48 @@ uvm_loananon(struct uvm_faultinfo *ufi,  @@ -418,42 +419,48 @@ uvm_loananon(struct uvm_faultinfo *ufi,
418 } 419 }
419 if (pg->loan_count == 0) { 420 if (pg->loan_count == 0) {
420 pmap_page_protect(pg, VM_PROT_READ); 421 pmap_page_protect(pg, VM_PROT_READ);
421 } 422 }
422 pg->loan_count++; 423 pg->loan_count++;
423 uvm_pageactivate(pg); 424 uvm_pageactivate(pg);
424 mutex_exit(&uvm_pageqlock); 425 mutex_exit(&uvm_pageqlock);
425 **output = pg; 426 **output = pg;
426 (*output)++; 427 (*output)++;
427 428
428 /* unlock and return success */ 429 /* unlock and return success */
429 if (pg->uobject) 430 if (pg->uobject)
430 mutex_exit(pg->uobject->vmobjlock); 431 mutex_exit(pg->uobject->vmobjlock);
 432
 433 ucpu = uvm_cpu_get();
 434 ucpu->loan_anon++;
 435 uvm_cpu_put(ucpu);
 436
431 UVMHIST_LOG(loanhist, "->K done", 0,0,0,0); 437 UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
432 return (1); 438 return (1);
433} 439}
434 440
435/* 441/*
436 * uvm_loanpage: loan out pages to kernel (->K) 442 * uvm_loanpage: loan out pages to kernel (->K)
437 * 443 *
438 * => pages should be object-owned and the object should be locked. 444 * => pages should be object-owned and the object should be locked.
439 * => in the case of error, the object might be unlocked and relocked. 445 * => in the case of error, the object might be unlocked and relocked.
440 * => caller should busy the pages beforehand. 446 * => caller should busy the pages beforehand.
441 * => pages will be unbusied. 447 * => pages will be unbusied.
442 * => fail with EBUSY if meet a wired page. 448 * => fail with EBUSY if meet a wired page.
443 */ 449 */
444static int 450static int
445uvm_loanpage(struct vm_page **pgpp, int npages) 451uvm_loanpage(struct vm_page **pgpp, int npages)
446{ 452{
 453 struct uvm_cpu *ucpu;
447 int i; 454 int i;
448 int error = 0; 455 int error = 0;
449 456
450 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist); 457 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
451 458
452 for (i = 0; i < npages; i++) { 459 for (i = 0; i < npages; i++) {
453 struct vm_page *pg = pgpp[i]; 460 struct vm_page *pg = pgpp[i];
454 461
455 KASSERT(pg->uobject != NULL); 462 KASSERT(pg->uobject != NULL);
456 KASSERT(pg->uobject == pgpp[0]->uobject); 463 KASSERT(pg->uobject == pgpp[0]->uobject);
457 KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT))); 464 KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
458 KASSERT(mutex_owned(pg->uobject->vmobjlock)); 465 KASSERT(mutex_owned(pg->uobject->vmobjlock));
459 KASSERT(pg->flags & PG_BUSY); 466 KASSERT(pg->flags & PG_BUSY);
@@ -465,35 +472,40 @@ uvm_loanpage(struct vm_page **pgpp, int  @@ -465,35 +472,40 @@ uvm_loanpage(struct vm_page **pgpp, int
465 error = EBUSY; 472 error = EBUSY;
466 break; 473 break;
467 } 474 }
468 if (pg->loan_count == 0) { 475 if (pg->loan_count == 0) {
469 pmap_page_protect(pg, VM_PROT_READ); 476 pmap_page_protect(pg, VM_PROT_READ);
470 } 477 }
471 pg->loan_count++; 478 pg->loan_count++;
472 uvm_pageactivate(pg); 479 uvm_pageactivate(pg);
473 mutex_exit(&uvm_pageqlock); 480 mutex_exit(&uvm_pageqlock);
474 } 481 }
475 482
476 uvm_page_unbusy(pgpp, npages); 483 uvm_page_unbusy(pgpp, npages);
477 484
478 if (error) { 485 if (i > 0) {
479 /* 486 ucpu = uvm_cpu_get();
480 * backout what we've done 487 ucpu->loan_obj += i;
481 */ 488 uvm_cpu_put(ucpu);
482 kmutex_t *slock = pgpp[0]->uobject->vmobjlock; 489 if (error) {
 490 /*
 491 * backout what we've done
 492 */
 493 kmutex_t *slock = pgpp[0]->uobject->vmobjlock;
483 494
484 mutex_exit(slock); 495 mutex_exit(slock);
485 uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE); 496 uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
486 mutex_enter(slock); 497 mutex_enter(slock);
 498 }
487 } 499 }
488 500
489 UVMHIST_LOG(loanhist, "done %d", error,0,0,0); 501 UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
490 return error; 502 return error;
491} 503}
492 504
493/* 505/*
494 * XXX UBC temp limit 506 * XXX UBC temp limit
495 * number of pages to get at once. 507 * number of pages to get at once.
496 * should be <= MAX_READ_AHEAD in genfs_vnops.c 508 * should be <= MAX_READ_AHEAD in genfs_vnops.c
497 */ 509 */
498#define UVM_LOAN_GET_CHUNK 16 510#define UVM_LOAN_GET_CHUNK 16
499 511
@@ -819,27 +831,26 @@ fail: @@ -819,27 +831,26 @@ fail:
819 831
820/* 832/*
821 * uvm_loanzero: loan a zero-fill page out 833 * uvm_loanzero: loan a zero-fill page out
822 * 834 *
823 * => called with map, amap, uobj locked 835 * => called with map, amap, uobj locked
824 * => return value: 836 * => return value:
825 * -1 = fatal error, everything is unlocked, abort. 837 * -1 = fatal error, everything is unlocked, abort.
826 * 0 = lookup in ufi went stale, everything unlocked, relookup and 838 * 0 = lookup in ufi went stale, everything unlocked, relookup and
827 * try again 839 * try again
828 * 1 = got it, everything still locked 840 * 1 = got it, everything still locked
829 */ 841 */
830 842
831static struct uvm_object uvm_loanzero_object; 843static struct uvm_object uvm_loanzero_object;
832static kmutex_t uvm_loanzero_lock; 
833 844
834static int 845static int
835uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags) 846uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
836{ 847{
837 struct vm_page *pg; 848 struct vm_page *pg;
838 struct vm_amap *amap = ufi->entry->aref.ar_amap; 849 struct vm_amap *amap = ufi->entry->aref.ar_amap;
839 850
840 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist); 851 UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
841again: 852again:
842 mutex_enter(uvm_loanzero_object.vmobjlock); 853 mutex_enter(uvm_loanzero_object.vmobjlock);
843 854
844 /* 855 /*
845 * first, get ahold of our single zero page. 856 * first, get ahold of our single zero page.
@@ -861,32 +872,37 @@ again: @@ -861,32 +872,37 @@ again:
861 goto again; 872 goto again;
862 } 873 }
863 874
864 /* got a zero'd page. */ 875 /* got a zero'd page. */
865 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE); 876 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
866 pg->flags |= PG_RDONLY; 877 pg->flags |= PG_RDONLY;
867 mutex_enter(&uvm_pageqlock); 878 mutex_enter(&uvm_pageqlock);
868 uvm_pageactivate(pg); 879 uvm_pageactivate(pg);
869 mutex_exit(&uvm_pageqlock); 880 mutex_exit(&uvm_pageqlock);
870 UVM_PAGE_OWN(pg, NULL); 881 UVM_PAGE_OWN(pg, NULL);
871 } 882 }
872 883
873 if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */ 884 if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
 885 struct uvm_cpu *ucpu;
 886
874 mutex_enter(&uvm_pageqlock); 887 mutex_enter(&uvm_pageqlock);
875 pg->loan_count++; 888 pg->loan_count++;
876 mutex_exit(&uvm_pageqlock); 889 mutex_exit(&uvm_pageqlock);
877 mutex_exit(uvm_loanzero_object.vmobjlock); 890 mutex_exit(uvm_loanzero_object.vmobjlock);
878 **output = pg; 891 **output = pg;
879 (*output)++; 892 (*output)++;
 893 ucpu = uvm_cpu_get();
 894 ucpu->loan_zero++;
 895 uvm_cpu_put(ucpu);
880 return (1); 896 return (1);
881 } 897 }
882 898
883#ifdef notdef 899#ifdef notdef
884 /* 900 /*
885 * loaning to an anon. check to see if there is already an anon 901 * loaning to an anon. check to see if there is already an anon
886 * associated with this page. if so, then just return a reference 902 * associated with this page. if so, then just return a reference
887 * to this object. 903 * to this object.
888 */ 904 */
889 905
890 if (pg->uanon) { 906 if (pg->uanon) {
891 anon = pg->uanon; 907 anon = pg->uanon;
892 mutex_enter(&anon->an_lock); 908 mutex_enter(&anon->an_lock);
@@ -955,26 +971,30 @@ uvm_unloananon(struct vm_anon **aloans,  @@ -955,26 +971,30 @@ uvm_unloananon(struct vm_anon **aloans,
955 * uvm_unloanpage: kill loans on pages loaned out to the kernel 971 * uvm_unloanpage: kill loans on pages loaned out to the kernel
956 * 972 *
957 * => we expect all our resources to be unlocked 973 * => we expect all our resources to be unlocked
958 */ 974 */
959 975
960static void 976static void
961uvm_unloanpage(struct vm_page **ploans, int npages) 977uvm_unloanpage(struct vm_page **ploans, int npages)
962{ 978{
963 struct vm_page *pg; 979 struct vm_page *pg;
964 kmutex_t *slock; 980 kmutex_t *slock;
965 981
966 mutex_enter(&uvm_pageqlock); 982 mutex_enter(&uvm_pageqlock);
967 while (npages-- > 0) { 983 while (npages-- > 0) {
 984 struct uvm_object *obj;
 985 struct vm_anon *anon;
 986 struct uvm_cpu *ucpu;
 987
968 pg = *ploans++; 988 pg = *ploans++;
969 989
970 /* 990 /*
971 * do a little dance to acquire the object or anon lock 991 * do a little dance to acquire the object or anon lock
972 * as appropriate. we are locking in the wrong order, 992 * as appropriate. we are locking in the wrong order,
973 * so we have to do a try-lock here. 993 * so we have to do a try-lock here.
974 */ 994 */
975 995
976 slock = NULL; 996 slock = NULL;
977 while (pg->uobject != NULL || pg->uanon != NULL) { 997 while (pg->uobject != NULL || pg->uanon != NULL) {
978 if (pg->uobject != NULL) { 998 if (pg->uobject != NULL) {
979 slock = pg->uobject->vmobjlock; 999 slock = pg->uobject->vmobjlock;
980 } else { 1000 } else {
@@ -988,42 +1008,55 @@ uvm_unloanpage(struct vm_page **ploans,  @@ -988,42 +1008,55 @@ uvm_unloanpage(struct vm_page **ploans,
988 } 1008 }
989 1009
990 /* 1010 /*
991 * drop our loan. if page is owned by an anon but 1011 * drop our loan. if page is owned by an anon but
992 * PQ_ANON is not set, the page was loaned to the anon 1012 * PQ_ANON is not set, the page was loaned to the anon
993 * from an object which dropped ownership, so resolve 1013 * from an object which dropped ownership, so resolve
994 * this by turning the anon's loan into real ownership 1014 * this by turning the anon's loan into real ownership
995 * (ie. decrement loan_count again and set PQ_ANON). 1015 * (ie. decrement loan_count again and set PQ_ANON).
996 * after all this, if there are no loans left, put the 1016 * after all this, if there are no loans left, put the
997 * page back a paging queue (if the page is owned by 1017 * page back a paging queue (if the page is owned by
998 * an anon) or free it (if the page is now unowned). 1018 * an anon) or free it (if the page is now unowned).
999 */ 1019 */
1000 1020
 1021 obj = pg->uobject;
 1022 anon = pg->uanon;
1001 KASSERT(pg->loan_count > 0); 1023 KASSERT(pg->loan_count > 0);
1002 pg->loan_count--; 1024 pg->loan_count--;
1003 if (pg->uobject == NULL && pg->uanon != NULL && 1025 if (obj == NULL && anon != NULL &&
1004 (pg->pqflags & PQ_ANON) == 0) { 1026 (pg->pqflags & PQ_ANON) == 0) {
1005 KASSERT(pg->loan_count > 0); 1027 KASSERT(pg->loan_count > 0);
1006 pg->loan_count--; 1028 pg->loan_count--;
1007 pg->pqflags |= PQ_ANON; 1029 pg->pqflags |= PQ_ANON;
1008 } 1030 }
1009 if (pg->loan_count == 0 && pg->uobject == NULL && 1031 if (pg->loan_count == 0 && obj == NULL && anon == NULL) {
1010 pg->uanon == NULL) { 
1011 KASSERT((pg->flags & PG_BUSY) == 0); 1032 KASSERT((pg->flags & PG_BUSY) == 0);
1012 uvm_pagefree(pg); 1033 uvm_pagefree(pg);
1013 } 1034 }
1014 if (slock != NULL) { 1035 if (slock != NULL) {
1015 mutex_exit(slock); 1036 mutex_exit(slock);
1016 } 1037 }
 1038 ucpu = uvm_cpu_get();
 1039 if (obj != NULL) {
 1040 KASSERT(anon == NULL); /* XXX no O->A loan */
 1041 if (obj == &uvm_loanzero_object) {
 1042 ucpu->unloan_zero++;
 1043 } else {
 1044 ucpu->unloan_obj++;
 1045 }
 1046 } else if (anon != NULL) {
 1047 ucpu->unloan_anon++;
 1048 }
 1049 uvm_cpu_put(ucpu);
1017 } 1050 }
1018 mutex_exit(&uvm_pageqlock); 1051 mutex_exit(&uvm_pageqlock);
1019} 1052}
1020 1053
1021/* 1054/*
1022 * uvm_unloan: kill loans on pages or anons. 1055 * uvm_unloan: kill loans on pages or anons.
1023 */ 1056 */
1024 1057
1025void 1058void
1026uvm_unloan(void *v, int npages, int flags) 1059uvm_unloan(void *v, int npages, int flags)
1027{ 1060{
1028 if (flags & UVM_LOAN_TOANON) { 1061 if (flags & UVM_LOAN_TOANON) {
1029 uvm_unloananon(v, npages); 1062 uvm_unloananon(v, npages);
@@ -1077,146 +1110,147 @@ ulz_put(struct uvm_object *uobj, voff_t  @@ -1077,146 +1110,147 @@ ulz_put(struct uvm_object *uobj, voff_t
1077 1110
1078static const struct uvm_pagerops ulz_pager = { 1111static const struct uvm_pagerops ulz_pager = {
1079 .pgo_put = ulz_put, 1112 .pgo_put = ulz_put,
1080}; 1113};
1081 1114
1082/* 1115/*
1083 * uvm_loan_init(): initialize the uvm_loan() facility. 1116 * uvm_loan_init(): initialize the uvm_loan() facility.
1084 */ 1117 */
1085 1118
1086void 1119void
1087uvm_loan_init(void) 1120uvm_loan_init(void)
1088{ 1121{
1089 1122
1090 mutex_init(&uvm_loanzero_lock, MUTEX_DEFAULT, IPL_NONE); 1123 uvm_obj_init(&uvm_loanzero_object, &ulz_pager, true, 0);
1091 uvm_obj_init(&uvm_loanzero_object, &ulz_pager, false, 0); 
1092 uvm_obj_setlock(&uvm_loanzero_object, &uvm_loanzero_lock); 
1093 
1094 UVMHIST_INIT(loanhist, 300); 1124 UVMHIST_INIT(loanhist, 300);
1095} 1125}
1096 1126
1097/* 1127/*
1098 * uvm_loanbreak: break loan on a uobj page 1128 * uvm_loanbreak: break loan on a uobj page
1099 * 1129 *
1100 * => called with uobj locked 1130 * => called with uobj locked
1101 * => the page should be busy 1131 * => the page should be busy
1102 * => return value: 1132 * => return value:
1103 * newly allocated page if succeeded 1133 * newly allocated page if succeeded
1104 */ 1134 */
1105struct vm_page * 1135struct vm_page *
1106uvm_loanbreak(struct vm_page *uobjpage) 1136uvm_loanbreak(struct vm_page *uobjpage)
1107{ 1137{
 1138 struct uvm_cpu *ucpu;
1108 struct vm_page *pg; 1139 struct vm_page *pg;
1109#ifdef DIAGNOSTIC 1140#ifdef DIAGNOSTIC
1110 struct uvm_object *uobj = uobjpage->uobject; 1141 struct uvm_object *uobj = uobjpage->uobject;
1111#endif 1142#endif
 1143 const unsigned int count = uobjpage->loan_count;
1112 1144
1113 KASSERT(uobj != NULL); 1145 KASSERT(uobj != NULL);
1114 KASSERT(mutex_owned(uobj->vmobjlock)); 1146 KASSERT(mutex_owned(uobj->vmobjlock));
1115 KASSERT(uobjpage->flags & PG_BUSY); 1147 KASSERT(uobjpage->flags & PG_BUSY);
 1148 KASSERT(count > 0);
1116 1149
1117 /* alloc new un-owned page */ 1150 /* alloc new un-owned page */
1118 pg = uvm_pagealloc(NULL, 0, NULL, 0); 1151 pg = uvm_pagealloc(NULL, 0, NULL, 0);
1119 if (pg == NULL) 1152 if (pg == NULL)
1120 return NULL; 1153 return NULL;
1121 1154
1122 /* 1155 /*
1123 * copy the data from the old page to the new 1156 * copy the data from the old page to the new
1124 * one and clear the fake flags on the new page (keep it busy). 1157 * one and clear the fake flags on the new page (keep it busy).
1125 * force a reload of the old page by clearing it from all 1158 * force a reload of the old page by clearing it from all
1126 * pmaps. 1159 * pmaps.
1127 * transfer dirtiness of the old page to the new page. 
1128 * then lock the page queues to rename the pages. 1160 * then lock the page queues to rename the pages.
1129 */ 1161 */
1130 1162
1131 uvm_pagecopy(uobjpage, pg); /* old -> new */ 1163 uvm_pagecopy(uobjpage, pg); /* old -> new */
1132 pg->flags &= ~PG_FAKE; 1164 pg->flags &= ~PG_FAKE;
 1165 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
1133 pmap_page_protect(uobjpage, VM_PROT_NONE); 1166 pmap_page_protect(uobjpage, VM_PROT_NONE);
1134 if (uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_UNKNOWN && 
1135 !pmap_clear_modify(uobjpage)) { 
1136 uvm_pagemarkdirty(uobjpage, UVM_PAGE_STATUS_CLEAN); 
1137 } 
1138 if (uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_CLEAN) { 
1139 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_CLEAN); 
1140 } else { 
1141 /* uvm_pagecopy marked it dirty */ 
1142 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY); 
1143 /* a object with a dirty page should be dirty. */ 
1144 KASSERT(!UVM_OBJ_IS_CLEAN(uobj)); 
1145 } 
1146 if (uobjpage->flags & PG_WANTED) 1167 if (uobjpage->flags & PG_WANTED)
1147 wakeup(uobjpage); 1168 wakeup(uobjpage);
1148 /* uobj still locked */ 1169 /* uobj still locked */
1149 uobjpage->flags &= ~(PG_WANTED|PG_BUSY); 1170 uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
1150 UVM_PAGE_OWN(uobjpage, NULL); 1171 UVM_PAGE_OWN(uobjpage, NULL);
1151 1172
1152 mutex_enter(&uvm_pageqlock); 
1153 
1154 /* 1173 /*
1155 * replace uobjpage with new page. 1174 * replace uobjpage with new page.
 1175 *
 1176 * this will update the page dirtiness statistics.
1156 */ 1177 */
1157 1178
1158 uvm_pagereplace(uobjpage, pg); 1179 uvm_pagereplace(uobjpage, pg);
1159 1180
 1181 mutex_enter(&uvm_pageqlock);
 1182 KASSERT(uobjpage->uanon == NULL); /* XXX no O->A loan */
 1183
1160 /* 1184 /*
1161 * if the page is no longer referenced by 1185 * if the page is no longer referenced by
1162 * an anon (i.e. we are breaking an O->K 1186 * an anon (i.e. we are breaking an O->K
1163 * loan), then remove it from any pageq's. 1187 * loan), then remove it from any pageq's.
1164 */ 1188 */
1165 if (uobjpage->uanon == NULL) 1189 if (uobjpage->uanon == NULL)
1166 uvm_pagedequeue(uobjpage); 1190 uvm_pagedequeue(uobjpage);
1167 1191
1168 /* 1192 /*
1169 * at this point we have absolutely no 1193 * at this point we have absolutely no
1170 * control over uobjpage 1194 * control over uobjpage
1171 */ 1195 */
1172 1196
1173 /* install new page */ 1197 /* install new page */
1174 uvm_pageactivate(pg); 1198 uvm_pageactivate(pg);
1175 mutex_exit(&uvm_pageqlock); 1199 mutex_exit(&uvm_pageqlock);
1176 1200
1177 /* 1201 /*
1178 * done! loan is broken and "pg" is 1202 * done! loan is broken and "pg" is
1179 * PG_BUSY. it can now replace uobjpage. 1203 * PG_BUSY. it can now replace uobjpage.
1180 */ 1204 */
1181 1205
 1206 ucpu = uvm_cpu_get();
 1207 ucpu->loanbreak_obj += count;
 1208 uvm_cpu_put(ucpu);
1182 return pg; 1209 return pg;
1183} 1210}
1184 1211
1185int 1212int
1186uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj) 1213uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
1187{ 1214{
 1215 struct uvm_cpu *ucpu;
1188 struct vm_page *pg; 1216 struct vm_page *pg;
 1217 unsigned int oldstatus;
 1218 const unsigned int count = anon->an_page->loan_count;
1189 1219
1190 KASSERT(mutex_owned(anon->an_lock)); 1220 KASSERT(mutex_owned(anon->an_lock));
1191 KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock)); 1221 KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
 1222 KASSERT(count > 0);
1192 1223
1193 /* get new un-owned replacement page */ 1224 /* get new un-owned replacement page */
1194 pg = uvm_pagealloc(NULL, 0, NULL, 0); 1225 pg = uvm_pagealloc(NULL, 0, NULL, 0);
1195 if (pg == NULL) { 1226 if (pg == NULL) {
1196 return ENOMEM; 1227 return ENOMEM;
1197 } 1228 }
1198 1229
1199 /* copy old -> new */ 1230 /* copy old -> new */
1200 uvm_pagecopy(anon->an_page, pg); 1231 uvm_pagecopy(anon->an_page, pg);
 1232 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
1201 1233
1202 /* force reload */ 1234 /* force reload */
1203 pmap_page_protect(anon->an_page, VM_PROT_NONE); 1235 pmap_page_protect(anon->an_page, VM_PROT_NONE);
 1236 oldstatus = uvm_pagegetdirty(anon->an_page);
1204 mutex_enter(&uvm_pageqlock); /* KILL loan */ 1237 mutex_enter(&uvm_pageqlock); /* KILL loan */
1205 1238
1206 anon->an_page->uanon = NULL; 1239 anon->an_page->uanon = NULL;
1207 /* in case we owned */ 1240 /* in case we owned */
1208 anon->an_page->pqflags &= ~PQ_ANON; 1241 anon->an_page->pqflags &= ~PQ_ANON;
1209 1242
 1243 KASSERT(uobj == NULL); /* XXX O->A loan is currently broken */
1210 if (uobj) { 1244 if (uobj) {
1211 /* if we were receiver of loan */ 1245 /* if we were receiver of loan */
1212 anon->an_page->loan_count--; 1246 anon->an_page->loan_count--;
1213 } else { 1247 } else {
1214 /* 1248 /*
1215 * we were the lender (A->K); need to remove the page from 1249 * we were the lender (A->K); need to remove the page from
1216 * pageq's. 1250 * pageq's.
1217 */ 1251 */
1218 uvm_pagedequeue(anon->an_page); 1252 uvm_pagedequeue(anon->an_page);
1219 } 1253 }
1220 1254
1221 if (uobj) { 1255 if (uobj) {
1222 mutex_exit(uobj->vmobjlock); 1256 mutex_exit(uobj->vmobjlock);
@@ -1224,16 +1258,22 @@ uvm_loanbreak_anon(struct vm_anon *anon, @@ -1224,16 +1258,22 @@ uvm_loanbreak_anon(struct vm_anon *anon,
1224 1258
1225 /* install new page in anon */ 1259 /* install new page in anon */
1226 anon->an_page = pg; 1260 anon->an_page = pg;
1227 pg->uanon = anon; 1261 pg->uanon = anon;
1228 pg->pqflags |= PQ_ANON; 1262 pg->pqflags |= PQ_ANON;
1229 1263
1230 uvm_pageactivate(pg); 1264 uvm_pageactivate(pg);
1231 mutex_exit(&uvm_pageqlock); 1265 mutex_exit(&uvm_pageqlock);
1232 1266
1233 pg->flags &= ~(PG_BUSY|PG_FAKE); 1267 pg->flags &= ~(PG_BUSY|PG_FAKE);
1234 UVM_PAGE_OWN(pg, NULL); 1268 UVM_PAGE_OWN(pg, NULL);
1235 1269
1236 /* done! */ 1270 /* done! */
1237 1271 if (uobj == NULL) {
 1272 ucpu = uvm_cpu_get();
 1273 ucpu->loanbreak_anon += count;
 1274 ucpu->pagestate[1][oldstatus]--;
 1275 ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY]++;
 1276 uvm_cpu_put(ucpu);
 1277 }
1238 return 0; 1278 return 0;
1239} 1279}
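
For context, the counters added in this file instrument the existing
loan-to-kernel (->K) path that callers drive via uvm_loan()/uvm_unloan().
A hypothetical, minimal use of that path (the uvm_loan() signature is assumed
from its callers; it is not shown in this diff):

/*
 * Hypothetical example only: loan the single page backing "uva" to the
 * kernel, use it read-only, then drop the loan.  "map" and "uva" are
 * placeholders for a user map and a page-aligned user address.
 */
static int
loan_one_page_example(struct vm_map *map, vaddr_t uva, struct vm_page **pgpp)
{
	int error;

	/* takes an O->K or A->K loan; bumps loan_obj/loan_anon above */
	error = uvm_loan(map, uva, PAGE_SIZE, pgpp, UVM_LOAN_TOPAGE);
	if (error)
		return error;

	/* ... use *pgpp read-only, e.g. hand it to the mbuf system ... */

	/* drops the loan again; bumps unloan_obj/unloan_anon */
	uvm_unloan(pgpp, 1, UVM_LOAN_TOPAGE);
	return 0;
}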

cvs diff -r1.56.4.4 -r1.56.4.5 src/sys/uvm/uvm_meter.c

--- src/sys/uvm/uvm_meter.c 2011/11/14 14:24:54 1.56.4.4
+++ src/sys/uvm/uvm_meter.c 2011/11/20 10:52:34 1.56.4.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_meter.c,v 1.56.4.4 2011/11/14 14:24:54 yamt Exp $ */ 1/* $NetBSD: uvm_meter.c,v 1.56.4.5 2011/11/20 10:52:34 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1982, 1986, 1989, 1993 5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California. 6 * The Regents of the University of California.
7 * 7 *
8 * All rights reserved. 8 * All rights reserved.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -26,27 +26,27 @@ @@ -26,27 +26,27 @@
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE. 32 * SUCH DAMAGE.
33 * 33 *
34 * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94 34 * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
35 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp 35 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
36 */ 36 */
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.4 2011/11/14 14:24:54 yamt Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.5 2011/11/20 10:52:34 yamt Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43#include <sys/cpu.h> 43#include <sys/cpu.h>
44#include <sys/proc.h> 44#include <sys/proc.h>
45#include <sys/kernel.h> 45#include <sys/kernel.h>
46#include <sys/sysctl.h> 46#include <sys/sysctl.h>
47 47
48#include <uvm/uvm.h> 48#include <uvm/uvm.h>
49#include <uvm/uvm_pdpolicy.h> 49#include <uvm/uvm_pdpolicy.h>
50 50
51/* 51/*
52 * maxslp: ???? XXXCDC 52 * maxslp: ???? XXXCDC
@@ -166,37 +166,54 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS) @@ -166,37 +166,54 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
166 u.pdobscan = uvmexp.pdobscan; 166 u.pdobscan = uvmexp.pdobscan;
167 u.pdreact = uvmexp.pdreact; 167 u.pdreact = uvmexp.pdreact;
168 u.pdbusy = uvmexp.pdbusy; 168 u.pdbusy = uvmexp.pdbusy;
169 u.pdpageouts = uvmexp.pdpageouts; 169 u.pdpageouts = uvmexp.pdpageouts;
170 u.pdpending = uvmexp.pdpending; 170 u.pdpending = uvmexp.pdpending;
171 u.pddeact = uvmexp.pddeact; 171 u.pddeact = uvmexp.pddeact;
172 u.anonpages = uvmexp.anonpages; 172 u.anonpages = uvmexp.anonpages;
173 u.filepages = uvmexp.filepages; 173 u.filepages = uvmexp.filepages;
174 u.execpages = uvmexp.execpages; 174 u.execpages = uvmexp.execpages;
175 u.colorhit = uvmexp.colorhit; 175 u.colorhit = uvmexp.colorhit;
176 u.colormiss = uvmexp.colormiss; 176 u.colormiss = uvmexp.colormiss;
177 u.cpuhit = uvmexp.cpuhit; 177 u.cpuhit = uvmexp.cpuhit;
178 u.cpumiss = uvmexp.cpumiss; 178 u.cpumiss = uvmexp.cpumiss;
 179 /*
 180 * XXX should use xcall
 181 * XXX should be an array
 182 */
179 for (CPU_INFO_FOREACH(cii, ci)) { 183 for (CPU_INFO_FOREACH(cii, ci)) {
180 struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm; 184 struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;
181 185
182 u.possiblydirtypages += 186 u.possiblydirtypages +=
183 ucpu->pagestate[0][UVM_PAGE_STATUS_UNKNOWN]; 187 ucpu->pagestate[0][UVM_PAGE_STATUS_UNKNOWN];
184 u.cleanpages += ucpu->pagestate[0][UVM_PAGE_STATUS_CLEAN]; 188 u.cleanpages += ucpu->pagestate[0][UVM_PAGE_STATUS_CLEAN];
185 u.dirtypages += ucpu->pagestate[0][UVM_PAGE_STATUS_DIRTY]; 189 u.dirtypages += ucpu->pagestate[0][UVM_PAGE_STATUS_DIRTY];
186 u.possiblydirtyanonpages += 190 u.possiblydirtyanonpages +=
187 ucpu->pagestate[1][UVM_PAGE_STATUS_UNKNOWN]; 191 ucpu->pagestate[1][UVM_PAGE_STATUS_UNKNOWN];
188 u.cleananonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]; 192 u.cleananonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN];
189 u.dirtyanonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY]; 193 u.dirtyanonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY];
 194
 195 u.loan_obj += ucpu->loan_obj;
 196 u.unloan_obj += ucpu->unloan_obj;
 197 u.loanbreak_obj += ucpu->loanbreak_obj;
 198 u.loanfree_obj += ucpu->loanfree_obj;
 199
 200 u.loan_anon += ucpu->loan_anon;
 201 u.unloan_anon += ucpu->unloan_anon;
 202 u.loanbreak_anon += ucpu->loanbreak_anon;
 203 u.loanfree_anon += ucpu->loanfree_anon;
 204
 205 u.loan_zero += ucpu->loan_zero;
 206 u.unloan_zero += ucpu->unloan_zero;
190 } 207 }
191 node = *rnode; 208 node = *rnode;
192 node.sysctl_data = &u; 209 node.sysctl_data = &u;
193 node.sysctl_size = sizeof(u); 210 node.sysctl_size = sizeof(u);
194 if (oldp) 211 if (oldp)
195 node.sysctl_size = min(*oldlenp, node.sysctl_size); 212 node.sysctl_size = min(*oldlenp, node.sysctl_size);
196 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 213 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
197} 214}
198 215
199/* 216/*
200 * sysctl helper routine for uvm_pctparam. 217 * sysctl helper routine for uvm_pctparam.
201 */ 218 */
202static int 219static int
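
Because the per-CPU counters are summed into struct uvmexp_sysctl here, they
become visible to userland through the existing vm.uvmexp2 sysctl node.  A
hypothetical reader, assuming a kernel and headers built from this branch so
the new fields exist:

/* Hypothetical userland reader for the new loan statistics. */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <uvm/uvm_extern.h>

#include <err.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	struct uvmexp_sysctl u;
	size_t len = sizeof(u);

	if (sysctlbyname("vm.uvmexp2", &u, &len, NULL, 0) == -1)
		err(1, "sysctlbyname vm.uvmexp2");
	printf("O->K: loan=%" PRId64 " unloan=%" PRId64
	    " break=%" PRId64 " free=%" PRId64 "\n",
	    u.loan_obj, u.unloan_obj, u.loanbreak_obj, u.loanfree_obj);
	printf("A->K: loan=%" PRId64 " unloan=%" PRId64
	    " break=%" PRId64 " free=%" PRId64 "\n",
	    u.loan_anon, u.unloan_anon, u.loanbreak_anon, u.loanfree_anon);
	printf("zero: loan=%" PRId64 " unloan=%" PRId64 "\n",
	    u.loan_zero, u.unloan_zero);
	return 0;
}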

cvs diff -r1.178.2.6 -r1.178.2.7 src/sys/uvm/uvm_page.c

--- src/sys/uvm/uvm_page.c 2011/11/18 00:57:33 1.178.2.6
+++ src/sys/uvm/uvm_page.c 2011/11/20 10:52:34 1.178.2.7
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $ */ 1/* $NetBSD: uvm_page.c,v 1.178.2.7 2011/11/20 10:52:34 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California. 5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 * 6 *
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * This code is derived from software contributed to Berkeley by 9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University. 10 * The Mach Operating System project at Carnegie-Mellon University.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -56,27 +56,27 @@ @@ -56,27 +56,27 @@
56 * School of Computer Science 56 * School of Computer Science
57 * Carnegie Mellon University 57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890 58 * Pittsburgh PA 15213-3890
59 * 59 *
60 * any improvements or extensions that they make and grant Carnegie the 60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes. 61 * rights to redistribute these changes.
62 */ 62 */
63 63
64/* 64/*
65 * uvm_page.c: page ops. 65 * uvm_page.c: page ops.
66 */ 66 */
67 67
68#include <sys/cdefs.h> 68#include <sys/cdefs.h>
69__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $"); 69__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.7 2011/11/20 10:52:34 yamt Exp $");
70 70
71#include "opt_ddb.h" 71#include "opt_ddb.h"
72#include "opt_uvmhist.h" 72#include "opt_uvmhist.h"
73#include "opt_readahead.h" 73#include "opt_readahead.h"
74 74
75#include <sys/param.h> 75#include <sys/param.h>
76#include <sys/systm.h> 76#include <sys/systm.h>
77#include <sys/malloc.h> 77#include <sys/malloc.h>
78#include <sys/sched.h> 78#include <sys/sched.h>
79#include <sys/kernel.h> 79#include <sys/kernel.h>
80#include <sys/vnode.h> 80#include <sys/vnode.h>
81#include <sys/proc.h> 81#include <sys/proc.h>
82#include <sys/radixtree.h> 82#include <sys/radixtree.h>
@@ -156,45 +156,41 @@ static void uvm_pageremove(struct uvm_ob @@ -156,45 +156,41 @@ static void uvm_pageremove(struct uvm_ob
156 156
157/* 157/*
158 * inline functions 158 * inline functions
159 */ 159 */
160 160
161/* 161/*
162 * uvm_pageinsert: insert a page in the object. 162 * uvm_pageinsert: insert a page in the object.
163 * 163 *
164 * => caller must lock object 164 * => caller must lock object
165 * => call should have already set pg's object and offset pointers 165 * => call should have already set pg's object and offset pointers
166 */ 166 */
167 167
168static inline void 168static inline void
169uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg, 169uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg)
170 struct vm_page *where) 
171{ 170{
172 171
173 KASSERT(uobj == pg->uobject); 172 KASSERT(uobj == pg->uobject);
174 KASSERT(mutex_owned(uobj->vmobjlock)); 173 KASSERT(mutex_owned(uobj->vmobjlock));
175 KASSERT((pg->flags & PG_TABLED) == 0); 174 KASSERT((pg->flags & PG_TABLED) == 0);
176 KASSERT(where == NULL || (where->flags & PG_TABLED)); 
177 KASSERT(where == NULL || (where->uobject == uobj)); 
178 175
179 if ((pg->pqflags & PQ_STAT) != 0) { 176 if ((pg->pqflags & PQ_STAT) != 0) {
180 struct uvm_cpu *ucpu; 177 struct uvm_cpu *ucpu;
181 const unsigned int status = uvm_pagegetdirty(pg); 178 const unsigned int status = uvm_pagegetdirty(pg);
182 const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0; 179 const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0;
183 180
184 kpreempt_disable(); 181 ucpu = uvm_cpu_get();
185 ucpu = curcpu()->ci_data.cpu_uvm; 
186 ucpu->pagestate[isaobj][status]++; 182 ucpu->pagestate[isaobj][status]++;
187 kpreempt_enable(); 183 uvm_cpu_put(ucpu);
188 if (!isaobj) { 184 if (!isaobj) {
189 KASSERT((pg->pqflags & PQ_FILE) != 0); 185 KASSERT((pg->pqflags & PQ_FILE) != 0);
190 if (uobj->uo_npages == 0) { 186 if (uobj->uo_npages == 0) {
191 struct vnode *vp = (struct vnode *)uobj; 187 struct vnode *vp = (struct vnode *)uobj;
192 188
193 vholdl(vp); 189 vholdl(vp);
194 } 190 }
195 if (UVM_OBJ_IS_VTEXT(uobj)) { 191 if (UVM_OBJ_IS_VTEXT(uobj)) {
196 atomic_inc_uint(&uvmexp.execpages); 192 atomic_inc_uint(&uvmexp.execpages);
197 } else { 193 } else {
198 atomic_inc_uint(&uvmexp.filepages); 194 atomic_inc_uint(&uvmexp.filepages);
199 } 195 }
200 } else { 196 } else {
@@ -225,53 +221,52 @@ uvm_pageinsert_tree(struct uvm_object *u @@ -225,53 +221,52 @@ uvm_pageinsert_tree(struct uvm_object *u
225 221
226static inline int 222static inline int
227uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg) 223uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
228{ 224{
229 int error; 225 int error;
230 226
231 KDASSERT(uobj != NULL); 227 KDASSERT(uobj != NULL);
232 KASSERT(uobj == pg->uobject); 228 KASSERT(uobj == pg->uobject);
233 error = uvm_pageinsert_tree(uobj, pg); 229 error = uvm_pageinsert_tree(uobj, pg);
234 if (error != 0) { 230 if (error != 0) {
235 KASSERT(error == ENOMEM); 231 KASSERT(error == ENOMEM);
236 return error; 232 return error;
237 } 233 }
238 uvm_pageinsert_list(uobj, pg, NULL); 234 uvm_pageinsert_list(uobj, pg);
239 return 0; 235 return 0;
240} 236}
241 237
242/* 238/*
243 * uvm_page_remove: remove page from object. 239 * uvm_page_remove: remove page from object.
244 * 240 *
245 * => caller must lock object 241 * => caller must lock object
246 */ 242 */
247 243
248static inline void 244static inline void
249uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg) 245uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
250{ 246{
251 247
252 KASSERT(uobj == pg->uobject); 248 KASSERT(uobj == pg->uobject);
253 KASSERT(mutex_owned(uobj->vmobjlock)); 249 KASSERT(mutex_owned(uobj->vmobjlock));
254 KASSERT(pg->flags & PG_TABLED); 250 KASSERT(pg->flags & PG_TABLED);
255 251
256 if ((pg->pqflags & PQ_STAT) != 0) { 252 if ((pg->pqflags & PQ_STAT) != 0) {
257 struct uvm_cpu *ucpu; 253 struct uvm_cpu *ucpu;
258 const unsigned int status = uvm_pagegetdirty(pg); 254 const unsigned int status = uvm_pagegetdirty(pg);
259 const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0; 255 const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0;
260 256
261 kpreempt_disable(); 257 ucpu = uvm_cpu_get();
262 ucpu = curcpu()->ci_data.cpu_uvm; 
263 ucpu->pagestate[isaobj][status]--; 258 ucpu->pagestate[isaobj][status]--;
264 kpreempt_enable(); 259 uvm_cpu_put(ucpu);
265 if (!isaobj) { 260 if (!isaobj) {
266 KASSERT((pg->pqflags & PQ_FILE) != 0); 261 KASSERT((pg->pqflags & PQ_FILE) != 0);
267 if (uobj->uo_npages == 1) { 262 if (uobj->uo_npages == 1) {
268 struct vnode *vp = (struct vnode *)uobj; 263 struct vnode *vp = (struct vnode *)uobj;
269 264
270 holdrelel(vp); 265 holdrelel(vp);
271 } 266 }
272 if (UVM_OBJ_IS_VTEXT(uobj)) { 267 if (UVM_OBJ_IS_VTEXT(uobj)) {
273 atomic_dec_uint(&uvmexp.execpages); 268 atomic_dec_uint(&uvmexp.execpages);
274 } else { 269 } else {
275 atomic_dec_uint(&uvmexp.filepages); 270 atomic_dec_uint(&uvmexp.filepages);
276 } 271 }
277 } else { 272 } else {
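
The insert/remove paths above only ever touch the statistics counters of the local CPU (via the uvm_cpu_get()/uvm_cpu_put() pair introduced at the end of this change), so system-wide numbers have to be produced by walking every CPU. The following is a hypothetical aggregation sketch, not code from this change: the [2][3] shape of pagestate and the int64_t accumulator are assumptions, since struct uvm_cpu itself is declared elsewhere and not shown in this diff.

	/*
	 * Hypothetical sketch (assumes the usual uvm_page.c includes plus
	 * <sys/cpu.h>): fold the per-CPU pagestate[][] counters into
	 * system-wide totals.  No cross-CPU synchronization is done, so the
	 * totals are only approximate, which is typical for such statistics.
	 */
	static void
	uvm_pagestate_sum(int64_t total[2][3])
	{
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		int i, j;

		memset(total, 0, 2 * 3 * sizeof(total[0][0]));
		for (CPU_INFO_FOREACH(cii, ci)) {
			/* per-CPU uvm state, as used by the hunks above */
			const struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

			for (i = 0; i < 2; i++) {
				for (j = 0; j < 3; j++) {
					total[i][j] += ucpu->pagestate[i][j];
				}
			}
		}
	}
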
@@ -1319,30 +1314,29 @@ uvm_pagealloc_strat(struct uvm_object *o @@ -1319,30 +1314,29 @@ uvm_pagealloc_strat(struct uvm_object *o
1319 pg->uobject = obj; 1314 pg->uobject = obj;
1320 pg->uanon = anon; 1315 pg->uanon = anon;
1321 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE; 1316 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
1322 /* 1317 /*
1323 * clear PQ_FREE before releasing uvm_fpageqlock. 1318 * clear PQ_FREE before releasing uvm_fpageqlock.
1324 * otherwise we race with uvm_pglistalloc. 1319 * otherwise we race with uvm_pglistalloc.
1325 */ 1320 */
1326 pg->pqflags = 0; 1321 pg->pqflags = 0;
1327 mutex_spin_exit(&uvm_fpageqlock); 1322 mutex_spin_exit(&uvm_fpageqlock);
1328 if (anon) { 1323 if (anon) {
1329 anon->an_page = pg; 1324 anon->an_page = pg;
1330 pg->pqflags = PQ_ANON; 1325 pg->pqflags = PQ_ANON;
1331 atomic_inc_uint(&uvmexp.anonpages); 1326 atomic_inc_uint(&uvmexp.anonpages);
1332 kpreempt_disable(); 1327 ucpu = uvm_cpu_get();
1333 ucpu = curcpu()->ci_data.cpu_uvm; 
1334 ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++; 1328 ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++;
1335 kpreempt_enable(); 1329 uvm_cpu_put(ucpu);
1336 } else { 1330 } else {
1337 if (obj) { 1331 if (obj) {
1338 int error; 1332 int error;
1339 1333
1340 /* 1334 /*
1341 * set PQ_FILE|PQ_AOBJ before the first uvm_pageinsert. 1335 * set PQ_FILE|PQ_AOBJ before the first uvm_pageinsert.
1342 */ 1336 */
1343 if (UVM_OBJ_IS_VNODE(obj)) { 1337 if (UVM_OBJ_IS_VNODE(obj)) {
1344 pg->pqflags |= PQ_FILE; 1338 pg->pqflags |= PQ_FILE;
1345 } else { 1339 } else {
1346 pg->pqflags |= PQ_AOBJ; 1340 pg->pqflags |= PQ_AOBJ;
1347 } 1341 }
1348 error = uvm_pageinsert(obj, pg); 1342 error = uvm_pageinsert(obj, pg);
@@ -1404,27 +1398,32 @@ uvm_pagereplace(struct vm_page *oldpg, s @@ -1404,27 +1398,32 @@ uvm_pagereplace(struct vm_page *oldpg, s
1404 newpg->offset = oldpg->offset; 1398 newpg->offset = oldpg->offset;
1405 idx = newpg->offset >> PAGE_SHIFT; 1399 idx = newpg->offset >> PAGE_SHIFT;
1406 pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg); 1400 pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg);
1407 KASSERT(pg == oldpg); 1401 KASSERT(pg == oldpg);
1408 if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) { 1402 if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
1409 if ((newpg->flags & PG_CLEAN) != 0) { 1403 if ((newpg->flags & PG_CLEAN) != 0) {
1410 radix_tree_clear_tag(&uobj->uo_pages, idx, 1404 radix_tree_clear_tag(&uobj->uo_pages, idx,
1411 UVM_PAGE_DIRTY_TAG); 1405 UVM_PAGE_DIRTY_TAG);
1412 } else { 1406 } else {
1413 radix_tree_set_tag(&uobj->uo_pages, idx, 1407 radix_tree_set_tag(&uobj->uo_pages, idx,
1414 UVM_PAGE_DIRTY_TAG); 1408 UVM_PAGE_DIRTY_TAG);
1415 } 1409 }
1416 } 1410 }
1417 uvm_pageinsert_list(uobj, newpg, oldpg); 1411 /*
 1412 * oldpg->pqflags is stable. newpg is not reachable by others yet.
 1413 */
 1414 newpg->pqflags =
 1415 (newpg->pqflags & ~PQ_STAT) | (oldpg->pqflags & PQ_STAT);
 1416 uvm_pageinsert_list(uobj, newpg);
1418 uvm_pageremove_list(uobj, oldpg); 1417 uvm_pageremove_list(uobj, oldpg);
1419} 1418}
1420 1419
1421/* 1420/*
1422 * uvm_pagerealloc: reallocate a page from one object to another 1421 * uvm_pagerealloc: reallocate a page from one object to another
1423 * 1422 *
1424 * => both objects must be locked 1423 * => both objects must be locked
1425 */ 1424 */
1426 1425
1427void 1426void
1428uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) 1427uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1429{ 1428{
1430 /* 1429 /*
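
Carrying the PQ_STAT bits across in the uvm_pagereplace() hunk above matters because uvm_pageinsert_list() increments the per-CPU counters according to newpg's pqflags while uvm_pageremove_list() decrements them according to oldpg's; if the two disagreed, the counters would drift on every replacement. A hypothetical helper expressing that hand-off follows; uvm_page_transfer_stat is not part of this change, and the exact definition of PQ_STAT lives in uvm_page.h.

	/*
	 * Hypothetical helper, illustration only: give "to" the same
	 * statistics-relevant pqflags bits as "from", so that a following
	 * uvm_pageinsert_list(uobj, to) / uvm_pageremove_list(uobj, from)
	 * pair leaves the per-CPU pagestate[][] counters balanced.
	 */
	static inline void
	uvm_page_transfer_stat(struct vm_page *from, struct vm_page *to)
	{

		to->pqflags = (to->pqflags & ~PQ_STAT) | (from->pqflags & PQ_STAT);
		KASSERT((to->pqflags & PQ_STAT) == (from->pqflags & PQ_STAT));
	}
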
@@ -1512,87 +1511,94 @@ uvm_pagefree(struct vm_page *pg) @@ -1512,87 +1511,94 @@ uvm_pagefree(struct vm_page *pg)
1512 1511
1513 KASSERT((pg->flags & PG_PAGEOUT) == 0); 1512 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1514 KASSERT(!(pg->pqflags & PQ_FREE)); 1513 KASSERT(!(pg->pqflags & PQ_FREE));
1515 KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg)); 1514 KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
1516 KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock)); 1515 KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
1517 KASSERT(pg->uobject != NULL || pg->uanon == NULL || 1516 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
1518 mutex_owned(pg->uanon->an_lock)); 1517 mutex_owned(pg->uanon->an_lock));
1519 1518
1520 /* 1519 /*
1521 * if the page is loaned, resolve the loan instead of freeing. 1520 * if the page is loaned, resolve the loan instead of freeing.
1522 */ 1521 */
1523 1522
1524 if (pg->loan_count) { 1523 if (pg->loan_count) {
 1524 struct uvm_object * const obj = pg->uobject;
 1525
1525 KASSERT(pg->wire_count == 0); 1526 KASSERT(pg->wire_count == 0);
1526 1527
1527 /* 1528 /*
1528 * if the page is owned by an anon then we just want to 1529 * if the page is owned by an anon then we just want to
1529 * drop anon ownership. the kernel will free the page when 1530 * drop anon ownership. the kernel will free the page when
1530 * it is done with it. if the page is owned by an object, 1531 * it is done with it. if the page is owned by an object,
1531 * remove it from the object and mark it dirty for the benefit 1532 * remove it from the object and mark it dirty for the benefit
1532 * of possible anon owners. 1533 * of possible anon owners.
1533 * 1534 *
1534 * regardless of previous ownership, wakeup any waiters, 1535 * regardless of previous ownership, wakeup any waiters,
1535 * unbusy the page, and we're done. 1536 * unbusy the page, and we're done.
1536 */ 1537 */
1537 1538
1538 if (pg->uobject != NULL) { 1539 if (obj != NULL) {
1539 uvm_pageremove(pg->uobject, pg); 1540 uvm_pageremove(obj, pg);
1540 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY); 1541 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1541 } else if (pg->uanon != NULL) { 1542 } else if (pg->uanon != NULL) {
1542 if ((pg->pqflags & PQ_ANON) == 0) { 1543 if ((pg->pqflags & PQ_ANON) == 0) {
1543 pg->loan_count--; 1544 pg->loan_count--;
1544 } else { 1545 } else {
1545 pg->pqflags &= ~PQ_ANON; 1546 pg->pqflags &= ~PQ_ANON;
1546 atomic_dec_uint(&uvmexp.anonpages); 1547 atomic_dec_uint(&uvmexp.anonpages);
1547 status = uvm_pagegetdirty(pg); 1548 status = uvm_pagegetdirty(pg);
1548 kpreempt_disable(); 1549 ucpu = uvm_cpu_get();
1549 ucpu = curcpu()->ci_data.cpu_uvm; 
1550 ucpu->pagestate[1][status]--; 1550 ucpu->pagestate[1][status]--;
1551 kpreempt_enable(); 1551 uvm_cpu_put(ucpu);
1552 } 1552 }
1553 pg->uanon->an_page = NULL; 1553 pg->uanon->an_page = NULL;
1554 pg->uanon = NULL; 1554 pg->uanon = NULL;
1555 } 1555 }
1556 if (pg->flags & PG_WANTED) { 1556 if (pg->flags & PG_WANTED) {
1557 wakeup(pg); 1557 wakeup(pg);
1558 } 1558 }
1559 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1); 1559 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
1560#ifdef UVM_PAGE_TRKOWN 1560#ifdef UVM_PAGE_TRKOWN
1561 pg->owner_tag = NULL; 1561 pg->owner_tag = NULL;
1562#endif 1562#endif
1563 if (pg->loan_count) { 1563 if (pg->loan_count) {
1564 KASSERT(pg->uobject == NULL); 1564 KASSERT(pg->uobject == NULL);
1565 if (pg->uanon == NULL) { 1565 if (pg->uanon == NULL) {
1566 uvm_pagedequeue(pg); 1566 uvm_pagedequeue(pg);
1567 } 1567 }
 1568 ucpu = uvm_cpu_get();
 1569 if (obj != NULL) {
 1570 ucpu->loanfree_obj += pg->loan_count;
 1571 } else {
 1572 ucpu->loanfree_anon += pg->loan_count;
 1573 }
 1574 uvm_cpu_put(ucpu);
1568 return; 1575 return;
1569 } 1576 }
1570 } 1577 }
1571 1578
1572 /* 1579 /*
1573 * remove page from its object or anon. 1580 * remove page from its object or anon.
1574 */ 1581 */
1575 1582
1576 if (pg->uobject != NULL) { 1583 if (pg->uobject != NULL) {
1577 uvm_pageremove(pg->uobject, pg); 1584 uvm_pageremove(pg->uobject, pg);
1578 } else if (pg->uanon != NULL) { 1585 } else if (pg->uanon != NULL) {
1579 pg->uanon->an_page = NULL; 1586 pg->uanon->an_page = NULL;
1580 atomic_dec_uint(&uvmexp.anonpages); 1587 atomic_dec_uint(&uvmexp.anonpages);
1581 status = uvm_pagegetdirty(pg); 1588 status = uvm_pagegetdirty(pg);
1582 kpreempt_disable(); 1589 ucpu = uvm_cpu_get();
1583 ucpu = curcpu()->ci_data.cpu_uvm; 
1584 ucpu->pagestate[1][status]--; 1590 ucpu->pagestate[1][status]--;
1585 kpreempt_enable(); 1591 uvm_cpu_put(ucpu);
1586 } 1592 }
1587 1593
1588 /* 1594 /*
1589 * now remove the page from the queues. 1595 * now remove the page from the queues.
1590 */ 1596 */
1591 1597
1592 uvm_pagedequeue(pg); 1598 uvm_pagedequeue(pg);
1593 1599
1594 /* 1600 /*
1595 * if the page was wired, unwire it now. 1601 * if the page was wired, unwire it now.
1596 */ 1602 */
1597 1603
1598 if (pg->wire_count) { 1604 if (pg->wire_count) {

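The loan-resolution path in uvm_pagefree() above now also counts how many loan references were still outstanding when the owner let go of the page, split by whether that owner was a uvm_object (loanfree_obj) or an anon (loanfree_anon). Below is a hypothetical sketch of how a reporting path might fold these per-CPU counters into totals; the accumulator type and the CPU walk are assumptions, and the real consumer would be the uvm_meter.c part of this commit, which is not reproduced here.

	/*
	 * Hypothetical sketch, not code from this change: total up the
	 * per-CPU loanfree_* counters bumped in uvm_pagefree().  Field names
	 * are taken from the hunk above; types and the reporting interface
	 * are assumed.
	 */
	static void
	uvm_loanfree_sum(uint64_t *obj_total, uint64_t *anon_total)
	{
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		uint64_t obj = 0, anon = 0;

		for (CPU_INFO_FOREACH(cii, ci)) {
			const struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

			obj += ucpu->loanfree_obj;
			anon += ucpu->loanfree_anon;
		}
		*obj_total = obj;
		*anon_total = anon;
	}
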
cvs diff -r1.73.2.6 -r1.73.2.7 src/sys/uvm/uvm_page.h (expand / switch to unified diff)

--- src/sys/uvm/uvm_page.h 2011/11/18 00:57:34 1.73.2.6
+++ src/sys/uvm/uvm_page.h 2011/11/20 10:52:34 1.73.2.7
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_page.h,v 1.73.2.6 2011/11/18 00:57:34 yamt Exp $ */ 1/* $NetBSD: uvm_page.h,v 1.73.2.7 2011/11/20 10:52:34 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California. 5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 * 6 *
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * This code is derived from software contributed to Berkeley by 9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University. 10 * The Mach Operating System project at Carnegie-Mellon University.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -290,26 +290,30 @@ void uvm_page_unbusy(struct vm_page **,  @@ -290,26 +290,30 @@ void uvm_page_unbusy(struct vm_page **,
290struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t); 290struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
291void uvm_pageunwire(struct vm_page *); 291void uvm_pageunwire(struct vm_page *);
292void uvm_pagewire(struct vm_page *); 292void uvm_pagewire(struct vm_page *);
293void uvm_pagezero(struct vm_page *); 293void uvm_pagezero(struct vm_page *);
294bool uvm_pageismanaged(paddr_t); 294bool uvm_pageismanaged(paddr_t);
295unsigned int uvm_pagegetdirty(struct vm_page *); 295unsigned int uvm_pagegetdirty(struct vm_page *);
296void uvm_pagemarkdirty(struct vm_page *, unsigned int); 296void uvm_pagemarkdirty(struct vm_page *, unsigned int);
297bool uvm_pagecheckdirty(struct vm_page *, bool); 297bool uvm_pagecheckdirty(struct vm_page *, bool);
298bool uvm_pagereadonly_p(struct vm_page *); 298bool uvm_pagereadonly_p(struct vm_page *);
299bool uvm_page_locked_p(struct vm_page *); 299bool uvm_page_locked_p(struct vm_page *);
300kmutex_t *uvm_page_getlock(struct vm_page *); 300kmutex_t *uvm_page_getlock(struct vm_page *);
301bool uvm_page_samelock_p(struct vm_page *, struct vm_page *); 301bool uvm_page_samelock_p(struct vm_page *, struct vm_page *);
302 302
 303struct uvm_cpu;
 304struct uvm_cpu *uvm_cpu_get(void);
 305void uvm_cpu_put(struct uvm_cpu *);
 306
303/* 307/*
304 * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty 308 * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
305 * 309 *
306 * UNKNOWN means that we need to consult pmap to know if the page is 310 * UNKNOWN means that we need to consult pmap to know if the page is
307 * dirty or not. 311 * dirty or not.
308 * basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable 312 * basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
309 * mapping. 313 * mapping.
310 * 314 *
311 * if you want to renumber these, check __CTASSERTs in 315 * if you want to renumber these, check __CTASSERTs in
312 * uvm_page_status.c first. 316 * uvm_page_status.c first.
313 */ 317 */
314#define UVM_PAGE_STATUS_UNKNOWN 0 318#define UVM_PAGE_STATUS_UNKNOWN 0
315#define UVM_PAGE_STATUS_CLEAN 1 319#define UVM_PAGE_STATUS_CLEAN 1

cvs diff -r1.1.2.4 -r1.1.2.5 src/sys/uvm/uvm_page_status.c (expand / switch to unified diff)

--- src/sys/uvm/uvm_page_status.c 2011/11/13 01:18:02 1.1.2.4
+++ src/sys/uvm/uvm_page_status.c 2011/11/20 10:52:35 1.1.2.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: uvm_page_status.c,v 1.1.2.4 2011/11/13 01:18:02 yamt Exp $ */ 1/* $NetBSD: uvm_page_status.c,v 1.1.2.5 2011/11/20 10:52:35 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2011 YAMAMOTO Takashi, 4 * Copyright (c)2011 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE. 26 * SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.4 2011/11/13 01:18:02 yamt Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.5 2011/11/20 10:52:35 yamt Exp $");
31 31
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/systm.h> 33#include <sys/systm.h>
34 34
35#include <uvm/uvm.h> 35#include <uvm/uvm.h>
36 36
37/* 37/*
38 * page dirtiness status tracking 38 * page dirtiness status tracking
39 * 39 *
40 * separated from uvm_page.c mainly for rump 40 * separated from uvm_page.c mainly for rump
41 */ 41 */
42 42
43/* 43/*
@@ -63,46 +63,44 @@ uvm_pagegetdirty(struct vm_page *pg) @@ -63,46 +63,44 @@ uvm_pagegetdirty(struct vm_page *pg)
63 KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0); 63 KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
64 KASSERT(uvm_page_locked_p(pg)); 64 KASSERT(uvm_page_locked_p(pg));
65 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == 65 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
66 radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); 66 radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
67 return pg->flags & (PG_CLEAN|PG_DIRTY); 67 return pg->flags & (PG_CLEAN|PG_DIRTY);
68} 68}
69 69
70static void 70static void
71stat_update(bool isanon, unsigned int oldstatus, unsigned int newstatus) 71stat_update(bool isanon, unsigned int oldstatus, unsigned int newstatus)
72{ 72{
73 struct uvm_cpu *ucpu; 73 struct uvm_cpu *ucpu;
74 74
75 KASSERT(oldstatus != newstatus); 75 KASSERT(oldstatus != newstatus);
76 kpreempt_disable(); 76 ucpu = uvm_cpu_get();
77 ucpu = curcpu()->ci_data.cpu_uvm; 
78 ucpu->pagestate[isanon][oldstatus]--; 77 ucpu->pagestate[isanon][oldstatus]--;
79 ucpu->pagestate[isanon][newstatus]++; 78 ucpu->pagestate[isanon][newstatus]++;
80 kpreempt_enable(); 79 uvm_cpu_put(ucpu);
81} 80}
82 81
83/* 82/*
84 * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values) 83 * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values)
85 * of the page. 84 * of the page.
86 */ 85 */
87 86
88void 87void
89uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus) 88uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
90{ 89{
91 struct uvm_object * const uobj = pg->uobject; 90 struct uvm_object * const uobj = pg->uobject;
92 const uint64_t idx = pg->offset >> PAGE_SHIFT; 91 const uint64_t idx = pg->offset >> PAGE_SHIFT;
93 const unsigned int oldstatus = uvm_pagegetdirty(pg); 92 const unsigned int oldstatus = uvm_pagegetdirty(pg);
94 93
95 KASSERT(uobj != NULL || pg->uanon != NULL); 
96 KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0); 94 KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
97 KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0); 95 KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
98 KASSERT(uvm_page_locked_p(pg)); 96 KASSERT(uvm_page_locked_p(pg));
99 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == 97 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
100 radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG)); 98 radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
101 99
102 if (oldstatus == newstatus) { 100 if (oldstatus == newstatus) {
103 return; 101 return;
104 } 102 }
105 /* 103 /*
106 * set UVM_PAGE_DIRTY_TAG tag unless known CLEAN so that putpages can 104 * set UVM_PAGE_DIRTY_TAG tag unless known CLEAN so that putpages can
107 * find possibly-dirty pages quickly. 105 * find possibly-dirty pages quickly.
108 */ 106 */
@@ -171,13 +169,30 @@ uvm_pagecheckdirty(struct vm_page *pg, b @@ -171,13 +169,30 @@ uvm_pagecheckdirty(struct vm_page *pg, b
171 169
172 if (oldstatus == UVM_PAGE_STATUS_DIRTY) { 170 if (oldstatus == UVM_PAGE_STATUS_DIRTY) {
173 modified = true; 171 modified = true;
174 if (newstatus == UVM_PAGE_STATUS_UNKNOWN) { 172 if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
175 pmap_clear_modify(pg); 173 pmap_clear_modify(pg);
176 } 174 }
177 } else { 175 } else {
178 modified = pmap_clear_modify(pg); 176 modified = pmap_clear_modify(pg);
179 } 177 }
180 uvm_pagemarkdirty(pg, newstatus); 178 uvm_pagemarkdirty(pg, newstatus);
181 } 179 }
182 return modified; 180 return modified;
183} 181}
 182
 183struct uvm_cpu *
 184uvm_cpu_get(void)
 185{
 186
 187 kpreempt_disable();
 188 return curcpu()->ci_data.cpu_uvm;
 189}
 190
 191void
 192uvm_cpu_put(struct uvm_cpu *ucpu)
 193{
 194
 195 KASSERT(kpreempt_disabled());
 196 KASSERT(curcpu()->ci_data.cpu_uvm == ucpu);
 197 kpreempt_enable();
 198}
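
For reference, the callers converted earlier in this change all follow the same bracketed pattern around a per-CPU counter update. A minimal usage sketch, with a made-up function name for illustration:

	/*
	 * Illustrative caller (hypothetical name): uvm_cpu_get() disables
	 * preemption and returns the local CPU's uvm_cpu; uvm_cpu_put()
	 * re-enables preemption and asserts the pointer still matches the
	 * current CPU, catching a mismatched get/put pair.
	 */
	static void
	example_account_anon_clean_page(void)
	{
		struct uvm_cpu *ucpu;

		ucpu = uvm_cpu_get();
		/* index 1 is the anon/aobj row, as in uvm_pagealloc_strat() above */
		ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++;
		uvm_cpu_put(ucpu);
	}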