Sun Apr 9 09:00:56 2023 UTC
uvm(9): KASSERT(A && B) -> KASSERT(A); KASSERT(B)


(riastradh)
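
The change is mechanical, but it improves diagnosability: when a combined
KASSERT(A && B) fires, the panic message prints the whole conjunction and
cannot say which conjunct was false, whereas with one KASSERT per condition
the panic message pinpoints the exact condition that failed. Taking the
amap_free() assertion from the first diff below as an example:

    /* before: one panic message covering two independent conditions */
    KASSERT(amap->am_ref == 0 && amap->am_nused == 0);

    /* after: a failure now names the offending condition */
    KASSERT(amap->am_ref == 0);
    KASSERT(amap->am_nused == 0);
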
diff -r1.126 -r1.127 src/sys/uvm/uvm_amap.c
diff -r1.127 -r1.128 src/sys/uvm/uvm_bio.c
diff -r1.231 -r1.232 src/sys/uvm/uvm_fault.c
diff -r1.164 -r1.165 src/sys/uvm/uvm_km.c
diff -r1.251 -r1.252 src/sys/uvm/uvm_page.c
diff -r1.17 -r1.18 src/sys/uvm/uvm_physseg.c
diff -r1.207 -r1.208 src/sys/uvm/uvm_swap.c
diff -r1.118 -r1.119 src/sys/uvm/uvm_vnode.c

cvs diff -r1.126 -r1.127 src/sys/uvm/uvm_amap.c

--- src/sys/uvm/uvm_amap.c 2021/03/13 15:29:55 1.126
+++ src/sys/uvm/uvm_amap.c 2023/04/09 09:00:56 1.127
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.127 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -25,27 +25,27 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * uvm_amap.c: amap operations
  */
 
 /*
  * this file contains functions that perform operations on amaps.  see
  * uvm_amap.h for a brief explanation of the role of amaps in uvm.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.127 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_swap.h>
 
@@ -313,27 +313,28 @@ uvm_amap_init(void)
 /*
  * amap_free: free an amap
  *
  * => the amap must be unlocked
  * => the amap should have a zero reference count and be empty
  */
 void
 amap_free(struct vm_amap *amap)
 {
     int slots;
 
     UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
-    KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
+    KASSERT(amap->am_ref == 0);
+    KASSERT(amap->am_nused == 0);
     KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
     slots = amap->am_maxslot;
     kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
     kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
     kmem_free(amap->am_anon, slots * sizeof(*amap->am_anon));
 #ifdef UVM_AMAP_PPREF
     if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
         kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
 #endif
     pool_cache_put(&uvm_amap_cache, amap);
     UVMHIST_LOG(maphist,"<- done, freed amap = %#jx", (uintptr_t)amap,
         0, 0, 0);
 }
@@ -764,27 +765,28 @@ amap_wipeout(struct vm_amap *amap)
         /*
          * Note: amap_swap_off() will call us again.
          */
         amap_unlock(amap);
         return;
     }
 
     for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
         struct vm_anon *anon;
         u_int slot;
 
         slot = amap->am_slots[lcv];
         anon = amap->am_anon[slot];
-        KASSERT(anon != NULL && anon->an_ref != 0);
+        KASSERT(anon != NULL);
+        KASSERT(anon->an_ref != 0);
 
         KASSERT(anon->an_lock == amap->am_lock);
         UVMHIST_LOG(maphist,"  processing anon %#jx, ref=%jd",
             (uintptr_t)anon, anon->an_ref, 0, 0);
 
         /*
          * Drop the reference.
          */
 
         if (__predict_true(--anon->an_ref == 0)) {
             uvm_anfree(anon);
         }
         if (__predict_false((lcv & 31) == 31)) {
@@ -1059,27 +1061,28 @@ ReStart:
 
         pg = anon->an_page;
         KASSERT(pg != NULL);
         KASSERT(pg->wire_count > 0);
 
         /*
          * If the page is loaned then it must already be mapped
          * read-only and we don't need to copy it.
          */
 
         if (pg->loan_count != 0) {
             continue;
         }
-        KASSERT(pg->uanon == anon && pg->uobject == NULL);
+        KASSERT(pg->uanon == anon);
+        KASSERT(pg->uobject == NULL);
 
         /*
          * If the page is busy, then we have to unlock, wait for
          * it and then restart.
          */
 
         if (pg->flags & PG_BUSY) {
             uvm_pagewait(pg, amap->am_lock, "cownow");
             goto ReStart;
         }
 
         /*
          * Perform a copy-on-write.

cvs diff -r1.127 -r1.128 src/sys/uvm/uvm_bio.c

--- src/sys/uvm/uvm_bio.c 2023/02/12 16:28:32 1.127
+++ src/sys/uvm/uvm_bio.c 2023/04/09 09:00:56 1.128
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_bio.c,v 1.127 2023/02/12 16:28:32 andvar Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -24,27 +24,27 @@
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 /*
  * uvm_bio.c: buffered i/o object mapping cache
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.127 2023/02/12 16:28:32 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/sysctl.h>
 #include <sys/vnode.h>
 #include <sys/bitops.h>    /* for ilog2() */
 
@@ -545,27 +545,29 @@ again:
         LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
             umap, hash);
         LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
     } else {
         UBC_EVCNT_INCR(wincachehit);
         va = UBC_UMAP_ADDR(umap);
     }
 
     if (umap->refcount == 0) {
         TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
     }
 
     if (flags & UBC_WRITE) {
-        KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
+        KASSERTMSG(umap->writeoff == 0,
+            "ubc_alloc: concurrent writes to uobj %p", uobj);
+        KASSERTMSG(umap->writelen == 0,
             "ubc_alloc: concurrent writes to uobj %p", uobj);
         umap->writeoff = slot_offset;
         umap->writelen = *lenp;
     }
 
     umap->refcount++;
     umap->advice = advice;
     rw_exit(ubc_object.uobj.vmobjlock);
     UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
         (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);
 
     if (flags & UBC_FAULTBUSY) {
         int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
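
One wrinkle worth noting from ubc_alloc() above: for KASSERTMSG the split
duplicates the message argument, so each of the resulting assertions still
panics with the same diagnostic text:

    KASSERTMSG(umap->writeoff == 0,
        "ubc_alloc: concurrent writes to uobj %p", uobj);
    KASSERTMSG(umap->writelen == 0,
        "ubc_alloc: concurrent writes to uobj %p", uobj);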

cvs diff -r1.231 -r1.232 src/sys/uvm/uvm_fault.c

--- src/sys/uvm/uvm_fault.c 2022/10/26 23:27:32 1.231
+++ src/sys/uvm/uvm_fault.c 2023/04/09 09:00:56 1.232
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_fault.c,v 1.231 2022/10/26 23:27:32 riastradh Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.232 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -22,27 +22,27 @@
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
  */
 
 /*
  * uvm_fault.c: fault handler
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.231 2022/10/26 23:27:32 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.232 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/atomic.h>
 #include <sys/kernel.h>
 #include <sys/mman.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_pdpolicy.h>
 
 /*
@@ -2660,41 +2660,42 @@ uvm_fault_unwire_locked(struct vm_map *m
     struct vm_page *pg;
 
     /*
      * we assume that the area we are unwiring has actually been wired
      * in the first place.  this means that we should be able to extract
      * the PAs from the pmap.  we also lock out the page daemon so that
      * we can call uvm_pageunwire.
      */
 
     /*
      * find the beginning map entry for the region.
      */
 
-    KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
+    KASSERT(start >= vm_map_min(map));
+    KASSERT(end <= vm_map_max(map));
     if (uvm_map_lookup_entry(map, start, &entry) == false)
         panic("uvm_fault_unwire_locked: address not in map");
 
     oentry = NULL;
     for (va = start; va < end; va += PAGE_SIZE) {
 
         /*
          * find the map entry for the current address.
          */
 
         KASSERT(va >= entry->start);
         while (va >= entry->end) {
-            KASSERT(entry->next != &map->header &&
-                entry->next->start <= entry->end);
+            KASSERT(entry->next != &map->header);
+            KASSERT(entry->next->start <= entry->end);
             entry = entry->next;
         }
 
         /*
          * lock it.
          */
 
         if (entry != oentry) {
             if (oentry != NULL) {
                 uvm_map_unlock_entry(oentry);
             }
             uvm_map_lock_entry(entry, RW_WRITER);
             oentry = entry;

cvs diff -r1.164 -r1.165 src/sys/uvm/uvm_km.c

--- src/sys/uvm/uvm_km.c 2023/02/26 07:27:14 1.164
+++ src/sys/uvm/uvm_km.c 2023/04/09 09:00:56 1.165
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_km.c,v 1.164 2023/02/26 07:27:14 skrll Exp $ */
+/* $NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -142,27 +142,27 @@
  * kmem_meta_arena:
  *    Imports from kmem_va_meta_arena.  Allocations from this arena are
  *    backed with the pages.
  *
  * Arena stacking:
  *
  *    kmem_arena
  *        kmem_va_arena
  *            kmem_va_meta_arena
  *                kmem_meta_arena
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.164 2023/02/26 07:27:14 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 
 #include "opt_kmempages.h"
 
 #ifndef NKMEMPAGES
 #define NKMEMPAGES 0
 #endif
 
 /*
  * Defaults for lower and upper-bounds for the kmem_arena page count.
  * Can be overridden by kernel config options.
  */
@@ -527,27 +527,28 @@ uvm_km_pgremove_intrsafe(struct vm_map *
             va += PAGE_SIZE) {
             if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
                 continue;
             }
             i++;
         }
         npgrm = i;
         /* now remove the mappings */
         pmap_kremove(batch_vastart, va - batch_vastart);
         /* and free the pages */
         for (i = 0; i < npgrm; i++) {
             pg = PHYS_TO_VM_PAGE(pa[i]);
             KASSERT(pg);
-            KASSERT(pg->uobject == NULL && pg->uanon == NULL);
+            KASSERT(pg->uobject == NULL);
+            KASSERT(pg->uanon == NULL);
             KASSERT((pg->flags & PG_BUSY) == 0);
             uvm_pagefree(pg);
         }
     }
 #undef __PGRM_BATCH
 }
 
 #if defined(DEBUG)
 void
 uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
 {
     vaddr_t va;
     UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

cvs diff -r1.251 -r1.252 src/sys/uvm/uvm_page.c

--- src/sys/uvm/uvm_page.c 2022/10/26 23:38:09 1.251
+++ src/sys/uvm/uvm_page.c 2023/04/09 09:00:56 1.252
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_page.c,v 1.251 2022/10/26 23:38:09 riastradh Exp $ */
+/* $NetBSD: uvm_page.c,v 1.252 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -85,27 +85,27 @@
  *    School of Computer Science
  *    Carnegie Mellon University
  *    Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  * uvm_page.c: page ops.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.251 2022/10/26 23:38:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.252 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/sched.h>
 #include <sys/kernel.h>
 #include <sys/vnode.h>
 #include <sys/proc.h>
 #include <sys/radixtree.h>
@@ -1205,27 +1205,28 @@ uvm_pagealloc_strat(struct uvm_object *o
             if (pg != NULL) {
                 goto gotit;
             }
         }
 
         /* No pages free!  Have pagedaemon free some memory. */
         splx(s);
         uvm_kick_pdaemon();
         return NULL;
 
     case UVM_PGA_STRAT_ONLY:
     case UVM_PGA_STRAT_FALLBACK:
         /* Attempt to allocate from the specified free list. */
-        KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
+        KASSERT(free_list >= 0);
+        KASSERT(free_list < VM_NFREELIST);
         pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
         if (pg != NULL) {
             goto gotit;
         }
 
         /* Fall back, if possible. */
         if (strat == UVM_PGA_STRAT_FALLBACK) {
             strat = UVM_PGA_STRAT_NORMAL;
             goto again;
         }
 
         /* No pages free!  Have pagedaemon free some memory. */
         splx(s);
@@ -2090,38 +2091,40 @@ uvm_pagereadonly_p(struct vm_page *pg)
  * for it.  Used to avoid actually mapping the pages, pmap most likely uses direct map
  * or equivalent.
  */
 int
 uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
     int (*process)(void *, size_t, void *), void *arg)
 {
     int error = 0;
     paddr_t pa;
     size_t todo;
     voff_t pgoff = (off & PAGE_MASK);
     struct vm_page *pg;
 
-    KASSERT(npages > 0 && len > 0);
+    KASSERT(npages > 0);
+    KASSERT(len > 0);
 
     for (int i = 0; i < npages; i++) {
         pg = pgs[i];
 
         KASSERT(len > 0);
 
         /*
          * Caller is responsible for ensuring all the pages are
          * available.
          */
-        KASSERT(pg != NULL && pg != PGO_DONTCARE);
+        KASSERT(pg != NULL);
+        KASSERT(pg != PGO_DONTCARE);
 
         pa = VM_PAGE_TO_PHYS(pg);
         todo = MIN(len, PAGE_SIZE - pgoff);
 
         error = pmap_direct_process(pa, pgoff, todo, process, arg);
         if (error)
             break;
 
         pgoff = 0;
         len -= todo;
     }
 
     KASSERTMSG(error != 0 || len == 0, "len %lu != 0 for non-error", len);

cvs diff -r1.17 -r1.18 src/sys/uvm/uvm_physseg.c

--- src/sys/uvm/uvm_physseg.c 2020/07/15 15:08:26 1.17
+++ src/sys/uvm/uvm_physseg.c 2023/04/09 09:00:56 1.18
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_physseg.c,v 1.17 2020/07/15 15:08:26 rin Exp $ */
+/* $NetBSD: uvm_physseg.c,v 1.18 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -163,27 +163,28 @@ uvm_physseg_alloc(size_t sz)
      * During boot time, we only support allocating vm_physseg
      * entries from the static array.
      * We need to assert for this.
      */
 
     if (__predict_false(uvm.page_init_done == false)) {
         if (sz % sizeof(struct uvm_physseg))
             panic("%s: tried to alloc size other than multiple"
                 " of struct uvm_physseg at boot\n", __func__);
 
         size_t n = sz / sizeof(struct uvm_physseg);
         nseg += n;
 
-        KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);
+        KASSERT(nseg > 0);
+        KASSERT(nseg <= VM_PHYSSEG_MAX);
 
         return &uvm_physseg[nseg - n];
     }
 
     return kmem_zalloc(sz, KM_NOSLEEP);
 }
 
 static void
 uvm_physseg_free(void *p, size_t sz)
 {
     /*
      * This is a bit tricky. We do allow simulation of free()
      * during boot (for eg: when MD code is "steal"ing memory,
@@ -1002,42 +1003,44 @@ uvm_physseg_get_avail_start(uvm_physseg_
     return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
 }
 
 #if defined(UVM_PHYSSEG_LEGACY)
 void
 uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
 {
     struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
 
 #if defined(DIAGNOSTIC)
     paddr_t avail_end;
     avail_end = uvm_physseg_get_avail_end(upm);
     KASSERT(uvm_physseg_valid_p(upm));
-    KASSERT(avail_start < avail_end && avail_start >= ps->start);
+    KASSERT(avail_start < avail_end);
+    KASSERT(avail_start >= ps->start);
 #endif
 
     ps->avail_start = avail_start;
 }
 
 void
 uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
 {
     struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
 
 #if defined(DIAGNOSTIC)
     paddr_t avail_start;
     avail_start = uvm_physseg_get_avail_start(upm);
     KASSERT(uvm_physseg_valid_p(upm));
-    KASSERT(avail_end > avail_start && avail_end <= ps->end);
+    KASSERT(avail_end > avail_start);
+    KASSERT(avail_end <= ps->end);
 #endif
 
     ps->avail_end = avail_end;
 }
 
 #endif /* UVM_PHYSSEG_LEGACY */
 
 paddr_t
 uvm_physseg_get_avail_end(uvm_physseg_t upm)
 {
     if (uvm_physseg_valid_p(upm) == false)
         return (paddr_t) -1;
 
@@ -1083,27 +1086,28 @@ uvm_physseg_set_start_hint(uvm_physseg_t
     HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
     return true;
 }
 
 void
 uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
 {
     psize_t i;
     psize_t n;
     paddr_t paddr;
     struct uvm_physseg *seg;
     struct vm_page *pg;
 
-    KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
+    KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
+    KASSERT(pgs != NULL);
 
     seg = HANDLE_TO_PHYSSEG_NODE(upm);
     KASSERT(seg != NULL);
     KASSERT(seg->pgs == NULL);
 
     n = seg->end - seg->start;
     seg->pgs = pgs;
 
     /* init and free vm_pages (we've already zeroed them) */
     paddr = ctob(seg->start);
     for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
         pg = &seg->pgs[i];
         pg->phys_addr = paddr;

cvs diff -r1.207 -r1.208 src/sys/uvm/uvm_swap.c

--- src/sys/uvm/uvm_swap.c 2022/12/21 02:28:06 1.207
+++ src/sys/uvm/uvm_swap.c 2023/04/09 09:00:56 1.208
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_swap.c,v 1.207 2022/12/21 02:28:06 chs Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.208 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -20,27 +20,27 @@
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
  * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.207 2022/12/21 02:28:06 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.208 2023/04/09 09:00:56 riastradh Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_compat_netbsd.h"
 #include "opt_ddb.h"
 #include "opt_vmswap.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/atomic.h>
 #include <sys/buf.h>
 #include <sys/bufq.h>
 #include <sys/conf.h>
 #include <sys/cprng.h>
@@ -1863,27 +1863,28 @@ uvm_swap_io(struct vm_page **pps, int st
     bool write, async, swap_encrypt;
     UVMHIST_FUNC(__func__);
     UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%#jx",
         startslot, npages, flags, 0);
 
     write = (flags & B_READ) == 0;
     async = (flags & B_ASYNC) != 0;
     swap_encrypt = atomic_load_relaxed(&uvm_swap_encrypt);
 
     /*
      * allocate a buf for the i/o.
      */
 
-    KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async));
+    KASSERT(curlwp != uvm.pagedaemon_lwp || write);
+    KASSERT(curlwp != uvm.pagedaemon_lwp || async);
     bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp);
     if (bp == NULL) {
         uvm_aio_aiodone_pages(pps, npages, true, ENOMEM);
         return ENOMEM;
     }
 
     /*
      * convert starting drum slot to block number
      */
 
     startblk = btodb((uint64_t)startslot << PAGE_SHIFT);
 
     /*
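
Note the uvm_swap_io() case above: when the conjunction sits under a
disjunction, the split distributes the outer condition across both
conjuncts, which is logically equivalent, since P || (A && B) is
(P || A) && (P || B):

    KASSERT(curlwp != uvm.pagedaemon_lwp || write);
    KASSERT(curlwp != uvm.pagedaemon_lwp || async);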

cvs diff -r1.118 -r1.119 src/sys/uvm/uvm_vnode.c

--- src/sys/uvm/uvm_vnode.c 2021/03/13 15:29:55 1.118
+++ src/sys/uvm/uvm_vnode.c 2023/04/09 09:00:56 1.119
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993
  *      The Regents of the University of California.
  * Copyright (c) 1990 University of Utah.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * the Systems Programming Group of the University of Utah Computer
  * Science Department.
  *
@@ -35,27 +35,27 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *    @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
  * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
  */
 
 /*
  * uvm_vnode.c: the vnode pager.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
 #endif
 
 #include <sys/atomic.h>
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/vnode.h>
 #include <sys/disklabel.h>
 #include <sys/ioctl.h>
 #include <sys/fcntl.h>
@@ -439,54 +439,56 @@ uvm_vnp_setsize(struct vnode *vp, voff_t
     voff_t pgend = round_page(newsize);
     voff_t oldsize;
     UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
     rw_enter(uobj->vmobjlock, RW_WRITER);
     UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx",
         (uintptr_t)vp, vp->v_size, newsize, 0);
 
     /*
      * now check if the size has changed: if we shrink we had better
      * toss some pages...
      */
 
-    KASSERT(newsize != VSIZENOTSET && newsize >= 0);
+    KASSERT(newsize != VSIZENOTSET);
+    KASSERT(newsize >= 0);
     KASSERT(vp->v_size <= vp->v_writesize);
     KASSERT(vp->v_size == vp->v_writesize ||
         newsize == vp->v_writesize || newsize <= vp->v_size);
 
     oldsize = vp->v_writesize;
 
     /*
      * check whether size shrinks
      * if old size hasn't been set, there are no pages to drop
      * if there was an integer overflow in pgend, then this is no shrink
      */
     if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
         (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
         rw_enter(uobj->vmobjlock, RW_WRITER);
     }
     mutex_enter(vp->v_interlock);
     vp->v_size = vp->v_writesize = newsize;
     mutex_exit(vp->v_interlock);
     rw_exit(uobj->vmobjlock);
 }
 
 void
 uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
 {
 
     rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
-    KASSERT(newsize != VSIZENOTSET && newsize >= 0);
+    KASSERT(newsize != VSIZENOTSET);
+    KASSERT(newsize >= 0);
     KASSERT(vp->v_size != VSIZENOTSET);
     KASSERT(vp->v_writesize != VSIZENOTSET);
     KASSERT(vp->v_size <= vp->v_writesize);
     KASSERT(vp->v_size <= newsize);
     mutex_enter(vp->v_interlock);
     vp->v_writesize = newsize;
     mutex_exit(vp->v_interlock);
     rw_exit(vp->v_uobj.vmobjlock);
 }
 
 bool
 uvn_text_p(struct uvm_object *uobj)
 {