Consistently use %#jx instead of 0x%jx or just %jx in UVMHIST_LOG formats
(skrll)
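The '#' in %#jx is the standard printf(3) alternate-form flag: for x
conversions it prefixes non-zero values with "0x", so a single conversion
replaces the hand-written 0x%jx and makes bare %jx output recognisably
hexadecimal.  The one behavioural difference is zero, which the alternate
form prints without a prefix.  A minimal userland sketch of the three
spellings (plain printf(3); UVMHIST_LOG formats use the same conversion
syntax, and "va" is just an illustrative value):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uintmax_t va = (uintmax_t)0x7f7ff7ec0000;

		printf("%#jx\n", va);			/* 0x7f7ff7ec0000 */
		printf("0x%jx\n", va);			/* 0x7f7ff7ec0000 too */
		printf("%jx\n", va);			/* 7f7ff7ec0000 - no hex hint */
		printf("%#jx\n", (uintmax_t)0);		/* 0 - no 0x for zero */
		printf("0x%jx\n", (uintmax_t)0);	/* 0x0 */
		return 0;
	}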
diff -r1.125 -r1.126 src/sys/uvm/uvm_amap.c
--- src/sys/uvm/uvm_amap.c	2020/09/21 18:41:59	1.125
+++ src/sys/uvm/uvm_amap.c	2021/03/13 15:29:55	1.126
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_amap.c,v 1.125 2020/09/21 18:41:59 chs Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -25,27 +25,27 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * uvm_amap.c: amap operations
  */
 
 /*
  * this file contains functions that perform operations on amaps.  see
  * uvm_amap.h for a brief explanation of the role of amaps in uvm.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.125 2020/09/21 18:41:59 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_swap.h>
 
@@ -817,27 +817,27 @@ amap_wipeout(struct vm_amap *amap)
  */
 
 void
 amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
     vaddr_t startva, vaddr_t endva)
 {
 	const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
 	struct vm_amap *amap, *srcamap;
 	u_int slots, lcv;
 	krwlock_t *oldlock;
 	vsize_t len;
 
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(maphist, " (map=%#j, entry=%#j, flags=%jd)",
+	UVMHIST_CALLARGS(maphist, " (map=%#jx, entry=%#jx, flags=%#jx)",
 	    (uintptr_t)map, (uintptr_t)entry, flags, -2);
 
 	KASSERT(map != kernel_map);	/* we use nointr pool */
 
 	srcamap = entry->aref.ar_amap;
 	len = entry->end - entry->start;
 
 	/*
 	 * Is there an amap to copy?  If not, create one.
 	 */
 
 	if (srcamap == NULL) {
 		const bool canchunk = (flags & AMAP_COPY_NOCHUNK) == 0;
@@ -893,27 +893,27 @@ amap_copy(struct vm_map *map, struct vm_
 	 * of copying it.  Note that we are reading am_ref without lock held
 	 * as the value value can only be one if we have the only reference
 	 * to the amap (via our locked map).  If the value is greater than
 	 * one, then allocate amap and re-check the value.
 	 */
 
 	if (srcamap->am_ref == 1) {
 		entry->etype &= ~UVM_ET_NEEDSCOPY;
 		UVMHIST_LOG(maphist, "<- done [ref cnt = 1, took it over]",
 		    0, 0, 0, 0);
 		return;
 	}
 
-	UVMHIST_LOG(maphist," amap=%#j, ref=%jd, must copy it",
+	UVMHIST_LOG(maphist," amap=%#jx, ref=%jd, must copy it",
 	    (uintptr_t)srcamap, srcamap->am_ref, 0, 0);
 
 	/*
 	 * Allocate a new amap (note: not initialised, etc).
 	 */
 
 	AMAP_B2SLOT(slots, len);
 	amap = amap_alloc1(slots, 0, waitf);
 	if (amap == NULL) {
 		UVMHIST_LOG(maphist, "  amap_alloc1 failed", 0,0,0,0);
 		return;
 	}
 
diff -r1.152 -r1.153 src/sys/uvm/uvm_aobj.c
--- src/sys/uvm/uvm_aobj.c	2020/11/04 01:30:19	1.152
+++ src/sys/uvm/uvm_aobj.c	2021/03/13 15:29:55	1.153
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $	*/
+/*	$NetBSD: uvm_aobj.c,v 1.153 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
  *                    Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -28,27 +28,27 @@
  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
  */
 
 /*
  * uvm_aobj.c: anonymous memory uvm_object pager
  *
  * author: Chuck Silvers <chuq@chuq.com>
  * started: Jan-1998
  *
  * - design mostly from Chuck Cranor
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.153 2021/03/13 15:29:55 skrll Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
@@ -800,27 +800,27 @@ uao_put(struct uvm_object *uobj, voff_t 
  */
 
 static int
 uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
     int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
 {
 	voff_t current_offset;
 	struct vm_page *ptmp;
 	int lcv, gotpages, maxpages, swslot, pageidx;
 	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
 	struct uvm_page_array a;
 
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
+	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
 	    (uintptr_t)uobj, offset, flags,0);
 
 	/*
 	 * the object must be locked.  it can only be a read lock when
 	 * processing a read fault with PGO_LOCKED.
 	 */
 
 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 	KASSERT(rw_lock_held(uobj->vmobjlock));
 	KASSERT(rw_write_held(uobj->vmobjlock) ||
 	    ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));
 
 	/*
diff -r1.124 -r1.125 src/sys/uvm/uvm_bio.c
--- src/sys/uvm/uvm_bio.c	2020/11/10 04:27:22	1.124
+++ src/sys/uvm/uvm_bio.c	2021/03/13 15:29:55	1.125
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_bio.c,v 1.124 2020/11/10 04:27:22 chs Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.125 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -24,27 +24,27 @@
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 /*
  * uvm_bio.c: buffered i/o object mapping cache
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.124 2020/11/10 04:27:22 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.125 2021/03/13 15:29:55 skrll Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/vnode.h>
 #include <sys/bitops.h>		/* for ilog2() */
 
 #include <uvm/uvm.h>
@@ -332,27 +332,27 @@ ubc_fault(struct uvm_faultinfo *ufi, vad
 	slot_offset = ubc_offset & (ubc_winsize - 1);
 
 	/*
 	 * some platforms cannot write to individual bytes atomically, so
 	 * software has to do read/modify/write of larger quantities instead.
 	 * this means that the access_type for "write" operations
 	 * can be VM_PROT_READ, which confuses us mightily.
 	 *
 	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
 	 */
 
 	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
-	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
+	UVMHIST_LOG(ubchist, "va %#jx ubc_offset %#jx access_type %jd",
 	    va, ubc_offset, access_type, 0);
 
 	if ((access_type & VM_PROT_WRITE) != 0) {
 #ifndef PRIxOFF		/* XXX */
 #define PRIxOFF "jx"	/* XXX */
 #endif			/* XXX */
 		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
 		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
 		    slot_offset, (intmax_t)umap->writeoff);
 		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
 		    "out of range write: slot=%#"PRIxVADDR
 		    " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
 		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
@@ -364,29 +364,29 @@ ubc_fault(struct uvm_faultinfo *ufi, vad
 	if ((access_type & VM_PROT_WRITE) == 0) {
 		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
 	} else {
 		npages = (round_page(umap->offset + umap->writeoff +
 		    umap->writelen) - (umap->offset + slot_offset))
 		    >> PAGE_SHIFT;
 		flags |= PGO_PASTEOF;
 	}
 
 again:
 	memset(pgs, 0, sizeof (pgs));
 	rw_enter(uobj->vmobjlock, RW_WRITER);
 
-	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
+	UVMHIST_LOG(ubchist, "slot_offset %#jx writeoff %#jx writelen %#jx ",
 	    slot_offset, umap->writeoff, umap->writelen, 0);
-	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
+	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset %#jx npages %jd",
 	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);
 
 	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
 	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
 	    PGO_NOTIMESTAMP);
 	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
 	    0);
 
 	if (error == EAGAIN) {
 		kpause("ubc_fault", false, hz >> 2, NULL);
 		goto again;
 	}
 	if (error) {
@@ -399,27 +399,27 @@ again:
 	 * since the "compatible alias" trick does not work on such caches.
 	 * Otherwise, we can always map the pages writable.
 	 */
 
 #ifdef PMAP_CACHE_VIVT
 	prot = VM_PROT_READ | access_type;
 #else
 	prot = VM_PROT_READ | VM_PROT_WRITE;
 #endif
 
 	va = ufi->orig_rvaddr;
 	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
 
-	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);
+	UVMHIST_LOG(ubchist, "va %#jx eva %#jx", va, eva, 0, 0);
 
 	/*
 	 * Note: normally all returned pages would have the same UVM object.
 	 * However, layered file-systems and e.g. tmpfs, may return pages
 	 * which belong to underlying UVM object.  In such case, lock is
 	 * shared amongst the objects.
 	 */
 	rw_enter(uobj->vmobjlock, RW_WRITER);
 	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
 		struct vm_page *pg;
 
 		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
 		    0, 0);
@@ -473,27 +473,27 @@ ubc_find_mapping(struct uvm_object *uobj
 /*
  * ubc_alloc: allocate a file mapping window
  */
 
 static void * __noinline
 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
     int flags, struct vm_page **pgs, int *npagesp)
 {
 	vaddr_t slot_offset, va;
 	struct ubc_map *umap;
 	voff_t umap_offset;
 	int error;
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
+	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset %#jx len %#jx",
 	    (uintptr_t)uobj, offset, *lenp, 0);
 
 	KASSERT(*lenp > 0);
 	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
 	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
 	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
 	KASSERT(*lenp > 0);
 
 	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
 again:
 	/*
 	 * The UVM object is already referenced.
 	 * Lock order: UBC object -> ubc_map::uobj.
@@ -550,27 +550,27 @@ again:
 		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
 	}
 
 	if (flags & UBC_WRITE) {
 		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
 		    "ubc_alloc: concurrent writes to uobj %p", uobj);
 		umap->writeoff = slot_offset;
 		umap->writelen = *lenp;
 	}
 
 	umap->refcount++;
 	umap->advice = advice;
 	rw_exit(ubc_object.uobj.vmobjlock);
-	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
+	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
 	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);
 
 	if (flags & UBC_FAULTBUSY) {
 		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
 		    PAGE_SIZE - 1) >> PAGE_SHIFT;
 		int gpflags =
 		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
 		    PGO_NOTIMESTAMP;
 		int i;
 		KDASSERT(flags & UBC_WRITE);
 		KASSERT(npages <= *npagesp);
 		KASSERT(umap->refcount == 1);
 
diff -r1.71 -r1.72 src/sys/uvm/uvm_device.c
--- src/sys/uvm/uvm_device.c	2020/07/09 05:57:15	1.71
+++ src/sys/uvm/uvm_device.c	2021/03/13 15:29:55	1.72
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_device.c,v 1.72 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -22,27 +22,27 @@
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
  */
 
 /*
  * uvm_device.c: the device pager.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.72 2021/03/13 15:29:55 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/conf.h>
 #include <sys/proc.h>
 #include <sys/kmem.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_device.h>
 #include <uvm/uvm_pmap.h>
 
@@ -354,27 +354,27 @@ udv_fault(struct uvm_faultinfo *ufi, vad
     int flags)
 {
 	struct vm_map_entry *entry = ufi->entry;
 	struct uvm_object *uobj = entry->object.uvm_obj;
 	struct uvm_device *udv = (struct uvm_device *)uobj;
 	vaddr_t curr_va;
 	off_t curr_offset;
 	paddr_t paddr, mdpgno;
 	u_int mmapflags;
 	int lcv, retval;
 	dev_t device;
 	vm_prot_t mapprot;
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist," flags=%jd", flags,0,0,0);
+	UVMHIST_LOG(maphist," flags=%#jx", flags,0,0,0);
 
 	/*
 	 * we do not allow device mappings to be mapped copy-on-write
 	 * so we kill any attempt to do so here.
 	 */
 
 	if (UVM_ET_ISCOPYONWRITE(entry)) {
 		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=%#jx)",
 		    entry->etype, 0,0,0);
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
 		return(EIO);
 	}
 
diff -r1.159 -r1.160 src/sys/uvm/uvm_km.c
--- src/sys/uvm/uvm_km.c	2020/07/09 05:57:15	1.159
+++ src/sys/uvm/uvm_km.c	2021/03/13 15:29:55	1.160
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.160 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -142,27 +142,27 @@
  *	kmem_meta_arena:
  *	 Imports from kmem_va_meta_arena.  Allocations from this arena are
  *	 backed with the pages.
  *
  * Arena stacking:
  *
  *	kmem_arena
  *		kmem_va_arena
  *			kmem_va_meta_arena
  *				kmem_meta_arena
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.160 2021/03/13 15:29:55 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
 #include "opt_kmempages.h"
 
 #ifndef NKMEMPAGES
 #define NKMEMPAGES 0
 #endif
 
 /*
  * Defaults for lower and upper-bounds for the kmem_arena page count.
  * Can be overridden by kernel config options.
  */
@@ -608,27 +608,27 @@ uvm_km_alloc(struct vm_map *map, vsize_t
 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
 	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
 	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
 	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
 	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
 
 	/*
 	 * setup for call
 	 */
 
 	kva = vm_map_min(map);	/* hint */
 	size = round_page(size);
 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
-	UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%jd)",
+	UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
 	    (uintptr_t)map, (uintptr_t)obj, size, flags);
 
 	/*
 	 * allocate some virtual space
 	 */
 
 	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
 	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
 	    UVM_ADV_RANDOM,
 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
 	    | UVM_KMF_COLORMATCH)))) != 0)) {
 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
diff -r1.385 -r1.386 src/sys/uvm/uvm_map.c
--- src/sys/uvm/uvm_map.c	2020/07/09 05:57:15	1.385
+++ src/sys/uvm/uvm_map.c	2021/03/13 15:29:55	1.386
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.386 2021/03/13 15:29:55 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -56,27 +56,27 @@
  *	School of Computer Science
  *	Carnegie Mellon University
  *	Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  * uvm_map.c: uvm map operations
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.386 2021/03/13 15:29:55 skrll Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pax.h"
 #include "opt_uvmhist.h"
 #include "opt_uvm.h"
 #include "opt_sysv.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/mman.h>
 #include <sys/proc.h>
 #include <sys/pool.h>
 #include <sys/kernel.h>
@@ -796,27 +796,27 @@ uvm_mapent_alloc(struct vm_map *map, int
 	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
 	    (map == kernel_map), 0, 0);
 	return me;
 }
 
 /*
  * uvm_mapent_free: free map entry
  */
 
 static void
 uvm_mapent_free(struct vm_map_entry *me)
 {
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%jd]",
+	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
 	    (uintptr_t)me, me->flags, 0, 0);
 	pool_cache_put(&uvm_map_entry_cache, me);
 }
 
 /*
  * uvm_mapent_copy: copy a map entry, preserving flags
  */
 
 static inline void
 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
 {
 
 	memcpy(dst, src, sizeof(*dst));
@@ -2105,29 +2105,29 @@ nextgap:
 			entry = entry->next;
 			if (entry == &map->header) {
 				UVMHIST_LOG(maphist, "<- failed (off end)",
 				    0,0,0,0);
 				goto notfound;
 			}
 		}
 	}
 
 found:
 	SAVE_HINT(map, map->hint, entry);
 	*result = hint;
 	UVMHIST_LOG(maphist,"<- got it!  (result=%#jx)", hint, 0,0,0);
-	KASSERTMSG( topdown || hint >= orig_hint, "hint: %jx, orig_hint: %jx",
+	KASSERTMSG( topdown || hint >= orig_hint, "hint: %#jx, orig_hint: %#jx",
 	    (uintmax_t)hint, (uintmax_t)orig_hint);
-	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %jx, orig_hint: %jx",
+	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %#jx, orig_hint: %#jx",
 	    (uintmax_t)hint, (uintmax_t)orig_hint);
 	KASSERT(entry->end <= hint);
 	KASSERT(hint + length <= entry->next->start);
 	return (entry);
 
 wraparound:
 	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
 
 	return (NULL);
 
 notfound:
 	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
 
@@ -4801,27 +4801,27 @@ do { \
  * => acquires a reference on the page's owner (uvm_object or vm_anon)
  */
 bool
 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
     struct uvm_voaddr * const voaddr)
 {
 	struct vm_map_entry *entry;
 	struct vm_anon *anon = NULL;
 	bool result = false;
 	bool exclusive = false;
 	void (*unlock_fn)(struct vm_map *);
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,va=%jx)", (uintptr_t)map, va, 0, 0);
+	UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
 
 	const vaddr_t start = trunc_page(va);
 	const vaddr_t end = round_page(va+1);
 
 lookup_again:
 	if (__predict_false(exclusive)) {
 		vm_map_lock(map);
 		unlock_fn = vm_map_unlock;
 	} else {
 		vm_map_lock_read(map);
 		unlock_fn = vm_map_unlock_read;
 	}
 
@@ -4959,27 +4959,27 @@ uvm_voaddr_acquire(struct vm_map * const
 		struct uvm_object *uobj = entry->object.uvm_obj;
 
 		KASSERT(uobj != NULL);
 		(*uobj->pgops->pgo_reference)(uobj);
 		UVM_VOADDR_SET_UOBJ(voaddr, uobj);
 		voaddr->offset = entry->offset + (va - entry->start);
 		result = true;
 	}
 
 	unlock_fn(map);
 
 	if (result) {
 		UVMHIST_LOG(maphist,
-		    "<- done OK (type=%jd,owner=#%jx,offset=%jx)",
+		    "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
 		    UVM_VOADDR_GET_TYPE(voaddr),
 		    UVM_VOADDR_GET_OBJECT(voaddr),
 		    voaddr->offset, 0);
 	} else {
 		UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
 	}
 
 	return result;
 }
 
 /*
  * uvm_voaddr_release: release the references held by the
  *	vitual object address.
--- src/sys/uvm/uvm_swap.c 2021/02/19 13:20:43 1.202
+++ src/sys/uvm/uvm_swap.c 2021/03/13 15:29:55 1.203
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: uvm_swap.c,v 1.202 2021/02/19 13:20:43 hannken Exp $ */ | 1 | /* $NetBSD: uvm_swap.c,v 1.203 2021/03/13 15:29:55 skrll Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green | 4 | * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | 7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | 8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | 9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | 10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer. | 11 | * notice, this list of conditions and the following disclaimer. | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | 12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in the | 13 | * notice, this list of conditions and the following disclaimer in the | |
14 | * documentation and/or other materials provided with the distribution. | 14 | * documentation and/or other materials provided with the distribution. | |
@@ -20,27 +20,27 @@ | @@ -20,27 +20,27 @@ | |||
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | 22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | |
23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | 23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |
24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
26 | * SUCH DAMAGE. | 26 | * SUCH DAMAGE. | |
27 | * | 27 | * | |
28 | * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp | 28 | * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp | |
29 | * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp | 29 | * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | #include <sys/cdefs.h> | 32 | #include <sys/cdefs.h> | |
33 | __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.202 2021/02/19 13:20:43 hannken Exp $"); | 33 | __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.203 2021/03/13 15:29:55 skrll Exp $"); | |
34 | 34 | |||
35 | #include "opt_uvmhist.h" | 35 | #include "opt_uvmhist.h" | |
36 | #include "opt_compat_netbsd.h" | 36 | #include "opt_compat_netbsd.h" | |
37 | #include "opt_ddb.h" | 37 | #include "opt_ddb.h" | |
38 | 38 | |||
39 | #include <sys/param.h> | 39 | #include <sys/param.h> | |
40 | #include <sys/systm.h> | 40 | #include <sys/systm.h> | |
41 | #include <sys/atomic.h> | 41 | #include <sys/atomic.h> | |
42 | #include <sys/buf.h> | 42 | #include <sys/buf.h> | |
43 | #include <sys/bufq.h> | 43 | #include <sys/bufq.h> | |
44 | #include <sys/conf.h> | 44 | #include <sys/conf.h> | |
45 | #include <sys/cprng.h> | 45 | #include <sys/cprng.h> | |
46 | #include <sys/proc.h> | 46 | #include <sys/proc.h> | |
@@ -923,27 +923,27 @@ swap_on(struct lwp *l, struct swapdev *s | @@ -923,27 +923,27 @@ swap_on(struct lwp *l, struct swapdev *s | |||
923 | } | 923 | } | |
924 | 924 | |||
925 | /* | 925 | /* | |
926 | * make sure we have enough blocks for a reasonable sized swap | 926 | * make sure we have enough blocks for a reasonable sized swap | |
927 | * area. we want at least one page. | 927 | * area. we want at least one page. | |
928 | */ | 928 | */ | |
929 | 929 | |||
930 | if (size < 1) { | 930 | if (size < 1) { | |
931 | UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0); | 931 | UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0); | |
932 | error = EINVAL; | 932 | error = EINVAL; | |
933 | goto bad; | 933 | goto bad; | |
934 | } | 934 | } | |
935 | 935 | |||
936 | UVMHIST_LOG(pdhist, " dev=%jx: size=%jd addr=%jd", dev, size, addr, 0); | 936 | UVMHIST_LOG(pdhist, " dev=%#jx: size=%jd addr=%jd", dev, size, addr, 0); | |
937 | 937 | |||
938 | /* | 938 | /* | |
939 | * now we need to allocate an extent to manage this swap device | 939 | * now we need to allocate an extent to manage this swap device | |
940 | */ | 940 | */ | |
941 | 941 | |||
942 | sdp->swd_blist = blist_create(npages); | 942 | sdp->swd_blist = blist_create(npages); | |
943 | /* mark all expect the `saved' region free. */ | 943 | /* mark all expect the `saved' region free. */ | |
944 | blist_free(sdp->swd_blist, addr, size); | 944 | blist_free(sdp->swd_blist, addr, size); | |
945 | 945 | |||
946 | /* | 946 | /* | |
947 | * allocate space to for swap encryption state and mark the | 947 | * allocate space to for swap encryption state and mark the | |
948 | * keys uninitialized so we generate them lazily | 948 | * keys uninitialized so we generate them lazily | |
949 | */ | 949 | */ | |
@@ -1042,27 +1042,27 @@ bad: | @@ -1042,27 +1042,27 @@ bad: | |||
1042 | 1042 | |||
1043 | /* | 1043 | /* | |
1044 | * swap_off: stop swapping on swapdev | 1044 | * swap_off: stop swapping on swapdev | |
1045 | * | 1045 | * | |
1046 | * => swap data should be locked, we will unlock. | 1046 | * => swap data should be locked, we will unlock. | |
1047 | */ | 1047 | */ | |
1048 | static int | 1048 | static int | |
1049 | swap_off(struct lwp *l, struct swapdev *sdp) | 1049 | swap_off(struct lwp *l, struct swapdev *sdp) | |
1050 | { | 1050 | { | |
1051 | int npages = sdp->swd_npages; | 1051 | int npages = sdp->swd_npages; | |
1052 | int error = 0; | 1052 | int error = 0; | |
1053 | 1053 | |||
1054 | UVMHIST_FUNC(__func__); | 1054 | UVMHIST_FUNC(__func__); | |
1055 | UVMHIST_CALLARGS(pdhist, " dev=%jx, npages=%jd", sdp->swd_dev,npages, 0, 0); | 1055 | UVMHIST_CALLARGS(pdhist, " dev=%#jx, npages=%jd", sdp->swd_dev,npages, 0, 0); | |
1056 | 1056 | |||
1057 | KASSERT(rw_write_held(&swap_syscall_lock)); | 1057 | KASSERT(rw_write_held(&swap_syscall_lock)); | |
1058 | KASSERT(mutex_owned(&uvm_swap_data_lock)); | 1058 | KASSERT(mutex_owned(&uvm_swap_data_lock)); | |
1059 | 1059 | |||
1060 | /* disable the swap area being removed */ | 1060 | /* disable the swap area being removed */ | |
1061 | sdp->swd_flags &= ~SWF_ENABLE; | 1061 | sdp->swd_flags &= ~SWF_ENABLE; | |
1062 | uvmexp.swpgavail -= npages; | 1062 | uvmexp.swpgavail -= npages; | |
1063 | mutex_exit(&uvm_swap_data_lock); | 1063 | mutex_exit(&uvm_swap_data_lock); | |
1064 | 1064 | |||
1065 | /* | 1065 | /* | |
1066 | * the idea is to find all the pages that are paged out to this | 1066 | * the idea is to find all the pages that are paged out to this | |
1067 | * device, and page them all in. in uvm, swap-backed pageable | 1067 | * device, and page them all in. in uvm, swap-backed pageable | |
1068 | * memory can take two forms: aobjs and anons. call the | 1068 | * memory can take two forms: aobjs and anons. call the | |
@@ -1206,27 +1206,27 @@ swstrategy(struct buf *bp) | @@ -1206,27 +1206,27 @@ swstrategy(struct buf *bp) | |||
1206 | bp->b_resid = bp->b_bcount; | 1206 | bp->b_resid = bp->b_bcount; | |
1207 | biodone(bp); | 1207 | biodone(bp); | |
1208 | UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0); | 1208 | UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0); | |
1209 | return; | 1209 | return; | |
1210 | } | 1210 | } | |
1211 | 1211 | |||
1212 | /* | 1212 | /* | |
1213 | * convert drum page number to block number on this swapdev. | 1213 | * convert drum page number to block number on this swapdev. | |
1214 | */ | 1214 | */ | |
1215 | 1215 | |||
1216 | pageno -= sdp->swd_drumoffset; /* page # on swapdev */ | 1216 | pageno -= sdp->swd_drumoffset; /* page # on swapdev */ | |
1217 | bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */ | 1217 | bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */ | |
1218 | 1218 | |||
1219 | UVMHIST_LOG(pdhist, " Rd/Wr (0/1) %jd: mapoff=%jx bn=%jx bcount=%jd", | 1219 | UVMHIST_LOG(pdhist, " Rd/Wr (0/1) %jd: mapoff=%#jx bn=%#jx bcount=%jd", | |
1220 | ((bp->b_flags & B_READ) == 0) ? 1 : 0, | 1220 | ((bp->b_flags & B_READ) == 0) ? 1 : 0, | |
1221 | sdp->swd_drumoffset, bn, bp->b_bcount); | 1221 | sdp->swd_drumoffset, bn, bp->b_bcount); | |
1222 | 1222 | |||
1223 | /* | 1223 | /* | |
1224 | * for block devices we finish up here. | 1224 | * for block devices we finish up here. | |
1225 | * for regular files we have to do more work which we delegate | 1225 | * for regular files we have to do more work which we delegate | |
1226 | * to sw_reg_strategy(). | 1226 | * to sw_reg_strategy(). | |
1227 | */ | 1227 | */ | |
1228 | 1228 | |||
1229 | vp = sdp->swd_vp; /* swapdev vnode pointer */ | 1229 | vp = sdp->swd_vp; /* swapdev vnode pointer */ | |
1230 | switch (vp->v_type) { | 1230 | switch (vp->v_type) { | |
1231 | default: | 1231 | default: | |
1232 | panic("%s: vnode type 0x%x", __func__, vp->v_type); | 1232 | panic("%s: vnode type 0x%x", __func__, vp->v_type); | |
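
The drum-to-device conversion above is easiest to check with numbers: btodb() shifts a byte count right by DEV_BSHIFT (9, i.e. 512-byte disk blocks), so with the common 4 KiB page size each drum page spans eight disk blocks. A sketch with illustrative values (swd_drumoffset = 64 is assumed, not taken from the source):

	pageno = 100 - 64;			/* drum page 100 is page 36 on this device */
	bn = btodb((uint64_t)36 << 12);		/* 36 * 4096 bytes >> 9 = disk block 288 */
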
@@ -1269,40 +1269,40 @@ swstrategy(struct buf *bp) | @@ -1269,40 +1269,40 @@ swstrategy(struct buf *bp) | |||
1269 | return; | 1269 | return; | |
1270 | } | 1270 | } | |
1271 | /* NOTREACHED */ | 1271 | /* NOTREACHED */ | |
1272 | } | 1272 | } | |
1273 | 1273 | |||
1274 | /* | 1274 | /* | |
1275 | * swread: the read function for the drum (just a call to physio) | 1275 | * swread: the read function for the drum (just a call to physio) | |
1276 | */ | 1276 | */ | |
1277 | /*ARGSUSED*/ | 1277 | /*ARGSUSED*/ | |
1278 | static int | 1278 | static int | |
1279 | swread(dev_t dev, struct uio *uio, int ioflag) | 1279 | swread(dev_t dev, struct uio *uio, int ioflag) | |
1280 | { | 1280 | { | |
1281 | UVMHIST_FUNC(__func__); | 1281 | UVMHIST_FUNC(__func__); | |
1282 | UVMHIST_CALLARGS(pdhist, " dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0); | 1282 | UVMHIST_CALLARGS(pdhist, " dev=%#jx offset=%#jx", dev, uio->uio_offset, 0, 0); | |
1283 | 1283 | |||
1284 | return (physio(swstrategy, NULL, dev, B_READ, minphys, uio)); | 1284 | return (physio(swstrategy, NULL, dev, B_READ, minphys, uio)); | |
1285 | } | 1285 | } | |
1286 | 1286 | |||
1287 | /* | 1287 | /* | |
1288 | * swwrite: the write function for the drum (just a call to physio) | 1288 | * swwrite: the write function for the drum (just a call to physio) | |
1289 | */ | 1289 | */ | |
1290 | /*ARGSUSED*/ | 1290 | /*ARGSUSED*/ | |
1291 | static int | 1291 | static int | |
1292 | swwrite(dev_t dev, struct uio *uio, int ioflag) | 1292 | swwrite(dev_t dev, struct uio *uio, int ioflag) | |
1293 | { | 1293 | { | |
1294 | UVMHIST_FUNC(__func__); | 1294 | UVMHIST_FUNC(__func__); | |
1295 | UVMHIST_CALLARGS(pdhist, " dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0); | 1295 | UVMHIST_CALLARGS(pdhist, " dev=%#jx offset=%#jx", dev, uio->uio_offset, 0, 0); | |
1296 | 1296 | |||
1297 | return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio)); | 1297 | return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio)); | |
1298 | } | 1298 | } | |
1299 | 1299 | |||
1300 | const struct bdevsw swap_bdevsw = { | 1300 | const struct bdevsw swap_bdevsw = { | |
1301 | .d_open = nullopen, | 1301 | .d_open = nullopen, | |
1302 | .d_close = nullclose, | 1302 | .d_close = nullclose, | |
1303 | .d_strategy = swstrategy, | 1303 | .d_strategy = swstrategy, | |
1304 | .d_ioctl = noioctl, | 1304 | .d_ioctl = noioctl, | |
1305 | .d_dump = nodump, | 1305 | .d_dump = nodump, | |
1306 | .d_psize = nosize, | 1306 | .d_psize = nosize, | |
1307 | .d_discard = nodiscard, | 1307 | .d_discard = nodiscard, | |
1308 | .d_flag = D_OTHER | 1308 | .d_flag = D_OTHER | |
@@ -1398,27 +1398,27 @@ sw_reg_strategy(struct swapdev *sdp, str | @@ -1398,27 +1398,27 @@ sw_reg_strategy(struct swapdev *sdp, str | |||
1398 | vnx->vx_error = error; /* pass error up */ | 1398 | vnx->vx_error = error; /* pass error up */ | |
1399 | goto out; | 1399 | goto out; | |
1400 | } | 1400 | } | |
1401 | 1401 | |||
1402 | /* | 1402 | /* | |
1403 | * compute the size ("sz") of this transfer (in bytes). | 1403 | * compute the size ("sz") of this transfer (in bytes). | |
1404 | */ | 1404 | */ | |
1405 | off = byteoff % sdp->swd_bsize; | 1405 | off = byteoff % sdp->swd_bsize; | |
1406 | sz = (1 + nra) * sdp->swd_bsize - off; | 1406 | sz = (1 + nra) * sdp->swd_bsize - off; | |
1407 | if (sz > resid) | 1407 | if (sz > resid) | |
1408 | sz = resid; | 1408 | sz = resid; | |
1409 | 1409 | |||
1410 | UVMHIST_LOG(pdhist, "sw_reg_strategy: " | 1410 | UVMHIST_LOG(pdhist, "sw_reg_strategy: " | |
1411 | "vp %#jx/%#jx offset 0x%jx/0x%jx", | 1411 | "vp %#jx/%#jx offset %#jx/%#jx", | |
1412 | (uintptr_t)sdp->swd_vp, (uintptr_t)vp, byteoff, nbn); | 1412 | (uintptr_t)sdp->swd_vp, (uintptr_t)vp, byteoff, nbn); | |
1413 | 1413 | |||
1414 | /* | 1414 | /* | |
1415 | * now get a buf structure. note that the vb_buf is | 1415 | * now get a buf structure. note that the vb_buf is | |
1416 | * at the front of the nbp structure so that you can | 1416 | * at the front of the nbp structure so that you can | |
1417 | 			 * cast pointers between the two structures easily. | 1417 | 			 * cast pointers between the two structures easily. | |
1418 | */ | 1418 | */ | |
1419 | nbp = pool_get(&vndbuf_pool, PR_WAITOK); | 1419 | nbp = pool_get(&vndbuf_pool, PR_WAITOK); | |
1420 | buf_init(&nbp->vb_buf); | 1420 | buf_init(&nbp->vb_buf); | |
1421 | nbp->vb_buf.b_flags = bp->b_flags; | 1421 | nbp->vb_buf.b_flags = bp->b_flags; | |
1422 | nbp->vb_buf.b_cflags = bp->b_cflags; | 1422 | nbp->vb_buf.b_cflags = bp->b_cflags; | |
1423 | nbp->vb_buf.b_oflags = bp->b_oflags; | 1423 | nbp->vb_buf.b_oflags = bp->b_oflags; | |
1424 | nbp->vb_buf.b_bcount = sz; | 1424 | nbp->vb_buf.b_bcount = sz; | |
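
The sz computation above caps each child transfer at the contiguous run the filesystem reported: the bmap lookup just before this hunk said nra further blocks follow the one containing byteoff, so (1 + nra) * swd_bsize - off is how far the transfer can go without a seek, clipped to what the caller still wants. With illustrative values:

	/* assume swd_bsize = 8192, byteoff = 12288, nra = 2 */
	off = 12288 % 8192;			/* 4096: offset within the current fs block */
	sz = (1 + 2) * 8192 - 4096;		/* 20480 contiguous bytes from byteoff */
	if (sz > resid)
		sz = resid;			/* but never more than remains of the request */
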
@@ -1489,27 +1489,27 @@ sw_reg_start(struct swapdev *sdp) | @@ -1489,27 +1489,27 @@ sw_reg_start(struct swapdev *sdp) | |||
1489 | /* recursion control */ | 1489 | /* recursion control */ | |
1490 | if ((sdp->swd_flags & SWF_BUSY) != 0) | 1490 | if ((sdp->swd_flags & SWF_BUSY) != 0) | |
1491 | return; | 1491 | return; | |
1492 | 1492 | |||
1493 | sdp->swd_flags |= SWF_BUSY; | 1493 | sdp->swd_flags |= SWF_BUSY; | |
1494 | 1494 | |||
1495 | while (sdp->swd_active < sdp->swd_maxactive) { | 1495 | while (sdp->swd_active < sdp->swd_maxactive) { | |
1496 | bp = bufq_get(sdp->swd_tab); | 1496 | bp = bufq_get(sdp->swd_tab); | |
1497 | if (bp == NULL) | 1497 | if (bp == NULL) | |
1498 | break; | 1498 | break; | |
1499 | sdp->swd_active++; | 1499 | sdp->swd_active++; | |
1500 | 1500 | |||
1501 | UVMHIST_LOG(pdhist, | 1501 | UVMHIST_LOG(pdhist, | |
1502 | "sw_reg_start: bp %#jx vp %#jx blkno %#jx cnt %jx", | 1502 | "sw_reg_start: bp %#jx vp %#jx blkno %#jx cnt %#jx", | |
1503 | (uintptr_t)bp, (uintptr_t)bp->b_vp, (uintptr_t)bp->b_blkno, | 1503 | (uintptr_t)bp, (uintptr_t)bp->b_vp, (uintptr_t)bp->b_blkno, | |
1504 | bp->b_bcount); | 1504 | bp->b_bcount); | |
1505 | vp = bp->b_vp; | 1505 | vp = bp->b_vp; | |
1506 | KASSERT(bp->b_objlock == vp->v_interlock); | 1506 | KASSERT(bp->b_objlock == vp->v_interlock); | |
1507 | if ((bp->b_flags & B_READ) == 0) { | 1507 | if ((bp->b_flags & B_READ) == 0) { | |
1508 | mutex_enter(vp->v_interlock); | 1508 | mutex_enter(vp->v_interlock); | |
1509 | vp->v_numoutput++; | 1509 | vp->v_numoutput++; | |
1510 | mutex_exit(vp->v_interlock); | 1510 | mutex_exit(vp->v_interlock); | |
1511 | } | 1511 | } | |
1512 | VOP_STRATEGY(vp, bp); | 1512 | VOP_STRATEGY(vp, bp); | |
1513 | } | 1513 | } | |
1514 | sdp->swd_flags &= ~SWF_BUSY; | 1514 | sdp->swd_flags &= ~SWF_BUSY; | |
1515 | } | 1515 | } | |
@@ -1528,30 +1528,30 @@ sw_reg_biodone(struct buf *bp) | @@ -1528,30 +1528,30 @@ sw_reg_biodone(struct buf *bp) | |||
1528 | * | 1528 | * | |
1529 | * => note that we can recover the vndbuf struct by casting the buf ptr | 1529 | * => note that we can recover the vndbuf struct by casting the buf ptr | |
1530 | */ | 1530 | */ | |
1531 | static void | 1531 | static void | |
1532 | sw_reg_iodone(struct work *wk, void *dummy) | 1532 | sw_reg_iodone(struct work *wk, void *dummy) | |
1533 | { | 1533 | { | |
1534 | struct vndbuf *vbp = (void *)wk; | 1534 | struct vndbuf *vbp = (void *)wk; | |
1535 | struct vndxfer *vnx = vbp->vb_xfer; | 1535 | struct vndxfer *vnx = vbp->vb_xfer; | |
1536 | struct buf *pbp = vnx->vx_bp; /* parent buffer */ | 1536 | struct buf *pbp = vnx->vx_bp; /* parent buffer */ | |
1537 | struct swapdev *sdp = vnx->vx_sdp; | 1537 | struct swapdev *sdp = vnx->vx_sdp; | |
1538 | int s, resid, error; | 1538 | int s, resid, error; | |
1539 | KASSERT(&vbp->vb_buf.b_work == wk); | 1539 | KASSERT(&vbp->vb_buf.b_work == wk); | |
1540 | UVMHIST_FUNC(__func__); | 1540 | UVMHIST_FUNC(__func__); | |
1541 | UVMHIST_CALLARGS(pdhist, " vbp=%#jx vp=%#jx blkno=%jx addr=%#jx", | 1541 | UVMHIST_CALLARGS(pdhist, " vbp=%#jx vp=%#jx blkno=%#jx addr=%#jx", | |
1542 | (uintptr_t)vbp, (uintptr_t)vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, | 1542 | (uintptr_t)vbp, (uintptr_t)vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, | |
1543 | (uintptr_t)vbp->vb_buf.b_data); | 1543 | (uintptr_t)vbp->vb_buf.b_data); | |
1544 | UVMHIST_LOG(pdhist, " cnt=%jx resid=%jx", | 1544 | UVMHIST_LOG(pdhist, " cnt=%#jx resid=%#jx", | |
1545 | vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0); | 1545 | vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0); | |
1546 | 1546 | |||
1547 | /* | 1547 | /* | |
1548 | * protect vbp at splbio and update. | 1548 | * protect vbp at splbio and update. | |
1549 | */ | 1549 | */ | |
1550 | 1550 | |||
1551 | s = splbio(); | 1551 | s = splbio(); | |
1552 | resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid; | 1552 | resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid; | |
1553 | pbp->b_resid -= resid; | 1553 | pbp->b_resid -= resid; | |
1554 | vnx->vx_pending--; | 1554 | vnx->vx_pending--; | |
1555 | 1555 | |||
1556 | if (vbp->vb_buf.b_error != 0) { | 1556 | if (vbp->vb_buf.b_error != 0) { | |
1557 | /* pass error upward */ | 1557 | /* pass error upward */ | |
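
The accounting above rolls each child completion up into the parent request: a child moved b_bcount - b_resid bytes, the parent's outstanding b_resid shrinks by that amount, and vx_pending tracks how many children remain in flight. With one child and illustrative numbers:

	/* assume the child had b_bcount = 8192 and finished with b_resid = 0 */
	resid = 8192 - 0;			/* the child moved all 8192 bytes */
	pbp->b_resid -= 8192;			/* the parent has 8192 fewer bytes pending */
	vnx->vx_pending--;			/* one fewer child transfer outstanding */
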
@@ -1826,27 +1826,27 @@ uvm_swap_get(struct vm_page *page, int s | @@ -1826,27 +1826,27 @@ uvm_swap_get(struct vm_page *page, int s | |||
1826 | /* | 1826 | /* | |
1827 | * uvm_swap_io: do an i/o operation to swap | 1827 | * uvm_swap_io: do an i/o operation to swap | |
1828 | */ | 1828 | */ | |
1829 | 1829 | |||
1830 | static int | 1830 | static int | |
1831 | uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags) | 1831 | uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags) | |
1832 | { | 1832 | { | |
1833 | daddr_t startblk; | 1833 | daddr_t startblk; | |
1834 | struct buf *bp; | 1834 | struct buf *bp; | |
1835 | vaddr_t kva; | 1835 | vaddr_t kva; | |
1836 | int error, mapinflags; | 1836 | int error, mapinflags; | |
1837 | bool write, async, swap_encrypt; | 1837 | bool write, async, swap_encrypt; | |
1838 | UVMHIST_FUNC(__func__); | 1838 | UVMHIST_FUNC(__func__); | |
1839 | UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%jd", | 1839 | UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%#jx", | |
1840 | startslot, npages, flags, 0); | 1840 | startslot, npages, flags, 0); | |
1841 | 1841 | |||
1842 | write = (flags & B_READ) == 0; | 1842 | write = (flags & B_READ) == 0; | |
1843 | async = (flags & B_ASYNC) != 0; | 1843 | async = (flags & B_ASYNC) != 0; | |
1844 | swap_encrypt = atomic_load_relaxed(&uvm_swap_encrypt); | 1844 | swap_encrypt = atomic_load_relaxed(&uvm_swap_encrypt); | |
1845 | 1845 | |||
1846 | /* | 1846 | /* | |
1847 | * allocate a buf for the i/o. | 1847 | * allocate a buf for the i/o. | |
1848 | */ | 1848 | */ | |
1849 | 1849 | |||
1850 | KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async)); | 1850 | KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async)); | |
1851 | bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp); | 1851 | bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp); | |
1852 | if (bp == NULL) { | 1852 | if (bp == NULL) { | |
@@ -1957,27 +1957,27 @@ uvm_swap_io(struct vm_page **pps, int st | @@ -1957,27 +1957,27 @@ uvm_swap_io(struct vm_page **pps, int st | |||
1957 | 1957 | |||
1958 | if (async) { | 1958 | if (async) { | |
1959 | bp->b_iodone = uvm_aio_aiodone; | 1959 | bp->b_iodone = uvm_aio_aiodone; | |
1960 | UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0); | 1960 | UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0); | |
1961 | if (curlwp == uvm.pagedaemon_lwp) | 1961 | if (curlwp == uvm.pagedaemon_lwp) | |
1962 | BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); | 1962 | BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); | |
1963 | else | 1963 | else | |
1964 | BIO_SETPRIO(bp, BPRIO_TIMELIMITED); | 1964 | BIO_SETPRIO(bp, BPRIO_TIMELIMITED); | |
1965 | } else { | 1965 | } else { | |
1966 | bp->b_iodone = NULL; | 1966 | bp->b_iodone = NULL; | |
1967 | BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); | 1967 | BIO_SETPRIO(bp, BPRIO_TIMECRITICAL); | |
1968 | } | 1968 | } | |
1969 | UVMHIST_LOG(pdhist, | 1969 | UVMHIST_LOG(pdhist, | |
1970 | "about to start io: data = %#jx blkno = 0x%jx, bcount = %jd", | 1970 | "about to start io: data = %#jx blkno = %#jx, bcount = %jd", | |
1971 | (uintptr_t)bp->b_data, bp->b_blkno, bp->b_bcount, 0); | 1971 | (uintptr_t)bp->b_data, bp->b_blkno, bp->b_bcount, 0); | |
1972 | 1972 | |||
1973 | /* | 1973 | /* | |
1974 | * now we start the I/O, and if async, return. | 1974 | * now we start the I/O, and if async, return. | |
1975 | */ | 1975 | */ | |
1976 | 1976 | |||
1977 | VOP_STRATEGY(swapdev_vp, bp); | 1977 | VOP_STRATEGY(swapdev_vp, bp); | |
1978 | if (async) { | 1978 | if (async) { | |
1979 | /* | 1979 | /* | |
1980 | * Reads are always synchronous; if this changes, we | 1980 | * Reads are always synchronous; if this changes, we | |
1981 | * need to add an asynchronous path for decryption. | 1981 | * need to add an asynchronous path for decryption. | |
1982 | */ | 1982 | */ | |
1983 | KASSERT(write); | 1983 | KASSERT(write); |
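
The format-string changes in the hunks above are output-neutral in all but one corner case: with the # flag, C's alternate form prefixes 0x only to nonzero values, whereas a literal 0x%jx always prints the prefix. A small userland sketch of the three spellings, assuming the history formatter follows printf(3) semantics:

	#include <inttypes.h>
	#include <stdio.h>

	int
	main(void)
	{
		uintmax_t dev = 0x1902, zero = 0;

		printf("%jx %#jx 0x%jx\n", dev, dev, dev);	/* 1902 0x1902 0x1902 */
		printf("%jx %#jx 0x%jx\n", zero, zero, zero);	/* 0 0 0x0 */
		return 0;
	}
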
--- src/sys/uvm/uvm_vnode.c 2020/08/16 00:24:41 1.117
+++ src/sys/uvm/uvm_vnode.c 2021/03/13 15:29:55 1.118
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $ */ | 1 | /* $NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. | 4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. | |
5 | * Copyright (c) 1991, 1993 | 5 | * Copyright (c) 1991, 1993 | |
6 | * The Regents of the University of California. | 6 | * The Regents of the University of California. | |
7 | * Copyright (c) 1990 University of Utah. | 7 | * Copyright (c) 1990 University of Utah. | |
8 | * | 8 | * | |
9 | * All rights reserved. | 9 | * All rights reserved. | |
10 | * | 10 | * | |
11 | * This code is derived from software contributed to Berkeley by | 11 | * This code is derived from software contributed to Berkeley by | |
12 | * the Systems Programming Group of the University of Utah Computer | 12 | * the Systems Programming Group of the University of Utah Computer | |
13 | * Science Department. | 13 | * Science Department. | |
14 | * | 14 | * | |
@@ -35,27 +35,27 @@ | @@ -35,27 +35,27 @@ | |||
35 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 35 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
36 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 36 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
37 | * SUCH DAMAGE. | 37 | * SUCH DAMAGE. | |
38 | * | 38 | * | |
39 | * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 | 39 | * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 | |
40 | * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp | 40 | * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp | |
41 | */ | 41 | */ | |
42 | 42 | |||
43 | /* | 43 | /* | |
44 | * uvm_vnode.c: the vnode pager. | 44 | * uvm_vnode.c: the vnode pager. | |
45 | */ | 45 | */ | |
46 | 46 | |||
47 | #include <sys/cdefs.h> | 47 | #include <sys/cdefs.h> | |
48 | __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $"); | 48 | __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $"); | |
49 | 49 | |||
50 | #ifdef _KERNEL_OPT | 50 | #ifdef _KERNEL_OPT | |
51 | #include "opt_uvmhist.h" | 51 | #include "opt_uvmhist.h" | |
52 | #endif | 52 | #endif | |
53 | 53 | |||
54 | #include <sys/atomic.h> | 54 | #include <sys/atomic.h> | |
55 | #include <sys/param.h> | 55 | #include <sys/param.h> | |
56 | #include <sys/systm.h> | 56 | #include <sys/systm.h> | |
57 | #include <sys/kernel.h> | 57 | #include <sys/kernel.h> | |
58 | #include <sys/vnode.h> | 58 | #include <sys/vnode.h> | |
59 | #include <sys/disklabel.h> | 59 | #include <sys/disklabel.h> | |
60 | #include <sys/ioctl.h> | 60 | #include <sys/ioctl.h> | |
61 | #include <sys/fcntl.h> | 61 | #include <sys/fcntl.h> | |
@@ -166,27 +166,27 @@ uvn_put(struct uvm_object *uobj, voff_t | @@ -166,27 +166,27 @@ uvn_put(struct uvm_object *uobj, voff_t | |||
166 | * => NOTE: caller must check for released pages!! | 166 | * => NOTE: caller must check for released pages!! | |
167 | */ | 167 | */ | |
168 | 168 | |||
169 | static int | 169 | static int | |
170 | uvn_get(struct uvm_object *uobj, voff_t offset, | 170 | uvn_get(struct uvm_object *uobj, voff_t offset, | |
171 | struct vm_page **pps /* IN/OUT */, | 171 | struct vm_page **pps /* IN/OUT */, | |
172 | int *npagesp /* IN (OUT if PGO_LOCKED)*/, | 172 | int *npagesp /* IN (OUT if PGO_LOCKED)*/, | |
173 | int centeridx, vm_prot_t access_type, int advice, int flags) | 173 | int centeridx, vm_prot_t access_type, int advice, int flags) | |
174 | { | 174 | { | |
175 | struct vnode *vp = (struct vnode *)uobj; | 175 | struct vnode *vp = (struct vnode *)uobj; | |
176 | int error; | 176 | int error; | |
177 | 177 | |||
178 | UVMHIST_FUNC(__func__); | 178 | UVMHIST_FUNC(__func__); | |
179 | UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset, | 179 | UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset, | |
180 | 0, 0); | 180 | 0, 0); | |
181 | 181 | |||
182 | if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 | 182 | if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 | |
183 | && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) { | 183 | && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) { | |
184 | uvn_alloc_ractx(uobj); | 184 | uvn_alloc_ractx(uobj); | |
185 | uvm_ra_request(vp->v_ractx, advice, uobj, offset, | 185 | uvm_ra_request(vp->v_ractx, advice, uobj, offset, | |
186 | *npagesp << PAGE_SHIFT); | 186 | *npagesp << PAGE_SHIFT); | |
187 | } | 187 | } | |
188 | 188 | |||
189 | error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, | 189 | error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, | |
190 | access_type, advice, flags); | 190 | access_type, advice, flags); | |
191 | 191 | |||
192 | KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) || | 192 | KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) || | |
@@ -280,27 +280,27 @@ uvn_findpages(struct uvm_object *uobj, v | @@ -280,27 +280,27 @@ uvn_findpages(struct uvm_object *uobj, v | |||
280 | /* | 280 | /* | |
281 | * uvn_findpage: find a single page | 281 | * uvn_findpage: find a single page | |
282 | * | 282 | * | |
283 | * if a suitable page was found, put it in *pgp and return 1. | 283 | * if a suitable page was found, put it in *pgp and return 1. | |
284 | * otherwise return 0. | 284 | * otherwise return 0. | |
285 | */ | 285 | */ | |
286 | 286 | |||
287 | static int | 287 | static int | |
288 | uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, | 288 | uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, | |
289 | unsigned int flags, struct uvm_page_array *a, unsigned int nleft) | 289 | unsigned int flags, struct uvm_page_array *a, unsigned int nleft) | |
290 | { | 290 | { | |
291 | struct vm_page *pg; | 291 | struct vm_page *pg; | |
292 | UVMHIST_FUNC(__func__); | 292 | UVMHIST_FUNC(__func__); | |
293 | UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset, | 293 | UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset, | |
294 | 0, 0); | 294 | 0, 0); | |
295 | 295 | |||
296 | /* | 296 | /* | |
297 | * NOBUSY must come with NOWAIT and NOALLOC. if NOBUSY is | 297 | * NOBUSY must come with NOWAIT and NOALLOC. if NOBUSY is | |
298 | * specified, this may be called with a reader lock. | 298 | * specified, this may be called with a reader lock. | |
299 | */ | 299 | */ | |
300 | 300 | |||
301 | KASSERT(rw_lock_held(uobj->vmobjlock)); | 301 | KASSERT(rw_lock_held(uobj->vmobjlock)); | |
302 | KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0); | 302 | KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0); | |
303 | KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0); | 303 | KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0); | |
304 | KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock)); | 304 | KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock)); | |
305 | 305 | |||
306 | if (*pgp != NULL) { | 306 | if (*pgp != NULL) { | |
@@ -431,27 +431,27 @@ uvn_findpage(struct uvm_object *uobj, vo | @@ -431,27 +431,27 @@ uvn_findpage(struct uvm_object *uobj, vo | |||
431 | * vnode in question so that it will not be yanked out from under | 431 | * vnode in question so that it will not be yanked out from under | |
432 | * us. | 432 | * us. | |
433 | */ | 433 | */ | |
434 | 434 | |||
435 | void | 435 | void | |
436 | uvm_vnp_setsize(struct vnode *vp, voff_t newsize) | 436 | uvm_vnp_setsize(struct vnode *vp, voff_t newsize) | |
437 | { | 437 | { | |
438 | struct uvm_object *uobj = &vp->v_uobj; | 438 | struct uvm_object *uobj = &vp->v_uobj; | |
439 | voff_t pgend = round_page(newsize); | 439 | voff_t pgend = round_page(newsize); | |
440 | voff_t oldsize; | 440 | voff_t oldsize; | |
441 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist); | 441 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist); | |
442 | 442 | |||
443 | rw_enter(uobj->vmobjlock, RW_WRITER); | 443 | rw_enter(uobj->vmobjlock, RW_WRITER); | |
444 | UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx", | 444 | UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx", | |
445 | (uintptr_t)vp, vp->v_size, newsize, 0); | 445 | (uintptr_t)vp, vp->v_size, newsize, 0); | |
446 | 446 | |||
447 | /* | 447 | /* | |
448 | * now check if the size has changed: if we shrink we had better | 448 | * now check if the size has changed: if we shrink we had better | |
449 | * toss some pages... | 449 | * toss some pages... | |
450 | */ | 450 | */ | |
451 | 451 | |||
452 | KASSERT(newsize != VSIZENOTSET && newsize >= 0); | 452 | KASSERT(newsize != VSIZENOTSET && newsize >= 0); | |
453 | KASSERT(vp->v_size <= vp->v_writesize); | 453 | KASSERT(vp->v_size <= vp->v_writesize); | |
454 | KASSERT(vp->v_size == vp->v_writesize || | 454 | KASSERT(vp->v_size == vp->v_writesize || | |
455 | newsize == vp->v_writesize || newsize <= vp->v_size); | 455 | newsize == vp->v_writesize || newsize <= vp->v_size); | |
456 | 456 | |||
457 | oldsize = vp->v_writesize; | 457 | oldsize = vp->v_writesize; |
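
As in the hunks above, pointer arguments are always cast to uintptr_t before being handed to the history macros, since the log slots hold plain integers and a %#jx conversion consumes an integer of uintmax_t width. A userland analogue of the same idiom; struct vnode here is only an opaque stand-in, not the kernel type:

	#include <inttypes.h>
	#include <stdio.h>

	struct vnode;				/* opaque stand-in */

	static void
	log_vp(const struct vnode *vp, uintmax_t offset)
	{
		/* pointer -> uintptr_t -> uintmax_t gives the j-width
		 * hex conversion a well-defined integer to print */
		printf("vp %#jx off %#jx\n", (uintmax_t)(uintptr_t)vp, offset);
	}

	int
	main(void)
	{
		log_vp((const struct vnode *)0xdeadbeef, 0x2000);
		return 0;
	}
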
--- src/sys/uvm/pmap/pmap_segtab.c 2020/10/08 14:02:40 1.26
+++ src/sys/uvm/pmap/pmap_segtab.c 2021/03/13 15:29:55 1.27
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: pmap_segtab.c,v 1.26 2020/10/08 14:02:40 skrll Exp $ */ | 1 | /* $NetBSD: pmap_segtab.c,v 1.27 2021/03/13 15:29:55 skrll Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | |
9 | * NASA Ames Research Center and by Chris G. Demetriou. | 9 | * NASA Ames Research Center and by Chris G. Demetriou. | |
10 | * | 10 | * | |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without | |
12 | * modification, are permitted provided that the following conditions | 12 | * modification, are permitted provided that the following conditions | |
13 | * are met: | 13 | * are met: | |
14 | * 1. Redistributions of source code must retain the above copyright | 14 | * 1. Redistributions of source code must retain the above copyright | |
@@ -57,27 +57,27 @@ | @@ -57,27 +57,27 @@ | |||
57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
63 | * SUCH DAMAGE. | 63 | * SUCH DAMAGE. | |
64 | * | 64 | * | |
65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 | 65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 | |
66 | */ | 66 | */ | |
67 | 67 | |||
68 | #include <sys/cdefs.h> | 68 | #include <sys/cdefs.h> | |
69 | 69 | |||
70 | __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.26 2020/10/08 14:02:40 skrll Exp $"); | 70 | __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.27 2021/03/13 15:29:55 skrll Exp $"); | |
71 | 71 | |||
72 | /* | 72 | /* | |
73 | * Manages physical address maps. | 73 | * Manages physical address maps. | |
74 | * | 74 | * | |
75 | * In addition to hardware address maps, this | 75 | * In addition to hardware address maps, this | |
76 | * module is called upon to provide software-use-only | 76 | * module is called upon to provide software-use-only | |
77 | * maps which may or may not be stored in the same | 77 | * maps which may or may not be stored in the same | |
78 | * form as hardware maps. These pseudo-maps are | 78 | * form as hardware maps. These pseudo-maps are | |
79 | * used to store intermediate results from copy | 79 | * used to store intermediate results from copy | |
80 | * operations to and from address spaces. | 80 | * operations to and from address spaces. | |
81 | * | 81 | * | |
82 | * Since the information managed by this module is | 82 | * Since the information managed by this module is | |
83 | * also stored by the logical address mapping module, | 83 | * also stored by the logical address mapping module, | |
@@ -255,27 +255,27 @@ pmap_segtab_free(pmap_segtab_t *stp) | @@ -255,27 +255,27 @@ pmap_segtab_free(pmap_segtab_t *stp) | |||
255 | mutex_spin_exit(&pmap_segtab_lock); | 255 | mutex_spin_exit(&pmap_segtab_lock); | |
256 | } | 256 | } | |
257 | 257 | |||
258 | static void | 258 | static void | |
259 | pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp, | 259 | pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp, | |
260 | pte_callback_t callback, uintptr_t flags, | 260 | pte_callback_t callback, uintptr_t flags, | |
261 | vaddr_t va, vsize_t vinc) | 261 | vaddr_t va, vsize_t vinc) | |
262 | { | 262 | { | |
263 | pmap_segtab_t *stp = *stp_p; | 263 | pmap_segtab_t *stp = *stp_p; | |
264 | 264 | |||
265 | UVMHIST_FUNC(__func__); | 265 | UVMHIST_FUNC(__func__); | |
266 | UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx stpp=%#jx free=%jd", | 266 | UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx stpp=%#jx free=%jd", | |
267 | (uintptr_t)pmap, (uintptr_t)stp_p, free_stp, 0); | 267 | (uintptr_t)pmap, (uintptr_t)stp_p, free_stp, 0); | |
268 | UVMHIST_LOG(pmapsegtabhist, " callback=%#jx flags=%jx va=%#jx vinc=%#jx", | 268 | UVMHIST_LOG(pmapsegtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx", | |
269 | (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc); | 269 | (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc); | |
270 | for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1); | 270 | for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1); | |
271 | i < PMAP_SEGTABSIZE; | 271 | i < PMAP_SEGTABSIZE; | |
272 | i++, va += vinc) { | 272 | i++, va += vinc) { | |
273 | #ifdef _LP64 | 273 | #ifdef _LP64 | |
274 | if (vinc > NBSEG) { | 274 | if (vinc > NBSEG) { | |
275 | if (stp->seg_seg[i] != NULL) { | 275 | if (stp->seg_seg[i] != NULL) { | |
276 | UVMHIST_LOG(pmapsegtabhist, | 276 | UVMHIST_LOG(pmapsegtabhist, | |
277 | " recursing %jd", i, 0, 0, 0); | 277 | " recursing %jd", i, 0, 0, 0); | |
278 | pmap_segtab_release(pmap, &stp->seg_seg[i], | 278 | pmap_segtab_release(pmap, &stp->seg_seg[i], | |
279 | true, callback, flags, va, vinc / NSEGPG); | 279 | true, callback, flags, va, vinc / NSEGPG); | |
280 | KASSERT(stp->seg_seg[i] == NULL); | 280 | KASSERT(stp->seg_seg[i] == NULL); | |
281 | } | 281 | } |
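
The loop above indexes the segment table by dividing the virtual address by the span each slot covers and masking with PMAP_SEGTABSIZE - 1, which works because the table size is a power of two. With illustrative values, assuming a 4 MiB span and a 512-entry table (the real constants are machine-dependent):

	vaddr_t va = 0x01200000;			/* 18 MiB */
	size_t i = (va / 0x400000) & (512 - 1);		/* 18 / 4 = 4: slot 4 */
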