| @@ -1,1727 +1,1725 @@ | | | @@ -1,1727 +1,1725 @@ |
1 | /* $NetBSD: pmap.c,v 1.49 2020/04/12 15:36:18 skrll Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.50 2020/07/18 16:12:09 skrll Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center and by Chris G. Demetriou. | | 9 | * NASA Ames Research Center and by Chris G. Demetriou. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * Copyright (c) 1992, 1993 | | 34 | * Copyright (c) 1992, 1993 |
35 | * The Regents of the University of California. All rights reserved. | | 35 | * The Regents of the University of California. All rights reserved. |
36 | * | | 36 | * |
37 | * This code is derived from software contributed to Berkeley by | | 37 | * This code is derived from software contributed to Berkeley by |
38 | * the Systems Programming Group of the University of Utah Computer | | 38 | * the Systems Programming Group of the University of Utah Computer |
39 | * Science Department and Ralph Campbell. | | 39 | * Science Department and Ralph Campbell. |
40 | * | | 40 | * |
41 | * Redistribution and use in source and binary forms, with or without | | 41 | * Redistribution and use in source and binary forms, with or without |
42 | * modification, are permitted provided that the following conditions | | 42 | * modification, are permitted provided that the following conditions |
43 | * are met: | | 43 | * are met: |
44 | * 1. Redistributions of source code must retain the above copyright | | 44 | * 1. Redistributions of source code must retain the above copyright |
45 | * notice, this list of conditions and the following disclaimer. | | 45 | * notice, this list of conditions and the following disclaimer. |
46 | * 2. Redistributions in binary form must reproduce the above copyright | | 46 | * 2. Redistributions in binary form must reproduce the above copyright |
47 | * notice, this list of conditions and the following disclaimer in the | | 47 | * notice, this list of conditions and the following disclaimer in the |
48 | * documentation and/or other materials provided with the distribution. | | 48 | * documentation and/or other materials provided with the distribution. |
49 | * 3. Neither the name of the University nor the names of its contributors | | 49 | * 3. Neither the name of the University nor the names of its contributors |
50 | * may be used to endorse or promote products derived from this software | | 50 | * may be used to endorse or promote products derived from this software |
51 | * without specific prior written permission. | | 51 | * without specific prior written permission. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 53 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
54 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 54 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
55 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 55 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
56 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 56 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | * | | 64 | * |
65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 | | 65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 |
66 | */ | | 66 | */ |
67 | | | 67 | |
68 | #include <sys/cdefs.h> | | 68 | #include <sys/cdefs.h> |
69 | | | 69 | |
70 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.49 2020/04/12 15:36:18 skrll Exp $"); | | 70 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.50 2020/07/18 16:12:09 skrll Exp $"); |
71 | | | 71 | |
72 | /* | | 72 | /* |
73 | * Manages physical address maps. | | 73 | * Manages physical address maps. |
74 | * | | 74 | * |
75 | * In addition to hardware address maps, this | | 75 | * In addition to hardware address maps, this |
76 | * module is called upon to provide software-use-only | | 76 | * module is called upon to provide software-use-only |
77 | * maps which may or may not be stored in the same | | 77 | * maps which may or may not be stored in the same |
78 | * form as hardware maps. These pseudo-maps are | | 78 | * form as hardware maps. These pseudo-maps are |
79 | * used to store intermediate results from copy | | 79 | * used to store intermediate results from copy |
80 | * operations to and from address spaces. | | 80 | * operations to and from address spaces. |
81 | * | | 81 | * |
82 | * Since the information managed by this module is | | 82 | * Since the information managed by this module is |
83 | * also stored by the logical address mapping module, | | 83 | * also stored by the logical address mapping module, |
84 | * this module may throw away valid virtual-to-physical | | 84 | * this module may throw away valid virtual-to-physical |
85 | * mappings at almost any time. However, invalidations | | 85 | * mappings at almost any time. However, invalidations |
86 | * of virtual-to-physical mappings must be done as | | 86 | * of virtual-to-physical mappings must be done as |
87 | * requested. | | 87 | * requested. |
88 | * | | 88 | * |
89 | * In order to cope with hardware architectures which | | 89 | * In order to cope with hardware architectures which |
90 | * make virtual-to-physical map invalidates expensive, | | 90 | * make virtual-to-physical map invalidates expensive, |
91 | * this module may delay invalidation or protection-reduction | | 91 | * this module may delay invalidation or protection-reduction
92 | * operations until such time as they are actually | | 92 | * operations until such time as they are actually |
93 | * necessary. This module is given full information as | | 93 | * necessary. This module is given full information as |
94 | * to which processors are currently using which maps, | | 94 | * to which processors are currently using which maps, |
95 | * and to when physical maps must be made correct. | | 95 | * and to when physical maps must be made correct. |
96 | */ | | 96 | */ |
97 | | | 97 | |
98 | #include "opt_modular.h" | | 98 | #include "opt_modular.h" |
99 | #include "opt_multiprocessor.h" | | 99 | #include "opt_multiprocessor.h" |
100 | #include "opt_sysv.h" | | 100 | #include "opt_sysv.h" |
101 | | | 101 | |
102 | #define __PMAP_PRIVATE | | 102 | #define __PMAP_PRIVATE |
103 | | | 103 | |
104 | #include <sys/param.h> | | 104 | #include <sys/param.h> |
105 | | | 105 | |
106 | #include <sys/atomic.h> | | 106 | #include <sys/atomic.h> |
107 | #include <sys/buf.h> | | 107 | #include <sys/buf.h> |
108 | #include <sys/cpu.h> | | 108 | #include <sys/cpu.h> |
109 | #include <sys/mutex.h> | | 109 | #include <sys/mutex.h> |
110 | #include <sys/pool.h> | | 110 | #include <sys/pool.h> |
111 | | | 111 | |
112 | #include <uvm/uvm.h> | | 112 | #include <uvm/uvm.h> |
113 | #include <uvm/uvm_physseg.h> | | 113 | #include <uvm/uvm_physseg.h> |
114 | | | 114 | |
115 | #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \ | | 115 | #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \ |
116 | && !defined(PMAP_NO_PV_UNCACHED) | | 116 | && !defined(PMAP_NO_PV_UNCACHED) |
117 | #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \ | | 117 | #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \ |
118 | PMAP_NO_PV_UNCACHED to be defined | | 118 | PMAP_NO_PV_UNCACHED to be defined |
119 | #endif | | 119 | #endif |
120 | | | 120 | |
121 | PMAP_COUNTER(remove_kernel_calls, "remove kernel calls"); | | 121 | PMAP_COUNTER(remove_kernel_calls, "remove kernel calls"); |
122 | PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped"); | | 122 | PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped"); |
123 | PMAP_COUNTER(remove_user_calls, "remove user calls"); | | 123 | PMAP_COUNTER(remove_user_calls, "remove user calls"); |
124 | PMAP_COUNTER(remove_user_pages, "user pages unmapped"); | | 124 | PMAP_COUNTER(remove_user_pages, "user pages unmapped"); |
125 | PMAP_COUNTER(remove_flushes, "remove cache flushes"); | | 125 | PMAP_COUNTER(remove_flushes, "remove cache flushes"); |
126 | PMAP_COUNTER(remove_tlb_ops, "remove tlb ops"); | | 126 | PMAP_COUNTER(remove_tlb_ops, "remove tlb ops"); |
127 | PMAP_COUNTER(remove_pvfirst, "remove pv first"); | | 127 | PMAP_COUNTER(remove_pvfirst, "remove pv first"); |
128 | PMAP_COUNTER(remove_pvsearch, "remove pv search"); | | 128 | PMAP_COUNTER(remove_pvsearch, "remove pv search"); |
129 | | | 129 | |
130 | PMAP_COUNTER(prefer_requests, "prefer requests"); | | 130 | PMAP_COUNTER(prefer_requests, "prefer requests"); |
131 | PMAP_COUNTER(prefer_adjustments, "prefer adjustments"); | | 131 | PMAP_COUNTER(prefer_adjustments, "prefer adjustments"); |
132 | | | 132 | |
133 | PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed"); | | 133 | PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed"); |
134 | | | 134 | |
135 | PMAP_COUNTER(kenter_pa, "kernel fast mapped pages"); | | 135 | PMAP_COUNTER(kenter_pa, "kernel fast mapped pages"); |
136 | PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)"); | | 136 | PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)"); |
137 | PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages"); | | 137 | PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages"); |
138 | PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages"); | | 138 | PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages"); |
139 | | | 139 | |
140 | PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable"); | | 140 | PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable"); |
141 | PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable"); | | 141 | PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable"); |
142 | | | 142 | |
143 | PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)"); | | 143 | PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)"); |
144 | PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)"); | | 144 | PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)"); |
145 | PMAP_COUNTER(kernel_mappings, "kernel pages mapped"); | | 145 | PMAP_COUNTER(kernel_mappings, "kernel pages mapped"); |
146 | PMAP_COUNTER(user_mappings, "user pages mapped"); | | 146 | PMAP_COUNTER(user_mappings, "user pages mapped"); |
147 | PMAP_COUNTER(user_mappings_changed, "user mapping changed"); | | 147 | PMAP_COUNTER(user_mappings_changed, "user mapping changed"); |
148 | PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed"); | | 148 | PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed"); |
149 | PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); | | 149 | PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); |
150 | PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); | | 150 | PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); |
151 | PMAP_COUNTER(managed_mappings, "managed pages mapped"); | | 151 | PMAP_COUNTER(managed_mappings, "managed pages mapped"); |
152 | PMAP_COUNTER(mappings, "pages mapped"); | | 152 | PMAP_COUNTER(mappings, "pages mapped"); |
153 | PMAP_COUNTER(remappings, "pages remapped"); | | 153 | PMAP_COUNTER(remappings, "pages remapped"); |
154 | PMAP_COUNTER(unmappings, "pages unmapped"); | | 154 | PMAP_COUNTER(unmappings, "pages unmapped"); |
155 | PMAP_COUNTER(primary_mappings, "page initial mappings"); | | 155 | PMAP_COUNTER(primary_mappings, "page initial mappings"); |
156 | PMAP_COUNTER(primary_unmappings, "page final unmappings"); | | 156 | PMAP_COUNTER(primary_unmappings, "page final unmappings"); |
157 | PMAP_COUNTER(tlb_hit, "page mapping"); | | 157 | PMAP_COUNTER(tlb_hit, "page mapping"); |
158 | | | 158 | |
159 | PMAP_COUNTER(exec_mappings, "exec pages mapped"); | | 159 | PMAP_COUNTER(exec_mappings, "exec pages mapped"); |
160 | PMAP_COUNTER(exec_synced_mappings, "exec pages synced"); | | 160 | PMAP_COUNTER(exec_synced_mappings, "exec pages synced"); |
161 | PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)"); | | 161 | PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)"); |
162 | PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)"); | | 162 | PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)"); |
163 | PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)"); | | 163 | PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)"); |
164 | PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)"); | | 164 | PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)"); |
165 | PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)"); | | 165 | PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)"); |
166 | PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)"); | | 166 | PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)"); |
167 | PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)"); | | 167 | PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)"); |
168 | PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)"); | | 168 | PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)"); |
169 | PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)"); | | 169 | PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)"); |
170 | | | 170 | |
171 | PMAP_COUNTER(create, "creates"); | | 171 | PMAP_COUNTER(create, "creates"); |
172 | PMAP_COUNTER(reference, "references"); | | 172 | PMAP_COUNTER(reference, "references"); |
173 | PMAP_COUNTER(dereference, "dereferences"); | | 173 | PMAP_COUNTER(dereference, "dereferences"); |
174 | PMAP_COUNTER(destroy, "destroyed"); | | 174 | PMAP_COUNTER(destroy, "destroyed"); |
175 | PMAP_COUNTER(activate, "activations"); | | 175 | PMAP_COUNTER(activate, "activations"); |
176 | PMAP_COUNTER(deactivate, "deactivations"); | | 176 | PMAP_COUNTER(deactivate, "deactivations"); |
177 | PMAP_COUNTER(update, "updates"); | | 177 | PMAP_COUNTER(update, "updates"); |
178 | #ifdef MULTIPROCESSOR | | 178 | #ifdef MULTIPROCESSOR |
179 | PMAP_COUNTER(shootdown_ipis, "shootdown IPIs"); | | 179 | PMAP_COUNTER(shootdown_ipis, "shootdown IPIs"); |
180 | #endif | | 180 | #endif |
181 | PMAP_COUNTER(unwire, "unwires"); | | 181 | PMAP_COUNTER(unwire, "unwires"); |
182 | PMAP_COUNTER(copy, "copies"); | | 182 | PMAP_COUNTER(copy, "copies"); |
183 | PMAP_COUNTER(clear_modify, "clear_modifies"); | | 183 | PMAP_COUNTER(clear_modify, "clear_modifies"); |
184 | PMAP_COUNTER(protect, "protects"); | | 184 | PMAP_COUNTER(protect, "protects"); |
185 | PMAP_COUNTER(page_protect, "page_protects"); | | 185 | PMAP_COUNTER(page_protect, "page_protects"); |
186 | | | 186 | |
187 | #define PMAP_ASID_RESERVED 0 | | 187 | #define PMAP_ASID_RESERVED 0 |
188 | CTASSERT(PMAP_ASID_RESERVED == 0); | | 188 | CTASSERT(PMAP_ASID_RESERVED == 0); |
189 | | | 189 | |
190 | #ifndef PMAP_SEGTAB_ALIGN | | 190 | #ifndef PMAP_SEGTAB_ALIGN |
191 | #define PMAP_SEGTAB_ALIGN /* nothing */ | | 191 | #define PMAP_SEGTAB_ALIGN /* nothing */ |
192 | #endif | | 192 | #endif |
193 | #ifdef _LP64 | | 193 | #ifdef _LP64 |
194 | pmap_segtab_t pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */ | | 194 | pmap_segtab_t pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */ |
195 | #endif | | 195 | #endif |
196 | pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */ | | 196 | pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */ |
197 | #ifdef _LP64 | | 197 | #ifdef _LP64 |
198 | .seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab, | | 198 | .seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab, |
199 | #endif | | 199 | #endif |
200 | }; | | 200 | }; |
201 | | | 201 | |
202 | struct pmap_kernel kernel_pmap_store = { | | 202 | struct pmap_kernel kernel_pmap_store = { |
203 | .kernel_pmap = { | | 203 | .kernel_pmap = { |
204 | .pm_count = 1, | | 204 | .pm_count = 1, |
205 | .pm_segtab = &pmap_kern_segtab, | | 205 | .pm_segtab = &pmap_kern_segtab, |
206 | .pm_minaddr = VM_MIN_KERNEL_ADDRESS, | | 206 | .pm_minaddr = VM_MIN_KERNEL_ADDRESS, |
207 | .pm_maxaddr = VM_MAX_KERNEL_ADDRESS, | | 207 | .pm_maxaddr = VM_MAX_KERNEL_ADDRESS, |
208 | }, | | 208 | }, |
209 | }; | | 209 | }; |
210 | | | 210 | |
211 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap; | | 211 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap; |
212 | | | 212 | |
213 | struct pmap_limits pmap_limits = { /* VA and PA limits */ | | 213 | struct pmap_limits pmap_limits = { /* VA and PA limits */ |
214 | .virtual_start = VM_MIN_KERNEL_ADDRESS, | | 214 | .virtual_start = VM_MIN_KERNEL_ADDRESS, |
215 | }; | | 215 | }; |
216 | | | 216 | |
217 | #ifdef UVMHIST | | 217 | #ifdef UVMHIST |
218 | static struct kern_history_ent pmapexechistbuf[10000]; | | 218 | static struct kern_history_ent pmapexechistbuf[10000]; |
219 | static struct kern_history_ent pmaphistbuf[10000]; | | 219 | static struct kern_history_ent pmaphistbuf[10000]; |
220 | UVMHIST_DEFINE(pmapexechist); | | 220 | UVMHIST_DEFINE(pmapexechist); |
221 | UVMHIST_DEFINE(pmaphist); | | 221 | UVMHIST_DEFINE(pmaphist); |
222 | #endif | | 222 | #endif |
223 | | | 223 | |
224 | /* | | 224 | /* |
225 | * The pools from which pmap structures and sub-structures are allocated. | | 225 | * The pools from which pmap structures and sub-structures are allocated. |
226 | */ | | 226 | */ |
227 | struct pool pmap_pmap_pool; | | 227 | struct pool pmap_pmap_pool; |
228 | struct pool pmap_pv_pool; | | 228 | struct pool pmap_pv_pool; |
229 | | | 229 | |
230 | #ifndef PMAP_PV_LOWAT | | 230 | #ifndef PMAP_PV_LOWAT |
231 | #define PMAP_PV_LOWAT 16 | | 231 | #define PMAP_PV_LOWAT 16 |
232 | #endif | | 232 | #endif |
233 | int pmap_pv_lowat = PMAP_PV_LOWAT; | | 233 | int pmap_pv_lowat = PMAP_PV_LOWAT; |
234 | | | 234 | |
235 | bool pmap_initialized = false; | | 235 | bool pmap_initialized = false; |
236 | #define PMAP_PAGE_COLOROK_P(a, b) \ | | 236 | #define PMAP_PAGE_COLOROK_P(a, b) \ |
237 | ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0) | | 237 | ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0) |
238 | u_int pmap_page_colormask; | | 238 | u_int pmap_page_colormask; |
239 | | | 239 | |
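PMAP_PAGE_COLOROK_P() tests whether a virtual address and a physical page land on the same cache color: XOR the two values and mask with pmap_page_colormask (initialized from ptoa(uvmexp.colormask) in pmap_init() further down). A minimal standalone sketch of the same test, using hypothetical numbers (4 KB pages, a 4-color virtually indexed cache):

    /* Hypothetical parameters for illustration only. */
    #define EX_PAGE_SIZE    4096
    #define EX_COLORMASK    (3 * EX_PAGE_SIZE)  /* colors in VA/PA bits 13..12 */

    /* Same test as PMAP_PAGE_COLOROK_P: the color bits must agree. */
    static int
    ex_color_ok(unsigned long va, unsigned long pa)
    {
            return ((va ^ pa) & EX_COLORMASK) == 0;
    }

    /*
     * ex_color_ok(0x2000, 0x6000) == 1: both addresses are color 2.
     * ex_color_ok(0x2000, 0x7000) == 0: colors 2 vs. 3 would alias.
     */

A mapping that fails the test still proceeds, but is counted (kenter_pa_bad, kernel_mappings_bad, user_mappings_bad above) because it can create virtual-cache aliases on virtually indexed caches.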
240 | #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa)) | | 240 | #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa)) |
241 | | | 241 | |
242 | #define PMAP_IS_ACTIVE(pm) \ | | 242 | #define PMAP_IS_ACTIVE(pm) \ |
243 | ((pm) == pmap_kernel() || \ | | 243 | ((pm) == pmap_kernel() || \ |
244 | (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) | | 244 | (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) |
245 | | | 245 | |
246 | /* Forward function declarations */ | | 246 | /* Forward function declarations */ |
247 | void pmap_page_remove(struct vm_page *); | | 247 | void pmap_page_remove(struct vm_page *); |
248 | static void pmap_pvlist_check(struct vm_page_md *); | | 248 | static void pmap_pvlist_check(struct vm_page_md *); |
249 | void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool); | | 249 | void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool); |
250 | void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int); | | 250 | void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int); |
251 | | | 251 | |
252 | /* | | 252 | /* |
253 | * PV table management functions. | | 253 | * PV table management functions. |
254 | */ | | 254 | */ |
255 | void *pmap_pv_page_alloc(struct pool *, int); | | 255 | void *pmap_pv_page_alloc(struct pool *, int); |
256 | void pmap_pv_page_free(struct pool *, void *); | | 256 | void pmap_pv_page_free(struct pool *, void *); |
257 | | | 257 | |
258 | struct pool_allocator pmap_pv_page_allocator = { | | 258 | struct pool_allocator pmap_pv_page_allocator = { |
259 | pmap_pv_page_alloc, pmap_pv_page_free, 0, | | 259 | pmap_pv_page_alloc, pmap_pv_page_free, 0, |
260 | }; | | 260 | }; |
261 | | | 261 | |
262 | #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) | | 262 | #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) |
263 | #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) | | 263 | #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) |
264 | | | 264 | |
265 | #ifndef PMAP_NEED_TLB_MISS_LOCK | | 265 | #ifndef PMAP_NEED_TLB_MISS_LOCK |
266 | | | 266 | |
267 | #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG) | | 267 | #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG) |
268 | #define PMAP_NEED_TLB_MISS_LOCK | | 268 | #define PMAP_NEED_TLB_MISS_LOCK |
269 | #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */ | | 269 | #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */ |
270 | | | 270 | |
271 | #endif /* PMAP_NEED_TLB_MISS_LOCK */ | | 271 | #endif /* PMAP_NEED_TLB_MISS_LOCK */ |
272 | | | 272 | |
273 | #ifdef PMAP_NEED_TLB_MISS_LOCK | | 273 | #ifdef PMAP_NEED_TLB_MISS_LOCK |
274 | | | 274 | |
275 | #ifdef PMAP_MD_NEED_TLB_MISS_LOCK | | 275 | #ifdef PMAP_MD_NEED_TLB_MISS_LOCK |
276 | #define pmap_tlb_miss_lock_init() __nothing /* MD code deals with this */ | | 276 | #define pmap_tlb_miss_lock_init() __nothing /* MD code deals with this */ |
277 | #define pmap_tlb_miss_lock_enter() pmap_md_tlb_miss_lock_enter() | | 277 | #define pmap_tlb_miss_lock_enter() pmap_md_tlb_miss_lock_enter() |
278 | #define pmap_tlb_miss_lock_exit() pmap_md_tlb_miss_lock_exit() | | 278 | #define pmap_tlb_miss_lock_exit() pmap_md_tlb_miss_lock_exit() |
279 | #else | | 279 | #else |
280 | kmutex_t pmap_tlb_miss_lock __cacheline_aligned; | | 280 | kmutex_t pmap_tlb_miss_lock __cacheline_aligned; |
281 | | | 281 | |
282 | static void | | 282 | static void |
283 | pmap_tlb_miss_lock_init(void) | | 283 | pmap_tlb_miss_lock_init(void) |
284 | { | | 284 | { |
285 | mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH); | | 285 | mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH); |
286 | } | | 286 | } |
287 | | | 287 | |
288 | static inline void | | 288 | static inline void |
289 | pmap_tlb_miss_lock_enter(void) | | 289 | pmap_tlb_miss_lock_enter(void) |
290 | { | | 290 | { |
291 | mutex_spin_enter(&pmap_tlb_miss_lock); | | 291 | mutex_spin_enter(&pmap_tlb_miss_lock); |
292 | } | | 292 | } |
293 | | | 293 | |
294 | static inline void | | 294 | static inline void |
295 | pmap_tlb_miss_lock_exit(void) | | 295 | pmap_tlb_miss_lock_exit(void) |
296 | { | | 296 | { |
297 | mutex_spin_exit(&pmap_tlb_miss_lock); | | 297 | mutex_spin_exit(&pmap_tlb_miss_lock); |
298 | } | | 298 | } |
299 | #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */ | | 299 | #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */ |
300 | | | 300 | |
301 | #else | | 301 | #else |
302 | | | 302 | |
303 | #define pmap_tlb_miss_lock_init() __nothing | | 303 | #define pmap_tlb_miss_lock_init() __nothing |
304 | #define pmap_tlb_miss_lock_enter() __nothing | | 304 | #define pmap_tlb_miss_lock_enter() __nothing |
305 | #define pmap_tlb_miss_lock_exit() __nothing | | 305 | #define pmap_tlb_miss_lock_exit() __nothing |
306 | | | 306 | |
307 | #endif /* PMAP_NEED_TLB_MISS_LOCK */ | | 307 | #endif /* PMAP_NEED_TLB_MISS_LOCK */ |
308 | | | 308 | |
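The preprocessor logic above resolves to one of three tlb-miss-lock configurations, summarized:

    /*
     * 1. PMAP_MD_NEED_TLB_MISS_LOCK: the port supplies its own
     *    enter/exit hooks, and pmap_tlb_miss_lock_init() is a no-op.
     * 2. PMAP_NEED_TLB_MISS_LOCK without the MD hook (e.g. DEBUG
     *    kernels): an MI IPL_HIGH spin mutex guards the TLB miss path.
     * 3. Neither: all three operations compile to __nothing.
     */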
309 | #ifndef MULTIPROCESSOR | | 309 | #ifndef MULTIPROCESSOR |
310 | kmutex_t pmap_pvlist_mutex __cacheline_aligned; | | 310 | kmutex_t pmap_pvlist_mutex __cacheline_aligned; |
311 | #endif | | 311 | #endif |
312 | | | 312 | |
313 | /* | | 313 | /* |
314 | * Debug functions. | | 314 | * Debug functions. |
315 | */ | | 315 | */ |
316 | | | 316 | |
317 | #ifdef DEBUG | | 317 | #ifdef DEBUG |
318 | static inline void | | 318 | static inline void |
319 | pmap_asid_check(pmap_t pm, const char *func) | | 319 | pmap_asid_check(pmap_t pm, const char *func) |
320 | { | | 320 | { |
321 | if (!PMAP_IS_ACTIVE(pm)) | | 321 | if (!PMAP_IS_ACTIVE(pm)) |
322 | return; | | 322 | return; |
323 | | | 323 | |
324 | struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu())); | | 324 | struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu())); |
325 | tlb_asid_t asid = tlb_get_asid(); | | 325 | tlb_asid_t asid = tlb_get_asid(); |
326 | if (asid != pai->pai_asid) | | 326 | if (asid != pai->pai_asid) |
327 | panic("%s: inconsistency for active TLB update: %u <-> %u", | | 327 | panic("%s: inconsistency for active TLB update: %u <-> %u", |
328 | func, asid, pai->pai_asid); | | 328 | func, asid, pai->pai_asid); |
329 | } | | 329 | } |
330 | #endif | | 330 | #endif |
331 | | | 331 | |
332 | static void | | 332 | static void |
333 | pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func) | | 333 | pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func) |
334 | { | | 334 | { |
335 | #ifdef DEBUG | | 335 | #ifdef DEBUG |
336 | if (pmap == pmap_kernel()) { | | 336 | if (pmap == pmap_kernel()) { |
337 | if (sva < VM_MIN_KERNEL_ADDRESS) | | 337 | if (sva < VM_MIN_KERNEL_ADDRESS) |
338 | panic("%s: kva %#"PRIxVADDR" not in range", | | 338 | panic("%s: kva %#"PRIxVADDR" not in range", |
339 | func, sva); | | 339 | func, sva); |
340 | if (eva >= pmap_limits.virtual_end) | | 340 | if (eva >= pmap_limits.virtual_end) |
341 | panic("%s: kva %#"PRIxVADDR" not in range", | | 341 | panic("%s: kva %#"PRIxVADDR" not in range", |
342 | func, eva); | | 342 | func, eva); |
343 | } else { | | 343 | } else { |
344 | if (eva > VM_MAXUSER_ADDRESS) | | 344 | if (eva > VM_MAXUSER_ADDRESS) |
345 | panic("%s: uva %#"PRIxVADDR" not in range", | | 345 | panic("%s: uva %#"PRIxVADDR" not in range", |
346 | func, eva); | | 346 | func, eva); |
347 | pmap_asid_check(pmap, func); | | 347 | pmap_asid_check(pmap, func); |
348 | } | | 348 | } |
349 | #endif | | 349 | #endif |
350 | } | | 350 | } |
351 | | | 351 | |
352 | /* | | 352 | /* |
353 | * Misc. functions. | | 353 | * Misc. functions. |
354 | */ | | 354 | */ |
355 | | | 355 | |
356 | bool | | 356 | bool |
357 | pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes) | | 357 | pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes) |
358 | { | | 358 | { |
359 | volatile unsigned long * const attrp = &mdpg->mdpg_attrs; | | 359 | volatile unsigned long * const attrp = &mdpg->mdpg_attrs; |
360 | #ifdef MULTIPROCESSOR | | 360 | #ifdef MULTIPROCESSOR |
361 | for (;;) { | | 361 | for (;;) { |
362 | u_int old_attr = *attrp; | | 362 | u_int old_attr = *attrp; |
363 | if ((old_attr & clear_attributes) == 0) | | 363 | if ((old_attr & clear_attributes) == 0) |
364 | return false; | | 364 | return false; |
365 | u_int new_attr = old_attr & ~clear_attributes; | | 365 | u_int new_attr = old_attr & ~clear_attributes; |
366 | if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr)) | | 366 | if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr)) |
367 | return true; | | 367 | return true; |
368 | } | | 368 | } |
369 | #else | | 369 | #else |
370 | unsigned long old_attr = *attrp; | | 370 | unsigned long old_attr = *attrp; |
371 | if ((old_attr & clear_attributes) == 0) | | 371 | if ((old_attr & clear_attributes) == 0) |
372 | return false; | | 372 | return false; |
373 | *attrp &= ~clear_attributes; | | 373 | *attrp &= ~clear_attributes; |
374 | return true; | | 374 | return true; |
375 | #endif | | 375 | #endif |
376 | } | | 376 | } |
377 | | | 377 | |
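The MULTIPROCESSOR branch of pmap_page_clear_attributes() above is the classic lock-free read-modify-write retry loop: read the word, compute the new value, and let atomic_cas_ulong() commit it only if no other CPU changed the word in the meantime; otherwise reload and retry. The same pattern in isolation (a minimal sketch; atomic_cas_ulong() is the real <sys/atomic.h> primitive):

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Atomically clear `bits` in *p; return true iff any were set. */
    static bool
    clear_bits(volatile unsigned long *p, unsigned long bits)
    {
            for (;;) {
                    unsigned long old = *p;
                    if ((old & bits) == 0)
                            return false;   /* nothing to do */
                    if (atomic_cas_ulong(p, old, old & ~bits) == old)
                            return true;    /* our update committed */
                    /* lost a race with another CPU: reload and retry */
            }
    }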
378 | void | | 378 | void |
379 | pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes) | | 379 | pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes) |
380 | { | | 380 | { |
381 | #ifdef MULTIPROCESSOR | | 381 | #ifdef MULTIPROCESSOR |
382 | atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes); | | 382 | atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes); |
383 | #else | | 383 | #else |
384 | mdpg->mdpg_attrs |= set_attributes; | | 384 | mdpg->mdpg_attrs |= set_attributes; |
385 | #endif | | 385 | #endif |
386 | } | | 386 | } |
387 | | | 387 | |
388 | static void | | 388 | static void |
389 | pmap_page_syncicache(struct vm_page *pg) | | 389 | pmap_page_syncicache(struct vm_page *pg) |
390 | { | | 390 | { |
391 | #ifndef MULTIPROCESSOR | | 391 | #ifndef MULTIPROCESSOR |
392 | struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap; | | 392 | struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap; |
393 | #endif | | 393 | #endif |
394 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); | | 394 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); |
395 | pv_entry_t pv = &mdpg->mdpg_first; | | 395 | pv_entry_t pv = &mdpg->mdpg_first; |
396 | kcpuset_t *onproc; | | 396 | kcpuset_t *onproc; |
397 | #ifdef MULTIPROCESSOR | | 397 | #ifdef MULTIPROCESSOR |
398 | kcpuset_create(&onproc, true); | | 398 | kcpuset_create(&onproc, true); |
399 | KASSERT(onproc != NULL); | | 399 | KASSERT(onproc != NULL); |
400 | #else | | 400 | #else |
401 | onproc = NULL; | | 401 | onproc = NULL; |
402 | #endif | | 402 | #endif |
403 | VM_PAGEMD_PVLIST_READLOCK(mdpg); | | 403 | VM_PAGEMD_PVLIST_READLOCK(mdpg); |
404 | pmap_pvlist_check(mdpg); | | 404 | pmap_pvlist_check(mdpg); |
405 | | | 405 | |
406 | if (pv->pv_pmap != NULL) { | | 406 | if (pv->pv_pmap != NULL) { |
407 | for (; pv != NULL; pv = pv->pv_next) { | | 407 | for (; pv != NULL; pv = pv->pv_next) { |
408 | #ifdef MULTIPROCESSOR | | 408 | #ifdef MULTIPROCESSOR |
409 | kcpuset_merge(onproc, pv->pv_pmap->pm_onproc); | | 409 | kcpuset_merge(onproc, pv->pv_pmap->pm_onproc); |
410 | if (kcpuset_match(onproc, kcpuset_running)) { | | 410 | if (kcpuset_match(onproc, kcpuset_running)) { |
411 | break; | | 411 | break; |
412 | } | | 412 | } |
413 | #else | | 413 | #else |
414 | if (pv->pv_pmap == curpmap) { | | 414 | if (pv->pv_pmap == curpmap) { |
415 | onproc = curcpu()->ci_data.cpu_kcpuset; | | 415 | onproc = curcpu()->ci_data.cpu_kcpuset; |
416 | break; | | 416 | break; |
417 | } | | 417 | } |
418 | #endif | | 418 | #endif |
419 | } | | 419 | } |
420 | } | | 420 | } |
421 | pmap_pvlist_check(mdpg); | | 421 | pmap_pvlist_check(mdpg); |
422 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); | | 422 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); |
423 | kpreempt_disable(); | | 423 | kpreempt_disable(); |
424 | pmap_md_page_syncicache(pg, onproc); | | 424 | pmap_md_page_syncicache(pg, onproc); |
425 | kpreempt_enable(); | | 425 | kpreempt_enable(); |
426 | #ifdef MULTIPROCESSOR | | 426 | #ifdef MULTIPROCESSOR |
427 | kcpuset_destroy(onproc); | | 427 | kcpuset_destroy(onproc); |
428 | #endif | | 428 | #endif |
429 | } | | 429 | } |
430 | | | 430 | |
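pmap_page_syncicache() walks the page's pv list to accumulate, in a kcpuset, every CPU that may hold stale icache lines for the page, cutting the walk short once all running CPUs are implicated, then hands the set to the MD layer to flush (and IPI, if needed). The accumulation step compressed into one sketch, reusing this file's own types (the kcpuset_* calls are the real <sys/kcpuset.h> API; pm_onproc is the set of CPUs currently running a pmap):

    /* Compressed from pmap_page_syncicache() above. */
    static void
    syncicache_targets(struct vm_page *pg, pv_entry_t first)
    {
            kcpuset_t *onproc;

            kcpuset_create(&onproc, true);          /* zeroed CPU set */
            for (pv_entry_t pv = first; pv != NULL; pv = pv->pv_next) {
                    kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
                    if (kcpuset_match(onproc, kcpuset_running))
                            break;                  /* all CPUs included */
            }
            pmap_md_page_syncicache(pg, onproc);    /* MD flush + IPIs */
            kcpuset_destroy(onproc);
    }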
431 | /* | | 431 | /* |
432 | * Define the initial bounds of the kernel virtual address space. | | 432 | * Define the initial bounds of the kernel virtual address space. |
433 | */ | | 433 | */ |
434 | void | | 434 | void |
435 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) | | 435 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) |
436 | { | | 436 | { |
437 | | | 437 | |
438 | *vstartp = pmap_limits.virtual_start; | | 438 | *vstartp = pmap_limits.virtual_start; |
439 | *vendp = pmap_limits.virtual_end; | | 439 | *vendp = pmap_limits.virtual_end; |
440 | } | | 440 | } |
441 | | | 441 | |
442 | vaddr_t | | 442 | vaddr_t |
443 | pmap_growkernel(vaddr_t maxkvaddr) | | 443 | pmap_growkernel(vaddr_t maxkvaddr) |
444 | { | | 444 | { |
445 | vaddr_t virtual_end = pmap_limits.virtual_end; | | 445 | vaddr_t virtual_end = pmap_limits.virtual_end; |
446 | maxkvaddr = pmap_round_seg(maxkvaddr) - 1; | | 446 | maxkvaddr = pmap_round_seg(maxkvaddr) - 1; |
447 | | | 447 | |
448 | /* | | 448 | /* |
449 | * Reserve PTEs for the new KVA space. | | 449 | * Reserve PTEs for the new KVA space. |
450 | */ | | 450 | */ |
451 | for (; virtual_end < maxkvaddr; virtual_end += NBSEG) { | | 451 | for (; virtual_end < maxkvaddr; virtual_end += NBSEG) { |
452 | pmap_pte_reserve(pmap_kernel(), virtual_end, 0); | | 452 | pmap_pte_reserve(pmap_kernel(), virtual_end, 0); |
453 | } | | 453 | } |
454 | | | 454 | |
455 | /* | | 455 | /* |
456 | * Don't exceed VM_MAX_KERNEL_ADDRESS! | | 456 | * Don't exceed VM_MAX_KERNEL_ADDRESS! |
457 | */ | | 457 | */ |
458 | if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS) | | 458 | if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS) |
459 | virtual_end = VM_MAX_KERNEL_ADDRESS; | | 459 | virtual_end = VM_MAX_KERNEL_ADDRESS; |
460 | | | 460 | |
461 | /* | | 461 | /* |
462 | * Update new end. | | 462 | * Update new end. |
463 | */ | | 463 | */ |
464 | pmap_limits.virtual_end = virtual_end; | | 464 | pmap_limits.virtual_end = virtual_end; |
465 | return virtual_end; | | 465 | return virtual_end; |
466 | } | | 466 | } |
467 | | | 467 | |
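pmap_growkernel() only ever grows KVA in whole segments: the request is rounded up with pmap_round_seg() and the loop reserves kernel PTE pages one NBSEG-sized segment at a time. A worked example with hypothetical numbers (NBSEG = 4 MB, virtual_end starting at 0xC0000000):

    /*
     * maxkvaddr request:             0xC0500000
     * pmap_round_seg(...) - 1:       0xC07FFFFF
     * loop reserves segments at:     0xC0000000, 0xC0400000
     * new pmap_limits.virtual_end:   0xC0800000   (two full segments)
     */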
468 | /* | | 468 | /* |
469 | * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). | | 469 | * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). |
470 | * This function allows for early dynamic memory allocation until the virtual | | 470 | * This function allows for early dynamic memory allocation until the virtual |
471 | * memory system has been bootstrapped. After that point, either kmem_alloc | | 471 | * memory system has been bootstrapped. After that point, either kmem_alloc |
472 | * or malloc should be used. This function works by stealing pages from the | | 472 | * or malloc should be used. This function works by stealing pages from the |
473 | * (to be) managed page pool, then implicitly mapping the pages (by using | | 473 | * (to be) managed page pool, then implicitly mapping the pages (by using |
474 | * their direct mapped addresses) and zeroing them. | | 474 | * their direct mapped addresses) and zeroing them. |
475 | * | | 475 | * |
476 | * It may be used once the physical memory segments have been pre-loaded | | 476 | * It may be used once the physical memory segments have been pre-loaded |
477 | * into the vm_physmem[] array. Early memory allocation MUST use this | | 477 | * into the vm_physmem[] array. Early memory allocation MUST use this |
478 | * interface! This cannot be used after vm_page_startup(), and will | | 478 | * interface! This cannot be used after vm_page_startup(), and will |
479 | * generate a panic if tried. | | 479 | * generate a panic if tried. |
480 | * | | 480 | * |
481 | * Note that this memory will never be freed, and in essence it is wired | | 481 | * Note that this memory will never be freed, and in essence it is wired |
482 | * down. | | 482 | * down. |
483 | * | | 483 | * |
484 | * We must adjust *vstartp and/or *vendp iff we use address space | | 484 | * We must adjust *vstartp and/or *vendp iff we use address space |
485 | * from the kernel virtual address range defined by pmap_virtual_space(). | | 485 | * from the kernel virtual address range defined by pmap_virtual_space(). |
486 | */ | | 486 | */ |
487 | vaddr_t | | 487 | vaddr_t |
488 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) | | 488 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) |
489 | { | | 489 | { |
490 | size_t npgs; | | 490 | size_t npgs; |
491 | paddr_t pa; | | 491 | paddr_t pa; |
492 | vaddr_t va; | | 492 | vaddr_t va; |
493 | | | 493 | |
494 | uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID; | | 494 | uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID; |
495 | | | 495 | |
496 | size = round_page(size); | | 496 | size = round_page(size); |
497 | npgs = atop(size); | | 497 | npgs = atop(size); |
498 | | | 498 | |
499 | aprint_debug("%s: need %zu pages\n", __func__, npgs); | | 499 | aprint_debug("%s: need %zu pages\n", __func__, npgs); |
500 | | | 500 | |
501 | for (uvm_physseg_t bank = uvm_physseg_get_first(); | | 501 | for (uvm_physseg_t bank = uvm_physseg_get_first(); |
502 | uvm_physseg_valid_p(bank); | | 502 | uvm_physseg_valid_p(bank); |
503 | bank = uvm_physseg_get_next(bank)) { | | 503 | bank = uvm_physseg_get_next(bank)) { |
504 | | | 504 | |
505 | if (uvm.page_init_done == true) | | 505 | if (uvm.page_init_done == true) |
506 | panic("pmap_steal_memory: called _after_ bootstrap"); | | 506 | panic("pmap_steal_memory: called _after_ bootstrap"); |
507 | | | 507 | |
508 | aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n", | | 508 | aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n", |
509 | __func__, bank, | | 509 | __func__, bank, |
510 | uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank), | | 510 | uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank), |
511 | uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank)); | | 511 | uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank)); |
512 | | | 512 | |
513 | if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) | | 513 | if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) |
514 | || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) { | | 514 | || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) { |
515 | aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank); | | 515 | aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank); |
516 | continue; | | 516 | continue; |
517 | } | | 517 | } |
518 | | | 518 | |
519 | if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) { | | 519 | if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) { |
520 | aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n", | | 520 | aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n", |
521 | __func__, bank, npgs); | | 521 | __func__, bank, npgs); |
522 | continue; | | 522 | continue; |
523 | } | | 523 | } |
524 | | | 524 | |
525 | if (!pmap_md_ok_to_steal_p(bank, npgs)) { | | 525 | if (!pmap_md_ok_to_steal_p(bank, npgs)) { |
526 | continue; | | 526 | continue; |
527 | } | | 527 | } |
528 | | | 528 | |
529 | /* | | 529 | /* |
530 | * Always try to allocate from the segment with the least | | 530 | * Always try to allocate from the segment with the least |
531 | * amount of space left. | | 531 | * amount of space left. |
532 | */ | | 532 | */ |
533 | #define VM_PHYSMEM_SPACE(b) ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b))) | | 533 | #define VM_PHYSMEM_SPACE(b) ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b))) |
534 | if (uvm_physseg_valid_p(maybe_bank) == false | | 534 | if (uvm_physseg_valid_p(maybe_bank) == false |
535 | || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) { | | 535 | || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) { |
536 | maybe_bank = bank; | | 536 | maybe_bank = bank; |
537 | } | | 537 | } |
538 | } | | 538 | } |
539 | | | 539 | |
540 | if (uvm_physseg_valid_p(maybe_bank)) { | | 540 | if (uvm_physseg_valid_p(maybe_bank)) { |
541 | const uvm_physseg_t bank = maybe_bank; | | 541 | const uvm_physseg_t bank = maybe_bank; |
542 | | | 542 | |
543 | /* | | 543 | /* |
544 | * There are enough pages here; steal them! | | 544 | * There are enough pages here; steal them! |
545 | */ | | 545 | */ |
546 | pa = ptoa(uvm_physseg_get_start(bank)); | | 546 | pa = ptoa(uvm_physseg_get_start(bank)); |
547 | uvm_physseg_unplug(atop(pa), npgs); | | 547 | uvm_physseg_unplug(atop(pa), npgs); |
548 | | | 548 | |
549 | aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n", | | 549 | aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n", |
550 | __func__, bank, npgs, VM_PHYSMEM_SPACE(bank)); | | 550 | __func__, bank, npgs, VM_PHYSMEM_SPACE(bank)); |
551 | | | 551 | |
552 | va = pmap_md_map_poolpage(pa, size); | | 552 | va = pmap_md_map_poolpage(pa, size); |
553 | memset((void *)va, 0, size); | | 553 | memset((void *)va, 0, size); |
554 | return va; | | 554 | return va; |
555 | } | | 555 | } |
556 | | | 556 | |
557 | /* | | 557 | /* |
558 | * If we got here, there was no memory left. | | 558 | * If we got here, there was no memory left. |
559 | */ | | 559 | */ |
560 | panic("pmap_steal_memory: no memory to steal %zu pages", npgs); | | 560 | panic("pmap_steal_memory: no memory to steal %zu pages", npgs); |
561 | } | | 561 | } |
562 | | | 562 | |
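For context, the usual consumer of this interface is UVM's early boot allocator. A sketch of how uvm_pageboot_alloc() forwards to it when a port defines PMAP_STEAL_MEMORY (simplified from uvm_page.c, where virtual_space_start/virtual_space_end are file-local statics; details may vary by release):

    vaddr_t
    uvm_pageboot_alloc(vsize_t size)
    {
            size = round_page(size);
            /* returns a zeroed, direct-mapped, permanently wired region */
            return pmap_steal_memory(size, &virtual_space_start,
                &virtual_space_end);
    }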
563 | /* | | 563 | /* |
564 | * Bootstrap the system enough to run with virtual memory. | | 564 | * Bootstrap the system enough to run with virtual memory. |
565 | * (Common routine called by machine-dependent bootstrap code.) | | 565 | * (Common routine called by machine-dependent bootstrap code.) |
566 | */ | | 566 | */ |
567 | void | | 567 | void |
568 | pmap_bootstrap_common(void) | | 568 | pmap_bootstrap_common(void) |
569 | { | | 569 | { |
570 | pmap_tlb_miss_lock_init(); | | 570 | pmap_tlb_miss_lock_init(); |
571 | } | | 571 | } |
572 | | | 572 | |
573 | /* | | 573 | /* |
574 | * Initialize the pmap module. | | 574 | * Initialize the pmap module. |
575 | * Called by vm_init, to initialize any structures that the pmap | | 575 | * Called by vm_init, to initialize any structures that the pmap |
576 | * system needs to map virtual memory. | | 576 | * system needs to map virtual memory. |
577 | */ | | 577 | */ |
578 | void | | 578 | void |
579 | pmap_init(void) | | 579 | pmap_init(void) |
580 | { | | 580 | { |
581 | UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf); | | 581 | UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf); |
582 | UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); | | 582 | UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); |
583 | | | 583 | |
584 | UVMHIST_FUNC(__func__); | | 584 | UVMHIST_FUNC(__func__); |
585 | UVMHIST_CALLED(pmaphist); | | 585 | UVMHIST_CALLED(pmaphist); |
586 | | | 586 | |
587 | /* | | 587 | /* |
588 | * Initialize the segtab lock. | | 588 | * Initialize the segtab lock. |
589 | */ | | 589 | */ |
590 | mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH); | | 590 | mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH); |
591 | | | 591 | |
592 | /* | | 592 | /* |
593 | * Set a low water mark on the pv_entry pool, so that we are | | 593 | * Set a low water mark on the pv_entry pool, so that we are |
594 | * more likely to have these around even in extreme memory | | 594 | * more likely to have these around even in extreme memory |
595 | * starvation. | | 595 | * starvation. |
596 | */ | | 596 | */ |
597 | pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); | | 597 | pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); |
598 | | | 598 | |
599 | /* | | 599 | /* |
600 | * Set the page colormask but allow pmap_md_init to override it. | | 600 | * Set the page colormask but allow pmap_md_init to override it. |
601 | */ | | 601 | */ |
602 | pmap_page_colormask = ptoa(uvmexp.colormask); | | 602 | pmap_page_colormask = ptoa(uvmexp.colormask); |
603 | | | 603 | |
604 | pmap_md_init(); | | 604 | pmap_md_init(); |
605 | | | 605 | |
606 | /* | | 606 | /* |
607 | * Now it is safe to enable pv entry recording. | | 607 | * Now it is safe to enable pv entry recording. |
608 | */ | | 608 | */ |
609 | pmap_initialized = true; | | 609 | pmap_initialized = true; |
610 | } | | 610 | } |
611 | | | 611 | |
612 | /* | | 612 | /* |
613 | * Create and return a physical map. | | 613 | * Create and return a physical map. |
614 | * | | 614 | * |
615 | * If the size specified for the map | | 615 | * If the size specified for the map |
616 | * is zero, the map is an actual physical | | 616 | * is zero, the map is an actual physical |
617 | * map, and may be referenced by the | | 617 | * map, and may be referenced by the |
618 | * hardware. | | 618 | * hardware. |
619 | * | | 619 | * |
620 | * If the size specified is non-zero, | | 620 | * If the size specified is non-zero, |
621 | * the map will be used in software only, and | | 621 | * the map will be used in software only, and |
622 | * is bounded by that size. | | 622 | * is bounded by that size. |
623 | */ | | 623 | */ |
624 | pmap_t | | 624 | pmap_t |
625 | pmap_create(void) | | 625 | pmap_create(void) |
626 | { | | 626 | { |
627 | UVMHIST_FUNC(__func__); | | 627 | UVMHIST_FUNC(__func__); |
628 | UVMHIST_CALLED(pmaphist); | | 628 | UVMHIST_CALLED(pmaphist); |
629 | PMAP_COUNT(create); | | 629 | PMAP_COUNT(create); |
630 | | | 630 | |
631 | pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); | | 631 | pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); |
632 | memset(pmap, 0, PMAP_SIZE); | | 632 | memset(pmap, 0, PMAP_SIZE); |
633 | | | 633 | |
634 | KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL); | | 634 | KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL); |
635 | | | 635 | |
636 | pmap->pm_count = 1; | | 636 | pmap->pm_count = 1; |
637 | pmap->pm_minaddr = VM_MIN_ADDRESS; | | 637 | pmap->pm_minaddr = VM_MIN_ADDRESS; |
638 | pmap->pm_maxaddr = VM_MAXUSER_ADDRESS; | | 638 | pmap->pm_maxaddr = VM_MAXUSER_ADDRESS; |
639 | | | 639 | |
640 | pmap_segtab_init(pmap); | | 640 | pmap_segtab_init(pmap); |
641 | | | 641 | |
642 | #ifdef MULTIPROCESSOR | | 642 | #ifdef MULTIPROCESSOR |
643 | kcpuset_create(&pmap->pm_active, true); | | 643 | kcpuset_create(&pmap->pm_active, true); |
644 | kcpuset_create(&pmap->pm_onproc, true); | | 644 | kcpuset_create(&pmap->pm_onproc, true); |
645 | KASSERT(pmap->pm_active != NULL); | | 645 | KASSERT(pmap->pm_active != NULL); |
646 | KASSERT(pmap->pm_onproc != NULL); | | 646 | KASSERT(pmap->pm_onproc != NULL); |
647 | #endif | | 647 | #endif |
648 | | | 648 | |
649 | UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap, | | 649 | UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap, |
650 | 0, 0, 0); | | 650 | 0, 0, 0); |
651 | | | 651 | |
652 | return pmap; | | 652 | return pmap; |
653 | } | | 653 | } |
654 | | | 654 | |
655 | /* | | 655 | /* |
656 | * Retire the given physical map from service. | | 656 | * Retire the given physical map from service. |
657 | * Should only be called if the map contains | | 657 | * Should only be called if the map contains |
658 | * no valid mappings. | | 658 | * no valid mappings. |
659 | */ | | 659 | */ |
660 | void | | 660 | void |
661 | pmap_destroy(pmap_t pmap) | | 661 | pmap_destroy(pmap_t pmap) |
662 | { | | 662 | { |
663 | UVMHIST_FUNC(__func__); | | 663 | UVMHIST_FUNC(__func__); |
664 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); | | 664 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); |
665 | | | 665 | |
666 | if (atomic_dec_uint_nv(&pmap->pm_count) > 0) { | | 666 | if (atomic_dec_uint_nv(&pmap->pm_count) > 0) { |
667 | PMAP_COUNT(dereference); | | 667 | PMAP_COUNT(dereference); |
668 | UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0); | | 668 | UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0); |
669 | return; | | 669 | return; |
670 | } | | 670 | } |
671 | | | 671 | |
672 | PMAP_COUNT(destroy); | | 672 | PMAP_COUNT(destroy); |
673 | KASSERT(pmap->pm_count == 0); | | 673 | KASSERT(pmap->pm_count == 0); |
674 | kpreempt_disable(); | | 674 | kpreempt_disable(); |
675 | pmap_tlb_miss_lock_enter(); | | 675 | pmap_tlb_miss_lock_enter(); |
676 | pmap_tlb_asid_release_all(pmap); | | 676 | pmap_tlb_asid_release_all(pmap); |
677 | pmap_segtab_destroy(pmap, NULL, 0); | | 677 | pmap_segtab_destroy(pmap, NULL, 0); |
678 | pmap_tlb_miss_lock_exit(); | | 678 | pmap_tlb_miss_lock_exit(); |
679 | | | 679 | |
680 | #ifdef MULTIPROCESSOR | | 680 | #ifdef MULTIPROCESSOR |
681 | kcpuset_destroy(pmap->pm_active); | | 681 | kcpuset_destroy(pmap->pm_active); |
682 | kcpuset_destroy(pmap->pm_onproc); | | 682 | kcpuset_destroy(pmap->pm_onproc); |
683 | pmap->pm_active = NULL; | | 683 | pmap->pm_active = NULL; |
684 | pmap->pm_onproc = NULL; | | 684 | pmap->pm_onproc = NULL; |
685 | #endif | | 685 | #endif |
686 | | | 686 | |
687 | pool_put(&pmap_pmap_pool, pmap); | | 687 | pool_put(&pmap_pmap_pool, pmap); |
688 | kpreempt_enable(); | | 688 | kpreempt_enable(); |
689 | | | 689 | |
690 | UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0); | | 690 | UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0); |
691 | } | | 691 | } |
692 | | | 692 | |
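pmap_destroy() above uses the standard atomic reference-drop idiom: atomic_dec_uint_nv() returns the post-decrement value, so exactly one caller observes zero and proceeds with teardown. The idiom in isolation (a minimal sketch; struct obj and obj_free() are hypothetical):

    #include <sys/atomic.h>

    struct obj {
            volatile unsigned int o_refcnt;
            /* ... object state ... */
    };

    static void obj_free(struct obj *);     /* teardown, defined elsewhere */

    static void
    obj_release(struct obj *o)
    {
            /* atomic_dec_uint_nv() returns the value after the decrement */
            if (atomic_dec_uint_nv(&o->o_refcnt) > 0)
                    return;                 /* other references remain */
            obj_free(o);                    /* we dropped the last one */
    }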
693 | /* | | 693 | /* |
694 | * Add a reference to the specified pmap. | | 694 | * Add a reference to the specified pmap. |
695 | */ | | 695 | */ |
696 | void | | 696 | void |
697 | pmap_reference(pmap_t pmap) | | 697 | pmap_reference(pmap_t pmap) |
698 | { | | 698 | { |
699 | UVMHIST_FUNC(__func__); | | 699 | UVMHIST_FUNC(__func__); |
700 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); | | 700 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); |
701 | PMAP_COUNT(reference); | | 701 | PMAP_COUNT(reference); |
702 | | | 702 | |
703 | if (pmap != NULL) { | | 703 | if (pmap != NULL) { |
704 | atomic_inc_uint(&pmap->pm_count); | | 704 | atomic_inc_uint(&pmap->pm_count); |
705 | } | | 705 | } |
706 | | | 706 | |
707 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 707 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
708 | } | | 708 | } |
709 | | | 709 | |
710 | /* | | 710 | /* |
711 | * Make a new pmap (vmspace) active for the given process. | | 711 | * Make a new pmap (vmspace) active for the given process. |
712 | */ | | 712 | */ |
713 | void | | 713 | void |
714 | pmap_activate(struct lwp *l) | | 714 | pmap_activate(struct lwp *l) |
715 | { | | 715 | { |
716 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 716 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; |
717 | | | 717 | |
718 | UVMHIST_FUNC(__func__); | | 718 | UVMHIST_FUNC(__func__); |
719 | UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, | | 719 | UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, |
720 | (uintptr_t)pmap, 0, 0); | | 720 | (uintptr_t)pmap, 0, 0); |
721 | PMAP_COUNT(activate); | | 721 | PMAP_COUNT(activate); |
722 | | | 722 | |
723 | kpreempt_disable(); | | 723 | kpreempt_disable(); |
724 | pmap_tlb_miss_lock_enter(); | | 724 | pmap_tlb_miss_lock_enter(); |
725 | pmap_tlb_asid_acquire(pmap, l); | | 725 | pmap_tlb_asid_acquire(pmap, l); |
726 | if (l == curlwp) { | | 726 | pmap_segtab_activate(pmap, l); |
727 | pmap_segtab_activate(pmap, l); | | | |
728 | } | | | |
729 | pmap_tlb_miss_lock_exit(); | | 727 | pmap_tlb_miss_lock_exit(); |
730 | kpreempt_enable(); | | 728 | kpreempt_enable(); |
731 | | | 729 | |
732 | UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, | | 730 | UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, |
733 | l->l_lid, 0, 0); | | 731 | l->l_lid, 0, 0); |
734 | } | | 732 | } |
735 | | | 733 | |
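The hunk above is the substantive change in rev 1.50: pmap_segtab_activate() is now called unconditionally inside the TLB-miss-locked region, rather than only when the lwp being activated is curlwp, so the segtab state is brought up to date for any lwp being made active, not just the currently running one. The locked region before and after:

    /* rev 1.49 */
    pmap_tlb_asid_acquire(pmap, l);
    if (l == curlwp) {
            pmap_segtab_activate(pmap, l);
    }

    /* rev 1.50 */
    pmap_tlb_asid_acquire(pmap, l);
    pmap_segtab_activate(pmap, l);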
736 | /* | | 734 | /* |
737 | * Remove this page from all physical maps in which it resides. | | 735 | * Remove this page from all physical maps in which it resides. |
738 | * Reflects back modify bits to the pager. | | 736 | * Reflects back modify bits to the pager. |
739 | */ | | 737 | */ |
740 | void | | 738 | void |
741 | pmap_page_remove(struct vm_page *pg) | | 739 | pmap_page_remove(struct vm_page *pg) |
742 | { | | 740 | { |
743 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); | | 741 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); |
744 | | | 742 | |
745 | kpreempt_disable(); | | 743 | kpreempt_disable(); |
746 | VM_PAGEMD_PVLIST_LOCK(mdpg); | | 744 | VM_PAGEMD_PVLIST_LOCK(mdpg); |
747 | pmap_pvlist_check(mdpg); | | 745 | pmap_pvlist_check(mdpg); |
748 | | | 746 | |
749 | UVMHIST_FUNC(__func__); | | 747 | UVMHIST_FUNC(__func__); |
750 | UVMHIST_CALLARGS(pmapexechist, "pg %#jx (pa %#jx) [page removed]: " | | 748 | UVMHIST_CALLARGS(pmapexechist, "pg %#jx (pa %#jx) [page removed]: " |
751 | "execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0); | | 749 | "execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0); |
752 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 750 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
753 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED); | | 751 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED); |
754 | #else | | 752 | #else |
755 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); | | 753 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); |
756 | #endif | | 754 | #endif |
757 | PMAP_COUNT(exec_uncached_remove); | | 755 | PMAP_COUNT(exec_uncached_remove); |
758 | | | 756 | |
759 | pv_entry_t pv = &mdpg->mdpg_first; | | 757 | pv_entry_t pv = &mdpg->mdpg_first; |
760 | if (pv->pv_pmap == NULL) { | | 758 | if (pv->pv_pmap == NULL) { |
761 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); | | 759 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); |
762 | kpreempt_enable(); | | 760 | kpreempt_enable(); |
763 | UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0); | | 761 | UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0); |
764 | return; | | 762 | return; |
765 | } | | 763 | } |
766 | | | 764 | |
767 | pv_entry_t npv; | | 765 | pv_entry_t npv; |
768 | pv_entry_t pvp = NULL; | | 766 | pv_entry_t pvp = NULL; |
769 | | | 767 | |
770 | for (; pv != NULL; pv = npv) { | | 768 | for (; pv != NULL; pv = npv) { |
771 | npv = pv->pv_next; | | 769 | npv = pv->pv_next; |
772 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 770 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
773 | if (PV_ISKENTER_P(pv)) { | | 771 | if (PV_ISKENTER_P(pv)) { |
774 | UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx" | | 772 | UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx" |
775 | " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap, | | 773 | " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap, |
776 | pv->pv_va, 0); | | 774 | pv->pv_va, 0); |
777 | | | 775 | |
778 | KASSERT(pv->pv_pmap == pmap_kernel()); | | 776 | KASSERT(pv->pv_pmap == pmap_kernel()); |
779 | | | 777 | |
780 | 			/* Assume no more - it'll get fixed if there are more */ | | 778 | 			/* Assume no more - it'll get fixed if there are more */
781 | pv->pv_next = NULL; | | 779 | pv->pv_next = NULL; |
782 | | | 780 | |
783 | /* | | 781 | /* |
784 | * pvp is non-null when we already have a PV_KENTER | | 782 | * pvp is non-null when we already have a PV_KENTER |
785 | * pv in pvh_first; otherwise we haven't seen a | | 783 | * pv in pvh_first; otherwise we haven't seen a |
786 | * PV_KENTER pv and we need to copy this one to | | 784 | * PV_KENTER pv and we need to copy this one to |
787 | * pvh_first | | 785 | * pvh_first |
788 | */ | | 786 | */ |
789 | if (pvp) { | | 787 | if (pvp) { |
790 | /* | | 788 | /* |
791 | * The previous PV_KENTER pv needs to point to | | 789 | * The previous PV_KENTER pv needs to point to |
792 | * this PV_KENTER pv | | 790 | * this PV_KENTER pv |
793 | */ | | 791 | */ |
794 | pvp->pv_next = pv; | | 792 | pvp->pv_next = pv; |
795 | } else { | | 793 | } else { |
796 | pv_entry_t fpv = &mdpg->mdpg_first; | | 794 | pv_entry_t fpv = &mdpg->mdpg_first; |
797 | *fpv = *pv; | | 795 | *fpv = *pv; |
798 | KASSERT(fpv->pv_pmap == pmap_kernel()); | | 796 | KASSERT(fpv->pv_pmap == pmap_kernel()); |
799 | } | | 797 | } |
800 | pvp = pv; | | 798 | pvp = pv; |
801 | continue; | | 799 | continue; |
802 | } | | 800 | } |
803 | #endif | | 801 | #endif |
804 | const pmap_t pmap = pv->pv_pmap; | | 802 | const pmap_t pmap = pv->pv_pmap; |
805 | vaddr_t va = trunc_page(pv->pv_va); | | 803 | vaddr_t va = trunc_page(pv->pv_va); |
806 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); | | 804 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); |
807 | KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, | | 805 | KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, |
808 | pmap_limits.virtual_end); | | 806 | pmap_limits.virtual_end); |
809 | pt_entry_t pte = *ptep; | | 807 | pt_entry_t pte = *ptep; |
810 | UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx" | | 808 | UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx" |
811 | " pte %jx", (uintptr_t)pv, (uintptr_t)pmap, va, | | 809 | " pte %jx", (uintptr_t)pv, (uintptr_t)pmap, va, |
812 | pte_value(pte)); | | 810 | pte_value(pte)); |
813 | if (!pte_valid_p(pte)) | | 811 | if (!pte_valid_p(pte)) |
814 | continue; | | 812 | continue; |
815 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); | | 813 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); |
816 | if (is_kernel_pmap_p) { | | 814 | if (is_kernel_pmap_p) { |
817 | PMAP_COUNT(remove_kernel_pages); | | 815 | PMAP_COUNT(remove_kernel_pages); |
818 | } else { | | 816 | } else { |
819 | PMAP_COUNT(remove_user_pages); | | 817 | PMAP_COUNT(remove_user_pages); |
820 | } | | 818 | } |
821 | if (pte_wired_p(pte)) | | 819 | if (pte_wired_p(pte)) |
822 | pmap->pm_stats.wired_count--; | | 820 | pmap->pm_stats.wired_count--; |
823 | pmap->pm_stats.resident_count--; | | 821 | pmap->pm_stats.resident_count--; |
824 | | | 822 | |
825 | pmap_tlb_miss_lock_enter(); | | 823 | pmap_tlb_miss_lock_enter(); |
826 | const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); | | 824 | const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); |
827 | pte_set(ptep, npte); | | 825 | pte_set(ptep, npte); |
828 | if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { | | 826 | if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { |
829 | /* | | 827 | /* |
830 | * Flush the TLB for the given address. | | 828 | * Flush the TLB for the given address. |
831 | */ | | 829 | */ |
832 | pmap_tlb_invalidate_addr(pmap, va); | | 830 | pmap_tlb_invalidate_addr(pmap, va); |
833 | } | | 831 | } |
834 | pmap_tlb_miss_lock_exit(); | | 832 | pmap_tlb_miss_lock_exit(); |
835 | | | 833 | |
836 | /* | | 834 | /* |
837 | 	 * A non-NULL pvp means this pv is not pvh_first, so we | | 835 | 	 * A non-NULL pvp means this pv is not pvh_first, so we
838 | 	 * should free it. | | 836 | 	 * should free it.
839 | */ | | 837 | */ |
840 | if (pvp) { | | 838 | if (pvp) { |
841 | KASSERT(pvp->pv_pmap == pmap_kernel()); | | 839 | KASSERT(pvp->pv_pmap == pmap_kernel()); |
842 | KASSERT(pvp->pv_next == NULL); | | 840 | KASSERT(pvp->pv_next == NULL); |
843 | pmap_pv_free(pv); | | 841 | pmap_pv_free(pv); |
844 | } else { | | 842 | } else { |
845 | pv->pv_pmap = NULL; | | 843 | pv->pv_pmap = NULL; |
846 | pv->pv_next = NULL; | | 844 | pv->pv_next = NULL; |
847 | } | | 845 | } |
848 | } | | 846 | } |
849 | | | 847 | |
850 | pmap_pvlist_check(mdpg); | | 848 | pmap_pvlist_check(mdpg); |
851 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); | | 849 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); |
852 | kpreempt_enable(); | | 850 | kpreempt_enable(); |
853 | | | 851 | |
854 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 852 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
855 | } | | 853 | } |
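/*
 * Illustrative only: the pv-list shape that pmap_page_remove() walks.
 * The first pv_entry is embedded in the page's vm_page_md (mdpg_first,
 * with pv_pmap == NULL when the page is unmapped); further mappings
 * hang off pv_next.  A read-only walk, assuming the caller already
 * holds the pv-list lock, might look like this hypothetical helper.
 */
static inline u_int
example_count_mappings(struct vm_page_md *mdpg)
{
	u_int n = 0;

	for (pv_entry_t pv = &mdpg->mdpg_first;
	    pv != NULL && pv->pv_pmap != NULL; pv = pv->pv_next)
		n++;			/* one entry per mapping */
	return n;
}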
856 | | | 854 | |
857 | | | 855 | |
858 | /* | | 856 | /* |
859 | * Make a previously active pmap (vmspace) inactive. | | 857 | * Make a previously active pmap (vmspace) inactive. |
860 | */ | | 858 | */ |
861 | void | | 859 | void |
862 | pmap_deactivate(struct lwp *l) | | 860 | pmap_deactivate(struct lwp *l) |
863 | { | | 861 | { |
864 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 862 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; |
865 | | | 863 | |
866 | UVMHIST_FUNC(__func__); | | 864 | UVMHIST_FUNC(__func__); |
867 | UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, | | 865 | UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, |
868 | (uintptr_t)pmap, 0, 0); | | 866 | (uintptr_t)pmap, 0, 0); |
869 | PMAP_COUNT(deactivate); | | 867 | PMAP_COUNT(deactivate); |
870 | | | 868 | |
871 | kpreempt_disable(); | | 869 | kpreempt_disable(); |
872 | KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu); | | 870 | KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu); |
873 | pmap_tlb_miss_lock_enter(); | | 871 | pmap_tlb_miss_lock_enter(); |
874 | curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS; | | 872 | curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS; |
875 | #ifdef _LP64 | | 873 | #ifdef _LP64 |
876 | curcpu()->ci_pmap_user_seg0tab = NULL; | | 874 | curcpu()->ci_pmap_user_seg0tab = NULL; |
877 | #endif | | 875 | #endif |
878 | pmap_tlb_asid_deactivate(pmap); | | 876 | pmap_tlb_asid_deactivate(pmap); |
879 | pmap_tlb_miss_lock_exit(); | | 877 | pmap_tlb_miss_lock_exit(); |
880 | kpreempt_enable(); | | 878 | kpreempt_enable(); |
881 | | | 879 | |
882 | UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, | | 880 | UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, |
883 | l->l_lid, 0, 0); | | 881 | l->l_lid, 0, 0); |
884 | } | | 882 | } |
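/*
 * Hedged sketch of how a context switch is expected to pair the two
 * calls above: deactivate the outgoing lwp's pmap, then activate the
 * incoming one.  "example_switch_pmaps" is hypothetical; the real
 * call sites are in the MI switch path and MD cpu_switchto() code.
 */
static inline void
example_switch_pmaps(struct lwp *outgoing, struct lwp *incoming)
{

	pmap_deactivate(outgoing);	/* drop ASID, clear user segtab */
	pmap_activate(incoming);	/* acquire ASID, install segtab */
}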
885 | | | 883 | |
886 | void | | 884 | void |
887 | pmap_update(struct pmap *pmap) | | 885 | pmap_update(struct pmap *pmap) |
888 | { | | 886 | { |
889 | UVMHIST_FUNC(__func__); | | 887 | UVMHIST_FUNC(__func__); |
890 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); | | 888 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); |
891 | PMAP_COUNT(update); | | 889 | PMAP_COUNT(update); |
892 | | | 890 | |
893 | kpreempt_disable(); | | 891 | kpreempt_disable(); |
894 | #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN) | | 892 | #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN) |
895 | u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0); | | 893 | u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0); |
896 | if (pending && pmap_tlb_shootdown_bystanders(pmap)) | | 894 | if (pending && pmap_tlb_shootdown_bystanders(pmap)) |
897 | PMAP_COUNT(shootdown_ipis); | | 895 | PMAP_COUNT(shootdown_ipis); |
898 | #endif | | 896 | #endif |
899 | pmap_tlb_miss_lock_enter(); | | 897 | pmap_tlb_miss_lock_enter(); |
900 | #if defined(DEBUG) && !defined(MULTIPROCESSOR) | | 898 | #if defined(DEBUG) && !defined(MULTIPROCESSOR) |
901 | pmap_tlb_check(pmap, pmap_md_tlb_check_entry); | | 899 | pmap_tlb_check(pmap, pmap_md_tlb_check_entry); |
902 | #endif /* DEBUG */ | | 900 | #endif /* DEBUG */ |
903 | | | 901 | |
904 | /* | | 902 | /* |
905 | * If pmap_remove_all was called, we deactivated ourselves and nuked | | 903 | * If pmap_remove_all was called, we deactivated ourselves and nuked |
906 | * our ASID. Now we have to reactivate ourselves. | | 904 | * our ASID. Now we have to reactivate ourselves. |
907 | */ | | 905 | */ |
908 | if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) { | | 906 | if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) { |
909 | pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE; | | 907 | pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE; |
910 | pmap_tlb_asid_acquire(pmap, curlwp); | | 908 | pmap_tlb_asid_acquire(pmap, curlwp); |
911 | pmap_segtab_activate(pmap, curlwp); | | 909 | pmap_segtab_activate(pmap, curlwp); |
912 | } | | 910 | } |
913 | pmap_tlb_miss_lock_exit(); | | 911 | pmap_tlb_miss_lock_exit(); |
914 | kpreempt_enable(); | | 912 | kpreempt_enable(); |
915 | | | 913 | |
916 | UVMHIST_LOG(pmaphist, " <-- done (kernel=%#jx)", | | 914 | UVMHIST_LOG(pmaphist, " <-- done (kernel=%#jx)", |
917 | (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0); | | 915 | (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0); |
918 | } | | 916 | } |
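/*
 * Caller-side sketch, per the pmap(9) contract: mapping changes need
 * not be globally visible until pmap_update() runs, which is where
 * the pending shootdown IPIs and the deferred reactivation above are
 * resolved.  "example_commit_unmap" is a hypothetical wrapper.
 */
static inline void
example_commit_unmap(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{

	pmap_remove(pmap, sva, eva);	/* may defer TLB/IPI work */
	pmap_update(pmap);		/* commit: IPIs drained here */
}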
919 | | | 917 | |
920 | /* | | 918 | /* |
921 | * Remove the given range of addresses from the specified map. | | 919 | * Remove the given range of addresses from the specified map. |
922 | * | | 920 | * |
923 | * It is assumed that the start and end are properly | | 921 | * It is assumed that the start and end are properly |
924 | * rounded to the page size. | | 922 | * rounded to the page size. |
925 | */ | | 923 | */ |
926 | | | 924 | |
927 | static bool | | 925 | static bool |
928 | pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, | | 926 | pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, |
929 | uintptr_t flags) | | 927 | uintptr_t flags) |
930 | { | | 928 | { |
931 | const pt_entry_t npte = flags; | | 929 | const pt_entry_t npte = flags; |
932 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); | | 930 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); |
933 | | | 931 | |
934 | UVMHIST_FUNC(__func__); | | 932 | UVMHIST_FUNC(__func__); |
935 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)", | | 933 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)", |
936 | (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva); | | 934 | (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva); |
937 | UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", | | 935 | UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", |
938 | (uintptr_t)ptep, flags, 0, 0); | | 936 | (uintptr_t)ptep, flags, 0, 0); |
939 | | | 937 | |
940 | KASSERT(kpreempt_disabled()); | | 938 | KASSERT(kpreempt_disabled()); |
941 | | | 939 | |
942 | for (; sva < eva; sva += NBPG, ptep++) { | | 940 | for (; sva < eva; sva += NBPG, ptep++) { |
943 | const pt_entry_t pte = *ptep; | | 941 | const pt_entry_t pte = *ptep; |
944 | if (!pte_valid_p(pte)) | | 942 | if (!pte_valid_p(pte)) |
945 | continue; | | 943 | continue; |
946 | if (is_kernel_pmap_p) { | | 944 | if (is_kernel_pmap_p) { |
947 | PMAP_COUNT(remove_kernel_pages); | | 945 | PMAP_COUNT(remove_kernel_pages); |
948 | } else { | | 946 | } else { |
949 | PMAP_COUNT(remove_user_pages); | | 947 | PMAP_COUNT(remove_user_pages); |
950 | } | | 948 | } |
951 | if (pte_wired_p(pte)) | | 949 | if (pte_wired_p(pte)) |
952 | pmap->pm_stats.wired_count--; | | 950 | pmap->pm_stats.wired_count--; |
953 | pmap->pm_stats.resident_count--; | | 951 | pmap->pm_stats.resident_count--; |
954 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); | | 952 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); |
955 | if (__predict_true(pg != NULL)) { | | 953 | if (__predict_true(pg != NULL)) { |
956 | pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte)); | | 954 | pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte)); |
957 | } | | 955 | } |
958 | pmap_tlb_miss_lock_enter(); | | 956 | pmap_tlb_miss_lock_enter(); |
959 | pte_set(ptep, npte); | | 957 | pte_set(ptep, npte); |
960 | if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { | | 958 | if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { |
961 | | | 959 | |
962 | /* | | 960 | /* |
963 | * Flush the TLB for the given address. | | 961 | * Flush the TLB for the given address. |
964 | */ | | 962 | */ |
965 | pmap_tlb_invalidate_addr(pmap, sva); | | 963 | pmap_tlb_invalidate_addr(pmap, sva); |
966 | } | | 964 | } |
967 | pmap_tlb_miss_lock_exit(); | | 965 | pmap_tlb_miss_lock_exit(); |
968 | } | | 966 | } |
969 | | | 967 | |
970 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 968 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
971 | | | 969 | |
972 | return false; | | 970 | return false; |
973 | } | | 971 | } |
974 | | | 972 | |
975 | void | | 973 | void |
976 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) | | 974 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) |
977 | { | | 975 | { |
978 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); | | 976 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); |
979 | const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); | | 977 | const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); |
980 | | | 978 | |
981 | UVMHIST_FUNC(__func__); | | 979 | UVMHIST_FUNC(__func__); |
982 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)", | | 980 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)", |
983 | (uintptr_t)pmap, sva, eva, 0); | | 981 | (uintptr_t)pmap, sva, eva, 0); |
984 | | | 982 | |
985 | if (is_kernel_pmap_p) { | | 983 | if (is_kernel_pmap_p) { |
986 | PMAP_COUNT(remove_kernel_calls); | | 984 | PMAP_COUNT(remove_kernel_calls); |
987 | } else { | | 985 | } else { |
988 | PMAP_COUNT(remove_user_calls); | | 986 | PMAP_COUNT(remove_user_calls); |
989 | } | | 987 | } |
990 | #ifdef PMAP_FAULTINFO | | 988 | #ifdef PMAP_FAULTINFO |
991 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; | | 989 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; |
992 | curpcb->pcb_faultinfo.pfi_repeats = 0; | | 990 | curpcb->pcb_faultinfo.pfi_repeats = 0; |
993 | curpcb->pcb_faultinfo.pfi_faultpte = NULL; | | 991 | curpcb->pcb_faultinfo.pfi_faultpte = NULL; |
994 | #endif | | 992 | #endif |
995 | kpreempt_disable(); | | 993 | kpreempt_disable(); |
996 | pmap_addr_range_check(pmap, sva, eva, __func__); | | 994 | pmap_addr_range_check(pmap, sva, eva, __func__); |
997 | pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte); | | 995 | pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte); |
998 | kpreempt_enable(); | | 996 | kpreempt_enable(); |
999 | | | 997 | |
1000 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 998 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1001 | } | | 999 | } |
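/*
 * Caller-side sketch: pmap_remove() assumes sva/eva are already
 * rounded to page boundaries, so a byte range is widened first.  The
 * wrapper name and its use here are illustrative only.
 */
static inline void
example_remove_bytes(pmap_t pmap, vaddr_t va, vsize_t nbytes)
{

	pmap_remove(pmap, trunc_page(va), round_page(va + nbytes));
}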
1002 | | | 1000 | |
1003 | /* | | 1001 | /* |
1004 | * pmap_page_protect: | | 1002 | * pmap_page_protect: |
1005 | * | | 1003 | * |
1006 | * Lower the permission for all mappings to a given page. | | 1004 | * Lower the permission for all mappings to a given page. |
1007 | */ | | 1005 | */ |
1008 | void | | 1006 | void |
1009 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 1007 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
1010 | { | | 1008 | { |
1011 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); | | 1009 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); |
1012 | pv_entry_t pv; | | 1010 | pv_entry_t pv; |
1013 | vaddr_t va; | | 1011 | vaddr_t va; |
1014 | | | 1012 | |
1015 | UVMHIST_FUNC(__func__); | | 1013 | UVMHIST_FUNC(__func__); |
1016 | UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)", | | 1014 | UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)", |
1017 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0); | | 1015 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0); |
1018 | PMAP_COUNT(page_protect); | | 1016 | PMAP_COUNT(page_protect); |
1019 | | | 1017 | |
1020 | switch (prot) { | | 1018 | switch (prot) { |
1021 | case VM_PROT_READ|VM_PROT_WRITE: | | 1019 | case VM_PROT_READ|VM_PROT_WRITE: |
1022 | case VM_PROT_ALL: | | 1020 | case VM_PROT_ALL: |
1023 | break; | | 1021 | break; |
1024 | | | 1022 | |
1025 | /* copy_on_write */ | | 1023 | /* copy_on_write */ |
1026 | case VM_PROT_READ: | | 1024 | case VM_PROT_READ: |
1027 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 1025 | case VM_PROT_READ|VM_PROT_EXECUTE: |
1028 | pv = &mdpg->mdpg_first; | | 1026 | pv = &mdpg->mdpg_first; |
1029 | kpreempt_disable(); | | 1027 | kpreempt_disable(); |
1030 | VM_PAGEMD_PVLIST_READLOCK(mdpg); | | 1028 | VM_PAGEMD_PVLIST_READLOCK(mdpg); |
1031 | pmap_pvlist_check(mdpg); | | 1029 | pmap_pvlist_check(mdpg); |
1032 | /* | | 1030 | /* |
1033 | * Loop over all current mappings setting/clearing as | | 1031 | * Loop over all current mappings setting/clearing as |
1034 | * appropriate. | | 1032 | * appropriate. |
1035 | */ | | 1033 | */ |
1036 | if (pv->pv_pmap != NULL) { | | 1034 | if (pv->pv_pmap != NULL) { |
1037 | while (pv != NULL) { | | 1035 | while (pv != NULL) { |
1038 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1036 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1039 | if (PV_ISKENTER_P(pv)) { | | 1037 | if (PV_ISKENTER_P(pv)) { |
1040 | pv = pv->pv_next; | | 1038 | pv = pv->pv_next; |
1041 | continue; | | 1039 | continue; |
1042 | } | | 1040 | } |
1043 | #endif | | 1041 | #endif |
1044 | const pmap_t pmap = pv->pv_pmap; | | 1042 | const pmap_t pmap = pv->pv_pmap; |
1045 | va = trunc_page(pv->pv_va); | | 1043 | va = trunc_page(pv->pv_va); |
1046 | const uintptr_t gen = | | 1044 | const uintptr_t gen = |
1047 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); | | 1045 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); |
1048 | pmap_protect(pmap, va, va + PAGE_SIZE, prot); | | 1046 | pmap_protect(pmap, va, va + PAGE_SIZE, prot); |
1049 | KASSERT(pv->pv_pmap == pmap); | | 1047 | KASSERT(pv->pv_pmap == pmap); |
1050 | pmap_update(pmap); | | 1048 | pmap_update(pmap); |
1051 | if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) { | | 1049 | if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) { |
1052 | pv = &mdpg->mdpg_first; | | 1050 | pv = &mdpg->mdpg_first; |
1053 | } else { | | 1051 | } else { |
1054 | pv = pv->pv_next; | | 1052 | pv = pv->pv_next; |
1055 | } | | 1053 | } |
1056 | pmap_pvlist_check(mdpg); | | 1054 | pmap_pvlist_check(mdpg); |
1057 | } | | 1055 | } |
1058 | } | | 1056 | } |
1059 | pmap_pvlist_check(mdpg); | | 1057 | pmap_pvlist_check(mdpg); |
1060 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); | | 1058 | VM_PAGEMD_PVLIST_UNLOCK(mdpg); |
1061 | kpreempt_enable(); | | 1059 | kpreempt_enable(); |
1062 | break; | | 1060 | break; |
1063 | | | 1061 | |
1064 | /* remove_all */ | | 1062 | /* remove_all */ |
1065 | default: | | 1063 | default: |
1066 | pmap_page_remove(pg); | | 1064 | pmap_page_remove(pg); |
1067 | } | | 1065 | } |
1068 | | | 1066 | |
1069 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1067 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1070 | } | | 1068 | } |
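/*
 * The prot dispatch above, summarized as a hypothetical helper: any
 * protection still allowing read+write is a no-op, read-only (with or
 * without execute) downgrades every mapping for copy-on-write, and
 * everything else removes the page outright.
 */
static inline void
example_make_page_cow(struct vm_page *pg)
{

	/* Revoke write access from every mapping; reads keep working. */
	pmap_page_protect(pg, VM_PROT_READ);
}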
1071 | | | 1069 | |
1072 | static bool | | 1070 | static bool |
1073 | pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, | | 1071 | pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, |
1074 | uintptr_t flags) | | 1072 | uintptr_t flags) |
1075 | { | | 1073 | { |
1076 | const vm_prot_t prot = (flags & VM_PROT_ALL); | | 1074 | const vm_prot_t prot = (flags & VM_PROT_ALL); |
1077 | | | 1075 | |
1078 | UVMHIST_FUNC(__func__); | | 1076 | UVMHIST_FUNC(__func__); |
1079 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)", | | 1077 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)", |
1080 | (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva); | | 1078 | (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva); |
1081 | UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", | | 1079 | UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", |
1082 | (uintptr_t)ptep, flags, 0, 0); | | 1080 | (uintptr_t)ptep, flags, 0, 0); |
1083 | | | 1081 | |
1084 | KASSERT(kpreempt_disabled()); | | 1082 | KASSERT(kpreempt_disabled()); |
1085 | /* | | 1083 | /* |
1086 | * Change protection on every valid mapping within this segment. | | 1084 | * Change protection on every valid mapping within this segment. |
1087 | */ | | 1085 | */ |
1088 | for (; sva < eva; sva += NBPG, ptep++) { | | 1086 | for (; sva < eva; sva += NBPG, ptep++) { |
1089 | pt_entry_t pte = *ptep; | | 1087 | pt_entry_t pte = *ptep; |
1090 | if (!pte_valid_p(pte)) | | 1088 | if (!pte_valid_p(pte)) |
1091 | continue; | | 1089 | continue; |
1092 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); | | 1090 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); |
1093 | if (pg != NULL && pte_modified_p(pte)) { | | 1091 | if (pg != NULL && pte_modified_p(pte)) { |
1094 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); | | 1092 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); |
1095 | if (VM_PAGEMD_EXECPAGE_P(mdpg)) { | | 1093 | if (VM_PAGEMD_EXECPAGE_P(mdpg)) { |
1096 | KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg)); | | 1094 | KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg)); |
1097 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1095 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1098 | if (VM_PAGEMD_CACHED_P(mdpg)) { | | 1096 | if (VM_PAGEMD_CACHED_P(mdpg)) { |
1099 | #endif | | 1097 | #endif |
1100 | UVMHIST_LOG(pmapexechist, | | 1098 | UVMHIST_LOG(pmapexechist, |
1101 | "pg %#jx (pa %#jx): " | | 1099 | "pg %#jx (pa %#jx): " |
1102 | "syncicached performed", | | 1100 | "syncicached performed", |
1103 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), | | 1101 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), |
1104 | 0, 0); | | 1102 | 0, 0); |
1105 | pmap_page_syncicache(pg); | | 1103 | pmap_page_syncicache(pg); |
1106 | PMAP_COUNT(exec_synced_protect); | | 1104 | PMAP_COUNT(exec_synced_protect); |
1107 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1105 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1108 | } | | 1106 | } |
1109 | #endif | | 1107 | #endif |
1110 | } | | 1108 | } |
1111 | } | | 1109 | } |
1112 | pte = pte_prot_downgrade(pte, prot); | | 1110 | pte = pte_prot_downgrade(pte, prot); |
1113 | if (*ptep != pte) { | | 1111 | if (*ptep != pte) { |
1114 | pmap_tlb_miss_lock_enter(); | | 1112 | pmap_tlb_miss_lock_enter(); |
1115 | pte_set(ptep, pte); | | 1113 | pte_set(ptep, pte); |
1116 | /* | | 1114 | /* |
1117 | * Update the TLB if needed. | | 1115 | * Update the TLB if needed. |
1118 | */ | | 1116 | */ |
1119 | pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI); | | 1117 | pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI); |
1120 | pmap_tlb_miss_lock_exit(); | | 1118 | pmap_tlb_miss_lock_exit(); |
1121 | } | | 1119 | } |
1122 | } | | 1120 | } |
1123 | | | 1121 | |
1124 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1122 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1125 | | | 1123 | |
1126 | return false; | | 1124 | return false; |
1127 | } | | 1125 | } |
1128 | | | 1126 | |
1129 | /* | | 1127 | /* |
1130 | * Set the physical protection on the | | 1128 | * Set the physical protection on the |
1131 | * specified range of this map as requested. | | 1129 | * specified range of this map as requested. |
1132 | */ | | 1130 | */ |
1133 | void | | 1131 | void |
1134 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 1132 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
1135 | { | | 1133 | { |
1136 | UVMHIST_FUNC(__func__); | | 1134 | UVMHIST_FUNC(__func__); |
1137 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)", | | 1135 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)", |
1138 | (uintptr_t)pmap, sva, eva, prot); | | 1136 | (uintptr_t)pmap, sva, eva, prot); |
1139 | PMAP_COUNT(protect); | | 1137 | PMAP_COUNT(protect); |
1140 | | | 1138 | |
1141 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | | 1139 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { |
1142 | pmap_remove(pmap, sva, eva); | | 1140 | pmap_remove(pmap, sva, eva); |
1143 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1141 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1144 | return; | | 1142 | return; |
1145 | } | | 1143 | } |
1146 | | | 1144 | |
1147 | /* | | 1145 | /* |
1148 | * Change protection on every valid mapping within this segment. | | 1146 | * Change protection on every valid mapping within this segment. |
1149 | */ | | 1147 | */ |
1150 | kpreempt_disable(); | | 1148 | kpreempt_disable(); |
1151 | pmap_addr_range_check(pmap, sva, eva, __func__); | | 1149 | pmap_addr_range_check(pmap, sva, eva, __func__); |
1152 | pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot); | | 1150 | pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot); |
1153 | kpreempt_enable(); | | 1151 | kpreempt_enable(); |
1154 | | | 1152 | |
1155 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1153 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1156 | } | | 1154 | } |
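/*
 * Note in code form (illustrative): because of the early return
 * above, downgrading to a protection without VM_PROT_READ is the
 * same as removing the range.
 */
static inline void
example_revoke_range(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{

	pmap_protect(pmap, sva, eva, VM_PROT_NONE);	/* == pmap_remove() */
	pmap_update(pmap);
}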
1157 | | | 1155 | |
1158 | #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED) | | 1156 | #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED) |
1159 | /* | | 1157 | /* |
1160 | * pmap_page_cache: | | 1158 | * pmap_page_cache: |
1161 | * | | 1159 | * |
1162 | * Change all mappings of a managed page to cached/uncached. | | 1160 | * Change all mappings of a managed page to cached/uncached. |
1163 | */ | | 1161 | */ |
1164 | void | | 1162 | void |
1165 | pmap_page_cache(struct vm_page *pg, bool cached) | | 1163 | pmap_page_cache(struct vm_page *pg, bool cached) |
1166 | { | | 1164 | { |
1167 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); | | 1165 | struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); |
1168 | | | 1166 | |
1169 | UVMHIST_FUNC(__func__); | | 1167 | UVMHIST_FUNC(__func__); |
1170 | UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)", | | 1168 | UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)", |
1171 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0); | | 1169 | (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0); |
1172 | | | 1170 | |
1173 | KASSERT(kpreempt_disabled()); | | 1171 | KASSERT(kpreempt_disabled()); |
1174 | KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg)); | | 1172 | KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg)); |
1175 | | | 1173 | |
1176 | if (cached) { | | 1174 | if (cached) { |
1177 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED); | | 1175 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED); |
1178 | PMAP_COUNT(page_cache_restorations); | | 1176 | PMAP_COUNT(page_cache_restorations); |
1179 | } else { | | 1177 | } else { |
1180 | pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED); | | 1178 | pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED); |
1181 | PMAP_COUNT(page_cache_evictions); | | 1179 | PMAP_COUNT(page_cache_evictions); |
1182 | } | | 1180 | } |
1183 | | | 1181 | |
1184 | for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) { | | 1182 | for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) { |
1185 | pmap_t pmap = pv->pv_pmap; | | 1183 | pmap_t pmap = pv->pv_pmap; |
1186 | vaddr_t va = trunc_page(pv->pv_va); | | 1184 | vaddr_t va = trunc_page(pv->pv_va); |
1187 | | | 1185 | |
1188 | KASSERT(pmap != NULL); | | 1186 | KASSERT(pmap != NULL); |
1189 | KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); | | 1187 | KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); |
1190 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); | | 1188 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); |
1191 | if (ptep == NULL) | | 1189 | if (ptep == NULL) |
1192 | continue; | | 1190 | continue; |
1193 | pt_entry_t pte = *ptep; | | 1191 | pt_entry_t pte = *ptep; |
1194 | if (pte_valid_p(pte)) { | | 1192 | if (pte_valid_p(pte)) { |
1195 | pte = pte_cached_change(pte, cached); | | 1193 | pte = pte_cached_change(pte, cached); |
1196 | pmap_tlb_miss_lock_enter(); | | 1194 | pmap_tlb_miss_lock_enter(); |
1197 | pte_set(ptep, pte); | | 1195 | pte_set(ptep, pte); |
1198 | pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI); | | 1196 | pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI); |
1199 | pmap_tlb_miss_lock_exit(); | | 1197 | pmap_tlb_miss_lock_exit(); |
1200 | } | | 1198 | } |
1201 | } | | 1199 | } |
1202 | | | 1200 | |
1203 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1201 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1204 | } | | 1202 | } |
1205 | #endif /* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */ | | 1203 | #endif /* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */ |
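/*
 * Hedged sketch of pmap_page_cache()'s calling convention: per the
 * KASSERTs above, the caller runs with preemption disabled and the
 * page's pv-list lock held, typically from alias detection in
 * pmap_enter_pv().  "example_evict_aliased_page" is hypothetical.
 */
#if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
static inline void
example_evict_aliased_page(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	kpreempt_disable();
	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_page_cache(pg, false);	/* make every mapping uncached */
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_enable();
}
#endif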
1206 | | | 1204 | |
1207 | /* | | 1205 | /* |
1208 | * Insert the given physical page (p) at | | 1206 | * Insert the given physical page (p) at |
1209 | * the specified virtual address (v) in the | | 1207 | * the specified virtual address (v) in the |
1210 | * target physical map with the protection requested. | | 1208 | * target physical map with the protection requested. |
1211 | * | | 1209 | * |
1212 | * If specified, the page will be wired down, meaning | | 1210 | * If specified, the page will be wired down, meaning |
1213 | * that the related pte cannot be reclaimed. | | 1211 | * that the related pte cannot be reclaimed.
1214 | * | | 1212 | * |
1215 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 1213 | * NB: This is the only routine which MAY NOT lazy-evaluate |
1216 | * or lose information. That is, this routine must actually | | 1214 | * or lose information. That is, this routine must actually |
1217 | * insert this page into the given map NOW. | | 1215 | * insert this page into the given map NOW. |
1218 | */ | | 1216 | */ |
1219 | int | | 1217 | int |
1220 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 1218 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
1221 | { | | 1219 | { |
1222 | const bool wired = (flags & PMAP_WIRED) != 0; | | 1220 | const bool wired = (flags & PMAP_WIRED) != 0; |
1223 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); | | 1221 | const bool is_kernel_pmap_p = (pmap == pmap_kernel()); |
1224 | u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0; | | 1222 | u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0; |
1225 | #ifdef UVMHIST | | 1223 | #ifdef UVMHIST |
1226 | struct kern_history * const histp = | | 1224 | struct kern_history * const histp = |
1227 | ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist); | | 1225 | ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist); |
1228 | #endif | | 1226 | #endif |
1229 | | | 1227 | |
1230 | UVMHIST_FUNC(__func__); | | 1228 | UVMHIST_FUNC(__func__); |
1231 | UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx", | | 1229 | UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx", |
1232 | (uintptr_t)pmap, va, pa, 0); | | 1230 | (uintptr_t)pmap, va, pa, 0); |
1233 | UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0); | | 1231 | UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0); |
1234 | | | 1232 | |
1235 | const bool good_color = PMAP_PAGE_COLOROK_P(pa, va); | | 1233 | const bool good_color = PMAP_PAGE_COLOROK_P(pa, va); |
1236 | if (is_kernel_pmap_p) { | | 1234 | if (is_kernel_pmap_p) { |
1237 | PMAP_COUNT(kernel_mappings); | | 1235 | PMAP_COUNT(kernel_mappings); |
1238 | if (!good_color) | | 1236 | if (!good_color) |
1239 | PMAP_COUNT(kernel_mappings_bad); | | 1237 | PMAP_COUNT(kernel_mappings_bad); |
1240 | } else { | | 1238 | } else { |
1241 | PMAP_COUNT(user_mappings); | | 1239 | PMAP_COUNT(user_mappings); |
1242 | if (!good_color) | | 1240 | if (!good_color) |
1243 | PMAP_COUNT(user_mappings_bad); | | 1241 | PMAP_COUNT(user_mappings_bad); |
1244 | } | | 1242 | } |
1245 | pmap_addr_range_check(pmap, va, va, __func__); | | 1243 | pmap_addr_range_check(pmap, va, va, __func__); |
1246 | | | 1244 | |
1247 | KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x", | | 1245 | KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x", |
1248 | VM_PROT_READ, prot); | | 1246 | VM_PROT_READ, prot); |
1249 | | | 1247 | |
1250 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 1248 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
1251 | struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); | | 1249 | struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); |
1252 | | | 1250 | |
1253 | if (pg) { | | 1251 | if (pg) { |
1254 | /* Set page referenced/modified status based on flags */ | | 1252 | /* Set page referenced/modified status based on flags */ |
1255 | if (flags & VM_PROT_WRITE) { | | 1253 | if (flags & VM_PROT_WRITE) { |
1256 | pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED); | | 1254 | pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED); |
1257 | } else if (flags & VM_PROT_ALL) { | | 1255 | } else if (flags & VM_PROT_ALL) { |
1258 | pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED); | | 1256 | pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED); |
1259 | } | | 1257 | } |
1260 | | | 1258 | |
1261 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1259 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1262 | if (!VM_PAGEMD_CACHED_P(mdpg)) { | | 1260 | if (!VM_PAGEMD_CACHED_P(mdpg)) { |
1263 | flags |= PMAP_NOCACHE; | | 1261 | flags |= PMAP_NOCACHE; |
1264 | PMAP_COUNT(uncached_mappings); | | 1262 | PMAP_COUNT(uncached_mappings); |
1265 | } | | 1263 | } |
1266 | #endif | | 1264 | #endif |
1267 | | | 1265 | |
1268 | PMAP_COUNT(managed_mappings); | | 1266 | PMAP_COUNT(managed_mappings); |
1269 | } else { | | 1267 | } else { |
1270 | /* | | 1268 | /* |
1271 | * Assumption: if it is not part of our managed memory | | 1269 | * Assumption: if it is not part of our managed memory |
1272 | * then it must be device memory, which may be volatile. | | 1270 | * then it must be device memory, which may be volatile.
1273 | */ | | 1271 | */ |
1274 | if ((flags & PMAP_CACHE_MASK) == 0) | | 1272 | if ((flags & PMAP_CACHE_MASK) == 0) |
1275 | flags |= PMAP_NOCACHE; | | 1273 | flags |= PMAP_NOCACHE; |
1276 | PMAP_COUNT(unmanaged_mappings); | | 1274 | PMAP_COUNT(unmanaged_mappings); |
1277 | } | | 1275 | } |
1278 | | | 1276 | |
1279 | pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags, | | 1277 | pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags, |
1280 | is_kernel_pmap_p); | | 1278 | is_kernel_pmap_p); |
1281 | | | 1279 | |
1282 | kpreempt_disable(); | | 1280 | kpreempt_disable(); |
1283 | | | 1281 | |
1284 | pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags); | | 1282 | pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags); |
1285 | if (__predict_false(ptep == NULL)) { | | 1283 | if (__predict_false(ptep == NULL)) { |
1286 | kpreempt_enable(); | | 1284 | kpreempt_enable(); |
1287 | UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0); | | 1285 | UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0); |
1288 | return ENOMEM; | | 1286 | return ENOMEM; |
1289 | } | | 1287 | } |
1290 | const pt_entry_t opte = *ptep; | | 1288 | const pt_entry_t opte = *ptep; |
1291 | const bool resident = pte_valid_p(opte); | | 1289 | const bool resident = pte_valid_p(opte); |
1292 | bool remap = false; | | 1290 | bool remap = false; |
1293 | if (resident) { | | 1291 | if (resident) { |
1294 | if (pte_to_paddr(opte) != pa) { | | 1292 | if (pte_to_paddr(opte) != pa) { |
1295 | KASSERT(!is_kernel_pmap_p); | | 1293 | KASSERT(!is_kernel_pmap_p); |
1296 | const pt_entry_t rpte = pte_nv_entry(false); | | 1294 | const pt_entry_t rpte = pte_nv_entry(false); |
1297 | | | 1295 | |
1298 | pmap_addr_range_check(pmap, va, va + NBPG, __func__); | | 1296 | pmap_addr_range_check(pmap, va, va + NBPG, __func__); |
1299 | pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove, | | 1297 | pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove, |
1300 | rpte); | | 1298 | rpte); |
1301 | PMAP_COUNT(user_mappings_changed); | | 1299 | PMAP_COUNT(user_mappings_changed); |
1302 | remap = true; | | 1300 | remap = true; |
1303 | } | | 1301 | } |
1304 | update_flags |= PMAP_TLB_NEED_IPI; | | 1302 | update_flags |= PMAP_TLB_NEED_IPI; |
1305 | } | | 1303 | } |
1306 | | | 1304 | |
1307 | if (!resident || remap) { | | 1305 | if (!resident || remap) { |
1308 | pmap->pm_stats.resident_count++; | | 1306 | pmap->pm_stats.resident_count++; |
1309 | } | | 1307 | } |
1310 | | | 1308 | |
1311 | /* Done after case that may sleep/return. */ | | 1309 | /* Done after case that may sleep/return. */ |
1312 | if (pg) | | 1310 | if (pg) |
1313 | pmap_enter_pv(pmap, va, pg, &npte, 0); | | 1311 | pmap_enter_pv(pmap, va, pg, &npte, 0); |
1314 | | | 1312 | |
1315 | /* | | 1313 | /* |
1316 | * Now validate mapping with desired protection/wiring. | | 1314 | * Now validate mapping with desired protection/wiring. |
1317 | * Assume uniform modified and referenced status for all | | 1315 | * Assume uniform modified and referenced status for all |
1318 | * MIPS pages in a MACH page. | | 1316 | * MIPS pages in a MACH page. |
1319 | */ | | 1317 | */ |
1320 | if (wired) { | | 1318 | if (wired) { |
1321 | pmap->pm_stats.wired_count++; | | 1319 | pmap->pm_stats.wired_count++; |
1322 | npte = pte_wire_entry(npte); | | 1320 | npte = pte_wire_entry(npte); |
1323 | } | | 1321 | } |
1324 | | | 1322 | |
1325 | UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)", | | 1323 | UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)", |
1326 | pte_value(npte), pa, 0, 0); | | 1324 | pte_value(npte), pa, 0, 0); |
1327 | | | 1325 | |
1328 | KASSERT(pte_valid_p(npte)); | | 1326 | KASSERT(pte_valid_p(npte)); |
1329 | | | 1327 | |
1330 | pmap_tlb_miss_lock_enter(); | | 1328 | pmap_tlb_miss_lock_enter(); |
1331 | pte_set(ptep, npte); | | 1329 | pte_set(ptep, npte); |
1332 | pmap_tlb_update_addr(pmap, va, npte, update_flags); | | 1330 | pmap_tlb_update_addr(pmap, va, npte, update_flags); |
1333 | pmap_tlb_miss_lock_exit(); | | 1331 | pmap_tlb_miss_lock_exit(); |
1334 | kpreempt_enable(); | | 1332 | kpreempt_enable(); |
1335 | | | 1333 | |
1336 | if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { | | 1334 | if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { |
1337 | KASSERT(mdpg != NULL); | | 1335 | KASSERT(mdpg != NULL); |
1338 | PMAP_COUNT(exec_mappings); | | 1336 | PMAP_COUNT(exec_mappings); |
1339 | if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) { | | 1337 | if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) { |
1340 | if (!pte_deferred_exec_p(npte)) { | | 1338 | if (!pte_deferred_exec_p(npte)) { |
1341 | UVMHIST_LOG(*histp, "va=%#jx pg %#jx: " | | 1339 | UVMHIST_LOG(*histp, "va=%#jx pg %#jx: " |
1342 | "immediate syncicache", | | 1340 | "immediate syncicache", |
1343 | va, (uintptr_t)pg, 0, 0); | | 1341 | va, (uintptr_t)pg, 0, 0); |
1344 | pmap_page_syncicache(pg); | | 1342 | pmap_page_syncicache(pg); |
1345 | pmap_page_set_attributes(mdpg, | | 1343 | pmap_page_set_attributes(mdpg, |
1346 | VM_PAGEMD_EXECPAGE); | | 1344 | VM_PAGEMD_EXECPAGE); |
1347 | PMAP_COUNT(exec_synced_mappings); | | 1345 | PMAP_COUNT(exec_synced_mappings); |
1348 | } else { | | 1346 | } else { |
1349 | UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer " | | 1347 | UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer " |
1350 | "syncicache: pte %#jx", | | 1348 | "syncicache: pte %#jx", |
1351 | va, (uintptr_t)pg, npte, 0); | | 1349 | va, (uintptr_t)pg, npte, 0); |
1352 | } | | 1350 | } |
1353 | } else { | | 1351 | } else { |
1354 | UVMHIST_LOG(*histp, | | 1352 | UVMHIST_LOG(*histp, |
1355 | "va=%#jx pg %#jx: no syncicache cached %jd", | | 1353 | "va=%#jx pg %#jx: no syncicache cached %jd", |
1356 | va, (uintptr_t)pg, pte_cached_p(npte), 0); | | 1354 | va, (uintptr_t)pg, pte_cached_p(npte), 0); |
1357 | } | | 1355 | } |
1358 | } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) { | | 1356 | } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) { |
1359 | KASSERT(mdpg != NULL); | | 1357 | KASSERT(mdpg != NULL); |
1360 | KASSERT(prot & VM_PROT_WRITE); | | 1358 | KASSERT(prot & VM_PROT_WRITE); |
1361 | PMAP_COUNT(exec_mappings); | | 1359 | PMAP_COUNT(exec_mappings); |
1362 | pmap_page_syncicache(pg); | | 1360 | pmap_page_syncicache(pg); |
1363 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); | | 1361 | pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); |
1364 | UVMHIST_LOG(*histp, | | 1362 | UVMHIST_LOG(*histp, |
1365 | "va=%#jx pg %#jx: immediate syncicache (writeable)", | | 1363 | "va=%#jx pg %#jx: immediate syncicache (writeable)", |
1366 | va, (uintptr_t)pg, 0, 0); | | 1364 | va, (uintptr_t)pg, 0, 0); |
1367 | } | | 1365 | } |
1368 | | | 1366 | |
1369 | UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0); | | 1367 | UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0); |
1370 | return 0; | | 1368 | return 0; |
1371 | } | | 1369 | } |
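/*
 * Caller-side sketch for pmap_enter(), with illustrative names: the
 * access-type bits travel in flags alongside PMAP_WIRED, and an
 * ENOMEM from a failed PTE-page reservation is the caller's problem
 * (a PMAP_CANFAIL-style caller would retry or fail the fault).
 */
static inline int
example_enter_wired(pmap_t pmap, vaddr_t va, paddr_t pa)
{
	const vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
	int error;

	error = pmap_enter(pmap, va, pa, prot, prot | PMAP_WIRED);
	if (error == 0)
		pmap_update(pmap);
	return error;
}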
1372 | | | 1370 | |
1373 | void | | 1371 | void |
1374 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 1372 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
1375 | { | | 1373 | { |
1376 | pmap_t pmap = pmap_kernel(); | | 1374 | pmap_t pmap = pmap_kernel(); |
1377 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 1375 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
1378 | struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); | | 1376 | struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); |
1379 | | | 1377 | |
1380 | UVMHIST_FUNC(__func__); | | 1378 | UVMHIST_FUNC(__func__); |
1381 | UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)", | | 1379 | UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)", |
1382 | va, pa, prot, flags); | | 1380 | va, pa, prot, flags); |
1383 | PMAP_COUNT(kenter_pa); | | 1381 | PMAP_COUNT(kenter_pa); |
1384 | | | 1382 | |
1385 | if (mdpg == NULL) { | | 1383 | if (mdpg == NULL) { |
1386 | PMAP_COUNT(kenter_pa_unmanaged); | | 1384 | PMAP_COUNT(kenter_pa_unmanaged); |
1387 | if ((flags & PMAP_CACHE_MASK) == 0) | | 1385 | if ((flags & PMAP_CACHE_MASK) == 0) |
1388 | flags |= PMAP_NOCACHE; | | 1386 | flags |= PMAP_NOCACHE; |
1389 | } else { | | 1387 | } else { |
1390 | if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va)) | | 1388 | if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va)) |
1391 | PMAP_COUNT(kenter_pa_bad); | | 1389 | PMAP_COUNT(kenter_pa_bad); |
1392 | } | | 1390 | } |
1393 | | | 1391 | |
1394 | pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags); | | 1392 | pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags); |
1395 | kpreempt_disable(); | | 1393 | kpreempt_disable(); |
1396 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); | | 1394 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); |
1397 | KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, | | 1395 | KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, |
1398 | pmap_limits.virtual_end); | | 1396 | pmap_limits.virtual_end); |
1399 | KASSERT(!pte_valid_p(*ptep)); | | 1397 | KASSERT(!pte_valid_p(*ptep)); |
1400 | | | 1398 | |
1401 | /* | | 1399 | /* |
1402 | * No need to track non-managed pages or PMAP_KMPAGE pages for aliases | | 1400 | * No need to track non-managed pages or PMAP_KMPAGE pages for aliases
1403 | */ | | 1401 | */ |
1404 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1402 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1405 | if (pg != NULL && (flags & PMAP_KMPAGE) == 0 | | 1403 | if (pg != NULL && (flags & PMAP_KMPAGE) == 0 |
1406 | && pmap_md_virtual_cache_aliasing_p()) { | | 1404 | && pmap_md_virtual_cache_aliasing_p()) { |
1407 | pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER); | | 1405 | pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER); |
1408 | } | | 1406 | } |
1409 | #endif | | 1407 | #endif |
1410 | | | 1408 | |
1411 | /* | | 1409 | /* |
1412 | * We have the option to force this mapping into the TLB but we | | 1410 | * We have the option to force this mapping into the TLB but we |
1413 | * don't. Instead let the next reference to the page do it. | | 1411 | * don't. Instead let the next reference to the page do it. |
1414 | */ | | 1412 | */ |
1415 | pmap_tlb_miss_lock_enter(); | | 1413 | pmap_tlb_miss_lock_enter(); |
1416 | pte_set(ptep, npte); | | 1414 | pte_set(ptep, npte); |
1417 | pmap_tlb_update_addr(pmap_kernel(), va, npte, 0); | | 1415 | pmap_tlb_update_addr(pmap_kernel(), va, npte, 0); |
1418 | pmap_tlb_miss_lock_exit(); | | 1416 | pmap_tlb_miss_lock_exit(); |
1419 | kpreempt_enable(); | | 1417 | kpreempt_enable(); |
1420 | #if DEBUG > 1 | | 1418 | #if DEBUG > 1 |
1421 | for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) { | | 1419 | for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) { |
1422 | if (((long *)va)[i] != ((long *)pa)[i]) | | 1420 | if (((long *)va)[i] != ((long *)pa)[i]) |
1423 | panic("%s: contents (%lx) of va %#"PRIxVADDR | | 1421 | panic("%s: contents (%lx) of va %#"PRIxVADDR |
1424 | " != contents (%lx) of pa %#"PRIxPADDR, __func__, | | 1422 | " != contents (%lx) of pa %#"PRIxPADDR, __func__, |
1425 | ((long *)va)[i], va, ((long *)pa)[i], pa); | | 1423 | ((long *)va)[i], va, ((long *)pa)[i], pa); |
1426 | } | | 1424 | } |
1427 | #endif | | 1425 | #endif |
1428 | | | 1426 | |
1429 | UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0, | | 1427 | UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0, |
1430 | 0); | | 1428 | 0); |
1431 | } | | 1429 | } |
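/*
 * Sketch of the unmanaged kernel-mapping pattern, with hypothetical
 * names: mappings made by pmap_kenter_pa() must be undone with
 * pmap_kremove() (below), never pmap_remove(), and device memory is
 * mapped uncached by default per the PMAP_NOCACHE handling above.
 */
static inline void
example_map_device_page(vaddr_t va, paddr_t pa)
{

	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... access the device registers through va ... */
	pmap_kremove(va, PAGE_SIZE);
}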
1432 | | | 1430 | |
1433 | /* | | 1431 | /* |
1434 | * Remove the given range of addresses from the kernel map. | | 1432 | * Remove the given range of addresses from the kernel map. |
1435 | * | | 1433 | * |
1436 | * The start and end are rounded to page boundaries | | 1434 | * The start and end are rounded to page boundaries
1437 | * internally (see trunc_page/round_page below). | | 1435 | * internally (see trunc_page/round_page below).
1438 | */ | | 1436 | */ |
1439 | | | 1437 | |
1440 | static bool | | 1438 | static bool |
1441 | pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, | | 1439 | pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, |
1442 | uintptr_t flags) | | 1440 | uintptr_t flags) |
1443 | { | | 1441 | { |
1444 | const pt_entry_t new_pte = pte_nv_entry(true); | | 1442 | const pt_entry_t new_pte = pte_nv_entry(true); |
1445 | | | 1443 | |
1446 | UVMHIST_FUNC(__func__); | | 1444 | UVMHIST_FUNC(__func__); |
1447 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)", | | 1445 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)", |
1448 | (uintptr_t)pmap, sva, eva, (uintptr_t)ptep); | | 1446 | (uintptr_t)pmap, sva, eva, (uintptr_t)ptep); |
1449 | | | 1447 | |
1450 | KASSERT(kpreempt_disabled()); | | 1448 | KASSERT(kpreempt_disabled()); |
1451 | | | 1449 | |
1452 | for (; sva < eva; sva += NBPG, ptep++) { | | 1450 | for (; sva < eva; sva += NBPG, ptep++) { |
1453 | pt_entry_t pte = *ptep; | | 1451 | pt_entry_t pte = *ptep; |
1454 | if (!pte_valid_p(pte)) | | 1452 | if (!pte_valid_p(pte)) |
1455 | continue; | | 1453 | continue; |
1456 | | | 1454 | |
1457 | PMAP_COUNT(kremove_pages); | | 1455 | PMAP_COUNT(kremove_pages); |
1458 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES | | 1456 | #ifdef PMAP_VIRTUAL_CACHE_ALIASES |
1459 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); | | 1457 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); |
1460 | if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) { | | 1458 | if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) { |
1461 | pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte)); | | 1459 | pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte)); |
1462 | } | | 1460 | } |
1463 | #endif | | 1461 | #endif |
1464 | | | 1462 | |
1465 | pmap_tlb_miss_lock_enter(); | | 1463 | pmap_tlb_miss_lock_enter(); |
1466 | pte_set(ptep, new_pte); | | 1464 | pte_set(ptep, new_pte); |
1467 | pmap_tlb_invalidate_addr(pmap, sva); | | 1465 | pmap_tlb_invalidate_addr(pmap, sva); |
1468 | pmap_tlb_miss_lock_exit(); | | 1466 | pmap_tlb_miss_lock_exit(); |
1469 | } | | 1467 | } |
1470 | | | 1468 | |
1471 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1469 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1472 | | | 1470 | |
1473 | return false; | | 1471 | return false; |
1474 | } | | 1472 | } |
1475 | | | 1473 | |
1476 | void | | 1474 | void |
1477 | pmap_kremove(vaddr_t va, vsize_t len) | | 1475 | pmap_kremove(vaddr_t va, vsize_t len) |
1478 | { | | 1476 | { |
1479 | const vaddr_t sva = trunc_page(va); | | 1477 | const vaddr_t sva = trunc_page(va); |
1480 | const vaddr_t eva = round_page(va + len); | | 1478 | const vaddr_t eva = round_page(va + len); |
1481 | | | 1479 | |
1482 | UVMHIST_FUNC(__func__); | | 1480 | UVMHIST_FUNC(__func__); |
1483 | UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0); | | 1481 | UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0); |
1484 | | | 1482 | |
1485 | kpreempt_disable(); | | 1483 | kpreempt_disable(); |
1486 | pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0); | | 1484 | pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0); |
1487 | kpreempt_enable(); | | 1485 | kpreempt_enable(); |
1488 | | | 1486 | |
1489 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1487 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1490 | } | | 1488 | } |
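/*
 * Caller-side note in code form: unlike pmap_remove(), pmap_kremove()
 * rounds the byte range itself (trunc_page/round_page above), so an
 * unaligned length is acceptable.  Illustrative only.
 */
static inline void
example_unmap_kernel_buffer(vaddr_t va, vsize_t nbytes)
{

	pmap_kremove(va, nbytes);	/* rounding handled internally */
}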
1491 | | | 1489 | |
1492 | bool | | 1490 | bool |
1493 | pmap_remove_all(struct pmap *pmap) | | 1491 | pmap_remove_all(struct pmap *pmap) |
1494 | { | | 1492 | { |
1495 | UVMHIST_FUNC(__func__); | | 1493 | UVMHIST_FUNC(__func__); |
1496 | UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0); | | 1494 | UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0); |
1497 | | | 1495 | |
1498 | KASSERT(pmap != pmap_kernel()); | | 1496 | KASSERT(pmap != pmap_kernel()); |
1499 | | | 1497 | |
1500 | kpreempt_disable(); | | 1498 | kpreempt_disable(); |
1501 | /* | | 1499 | /* |
1502 | 	 * Free all of our ASIDs, which means we can skip doing all the | | 1500 | 	 * Free all of our ASIDs, which means we can skip doing all the
1503 | * tlb_invalidate_addrs(). | | 1501 | * tlb_invalidate_addrs(). |
1504 | */ | | 1502 | */ |
1505 | pmap_tlb_miss_lock_enter(); | | 1503 | pmap_tlb_miss_lock_enter(); |
1506 | #ifdef MULTIPROCESSOR | | 1504 | #ifdef MULTIPROCESSOR |
1507 | // This should be the last CPU with this pmap onproc | | 1505 | // This should be the last CPU with this pmap onproc |
1508 | KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu()))); | | 1506 | KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu()))); |
1509 | if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu()))) | | 1507 | if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu()))) |
1510 | #endif | | 1508 | #endif |
1511 | pmap_tlb_asid_deactivate(pmap); | | 1509 | pmap_tlb_asid_deactivate(pmap); |
1512 | #ifdef MULTIPROCESSOR | | 1510 | #ifdef MULTIPROCESSOR |
1513 | KASSERT(kcpuset_iszero(pmap->pm_onproc)); | | 1511 | KASSERT(kcpuset_iszero(pmap->pm_onproc)); |
1514 | #endif | | 1512 | #endif |
1515 | pmap_tlb_asid_release_all(pmap); | | 1513 | pmap_tlb_asid_release_all(pmap); |
1516 | pmap_tlb_miss_lock_exit(); | | 1514 | pmap_tlb_miss_lock_exit(); |
1517 | pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE; | | 1515 | pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE; |
1518 | | | 1516 | |
1519 | #ifdef PMAP_FAULTINFO | | 1517 | #ifdef PMAP_FAULTINFO |
1520 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; | | 1518 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; |
1521 | curpcb->pcb_faultinfo.pfi_repeats = 0; | | 1519 | curpcb->pcb_faultinfo.pfi_repeats = 0; |
1522 | curpcb->pcb_faultinfo.pfi_faultpte = NULL; | | 1520 | curpcb->pcb_faultinfo.pfi_faultpte = NULL; |
1523 | #endif | | 1521 | #endif |
1524 | kpreempt_enable(); | | 1522 | kpreempt_enable(); |
1525 | | | 1523 | |
1526 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1524 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1527 | return false; | | 1525 | return false; |
1528 | } | | 1526 | } |
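/*
 * Hedged sketch of where pmap_remove_all() fits: it is intended to
 * run before bulk unmapping (e.g. at exit/exec time) so that the
 * subsequent pmap_remove() calls can skip their per-page TLB
 * invalidations (the PMAP_DEFERRED_ACTIVATE tests above), with
 * pmap_update() restoring the ASID afterwards.  Names here are
 * illustrative.
 */
static inline void
example_exit_teardown(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{

	(void)pmap_remove_all(pmap);	/* ASIDs gone: TLB work skipped */
	pmap_remove(pmap, sva, eva);
	pmap_update(pmap);		/* clears PMAP_DEFERRED_ACTIVATE */
}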
1529 | | | 1527 | |
1530 | /* | | 1528 | /* |
1531 | * Routine: pmap_unwire | | 1529 | * Routine: pmap_unwire |
1532 | * Function: Clear the wired attribute for a map/virtual-address | | 1530 | * Function: Clear the wired attribute for a map/virtual-address |
1533 | * pair. | | 1531 | * pair. |
1534 | * In/out conditions: | | 1532 | * In/out conditions: |
1535 | * The mapping must already exist in the pmap. | | 1533 | * The mapping must already exist in the pmap. |
1536 | */ | | 1534 | */ |
1537 | void | | 1535 | void |
1538 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 1536 | pmap_unwire(pmap_t pmap, vaddr_t va) |
1539 | { | | 1537 | { |
1540 | UVMHIST_FUNC(__func__); | | 1538 | UVMHIST_FUNC(__func__); |
1541 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va, | | 1539 | UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va, |
1542 | 0, 0); | | 1540 | 0, 0); |
1543 | PMAP_COUNT(unwire); | | 1541 | PMAP_COUNT(unwire); |
1544 | | | 1542 | |
1545 | /* | | 1543 | /* |
1546 | * Don't need to flush the TLB since PG_WIRED is only in software. | | 1544 | * Don't need to flush the TLB since PG_WIRED is only in software. |
1547 | */ | | 1545 | */ |
1548 | kpreempt_disable(); | | 1546 | kpreempt_disable(); |
1549 | pmap_addr_range_check(pmap, va, va, __func__); | | 1547 | pmap_addr_range_check(pmap, va, va, __func__); |
1550 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); | | 1548 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); |
1551 | KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE", | | 1549 | KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE", |
1552 | pmap, va); | | 1550 | pmap, va); |
1553 | pt_entry_t pte = *ptep; | | 1551 | pt_entry_t pte = *ptep; |
1554 | KASSERTMSG(pte_valid_p(pte), | | 1552 | KASSERTMSG(pte_valid_p(pte), |
1555 | "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p", | | 1553 | "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p", |
1556 | pmap, va, pte_value(pte), ptep); | | 1554 | pmap, va, pte_value(pte), ptep); |
1557 | | | 1555 | |
1558 | if (pte_wired_p(pte)) { | | 1556 | if (pte_wired_p(pte)) { |
1559 | pmap_tlb_miss_lock_enter(); | | 1557 | pmap_tlb_miss_lock_enter(); |
1560 | pte_set(ptep, pte_unwire_entry(pte)); | | 1558 | pte_set(ptep, pte_unwire_entry(pte)); |
1561 | pmap_tlb_miss_lock_exit(); | | 1559 | pmap_tlb_miss_lock_exit(); |
1562 | pmap->pm_stats.wired_count--; | | 1560 | pmap->pm_stats.wired_count--; |
1563 | } | | 1561 | } |
1564 | #ifdef DIAGNOSTIC | | 1562 | #ifdef DIAGNOSTIC |
1565 | else { | | 1563 | else { |
1566 | printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n", | | 1564 | printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n", |
1567 | __func__, pmap, va); | | 1565 | __func__, pmap, va); |
1568 | } | | 1566 | } |
1569 | #endif | | 1567 | #endif |
1570 | kpreempt_enable(); | | 1568 | kpreempt_enable(); |
1571 | | | 1569 | |
1572 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); | | 1570 | UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); |
1573 | } | | 1571 | } |
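/*
 * Illustrative pairing for the wired-count accounting: each page
 * entered with PMAP_WIRED is unwired individually, and no TLB flush
 * is needed because wiring is software-only state (see above).
 */
static inline void
example_unwire_range(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{

	for (vaddr_t va = sva; va < eva; va += PAGE_SIZE)
		pmap_unwire(pmap, va);
}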
1574 | | | 1572 | |
/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa;

	if (pmap == pmap_kernel()) {
		if (pmap_md_direct_mapped_vaddr_p(va)) {
			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
			goto done;
		}
		if (pmap_md_io_vaddr_p(va))
			panic("pmap_extract: io address %#"PRIxVADDR"", va);

		if (va >= pmap_limits.virtual_end)
			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
			    __func__, va);
	}
	kpreempt_disable();
	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	if (ptep == NULL || !pte_valid_p(*ptep)) {
		kpreempt_enable();
		return false;
	}
	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
	kpreempt_enable();
done:
	if (pap != NULL) {
		*pap = pa;
	}
	return true;
}

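/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * the usual pmap_extract() calling pattern.  The return value must be
 * checked before trusting the physical address, since false means the
 * virtual address has no valid mapping.
 */
#if 0
static paddr_t
pmap_extract_example(pmap_t pmap, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pmap, va, &pa))
		panic("%s: va %#"PRIxVADDR" has no valid mapping",
		    __func__, va);
	return pa;
}
#endif
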
/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
    vaddr_t src_addr)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(copy);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
bool
pmap_clear_reference(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);

	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);

	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);

	return rv;
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
bool
pmap_is_referenced(struct vm_page *pg)
{
	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
}

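/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * the test-and-clear pattern a page scanner might use with these two
 * hooks.  pmap_clear_reference() returns whether the page had been
 * referenced since the last clear, so one call both samples and resets
 * the attribute.
 */
#if 0
static bool
pmap_page_was_active_example(struct vm_page *pg)
{

	if (!pmap_is_referenced(pg))
		return false;		/* not touched since last scan */
	/* Reset the attribute so the next scan sees fresh references. */
	return pmap_clear_reference(pg);
}
#endif
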
/*
 *	Clear the modify bits on the specified physical page.
 */
bool
pmap_clear_modify(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	pv_entry_t pv_next;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
	PMAP_COUNT(clear_modify);

	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
		if (pv->pv_pmap == NULL) {
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx): execpage cleared",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
			PMAP_COUNT(exec_uncached_clear_modify);
		} else {
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx): syncicache performed",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
			pmap_page_syncicache(pg);
			PMAP_COUNT(exec_synced_clear_modify);
		}
	}
	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
		return false;
	}
	if (pv->pv_pmap == NULL) {
		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
		return true;
	}

	/*
	 * Remove write access from any mapping of the page so we can
	 * tell if it is written to again later.  Flush the VAC first
	 * if there is one.
	 */
	kpreempt_disable();
	VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pmap_pvlist_check(mdpg);
	for (; pv != NULL; pv = pv_next) {
		pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);

		pv_next = pv->pv_next;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (PV_ISKENTER_P(pv))
			continue;
#endif
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		KASSERT(ptep);
		pt_entry_t pte = pte_prot_nowrite(*ptep);
		if (*ptep == pte) {
			continue;
		}
		KASSERT(pte_valid_p(pte));
		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		pmap_tlb_miss_lock_enter();
		pte_set(ptep, pte);