| @@ -1,1245 +1,1243 @@ | | | @@ -1,1245 +1,1243 @@ |
1 | .\" $NetBSD: pmap.9,v 1.47 2020/03/14 14:05:42 ad Exp $ | | 1 | .\" $NetBSD: pmap.9,v 1.48 2020/08/16 16:48:08 thorpej Exp $ |
2 | .\" | | 2 | .\" |
3 | .\" Copyright (c) 2000, 2001, 2002, 2020 The NetBSD Foundation, Inc. | | 3 | .\" Copyright (c) 2000, 2001, 2002, 2020 The NetBSD Foundation, Inc. |
4 | .\" All rights reserved. | | 4 | .\" All rights reserved. |
5 | .\" | | 5 | .\" |
6 | .\" This code is derived from software contributed to The NetBSD Foundation | | 6 | .\" This code is derived from software contributed to The NetBSD Foundation |
7 | .\" by Jason R. Thorpe. | | 7 | .\" by Jason R. Thorpe. |
8 | .\" | | 8 | .\" |
9 | .\" Redistribution and use in source and binary forms, with or without | | 9 | .\" Redistribution and use in source and binary forms, with or without |
10 | .\" modification, are permitted provided that the following conditions | | 10 | .\" modification, are permitted provided that the following conditions |
11 | .\" are met: | | 11 | .\" are met: |
12 | .\" 1. Redistributions of source code must retain the above copyright | | 12 | .\" 1. Redistributions of source code must retain the above copyright |
13 | .\" notice, this list of conditions and the following disclaimer. | | 13 | .\" notice, this list of conditions and the following disclaimer. |
14 | .\" 2. Redistributions in binary form must reproduce the above copyright | | 14 | .\" 2. Redistributions in binary form must reproduce the above copyright |
15 | .\" notice, this list of conditions and the following disclaimer in the | | 15 | .\" notice, this list of conditions and the following disclaimer in the |
16 | .\" documentation and/or other materials provided with the distribution. | | 16 | .\" documentation and/or other materials provided with the distribution. |
17 | .\" | | 17 | .\" |
18 | .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 18 | .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
19 | .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 19 | .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
20 | .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 20 | .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
21 | .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 21 | .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
22 | .\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 22 | .\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
23 | .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 23 | .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
24 | .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 24 | .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
25 | .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 25 | .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
26 | .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 26 | .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
27 | .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 27 | .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
28 | .\" POSSIBILITY OF SUCH DAMAGE. | | 28 | .\" POSSIBILITY OF SUCH DAMAGE. |
29 | .\" | | 29 | .\" |
30 | .Dd March 14, 2020 | | 30 | .Dd August 16, 2020 |
31 | .Dt PMAP 9 | | 31 | .Dt PMAP 9 |
32 | .Os | | 32 | .Os |
33 | .Sh NAME | | 33 | .Sh NAME |
34 | .Nm pmap | | 34 | .Nm pmap |
35 | .Nd machine-dependent portion of the virtual memory system | | 35 | .Nd machine-dependent portion of the virtual memory system |
36 | .Sh SYNOPSIS | | 36 | .Sh SYNOPSIS |
37 | .In sys/param.h | | 37 | .In sys/param.h |
38 | .In uvm/uvm_extern.h | | 38 | .In uvm/uvm_extern.h |
39 | .Ft void | | 39 | .Ft void |
40 | .Fn "pmap_init" "void" | | 40 | .Fn "pmap_init" "void" |
41 | .Ft void | | 41 | .Ft void |
42 | .Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" | | 42 | .Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" |
43 | .Ft vaddr_t | | 43 | .Ft vaddr_t |
44 | .Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" "vaddr_t *vendp" | | 44 | .Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" "vaddr_t *vendp" |
45 | .Ft pmap_t | | 45 | .Ft pmap_t |
46 | .Fn "pmap_kernel" "void" | | 46 | .Fn "pmap_kernel" "void" |
47 | .Ft pmap_t | | 47 | .Ft pmap_t |
48 | .Fn "pmap_create" "void" | | 48 | .Fn "pmap_create" "void" |
49 | .Ft void | | 49 | .Ft void |
50 | .Fn "pmap_destroy" "pmap_t pmap" | | 50 | .Fn "pmap_destroy" "pmap_t pmap" |
51 | .Ft void | | 51 | .Ft void |
52 | .Fn "pmap_reference" "pmap_t pmap" | | 52 | .Fn "pmap_reference" "pmap_t pmap" |
53 | .Ft void | | 53 | .Ft void |
54 | .Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" | | 54 | .Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" |
55 | .Ft long | | 55 | .Ft long |
56 | .Fn "pmap_resident_count" "pmap_t pmap" | | 56 | .Fn "pmap_resident_count" "pmap_t pmap" |
57 | .Ft long | | 57 | .Ft long |
58 | .Fn "pmap_wired_count" "pmap_t pmap" | | 58 | .Fn "pmap_wired_count" "pmap_t pmap" |
59 | .Ft vaddr_t | | 59 | .Ft vaddr_t |
60 | .Fn "pmap_growkernel" "vaddr_t maxkvaddr" | | 60 | .Fn "pmap_growkernel" "vaddr_t maxkvaddr" |
61 | .Ft int | | 61 | .Ft int |
62 | .Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ | | 62 | .Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ |
63 | "u_int flags" | | 63 | "u_int flags" |
64 | .Ft void | | 64 | .Ft void |
65 | .Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" | | 65 | .Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" |
66 | .Ft bool | | 66 | .Ft bool |
67 | .Fn "pmap_remove_all" "pmap_t pmap" | | 67 | .Fn "pmap_remove_all" "pmap_t pmap" |
68 | .Ft void | | 68 | .Ft void |
69 | .Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" "vm_prot_t prot" | | 69 | .Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" "vm_prot_t prot" |
70 | .Ft void | | 70 | .Ft void |
71 | .Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" | | 71 | .Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" |
72 | .Ft bool | | 72 | .Ft bool |
73 | .Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" | | 73 | .Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" |
74 | .Ft void | | 74 | .Ft void |
75 | .Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" "u_int flags" | | 75 | .Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" "u_int flags" |
76 | .Ft void | | 76 | .Ft void |
77 | .Fn "pmap_kremove" "vaddr_t va" "vsize_t size" | | 77 | .Fn "pmap_kremove" "vaddr_t va" "vsize_t size" |
78 | .Ft void | | 78 | .Ft void |
79 | .Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ | | 79 | .Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ |
80 | "vsize_t len" "vaddr_t src_addr" | | 80 | "vsize_t len" "vaddr_t src_addr" |
81 | .Ft void | | 81 | .Ft void |
82 | .Fn "pmap_update" "pmap_t pmap" | | 82 | .Fn "pmap_update" "pmap_t pmap" |
83 | .Ft void | | 83 | .Ft void |
84 | .Fn "pmap_activate" "struct lwp *l" | | 84 | .Fn "pmap_activate" "struct lwp *l" |
85 | .Ft void | | 85 | .Ft void |
86 | .Fn "pmap_deactivate" "struct lwp *l" | | 86 | .Fn "pmap_deactivate" "struct lwp *l" |
87 | .Ft void | | 87 | .Ft void |
88 | .Fn "pmap_zero_page" "paddr_t pa" | | 88 | .Fn "pmap_zero_page" "paddr_t pa" |
89 | .Ft void | | 89 | .Ft void |
90 | .Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" | | 90 | .Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" |
91 | .Ft void | | 91 | .Ft void |
92 | .Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" | | 92 | .Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" |
93 | .Ft bool | | 93 | .Ft bool |
94 | .Fn "pmap_clear_modify" "struct vm_page *pg" | | 94 | .Fn "pmap_clear_modify" "struct vm_page *pg" |
95 | .Ft bool | | 95 | .Ft bool |
96 | .Fn "pmap_clear_reference" "struct vm_page *pg" | | 96 | .Fn "pmap_clear_reference" "struct vm_page *pg" |
97 | .Ft bool | | 97 | .Ft bool |
98 | .Fn "pmap_is_modified" "struct vm_page *pg" | | 98 | .Fn "pmap_is_modified" "struct vm_page *pg" |
99 | .Ft bool | | 99 | .Ft bool |
100 | .Fn "pmap_is_referenced" "struct vm_page *pg" | | 100 | .Fn "pmap_is_referenced" "struct vm_page *pg" |
101 | .Ft paddr_t | | 101 | .Ft paddr_t |
102 | .Fn "pmap_phys_address" "paddr_t cookie" | | 102 | .Fn "pmap_phys_address" "paddr_t cookie" |
103 | .Ft vaddr_t | | 103 | .Ft vaddr_t |
104 | .Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" | | 104 | .Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" |
105 | .Ft paddr_t | | 105 | .Ft paddr_t |
106 | .Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va" | | 106 | .Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va" |
107 | .Ft void | | 107 | .Ft void |
108 | .Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td" | | 108 | .Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td" |
109 | .Sh DESCRIPTION | | 109 | .Sh DESCRIPTION |
110 | The | | 110 | The |
111 | .Nm | | 111 | .Nm |
112 | module is the machine-dependent portion of the | | 112 | module is the machine-dependent portion of the |
113 | .Nx | | 113 | .Nx |
114 | virtual memory system | | 114 | virtual memory system |
115 | .Xr uvm 9 . | | 115 | .Xr uvm 9 . |
116 | The purpose of the | | 116 | The purpose of the |
117 | .Nm | | 117 | .Nm |
118 | module is to manage physical address maps, to program the | | 118 | module is to manage physical address maps, to program the |
119 | memory management hardware on the system, and perform any | | 119 | memory management hardware on the system, and perform any |
120 | cache operations necessary to ensure correct operation of | | 120 | cache operations necessary to ensure correct operation of |
121 | the virtual memory system. | | 121 | the virtual memory system. |
122 | The | | 122 | The |
123 | .Nm | | 123 | .Nm |
124 | module is also responsible for maintaining certain information | | 124 | module is also responsible for maintaining certain information |
125 | required by | | 125 | required by |
126 | .Xr uvm 9 . | | 126 | .Xr uvm 9 . |
127 | .Pp | | 127 | .Pp |
128 | In order to cope with hardware architectures that make the | | 128 | In order to cope with hardware architectures that make the |
129 | invalidation of virtual address mappings expensive (e.g., | | 129 | invalidation of virtual address mappings expensive (e.g., |
130 | TLB invalidations, TLB shootdown operations for multiple | | 130 | TLB invalidations, TLB shootdown operations for multiple |
131 | processors), the | | 131 | processors), the |
132 | .Nm | | 132 | .Nm |
133 | module is allowed to delay mapping invalidation or protection | | 133 | module is allowed to delay mapping invalidation or protection |
134 | operations until such time as they are actually necessary. | | 134 | operations until such time as they are actually necessary. |
135 | The functions that are allowed to delay such actions are | | 135 | The functions that are allowed to delay such actions are |
136 | .Fn pmap_enter , | | 136 | .Fn pmap_enter , |
137 | .Fn pmap_remove , | | 137 | .Fn pmap_remove , |
138 | .Fn pmap_protect , | | 138 | .Fn pmap_protect , |
139 | .Fn pmap_kenter_pa , | | 139 | .Fn pmap_kenter_pa , |
140 | and | | 140 | and |
141 | .Fn pmap_kremove . | | 141 | .Fn pmap_kremove . |
142 | Callers of these functions must use the | | 142 | Callers of these functions must use the |
143 | .Fn pmap_update | | 143 | .Fn pmap_update |
144 | function to notify the | | 144 | function to notify the |
145 | .Nm | | 145 | .Nm |
146 | module that the mappings need to be made correct. | | 146 | module that the mappings need to be made correct. |
147 | Since the | | 147 | Since the |
148 | .Nm | | 148 | .Nm |
149 | module is provided with information as to which processors are | | 149 | module is provided with information as to which processors are |
150 | using a given physical map, the | | 150 | using a given physical map, the |
151 | .Nm | | 151 | .Nm |
152 | module may use whatever optimizations it has available to reduce | | 152 | module may use whatever optimizations it has available to reduce |
153 | the expense of virtual-to-physical mapping synchronization. | | 153 | the expense of virtual-to-physical mapping synchronization. |
154 | .Ss HEADER FILES AND DATA STRUCTURES | | 154 | .Ss HEADER FILES AND DATA STRUCTURES |
155 | Machine-dependent code must provide the header file | | 155 | Machine-dependent code must provide the header file |
156 | .In machine/pmap.h . | | 156 | .In machine/pmap.h . |
157 | This file contains the definition of the | | 157 | This file contains the definition of the |
158 | .Dv pmap | | 158 | .Dv pmap |
159 | structure: | | 159 | structure: |
160 | .Bd -literal -offset indent | | 160 | .Bd -literal -offset indent |
161 | struct pmap { | | 161 | struct pmap { |
162 | /* Contents defined by pmap implementation. */ | | 162 | /* Contents defined by pmap implementation. */ |
163 | }; | | 163 | }; |
164 | typedef struct pmap *pmap_t; | | 164 | typedef struct pmap *pmap_t; |
165 | .Ed | | 165 | .Ed |
166 | .Pp | | 166 | .Pp |
167 | This header file may also define other data structures that the | | 167 | This header file may also define other data structures that the |
168 | .Nm | | 168 | .Nm |
169 | implementation uses. | | 169 | implementation uses. |
170 | .Pp | | 170 | .Pp |
171 | Note that all prototypes for | | 171 | Note that all prototypes for |
172 | .Nm | | 172 | .Nm |
173 | interface functions are provided by the header file | | 173 | interface functions are provided by the header file |
174 | .In uvm/uvm_pmap.h . | | 174 | .In uvm/uvm_pmap.h . |
175 | It is possible to override this behavior by defining the | | 175 | It is possible to override this behavior by defining the |
176 | C preprocessor macro | | 176 | C preprocessor macro |
177 | .Dv PMAP_EXCLUDE_DECLS . | | 177 | .Dv PMAP_EXCLUDE_DECLS . |
178 | This may be used to add a layer of indirection to | | 178 | This may be used to add a layer of indirection to |
179 | .Nm | | 179 | .Nm |
180 | API calls, for handling different MMU types in a single | | 180 | API calls, for handling different MMU types in a single |
181 | .Nm | | 181 | .Nm |
182 | module, for example. | | 182 | module, for example. |
183 | If the | | 183 | If the |
184 | .Dv PMAP_EXCLUDE_DECLS | | 184 | .Dv PMAP_EXCLUDE_DECLS |
185 | macro is defined, | | 185 | macro is defined, |
186 | .In machine/pmap.h | | 186 | .In machine/pmap.h |
187 | .Em must | | 187 | .Em must |
188 | provide function prototypes in a block like so: | | 188 | provide function prototypes in a block like so: |
189 | .Bd -literal -offset indent | | 189 | .Bd -literal -offset indent |
190 | #ifdef _KERNEL /* not exposed to user namespace */ | | 190 | #ifdef _KERNEL /* not exposed to user namespace */ |
191 | __BEGIN_DECLS /* make safe for C++ */ | | 191 | __BEGIN_DECLS /* make safe for C++ */ |
192 | /* Prototypes go here. */ | | 192 | /* Prototypes go here. */ |
193 | __END_DECLS | | 193 | __END_DECLS |
194 | #endif /* _KERNEL */ | | 194 | #endif /* _KERNEL */ |
195 | .Ed | | 195 | .Ed |
196 | .Pp | | 196 | .Pp |
197 | The header file | | 197 | The header file |
198 | .In uvm/uvm_pmap.h | | 198 | .In uvm/uvm_pmap.h |
199 | defines a structure for tracking | | 199 | defines a structure for tracking |
200 | .Nm | | 200 | .Nm |
201 | statistics (see below). | | 201 | statistics (see below). |
202 | This structure is defined as: | | 202 | This structure is defined as: |
203 | .Bd -literal -offset indent | | 203 | .Bd -literal -offset indent |
204 | struct pmap_statistics { | | 204 | struct pmap_statistics { |
205 | long resident_count; /* number of mapped pages */ | | 205 | long resident_count; /* number of mapped pages */ |
206 | long wired_count; /* number of wired pages */ | | 206 | long wired_count; /* number of wired pages */ |
207 | }; | | 207 | }; |
208 | .Ed | | 208 | .Ed |
209 | .Ss WIRED MAPPINGS | | 209 | .Ss WIRED MAPPINGS |
210 | The | | 210 | The |
211 | .Nm | | 211 | .Nm |
212 | module is based on the premise that all information contained | | 212 | module is based on the premise that all information contained |
213 | in the physical maps it manages is redundant. | | 213 | in the physical maps it manages is redundant. |
214 | That is, physical map information may be | | 214 | That is, physical map information may be |
215 | .Dq forgotten | | 215 | .Dq forgotten |
216 | by the | | 216 | by the |
217 | .Nm | | 217 | .Nm |
218 | module in the event that it is necessary to do so; it can be rebuilt | | 218 | module in the event that it is necessary to do so; it can be rebuilt |
219 | by | | 219 | by |
220 | .Xr uvm 9 | | 220 | .Xr uvm 9 |
221 | by taking a page fault. | | 221 | by taking a page fault. |
222 | There is one exception to this rule: so-called | | 222 | There is one exception to this rule: so-called |
223 | .Dq wired | | 223 | .Dq wired |
224 | mappings may not be forgotten. | | 224 | mappings may not be forgotten. |
225 | Wired mappings are those for which either no high-level information | | 225 | Wired mappings are those for which either no high-level information |
226 | exists with which to rebuild the mapping, or mappings which are needed | | 226 | exists with which to rebuild the mapping, or mappings which are needed |
227 | by critical sections of code where taking a page fault is unacceptable. | | 227 | by critical sections of code where taking a page fault is unacceptable. |
228 | Information about which mappings are wired is provided to the | | 228 | Information about which mappings are wired is provided to the |
229 | .Nm | | 229 | .Nm |
230 | module when a mapping is established. | | 230 | module when a mapping is established. |
231 | .Ss MODIFIED/REFERENCED INFORMATION | | 231 | .Ss MODIFIED/REFERENCED INFORMATION |
232 | The | | 232 | The |
233 | .Nm | | 233 | .Nm |
234 | module is required to keep track of whether or not a page managed | | 234 | module is required to keep track of whether or not a page managed |
235 | by the virtual memory system has been referenced or modified. | | 235 | by the virtual memory system has been referenced or modified. |
236 | This information is used by | | 236 | This information is used by |
237 | .Xr uvm 9 | | 237 | .Xr uvm 9 |
238 | to determine what happens to the page when scanned by the | | 238 | to determine what happens to the page when scanned by the |
239 | pagedaemon. | | 239 | pagedaemon. |
240 | .Pp | | 240 | .Pp |
241 | Many CPUs provide hardware support for tracking | | 241 | Many CPUs provide hardware support for tracking |
242 | modified/referenced information. | | 242 | modified/referenced information. |
243 | However, many CPUs, particularly modern RISC CPUs, do not. | | 243 | However, many CPUs, particularly modern RISC CPUs, do not. |
244 | On CPUs which lack hardware support for modified/referenced tracking, the | | 244 | On CPUs which lack hardware support for modified/referenced tracking, the |
245 | .Nm | | 245 | .Nm |
246 | module must emulate it in software. | | 246 | module must emulate it in software. |
247 | There are several strategies for doing this, and the best strategy | | 247 | There are several strategies for doing this, and the best strategy |
248 | depends on the CPU. | | 248 | depends on the CPU. |
249 | .Pp | | 249 | .Pp |
250 | The | | 250 | The |
251 | .Dq referenced | | 251 | .Dq referenced |
252 | attribute is used by the pagedaemon to determine if a page is | | 252 | attribute is used by the pagedaemon to determine if a page is |
253 | .Dq active . | | 253 | .Dq active . |
254 | Active pages are not candidates for re-use in the page replacement algorithm. | | 254 | Active pages are not candidates for re-use in the page replacement algorithm. |
255 | Accurate referenced information is not required for correct operation; if | | 255 | Accurate referenced information is not required for correct operation; if |
256 | supplying referenced information for a page is not feasible, then the | | 256 | supplying referenced information for a page is not feasible, then the |
257 | .Nm | | 257 | .Nm |
258 | implementation should always consider the | | 258 | implementation should always consider the |
259 | .Dq referenced | | 259 | .Dq referenced |
260 | attribute to be | | 260 | attribute to be |
261 | .Dv false . | | 261 | .Dv false . |
262 | .Pp | | 262 | .Pp |
263 | The | | 263 | The |
264 | .Dq modified | | 264 | .Dq modified |
265 | attribute is used by the pagedaemon to determine if a page needs | | 265 | attribute is used by the pagedaemon to determine if a page needs |
266 | to be cleaned (written to backing store; swap space, a regular file, etc.). | | 266 | to be cleaned (written to backing store; swap space, a regular file, etc.). |
267 | Accurate modified information | | 267 | Accurate modified information |
268 | .Em must | | 268 | .Em must |
269 | be provided by the | | 269 | be provided by the |
270 | .Nm | | 270 | .Nm |
271 | module for correct operation of the virtual memory system. | | 271 | module for correct operation of the virtual memory system. |
272 | .Pp | | 272 | .Pp |
273 | Note that modified/referenced information is only tracked for | | 273 | Note that modified/referenced information is only tracked for |
274 | pages managed by the virtual memory system (i.e., pages for | | 274 | pages managed by the virtual memory system (i.e., pages for |
275 | which a vm_page structure exists). | | 275 | which a vm_page structure exists). |
276 | In addition, only | | 276 | In addition, only |
277 | .Dq managed | | 277 | .Dq managed |
278 | mappings of those pages have modified/referenced tracking. | | 278 | mappings of those pages have modified/referenced tracking. |
279 | Mappings entered with the | | 279 | Mappings entered with the |
280 | .Fn pmap_enter | | 280 | .Fn pmap_enter |
281 | function are | | 281 | function are |
282 | .Dq managed | | 282 | .Dq managed |
283 | mappings. | | 283 | mappings. |
284 | It is possible for | | 284 | It is possible for |
285 | .Dq unmanaged | | 285 | .Dq unmanaged |
286 | mappings of a page to be created, using the | | 286 | mappings of a page to be created, using the |
287 | .Fn pmap_kenter_pa | | 287 | .Fn pmap_kenter_pa |
288 | function. | | 288 | function. |
289 | The use of | | 289 | The use of |
290 | .Dq unmanaged | | 290 | .Dq unmanaged |
291 | mappings should be limited to code which may execute in interrupt context | | 291 | mappings should be limited to code which may execute in interrupt context |
292 | (for example, the kernel memory allocator), or to enter mappings for | | 292 | (for example, the kernel memory allocator), or to enter mappings for |
293 | physical addresses which are not managed by the virtual memory system. | | 293 | physical addresses which are not managed by the virtual memory system. |
294 | .Dq Unmanaged | | 294 | .Dq Unmanaged |
295 | mappings may only be entered into the kernel's virtual address space. | | 295 | mappings may only be entered into the kernel's virtual address space. |
296 | This constraint is placed on the callers of the | | 296 | This constraint is placed on the callers of the |
297 | .Fn pmap_kenter_pa | | 297 | .Fn pmap_kenter_pa |
298 | and | | 298 | and |
299 | .Fn pmap_kremove | | 299 | .Fn pmap_kremove |
300 | functions so that the | | 300 | functions so that the |
301 | .Nm | | 301 | .Nm |
302 | implementation need not block interrupts when manipulating data | | 302 | implementation need not block interrupts when manipulating data |
303 | structures or holding locks. | | 303 | structures or holding locks. |
304 | .Pp | | 304 | .Pp |
305 | Also note that the modified/referenced information must be tracked | | 305 | Also note that the modified/referenced information must be tracked |
306 | on a per-page basis; they are not attributes of a mapping, but attributes | | 306 | on a per-page basis; they are not attributes of a mapping, but attributes |
307 | of a page. | | 307 | of a page. |
308 | Therefore, even after all mappings for a given page have | | 308 | Therefore, even after all mappings for a given page have |
309 | been removed, the modified/referenced information for that page | | 309 | been removed, the modified/referenced information for that page |
310 | .Em must | | 310 | .Em must |
311 | be preserved. | | 311 | be preserved. |
312 | The only time the modified/referenced attributes may | | 312 | The only time the modified/referenced attributes may |
313 | be cleared is when the virtual memory system explicitly calls the | | 313 | be cleared is when the virtual memory system explicitly calls the |
314 | .Fn pmap_clear_modify | | 314 | .Fn pmap_clear_modify |
315 | and | | 315 | and |
316 | .Fn pmap_clear_reference | | 316 | .Fn pmap_clear_reference |
317 | functions. | | 317 | functions. |
318 | These functions must also change any internal state necessary to detect | | 318 | These functions must also change any internal state necessary to detect |
319 | the page being modified or referenced again after the modified or | | 319 | the page being modified or referenced again after the modified or |
320 | referenced state is cleared. | | 320 | referenced state is cleared. |
321 | (Prior to | | 321 | (Prior to |
322 | .Nx 1.6 , | | 322 | .Nx 1.6 , |
323 | .Nm | | 323 | .Nm |
324 | implementations could get away without this because UVM (and Mach VM | | 324 | implementations could get away without this because UVM (and Mach VM |
325 | before that) always called | | 325 | before that) always called |
326 | .Fn pmap_page_protect | | 326 | .Fn pmap_page_protect |
327 | before clearing the modified or referenced state, but UVM has been changed | | 327 | before clearing the modified or referenced state, but UVM has been changed |
328 | to not do this anymore, so all | | 328 | to not do this anymore, so all |
329 | .Nm | | 329 | .Nm |
330 | implementations must now handle this.) | | 330 | implementations must now handle this.) |
331 | .Ss STATISTICS | | 331 | .Ss STATISTICS |
332 | The | | 332 | The |
333 | .Nm | | 333 | .Nm |
334 | is required to keep statistics as to the number of | | 334 | is required to keep statistics as to the number of |
335 | .Dq resident | | 335 | .Dq resident |
336 | pages and the number of | | 336 | pages and the number of |
337 | .Dq wired | | 337 | .Dq wired |
338 | pages. | | 338 | pages. |
339 | .Pp | | 339 | .Pp |
340 | A | | 340 | A |
341 | .Dq resident | | 341 | .Dq resident |
342 | page is one for which a mapping exists. | | 342 | page is one for which a mapping exists. |
343 | This statistic is used to compute the resident size of a process and | | 343 | This statistic is used to compute the resident size of a process and |
344 | enforce resource limits. | | 344 | enforce resource limits. |
345 | Only pages (whether managed by the virtual memory system or not) | | 345 | Only pages (whether managed by the virtual memory system or not) |
346 | which are mapped into a physical map should be counted in the resident | | 346 | which are mapped into a physical map should be counted in the resident |
347 | count. | | 347 | count. |
348 | .Pp | | 348 | .Pp |
349 | A | | 349 | A |
350 | .Dq wired | | 350 | .Dq wired |
351 | page is one for which a wired mapping exists. | | 351 | page is one for which a wired mapping exists. |
352 | This statistic is used to enforce resource limits. | | 352 | This statistic is used to enforce resource limits. |
353 | .Pp | | 353 | .Pp |
354 | Note that it is recommended (though not required) that the | | 354 | Note that it is recommended (though not required) that the |
355 | .Nm | | 355 | .Nm |
356 | implementation use the | | 356 | implementation use the |
357 | .Dv pmap_statistics | | 357 | .Dv pmap_statistics |
358 | structure in the tracking of | | 358 | structure in the tracking of |
359 | .Nm | | 359 | .Nm |
360 | statistics by placing it inside the | | 360 | statistics by placing it inside the |
361 | .Dv pmap | | 361 | .Dv pmap |
362 | structure and adjusting the counts when mappings are established, changed, | | 362 | structure and adjusting the counts when mappings are established, changed, |
363 | or removed. | | 363 | or removed. |
364 | This avoids potentially expensive data structure traversals when the | | 364 | This avoids potentially expensive data structure traversals when the |
365 | statistics are queried. | | 365 | statistics are queried. |
366 | .Ss REQUIRED FUNCTIONS | | 366 | .Ss REQUIRED FUNCTIONS |
367 | This section describes functions that a | | 367 | This section describes functions that a |
368 | .Nm | | 368 | .Nm |
369 | module must provide to the virtual memory system. | | 369 | module must provide to the virtual memory system. |
370 | .Bl -tag -width indent -offset indent | | 370 | .Bl -tag -width indent -offset indent |
371 | .It void Fn "pmap_init" "void" | | 371 | .It void Fn "pmap_init" "void" |
372 | This function initializes the | | 372 | This function initializes the |
373 | .Nm | | 373 | .Nm |
374 | module. | | 374 | module. |
375 | It is called by | | 375 | It is called by |
376 | .Fn uvm_init | | 376 | .Fn uvm_init |
377 | to initialize any data structures that the module needs to | | 377 | to initialize any data structures that the module needs to |
378 | manage physical maps. | | 378 | manage physical maps. |
379 | .It pmap_t Fn "pmap_kernel" "void" | | 379 | .It pmap_t Fn "pmap_kernel" "void" |
380 | A machine-independent macro which expands to | | 380 | A machine-independent macro which expands to |
381 | .Va kernel_pmap_ptr . | | 381 | .Va kernel_pmap_ptr . |
382 | This variable must be exported by the platform's pmap module and it | | 382 | This variable must be exported by the platform's pmap module and it |
383 | must point to the kernel pmap. | | 383 | must point to the kernel pmap. |
384 | .It void Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" | | 384 | .It void Fn "pmap_virtual_space" "vaddr_t *vstartp" "vaddr_t *vendp" |
385 | The | | 385 | The |
386 | .Fn pmap_virtual_space | | 386 | .Fn pmap_virtual_space |
387 | function is called to determine the initial kernel virtual address | | 387 | function is called to determine the initial kernel virtual address |
388 | space beginning and end. | | 388 | space beginning and end. |
389 | These values are used to create the kernel's virtual memory map. | | 389 | These values are used to create the kernel's virtual memory map. |
390 | The function must set | | 390 | The function must set |
391 | .Fa *vstartp | | 391 | .Fa *vstartp |
392 | to the first kernel virtual address that will be managed by | | 392 | to the first kernel virtual address that will be managed by |
393 | .Xr uvm 9 , | | 393 | .Xr uvm 9 , |
394 | and must set | | 394 | and must set |
395 | .Fa *vendp | | 395 | .Fa *vendp |
396 | to the last kernel virtual address that will be managed by | | 396 | to the last kernel virtual address that will be managed by |
397 | .Xr uvm 9 . | | 397 | .Xr uvm 9 . |
398 | .Pp | | 398 | .Pp |
399 | If the | | 399 | If the |
400 | .Fn pmap_growkernel | | 400 | .Fn pmap_growkernel |
401 | feature is used by a | | 401 | feature is used by a |
402 | .Nm | | 402 | .Nm |
403 | implementation, then | | 403 | implementation, then |
404 | .Fa *vendp | | 404 | .Fa *vendp |
405 | should be set to the maximum kernel virtual address allowed by the | | 405 | should be set to the maximum kernel virtual address allowed by the |
406 | implementation. | | 406 | implementation. |
407 | If | | 407 | If |
408 | .Fn pmap_growkernel | | 408 | .Fn pmap_growkernel |
409 | is not used, then | | 409 | is not used, then |
410 | .Fa *vendp | | 410 | .Fa *vendp |
411 | .Em must | | 411 | .Em must |
412 | be set to the maximum kernel virtual address that can be mapped with | | 412 | be set to the maximum kernel virtual address that can be mapped with |
413 | the resources currently allocated to map the kernel virtual address | | 413 | the resources currently allocated to map the kernel virtual address |
414 | space. | | 414 | space. |
415 | .It pmap_t Fn "pmap_create" "void" | | 415 | .It pmap_t Fn "pmap_create" "void" |
416 | Create a physical map and return it to the caller. | | 416 | Create a physical map and return it to the caller. |
417 | The reference count on the new map is 1. | | 417 | The reference count on the new map is 1. |
418 | .It void Fn "pmap_destroy" "pmap_t pmap" | | 418 | .It void Fn "pmap_destroy" "pmap_t pmap" |
419 | Drop the reference count on the specified physical map. | | 419 | Drop the reference count on the specified physical map. |
420 | If the reference count drops to 0, all resources associated with the | | 420 | If the reference count drops to 0, all resources associated with the |
421 | physical map are released and the physical map destroyed. | | 421 | physical map are released and the physical map destroyed. |
422 | In the case of a drop-to-0, no mappings will exist in the map. | | 422 | In the case of a drop-to-0, no mappings will exist in the map. |
423 | The | | 423 | The |
424 | .Nm | | 424 | .Nm |
425 | implementation may assert this. | | 425 | implementation may assert this. |
426 | .It void Fn "pmap_reference" "pmap_t pmap" | | 426 | .It void Fn "pmap_reference" "pmap_t pmap" |
427 | Increment the reference count on the specified physical map. | | 427 | Increment the reference count on the specified physical map. |
428 | .It long Fn "pmap_resident_count" "pmap_t pmap" | | 428 | .It long Fn "pmap_resident_count" "pmap_t pmap" |
429 | Query the | | 429 | Query the |
430 | .Dq resident pages | | 430 | .Dq resident pages |
431 | statistic for | | 431 | statistic for |
432 | .Fa pmap . | | 432 | .Fa pmap . |
433 | .Pp | | 433 | .Pp |
434 | Note that this function may be provided as a C pre-processor macro. | | 434 | Note that this function may be provided as a C pre-processor macro. |
435 | .It long Fn "pmap_wired_count" "pmap_t pmap" | | 435 | .It long Fn "pmap_wired_count" "pmap_t pmap" |
436 | Query the | | 436 | Query the |
437 | .Dq wired pages | | 437 | .Dq wired pages |
438 | statistic for | | 438 | statistic for |
439 | .Fa pmap . | | 439 | .Fa pmap . |
440 | .Pp | | 440 | .Pp |
441 | Note that this function may be provided as a C pre-processor macro. | | 441 | Note that this function may be provided as a C pre-processor macro. |
442 | .It int Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" \ | | 442 | .It int Fn "pmap_enter" "pmap_t pmap" "vaddr_t va" "paddr_t pa" \ |
443 | "vm_prot_t prot" "u_int flags" | | 443 | "vm_prot_t prot" "u_int flags" |
444 | Create a mapping in physical map | | 444 | Create a mapping in physical map |
445 | .Fa pmap | | 445 | .Fa pmap |
446 | for the physical address | | 446 | for the physical address |
447 | .Fa pa | | 447 | .Fa pa |
448 | at the virtual address | | 448 | at the virtual address |
449 | .Fa va | | 449 | .Fa va |
450 | with protection specified by bits in | | 450 | with protection specified by bits in |
451 | .Fa prot : | | 451 | .Fa prot : |
452 | .Bl -tag -width "VM_PROT_EXECUTE " -offset indent | | 452 | .Bl -tag -width "VM_PROT_EXECUTE " -offset indent |
453 | .It VM_PROT_READ | | 453 | .It VM_PROT_READ |
454 | The mapping must allow reading. | | 454 | The mapping must allow reading. |
455 | .It VM_PROT_WRITE | | 455 | .It VM_PROT_WRITE |
456 | The mapping must allow writing. | | 456 | The mapping must allow writing. |
457 | .It VM_PROT_EXECUTE | | 457 | .It VM_PROT_EXECUTE |
458 | The page mapped contains instructions that will be executed by the | | 458 | The page mapped contains instructions that will be executed by the |
459 | processor. | | 459 | processor. |
460 | .El | | 460 | .El |
461 | .Pp | | 461 | .Pp |
462 | The | | 462 | The |
463 | .Fa flags | | 463 | .Fa flags |
464 | argument contains protection bits (the same bits as used in the | | 464 | argument contains protection bits (the same bits as used in the |
465 | .Fa prot | | 465 | .Fa prot |
466 | argument) indicating the type of access that caused the mapping to | | 466 | argument) indicating the type of access that caused the mapping to |
467 | be created. | | 467 | be created. |
468 | This information may be used to seed modified/referenced | | 468 | This information may be used to seed modified/referenced |
469 | information for the page being mapped, possibly avoiding redundant faults | | 469 | information for the page being mapped, possibly avoiding redundant faults |
470 | on platforms that track modified/referenced information in software. | | 470 | on platforms that track modified/referenced information in software. |
471 | Other information provided by | | 471 | Other information provided by |
472 | .Fa flags : | | 472 | .Fa flags : |
473 | .Bl -tag -width "PMAP_CANFAIL " -offset indent | | 473 | .Bl -tag -width "PMAP_CANFAIL " -offset indent |
474 | .It PMAP_WIRED | | 474 | .It PMAP_WIRED |
475 | The mapping being created is a wired mapping. | | 475 | The mapping being created is a wired mapping. |
476 | .It PMAP_CANFAIL | | 476 | .It PMAP_CANFAIL |
477 | The call to | | 477 | The call to |
478 | .Fn pmap_enter | | 478 | .Fn pmap_enter |
479 | is allowed to fail. | | 479 | is allowed to fail. |
480 | If this flag is | | 480 | If this flag is |
481 | .Em not | | 481 | .Em not |
482 | set, and the | | 482 | set, and the |
483 | .Fn pmap_enter | | 483 | .Fn pmap_enter |
484 | call is unable to create the mapping, perhaps due to insufficient | | 484 | call is unable to create the mapping, perhaps due to insufficient |
485 | resources, the | | 485 | resources, the |
486 | .Nm | | 486 | .Nm |
487 | module must panic. | | 487 | module must panic. |
488 | .It PMAP_NOCACHE | | 488 | .It PMAP_NOCACHE |
489 | The mapping being created is | | 489 | The mapping being created is |
490 | .Em not | | 490 | .Em not |
491 | cached. | | 491 | cached. |
492 | Write accesses have a write-through policy. | | 492 | Write accesses have a write-through policy. |
493 | No speculative memory accesses. | | 493 | No speculative memory accesses. |
494 | .It PMAP_WRITE_COMBINE | | 494 | .It PMAP_WRITE_COMBINE |
495 | The mapping being created is | | 495 | The mapping being created is |
496 | .Em not | | 496 | .Em not |
497 | cached. | | 497 | cached. |
498 | Writes are combined and done in one burst. | | 498 | Writes are combined and done in one burst. |
499 | Speculative read accesses may be allowed. | | 499 | Speculative read accesses may be allowed. |
500 | .It PMAP_WRITE_BACK | | 500 | .It PMAP_WRITE_BACK |
501 | All accesses to the created mapping are cached. | | 501 | All accesses to the created mapping are cached. |
502 | On reads, cachelines become shared or exclusive if allocated on cache miss. | | 502 | On reads, cachelines become shared or exclusive if allocated on cache miss. |
503 | On writes, cachelines become modified on a cache miss. | | 503 | On writes, cachelines become modified on a cache miss. |
504 | .It PMAP_NOCACHE_OVR | | 504 | .It PMAP_NOCACHE_OVR |
505 | Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). | | 505 | Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). |
506 | .El | | 506 | .El |
507 | .Pp | | 507 | .Pp |
508 | The access type provided in the | | 508 | The access type provided in the |
509 | .Fa flags | | 509 | .Fa flags |
510 | argument will never exceed the protection specified by | | 510 | argument will never exceed the protection specified by |
511 | .Fa prot . | | 511 | .Fa prot . |
512 | The | | 512 | The |
513 | .Nm | | 513 | .Nm |
514 | implementation may assert this. | | 514 | implementation may assert this. |
515 | Note that on systems that do not provide hardware support for | | 515 | Note that on systems that do not provide hardware support for |
516 | tracking modified/referenced information, modified/referenced | | 516 | tracking modified/referenced information, modified/referenced |
517 | information for the page | | 517 | information for the page |
518 | .Em must | | 518 | .Em must |
519 | be seeded with the access type provided in | | 519 | be seeded with the access type provided in |
520 | .Fa flags | | 520 | .Fa flags |
521 | if the | | 521 | if the |
522 | .Dv PMAP_WIRED | | 522 | .Dv PMAP_WIRED |
523 | flag is set. | | 523 | flag is set. |
524 | This is to prevent a fault for the purpose of tracking | | 524 | This is to prevent a fault for the purpose of tracking |
525 | modified/referenced information from occurring while the system is in | | 525 | modified/referenced information from occurring while the system is in |
526 | a critical section where a fault would be unacceptable. | | 526 | a critical section where a fault would be unacceptable. |
527 | .Pp | | 527 | .Pp |
528 | Note that | | 528 | Note that |
529 | .Fn pmap_enter | | 529 | .Fn pmap_enter |
530 | is sometimes called to enter a mapping at a virtual address | | 530 | is sometimes called to enter a mapping at a virtual address |
531 | for which a mapping already exists. | | 531 | for which a mapping already exists. |
532 | In this situation, the implementation must take whatever action is | | 532 | In this situation, the implementation must take whatever action is |
533 | necessary to invalidate the previous mapping before entering the new one. | | 533 | necessary to invalidate the previous mapping before entering the new one. |
534 | .Pp | | 534 | .Pp |
535 | Also note that | | 535 | Also note that |
536 | .Fn pmap_enter | | 536 | .Fn pmap_enter |
537 | is sometimes called to change the protection for a pre-existing | | 537 | is sometimes called to change the protection for a pre-existing |
538 | mapping, or to change the | | 538 | mapping, or to change the |
539 | .Dq wired | | 539 | .Dq wired |
540 | attribute for a pre-existing mapping. | | 540 | attribute for a pre-existing mapping. |
541 | .Pp | | 541 | .Pp |
542 | The | | 542 | The |
543 | .Fn pmap_enter | | 543 | .Fn pmap_enter |
544 | function returns 0 on success or an error code indicating the mode | | 544 | function returns 0 on success or an error code indicating the mode |
545 | of failure. | | 545 | of failure. |
546 | .It void Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" | | 546 | .It void Fn "pmap_remove" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" |
547 | Remove mappings from the virtual address range | | 547 | Remove mappings from the virtual address range |
548 | .Fa sva | | 548 | .Fa sva |
549 | to | | 549 | to |
550 | .Fa eva | | 550 | .Fa eva |
551 | from the specified physical map. | | 551 | from the specified physical map. |
552 | .It bool Fn "pmap_remove_all" "pmap_t pmap" | | 552 | .It bool Fn "pmap_remove_all" "pmap_t pmap" |
553 | This function is a hint to the | | 553 | This function is a hint to the |
554 | .Nm pmap | | 554 | .Nm pmap |
555 | implementation that all entries in | | 555 | implementation that all entries in |
556 | .Fa pmap | | 556 | .Fa pmap |
557 | will be removed before any more entries are entered. | | 557 | will be removed before any more entries are entered. |
558 | Following this call, there will be | | 558 | Following this call, there will be |
559 | .Fn pmap_remove | | 559 | .Fn pmap_remove |
560 | calls resulting in every mapping being removed, followed by either | | 560 | calls resulting in every mapping being removed, followed by either |
561 | .Fn pmap_destroy | | 561 | .Fn pmap_destroy |
562 | or | | 562 | or |
563 | .Fn pmap_update . | | 563 | .Fn pmap_update . |
564 | No other | | 564 | No other |
565 | .Nm pmap | | 565 | .Nm pmap |
566 | interfaces which take | | 566 | interfaces which take |
567 | .Fa pmap | | 567 | .Fa pmap |
568 | as an argument will be called during this process. | | 568 | as an argument will be called during this process. |
569 | Other interfaces which might need to access | | 569 | Other interfaces which might need to access |
570 | .Fa pmap | | 570 | .Fa pmap |
571 | (such as | | 571 | (such as |
572 | .Fn pmap_page_protect ) | | 572 | .Fn pmap_page_protect ) |
573 | are permitted during this process. | | 573 | are permitted during this process. |
574 | .Pp | | 574 | .Pp |
575 | The | | 575 | The |
576 | .Nm pmap | | 576 | .Nm pmap |
577 | implementation is free to either remove all the | | 577 | implementation is free to either remove all the |
578 | .Nm pmap Ns 's | | 578 | .Nm pmap Ns 's |
579 | mappings immediately in | | 579 | mappings immediately in |
580 | .Fn pmap_remove_all , | | 580 | .Fn pmap_remove_all , |
581 | or to use the knowledge of the upcoming | | 581 | or to use the knowledge of the upcoming |
582 | .Fn pmap_remove | | 582 | .Fn pmap_remove |
583 | calls to optimize the removals (or to just ignore this call). | | 583 | calls to optimize the removals (or to just ignore this call). |
584 | .Pp | | 584 | .Pp |
585 | If all mappings in the address space have been removed, | | 585 | If all mappings in the address space have been removed, |
586 | .Fn pmap_remove_all | | 586 | .Fn pmap_remove_all |
587 | should return | | 587 | should return |
588 | .Dv true | | 588 | .Dv true |
589 | to indicate that that the pmap is now empty. | | 589 | to indicate that that the pmap is now empty. |
590 | In this case UVM will skip all subsequent calls to | | 590 | In this case UVM will skip all subsequent calls to |
591 | .Fn pmap_remove | | 591 | .Fn pmap_remove |
592 | and | | 592 | and |
593 | .Fn pmap_update | | 593 | .Fn pmap_update |
594 | for the pmap, that would otherwise be required to clean it out. | | 594 | for the pmap, that would otherwise be required to clean it out. |
595 | If any mappings could possibly remain, | | 595 | If any mappings could possibly remain, |
596 | .Fn pmap_remove_all | | 596 | .Fn pmap_remove_all |
597 | must return | | 597 | must return |
598 | .Dv false . | | 598 | .Dv false . |
599 | .It void Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" \ | | 599 | .It void Fn "pmap_protect" "pmap_t pmap" "vaddr_t sva" "vaddr_t eva" \ |
600 | "vm_prot_t prot" | | 600 | "vm_prot_t prot" |
601 | Set the protection of the mappings in the virtual address range | | 601 | Set the protection of the mappings in the virtual address range |
602 | .Fa sva | | 602 | .Fa sva |
603 | to | | 603 | to |
604 | .Fa eva | | 604 | .Fa eva |
605 | in the specified physical map. | | 605 | in the specified physical map. |
606 | .It void Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" | | 606 | .It void Fn "pmap_unwire" "pmap_t pmap" "vaddr_t va" |
607 | Clear the | | 607 | Clear the |
608 | .Dq wired | | 608 | .Dq wired |
609 | attribute on the mapping for virtual address | | 609 | attribute on the mapping for virtual address |
610 | .Fa va . | | 610 | .Fa va . |
611 | .It bool Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" | | 611 | .It bool Fn "pmap_extract" "pmap_t pmap" "vaddr_t va" "paddr_t *pap" |
612 | This function extracts a mapping from the specified physical map. | | 612 | This function extracts a mapping from the specified physical map. |
613 | It serves two purposes: to determine if a mapping exists for the specified | | 613 | It serves two purposes: to determine if a mapping exists for the specified |
614 | virtual address, and to determine what physical address is mapped at the | | 614 | virtual address, and to determine what physical address is mapped at the |
615 | specified virtual address. | | 615 | specified virtual address. |
616 | The | | 616 | The |
617 | .Fn pmap_extract | | 617 | .Fn pmap_extract |
618 | should return the physical address for any kernel-accessible address, | | 618 | should return the physical address for any kernel-accessible address, |
619 | including KSEG-style direct-mapped kernel addresses. | | 619 | including KSEG-style direct-mapped kernel addresses. |
620 | .Pp | | 620 | .Pp |
621 | The | | 621 | The |
622 | .Fn pmap_extract | | 622 | .Fn pmap_extract |
623 | function returns | | 623 | function returns |
624 | .Dv false | | 624 | .Dv false |
625 | if a mapping for | | 625 | if a mapping for |
626 | .Fa va | | 626 | .Fa va |
627 | does not exist. | | 627 | does not exist. |
628 | Otherwise, it returns | | 628 | Otherwise, it returns |
629 | .Dv true | | 629 | .Dv true |
630 | and places the physical address mapped at | | 630 | and places the physical address mapped at |
631 | .Fa va | | 631 | .Fa va |
632 | into | | 632 | into |
633 | .Fa *pap | | 633 | .Fa *pap |
634 | if the | | 634 | if the |
635 | .Fa pap | | 635 | .Fa pap |
636 | argument is non-NULL. | | 636 | argument is non-NULL. |
637 | .It void Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ | | 637 | .It void Fn "pmap_kenter_pa" "vaddr_t va" "paddr_t pa" "vm_prot_t prot" \ |
638 | "u_int flags" | | 638 | "u_int flags" |
639 | Enter an | | 639 | Enter an |
640 | .Dq unmanaged | | 640 | .Dq unmanaged |
641 | mapping for physical address | | 641 | mapping for physical address |
642 | .Fa pa | | 642 | .Fa pa |
643 | at virtual address | | 643 | at virtual address |
644 | .Fa va | | 644 | .Fa va |
645 | with protection specified by bits in | | 645 | with protection specified by bits in |
646 | .Fa prot : | | 646 | .Fa prot : |
647 | .Bl -tag -width "VM_PROT_EXECUTE " -offset indent | | 647 | .Bl -tag -width "VM_PROT_EXECUTE " -offset indent |
648 | .It VM_PROT_READ | | 648 | .It VM_PROT_READ |
649 | The mapping must allow reading. | | 649 | The mapping must allow reading. |
650 | .It VM_PROT_WRITE | | 650 | .It VM_PROT_WRITE |
651 | The mapping must allow writing. | | 651 | The mapping must allow writing. |
652 | .It VM_PROT_EXECUTE | | 652 | .It VM_PROT_EXECUTE |
653 | The page mapped contains instructions that will be executed by the | | 653 | The page mapped contains instructions that will be executed by the |
654 | processor. | | 654 | processor. |
655 | .El | | 655 | .El |
656 | .Pp | | 656 | .Pp |
657 | Information provided by | | 657 | Information provided by |
658 | .Fa flags : | | 658 | .Fa flags : |
659 | .Bl -tag -width "PMAP_NOCACHE " -offset indent | | 659 | .Bl -tag -width "PMAP_NOCACHE " -offset indent |
660 | .It PMAP_NOCACHE | | 660 | .It PMAP_NOCACHE |
661 | The mapping being created is | | 661 | The mapping being created is |
662 | .Em not | | 662 | .Em not |
663 | cached. | | 663 | cached. |
664 | Write accesses have a write-through policy. | | 664 | Write accesses have a write-through policy. |
665 | No speculative memory accesses. | | 665 | No speculative memory accesses. |
666 | .It PMAP_WRITE_COMBINE | | 666 | .It PMAP_WRITE_COMBINE |
667 | The mapping being created is | | 667 | The mapping being created is |
668 | .Em not | | 668 | .Em not |
669 | cached. | | 669 | cached. |
670 | Writes are combined and done in one burst. | | 670 | Writes are combined and done in one burst. |
671 | Speculative read accesses may be allowed. | | 671 | Speculative read accesses may be allowed. |
672 | .It PMAP_WRITE_BACK | | 672 | .It PMAP_WRITE_BACK |
673 | All accesses to the created mapping are cached. | | 673 | All accesses to the created mapping are cached. |
674 | On reads, cachelines become shared or exclusive if allocated on cache miss. | | 674 | On reads, cachelines become shared or exclusive if allocated on cache miss. |
675 | On writes, cachelines become modified on a cache miss. | | 675 | On writes, cachelines become modified on a cache miss. |
676 | .It PMAP_NOCACHE_OVR | | 676 | .It PMAP_NOCACHE_OVR |
677 | Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). | | 677 | Same as PMAP_NOCACHE but mapping is overrideable (e.g. on x86 by MTRRs). |
678 | .El | | 678 | .El |
679 | .Pp | | 679 | .Pp |
680 | Mappings of this type are always | | 680 | Mappings of this type are always |
681 | .Dq wired , | | 681 | .Dq wired , |
682 | and are unaffected by routines that alter the protection of pages | | 682 | and are unaffected by routines that alter the protection of pages |
683 | (such as | | 683 | (such as |
684 | .Fn pmap_page_protect ) . | | 684 | .Fn pmap_page_protect ) . |
685 | Such mappings are also not included in the gathering of modified/referenced | | 685 | Such mappings are also not included in the gathering of modified/referenced |
686 | information about a page. | | 686 | information about a page. |
687 | Mappings entered with | | 687 | Mappings entered with |
688 | .Fn pmap_kenter_pa | | 688 | .Fn pmap_kenter_pa |
689 | by machine-independent code | | 689 | by machine-independent code |
690 | .Em must not | | 690 | .Em must not |
691 | have execute permission, as the | | 691 | have execute permission, as the |
692 | data structures required to track execute permission of a page may not | | 692 | data structures required to track execute permission of a page may not |
693 | be available to | | 693 | be available to |
694 | .Fn pmap_kenter_pa . | | 694 | .Fn pmap_kenter_pa . |
695 | Machine-independent code is not allowed to enter a mapping with | | 695 | Machine-independent code is not allowed to enter a mapping with |
696 | .Fn pmap_kenter_pa | | 696 | .Fn pmap_kenter_pa |
697 | at a virtual address for which a valid mapping already exists. | | 697 | at a virtual address for which a valid mapping already exists. |
698 | Mappings created with | | 698 | Mappings created with |
699 | .Fn pmap_kenter_pa | | 699 | .Fn pmap_kenter_pa |
700 | may be removed only with a call to | | 700 | may be removed only with a call to |
701 | .Fn pmap_kremove . | | 701 | .Fn pmap_kremove . |
702 | .Pp | | 702 | .Pp |
703 | Note that | | 703 | Note that |
704 | .Fn pmap_kenter_pa | | 704 | .Fn pmap_kenter_pa |
705 | must be safe for use in interrupt context. | | 705 | must be safe for use in interrupt context. |
706 | .Fn splvm | | 706 | .Fn splvm |
707 | blocks interrupts that might cause | | 707 | blocks interrupts that might cause |
708 | .Fn pmap_kenter_pa | | 708 | .Fn pmap_kenter_pa |
709 | to be called. | | 709 | to be called. |
710 | .It void Fn "pmap_kremove" "vaddr_t va" "vsize_t size" | | 710 | .It void Fn "pmap_kremove" "vaddr_t va" "vsize_t size" |
711 | Remove all mappings starting at virtual address | | 711 | Remove all mappings starting at virtual address |
712 | .Fa va | | 712 | .Fa va |
713 | for | | 713 | for |
714 | .Fa size | | 714 | .Fa size |
715 | bytes from the kernel physical map. | | 715 | bytes from the kernel physical map. |
716 | All mappings that are removed must be the | | 716 | All mappings that are removed must be the |
717 | .Dq unmanaged | | 717 | .Dq unmanaged |
718 | type created with | | 718 | type created with |
719 | .Fn pmap_kenter_pa . | | 719 | .Fn pmap_kenter_pa . |
720 | The implementation may assert this. | | 720 | The implementation may assert this. |
721 | .It void Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ | | 721 | .It void Fn "pmap_copy" "pmap_t dst_map" "pmap_t src_map" "vaddr_t dst_addr" \ |
722 | "vsize_t len" "vaddr_t src_addr" | | 722 | "vsize_t len" "vaddr_t src_addr" |
723 | This function copies the mappings starting at | | 723 | This function copies the mappings starting at |
724 | .Fa src_addr | | 724 | .Fa src_addr |
725 | in | | 725 | in |
726 | .Fa src_map | | 726 | .Fa src_map |
727 | for | | 727 | for |
728 | .Fa len | | 728 | .Fa len |
729 | bytes into | | 729 | bytes into |
730 | .Fa dst_map | | 730 | .Fa dst_map |
731 | starting at | | 731 | starting at |
732 | .Fa dst_addr . | | 732 | .Fa dst_addr . |
733 | .Pp | | 733 | .Pp |
734 | Note that while this function is required to be provided by a | | 734 | Note that while this function is required to be provided by a |
735 | .Nm | | 735 | .Nm |
736 | implementation, it is not actually required to do anything. | | 736 | implementation, it is not actually required to do anything. |
737 | .Fn pmap_copy | | 737 | .Fn pmap_copy |
738 | is merely advisory (it is used in the | | 738 | is merely advisory (it is used in the |
739 | .Xr fork 2 | | 739 | .Xr fork 2 |
740 | path to | | 740 | path to |
741 | .Dq pre-fault | | 741 | .Dq pre-fault |
742 | the child's address space). | | 742 | the child's address space). |
743 | .It void Fn "pmap_update" "pmap_t pmap" | | 743 | .It void Fn "pmap_update" "pmap_t pmap" |
744 | This function is used to inform the | | 744 | This function is used to inform the |
745 | .Nm | | 745 | .Nm |
746 | module that all physical mappings, for the specified pmap, must now be | | 746 | module that all physical mappings, for the specified pmap, must now be |
747 | correct. | | 747 | correct. |
748 | That is, all delayed virtual-to-physical mappings updates (such as TLB | | 748 | That is, all delayed virtual-to-physical mappings updates (such as TLB |
749 | invalidation or address space identifier updates) must be completed. | | 749 | invalidation or address space identifier updates) must be completed. |
750 | This routine must be used after calls to | | 750 | This routine must be used after calls to |
751 | .Fn pmap_enter , | | 751 | .Fn pmap_enter , |
752 | .Fn pmap_remove , | | 752 | .Fn pmap_remove , |
753 | .Fn pmap_protect , | | 753 | .Fn pmap_protect , |
754 | .Fn pmap_kenter_pa , | | 754 | .Fn pmap_kenter_pa , |
755 | and | | 755 | and |
756 | .Fn pmap_kremove | | 756 | .Fn pmap_kremove |
757 | in order to ensure correct operation of the virtual memory system. | | 757 | in order to ensure correct operation of the virtual memory system. |
758 | .Pp | | 758 | .Pp |
759 | If a | | 759 | If a |
760 | .Nm | | 760 | .Nm |
761 | implementation does not delay virtual-to-physical mapping updates, | | 761 | implementation does not delay virtual-to-physical mapping updates, |
762 | .Fn pmap_update | | 762 | .Fn pmap_update |
763 | has no operation. | | 763 | has no operation. |
764 | In this case, the call may be deleted using a C pre-processor macro in | | 764 | In this case, the call may be deleted using a C pre-processor macro in |
765 | .In machine/pmap.h . | | 765 | .In machine/pmap.h . |
766 | .It void Fn "pmap_activate" "struct lwp *l" | | 766 | .It void Fn "pmap_activate" "struct lwp *l" |
767 | Activate the physical map used by the process behind lwp | | 767 | Activate the physical map used by the process behind lwp |
768 | .Fa l . | | 768 | .Fa l . |
| | | 769 | on the current CPU. |
769 | This is called by the virtual memory system when the | | 770 | This is called by the virtual memory system when the |
770 | virtual memory context for a process is changed, and is also | | 771 | virtual memory context for a process is changed, and is also |
771 | often used by machine-dependent context switch code to program | | 772 | used by the context switch code to program the memory management hardware |
772 | the memory management hardware with the process's page table | | 773 | with the process's page table base, etc. |
773 | base, etc. | | 774 | All calls to |
774 | Note that | | | |
775 | .Fn pmap_activate | | 775 | .Fn pmap_activate |
776 | may not always be called when | | 776 | from machine-independent code are made with preemption disabled and with |
777 | .Fa l | | 777 | .Fa l |
778 | is the current lwp. | | 778 | as the current lwp. |
779 | .Fn pmap_activate | | | |
780 | must be able to handle this scenario. | | | |
781 | .Pp | | 779 | .Pp |
782 | The | | 780 | The |
783 | .Fn pmap_activate | | 781 | .Fn pmap_activate |
784 | call, like | | 782 | call, like |
785 | .Fn pmap_deactivate , | | 783 | .Fn pmap_deactivate , |
786 | must never block, as it is used for context switching. | | 784 | must never block, as it is used for context switching. |
787 | .It void Fn "pmap_deactivate" "struct lwp *l" | | 785 | .It void Fn "pmap_deactivate" "struct lwp *l" |
788 | Deactivate the physical map used by the process behind lwp | | 786 | Deactivate the physical map used by the process behind lwp |
789 | .Fa l . | | 787 | .Fa l . |
790 | It is generally used in conjunction with | | 788 | It is generally used in conjunction with |
791 | .Fn pmap_activate . | | 789 | .Fn pmap_activate . |
792 | Like | | 790 | Like |
793 | .Fn pmap_activate , | | 791 | .Fn pmap_activate , |
794 | .Fn pmap_deactivate | | 792 | .Fn pmap_deactivate |
795 | may not always be called when | | 793 | is called by machine-independent code with preemption disabled and with |
796 | .Fa l | | 794 | .Fa l |
797 | is the current lwp. | | 795 | as the current lwp. |
798 | .Pp | | 796 | .Pp |
799 | As above, | | 797 | As above, |
800 | .Fn pmap_deactivate | | 798 | .Fn pmap_deactivate |
801 | must never block. | | 799 | must never block. |
802 | .It void Fn "pmap_zero_page" "paddr_t pa" | | 800 | .It void Fn "pmap_zero_page" "paddr_t pa" |
803 | Zero the PAGE_SIZE sized region starting at physical address | | 801 | Zero the PAGE_SIZE sized region starting at physical address |
804 | .Fa pa . | | 802 | .Fa pa . |
805 | The | | 803 | The |
806 | .Nm | | 804 | .Nm |
807 | implementation must take whatever steps are necessary to map the | | 805 | implementation must take whatever steps are necessary to map the |
808 | page to a kernel-accessible address and zero the page. | | 806 | page to a kernel-accessible address and zero the page. |
809 | It is suggested that implementations use an optimized zeroing algorithm, | | 807 | It is suggested that implementations use an optimized zeroing algorithm, |
810 | as the performance of this function directly impacts page fault performance. | | 808 | as the performance of this function directly impacts page fault performance. |
811 | The implementation may assume that the region is | | 809 | The implementation may assume that the region is |
812 | PAGE_SIZE aligned and exactly PAGE_SIZE bytes in length. | | 810 | PAGE_SIZE aligned and exactly PAGE_SIZE bytes in length. |
813 | .Pp | | 811 | .Pp |
814 | Note that the cache configuration of the platform should also be | | 812 | Note that the cache configuration of the platform should also be |
815 | considered in the implementation of | | 813 | considered in the implementation of |
816 | .Fn pmap_zero_page . | | 814 | .Fn pmap_zero_page . |
817 | For example, on systems with a physically-addressed cache, the cache | | 815 | For example, on systems with a physically-addressed cache, the cache |
818 | load caused by zeroing the page will not be wasted, as the zeroing is | | 816 | load caused by zeroing the page will not be wasted, as the zeroing is |
819 | usually done on-demand. | | 817 | usually done on-demand. |
820 | However, on systems with a virtually-addressed cached, the cache load | | 818 | However, on systems with a virtually-addressed cached, the cache load |
821 | caused by zeroing the page | | 819 | caused by zeroing the page |
822 | .Em will | | 820 | .Em will |
823 | be wasted, as the page will be mapped at a virtual address which is | | 821 | be wasted, as the page will be mapped at a virtual address which is |
824 | different from that used to zero the page. | | 822 | different from that used to zero the page. |
825 | In the virtually-addressed cache case, care should also be taken to | | 823 | In the virtually-addressed cache case, care should also be taken to |
826 | avoid cache alias problems. | | 824 | avoid cache alias problems. |
827 | .It void Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" | | 825 | .It void Fn "pmap_copy_page" "paddr_t src" "paddr_t dst" |
828 | Copy the PAGE_SIZE sized region starting at physical address | | 826 | Copy the PAGE_SIZE sized region starting at physical address |
829 | .Fa src | | 827 | .Fa src |
830 | to the same sized region starting at physical address | | 828 | to the same sized region starting at physical address |
831 | .Fa dst . | | 829 | .Fa dst . |
832 | The | | 830 | The |
833 | .Nm | | 831 | .Nm |
834 | implementation must take whatever steps are necessary to map the | | 832 | implementation must take whatever steps are necessary to map the |
835 | source and destination pages to a kernel-accessible address and | | 833 | source and destination pages to a kernel-accessible address and |
836 | perform the copy. | | 834 | perform the copy. |
837 | It is suggested that implementations use an optimized copy algorithm, | | 835 | It is suggested that implementations use an optimized copy algorithm, |
838 | as the performance of this function directly impacts page fault performance. | | 836 | as the performance of this function directly impacts page fault performance. |
839 | The implementation may assume that both regions are PAGE_SIZE aligned | | 837 | The implementation may assume that both regions are PAGE_SIZE aligned |
840 | and exactly PAGE_SIZE bytes in length. | | 838 | and exactly PAGE_SIZE bytes in length. |
841 | .Pp | | 839 | .Pp |
842 | The same cache considerations that apply to | | 840 | The same cache considerations that apply to |
843 | .Fn pmap_zero_page | | 841 | .Fn pmap_zero_page |
844 | apply to | | 842 | apply to |
845 | .Fn pmap_copy_page . | | 843 | .Fn pmap_copy_page . |
846 | .It void Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" | | 844 | .It void Fn "pmap_page_protect" "struct vm_page *pg" "vm_prot_t prot" |
847 | Lower the permissions for all mappings of the page | | 845 | Lower the permissions for all mappings of the page |
848 | .Fa pg | | 846 | .Fa pg |
849 | to | | 847 | to |
850 | .Fa prot . | | 848 | .Fa prot . |
851 | This function is used by the virtual memory system to implement | | 849 | This function is used by the virtual memory system to implement |
852 | copy-on-write (called with VM_PROT_READ set in | | 850 | copy-on-write (called with VM_PROT_READ set in |
853 | .Fa prot ) | | 851 | .Fa prot ) |
854 | and to revoke all mappings when cleaning a page (called with | | 852 | and to revoke all mappings when cleaning a page (called with |
855 | no bits set in | | 853 | no bits set in |
856 | .Fa prot ) . | | 854 | .Fa prot ) . |
857 | Access permissions must never be added to a page as a result of | | 855 | Access permissions must never be added to a page as a result of |
858 | this call. | | 856 | this call. |
859 | .It bool Fn "pmap_clear_modify" "struct vm_page *pg" | | 857 | .It bool Fn "pmap_clear_modify" "struct vm_page *pg" |
860 | Clear the | | 858 | Clear the |
861 | .Dq modified | | 859 | .Dq modified |
862 | attribute on the page | | 860 | attribute on the page |
863 | .Fa pg . | | 861 | .Fa pg . |
864 | .Pp | | 862 | .Pp |
865 | The | | 863 | The |
866 | .Fn pmap_clear_modify | | 864 | .Fn pmap_clear_modify |
867 | function returns | | 865 | function returns |
868 | .Dv true | | 866 | .Dv true |
869 | or | | 867 | or |
870 | .Dv false | | 868 | .Dv false |
871 | indicating whether or not the | | 869 | indicating whether or not the |
872 | .Dq modified | | 870 | .Dq modified |
873 | attribute was set on the page before it was cleared. | | 871 | attribute was set on the page before it was cleared. |
874 | .Pp | | 872 | .Pp |
875 | Note that this function may be provided as a C pre-processor macro. | | 873 | Note that this function may be provided as a C pre-processor macro. |
876 | .It bool Fn "pmap_clear_reference" "struct vm_page *pg" | | 874 | .It bool Fn "pmap_clear_reference" "struct vm_page *pg" |
877 | Clear the | | 875 | Clear the |
878 | .Dq referenced | | 876 | .Dq referenced |
879 | attribute on the page | | 877 | attribute on the page |
880 | .Fa pg . | | 878 | .Fa pg . |
881 | .Pp | | 879 | .Pp |
882 | The | | 880 | The |
883 | .Fn pmap_clear_reference | | 881 | .Fn pmap_clear_reference |
884 | function returns | | 882 | function returns |
885 | .Dv true | | 883 | .Dv true |
886 | or | | 884 | or |
887 | .Dv false | | 885 | .Dv false |
888 | indicating whether or not the | | 886 | indicating whether or not the |
889 | .Dq referenced | | 887 | .Dq referenced |
890 | attribute was set on the page before it was cleared. | | 888 | attribute was set on the page before it was cleared. |
891 | .Pp | | 889 | .Pp |
892 | Note that this function may be provided as a C pre-processor macro. | | 890 | Note that this function may be provided as a C pre-processor macro. |
893 | .It bool Fn "pmap_is_modified" "struct vm_page *pg" | | 891 | .It bool Fn "pmap_is_modified" "struct vm_page *pg" |
894 | Test whether or not the | | 892 | Test whether or not the |
895 | .Dq modified | | 893 | .Dq modified |
896 | attribute is set on page | | 894 | attribute is set on page |
897 | .Fa pg . | | 895 | .Fa pg . |
898 | .Pp | | 896 | .Pp |
899 | Note that this function may be provided as a C pre-processor macro. | | 897 | Note that this function may be provided as a C pre-processor macro. |
900 | .It bool Fn "pmap_is_referenced" "struct vm_page *pg" | | 898 | .It bool Fn "pmap_is_referenced" "struct vm_page *pg" |
901 | Test whether or not the | | 899 | Test whether or not the |
902 | .Dq referenced | | 900 | .Dq referenced |
903 | attribute is set on page | | 901 | attribute is set on page |
904 | .Fa pg . | | 902 | .Fa pg . |
905 | .Pp | | 903 | .Pp |
906 | Note that this function may be provided as a C pre-processor macro. | | 904 | Note that this function may be provided as a C pre-processor macro. |
907 | .It paddr_t Fn "pmap_phys_address" "paddr_t cookie" | | 905 | .It paddr_t Fn "pmap_phys_address" "paddr_t cookie" |
908 | Convert a cookie returned by a device | | 906 | Convert a cookie returned by a device |
909 | .Fn mmap | | 907 | .Fn mmap |
910 | function into a physical address. | | 908 | function into a physical address. |
911 | This function is provided to accommodate systems which have physical | | 909 | This function is provided to accommodate systems which have physical |
912 | address spaces larger than can be directly addressed by the platform's | | 910 | address spaces larger than can be directly addressed by the platform's |
913 | .Fa paddr_t | | 911 | .Fa paddr_t |
914 | type. | | 912 | type. |
915 | The existence of this function is highly dubious, and it is | | 913 | The existence of this function is highly dubious, and it is |
916 | expected that this function will be removed from the | | 914 | expected that this function will be removed from the |
917 | .Nm pmap | | 915 | .Nm pmap |
918 | API in a future release of | | 916 | API in a future release of |
919 | .Nx . | | 917 | .Nx . |
920 | .Pp | | 918 | .Pp |
921 | Note that this function may be provided as a C pre-processor macro. | | 919 | Note that this function may be provided as a C pre-processor macro. |
922 | .El | | 920 | .El |
923 | .Ss OPTIONAL FUNCTIONS | | 921 | .Ss OPTIONAL FUNCTIONS |
924 | This section describes several optional functions in the | | 922 | This section describes several optional functions in the |
925 | .Nm | | 923 | .Nm |
926 | API. | | 924 | API. |
927 | .Bl -tag -width indent -offset indent | | 925 | .Bl -tag -width indent -offset indent |
928 | .It vaddr_t Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" \ | | 926 | .It vaddr_t Fn "pmap_steal_memory" "vsize_t size" "vaddr_t *vstartp" \ |
929 | "vaddr_t *vendp" | | 927 | "vaddr_t *vendp" |
930 | This function is a bootstrap memory allocator, which may be provided | | 928 | This function is a bootstrap memory allocator, which may be provided |
931 | as an alternative to the bootstrap memory allocator used within | | 929 | as an alternative to the bootstrap memory allocator used within |
932 | .Xr uvm 9 | | 930 | .Xr uvm 9 |
933 | itself. | | 931 | itself. |
934 | It is particularly useful on systems which provide for example a direct-mapped | | 932 | It is particularly useful on systems which provide for example a direct-mapped |
935 | memory segment. | | 933 | memory segment. |
936 | This function works by stealing pages from the (to be) managed memory | | 934 | This function works by stealing pages from the (to be) managed memory |
937 | pool, which has already been provided to | | 935 | pool, which has already been provided to |
938 | .Xr uvm 9 | | 936 | .Xr uvm 9 |
939 | in the vm_physmem[] array. | | 937 | in the vm_physmem[] array. |
940 | The pages are then mapped, or otherwise made accessible to the kernel, | | 938 | The pages are then mapped, or otherwise made accessible to the kernel, |
941 | in a machine-dependent way. | | 939 | in a machine-dependent way. |
942 | The memory must be zeroed by | | 940 | The memory must be zeroed by |
943 | .Fn pmap_steal_memory . | | 941 | .Fn pmap_steal_memory . |
944 | Note that memory allocated with | | 942 | Note that memory allocated with |
945 | .Fn pmap_steal_memory | | 943 | .Fn pmap_steal_memory |
946 | will never be freed, and mappings made by | | 944 | will never be freed, and mappings made by |
947 | .Fn pmap_steal_memory | | 945 | .Fn pmap_steal_memory |
948 | must never be | | 946 | must never be |
949 | .Dq forgotten . | | 947 | .Dq forgotten . |
950 | .Pp | | 948 | .Pp |
951 | Note that | | 949 | Note that |
952 | .Fn pmap_steal_memory | | 950 | .Fn pmap_steal_memory |
953 | should not be used as a general-purpose early-startup memory | | 951 | should not be used as a general-purpose early-startup memory |
954 | allocation routine. | | 952 | allocation routine. |
955 | It is intended to be used only by the | | 953 | It is intended to be used only by the |
956 | .Fn uvm_pageboot_alloc | | 954 | .Fn uvm_pageboot_alloc |
957 | routine and its supporting routines. | | 955 | routine and its supporting routines. |
958 | If you need to allocate memory before the virtual memory system is | | 956 | If you need to allocate memory before the virtual memory system is |
959 | initialized, use | | 957 | initialized, use |
960 | .Fn uvm_pageboot_alloc . | | 958 | .Fn uvm_pageboot_alloc . |
961 | See | | 959 | See |
962 | .Xr uvm 9 | | 960 | .Xr uvm 9 |
963 | for more information. | | 961 | for more information. |
964 | .Pp | | 962 | .Pp |
965 | The | | 963 | The |
966 | .Fn pmap_steal_memory | | 964 | .Fn pmap_steal_memory |
967 | function returns the kernel-accessible address of the allocated memory. | | 965 | function returns the kernel-accessible address of the allocated memory. |
968 | If no memory can be allocated, or if allocated memory cannot be mapped, | | 966 | If no memory can be allocated, or if allocated memory cannot be mapped, |
969 | the function must panic. | | 967 | the function must panic. |
970 | .Pp | | 968 | .Pp |
971 | If the | | 969 | If the |
972 | .Fn pmap_steal_memory | | 970 | .Fn pmap_steal_memory |
973 | function uses address space from the range provided to | | 971 | function uses address space from the range provided to |
974 | .Xr uvm 9 | | 972 | .Xr uvm 9 |
975 | by the | | 973 | by the |
976 | .Fn pmap_virtual_space | | 974 | .Fn pmap_virtual_space |
977 | call, then | | 975 | call, then |
978 | .Fn pmap_steal_memory | | 976 | .Fn pmap_steal_memory |
979 | must adjust | | 977 | must adjust |
980 | .Fa *vstartp | | 978 | .Fa *vstartp |
981 | and | | 979 | and |
982 | .Fa *vendp | | 980 | .Fa *vendp |
983 | upon return. | | 981 | upon return. |
984 | .Pp | | 982 | .Pp |
985 | The | | 983 | The |
986 | .Fn pmap_steal_memory | | 984 | .Fn pmap_steal_memory |
987 | function is enabled by defining the C pre-processor macro | | 985 | function is enabled by defining the C pre-processor macro |
988 | .Dv PMAP_STEAL_MEMORY | | 986 | .Dv PMAP_STEAL_MEMORY |
989 | in | | 987 | in |
990 | .In machine/pmap.h . | | 988 | .In machine/pmap.h . |
991 | .It vaddr_t Fn "pmap_growkernel" "vaddr_t maxkvaddr" | | 989 | .It vaddr_t Fn "pmap_growkernel" "vaddr_t maxkvaddr" |
992 | Management of the kernel virtual address space is complicated by the | | 990 | Management of the kernel virtual address space is complicated by the |
993 | fact that it is not always safe to wait for resources with which to | | 991 | fact that it is not always safe to wait for resources with which to |
994 | map a kernel virtual address. | | 992 | map a kernel virtual address. |
995 | However, it is not always desirable to pre-allocate all resources | | 993 | However, it is not always desirable to pre-allocate all resources |
996 | necessary to map the entire kernel virtual address space. | | 994 | necessary to map the entire kernel virtual address space. |
997 | .Pp | | 995 | .Pp |
998 | The | | 996 | The |
999 | .Fn pmap_growkernel | | 997 | .Fn pmap_growkernel |
1000 | interface is designed to help alleviate this problem. | | 998 | interface is designed to help alleviate this problem. |
1001 | The virtual memory startup code may choose to allocate an initial set | | 999 | The virtual memory startup code may choose to allocate an initial set |
1002 | of mapping resources (e.g., page tables) and set an internal variable | | 1000 | of mapping resources (e.g., page tables) and set an internal variable |
1003 | indicating how much kernel virtual address space can be mapped using | | 1001 | indicating how much kernel virtual address space can be mapped using |
1004 | those initial resources. | | 1002 | those initial resources. |
1005 | Then, when the virtual memory system wishes to map something | | 1003 | Then, when the virtual memory system wishes to map something |
1006 | at an address beyond that initial limit, it calls | | 1004 | at an address beyond that initial limit, it calls |
1007 | .Fn pmap_growkernel | | 1005 | .Fn pmap_growkernel |
1008 | to pre-allocate more resources with which to create the mapping. | | 1006 | to pre-allocate more resources with which to create the mapping.
1009 | Note that once additional kernel virtual address space mapping resources | | 1007 | Note that once additional kernel virtual address space mapping resources |
1010 | have been allocated, they should not be freed; it is likely they will | | 1008 | have been allocated, they should not be freed; it is likely they will |
1011 | be needed again. | | 1009 | be needed again. |
1012 | .Pp | | 1010 | .Pp |
1013 | The | | 1011 | The |
1014 | .Fn pmap_growkernel | | 1012 | .Fn pmap_growkernel |
1015 | function returns the new maximum kernel virtual address that can be mapped | | 1013 | function returns the new maximum kernel virtual address that can be mapped |
1016 | with the resources it has available. | | 1014 | with the resources it has available. |
1017 | If new resources cannot be allocated, | | 1015 | If new resources cannot be allocated, |
1018 | .Fn pmap_growkernel | | 1016 | .Fn pmap_growkernel |
1019 | must panic. | | 1017 | must panic. |
1020 | .Pp | | 1018 | .Pp |
1021 | The | | 1019 | The |
1022 | .Fn pmap_growkernel | | 1020 | .Fn pmap_growkernel |
1023 | function is enabled by defining the C pre-processor macro | | 1021 | function is enabled by defining the C pre-processor macro |
1024 | .Dv PMAP_GROWKERNEL | | 1022 | .Dv PMAP_GROWKERNEL |
1025 | in | | 1023 | in |
1026 | .In machine/pmap.h . | | 1024 | .In machine/pmap.h . |
1027 | .It void Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" | | 1025 | .It void Fn "pmap_fork" "pmap_t src_map" "pmap_t dst_map" |
1028 | Some | | 1026 | Some |
1029 | .Nm | | 1027 | .Nm |
1030 | implementations may need to keep track of other information not | | 1028 | implementations may need to keep track of other information not |
1031 | directly related to the virtual address space. | | 1029 | directly related to the virtual address space. |
1032 | For example, on the i386 port, the Local Descriptor Table state of a | | 1030 | For example, on the i386 port, the Local Descriptor Table state of a |
1033 | process is associated with the pmap (this is due to the fact that | | 1031 | process is associated with the pmap (this is due to the fact that |
1034 | applications which manipulate the Local Descriptor Table directly expect it | | 1032 | applications which manipulate the Local Descriptor Table directly expect it
1035 | to be logically associated with the virtual memory state of the process). | | 1033 | to be logically associated with the virtual memory state of the process). |
1036 | .Pp | | 1034 | .Pp |
1037 | The | | 1035 | The |
1038 | .Fn pmap_fork | | 1036 | .Fn pmap_fork |
1039 | function is provided as a way to associate information from | | 1037 | function is provided as a way to associate information from |
1040 | .Fa src_map | | 1038 | .Fa src_map |
1041 | with | | 1039 | with |
1042 | .Fa dst_map | | 1040 | .Fa dst_map |
1043 | when a | | 1041 | when a |
1044 | .Dv vmspace | | 1042 | .Dv vmspace |
1045 | is forked. | | 1043 | is forked. |
1046 | .Fn pmap_fork | | 1044 | .Fn pmap_fork |
1047 | is called from | | 1045 | is called from |
1048 | .Fn uvmspace_fork . | | 1046 | .Fn uvmspace_fork . |
1049 | .Pp | | 1047 | .Pp |
1050 | The | | 1048 | The |
1051 | .Fn pmap_fork | | 1049 | .Fn pmap_fork |
1052 | function is enabled by defining the C pre-processor macro | | 1050 | function is enabled by defining the C pre-processor macro |
1053 | .Dv PMAP_FORK | | 1051 | .Dv PMAP_FORK |
1054 | in | | 1052 | in |
1055 | .In machine/pmap.h . | | 1053 | .In machine/pmap.h . |
1056 | .It vaddr_t Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" | | 1054 | .It vaddr_t Fn "PMAP_MAP_POOLPAGE" "paddr_t pa" |
1057 | This function is used by the | | 1055 | This function is used by the |
1058 | .Xr pool 9 | | 1056 | .Xr pool 9 |
1059 | memory pool manager. | | 1057 | memory pool manager. |
1060 | Pools allocate backing pages one at a time. | | 1058 | Pools allocate backing pages one at a time. |
1061 | This is provided as a means to use hardware features such as a | | 1059 | This is provided as a means to use hardware features such as a |
1062 | direct-mapped memory segment to map the pages used by the | | 1060 | direct-mapped memory segment to map the pages used by the |
1063 | .Xr pool 9 | | 1061 | .Xr pool 9 |
1064 | allocator. | | 1062 | allocator. |
1065 | This can lead to better performance by e.g. reducing TLB contention. | | 1063 | This can lead to better performance by e.g. reducing TLB contention. |
1066 | .Pp | | 1064 | .Pp |
1067 | .Fn PMAP_MAP_POOLPAGE | | 1065 | .Fn PMAP_MAP_POOLPAGE |
1068 | returns the kernel-accessible address of the page being mapped. | | 1066 | returns the kernel-accessible address of the page being mapped. |
1069 | It must always succeed. | | 1067 | It must always succeed. |
1070 | .Pp | | 1068 | .Pp |
1071 | The use of | | 1069 | The use of |
1072 | .Fn PMAP_MAP_POOLPAGE | | 1070 | .Fn PMAP_MAP_POOLPAGE |
1073 | is enabled by defining it as a C pre-processor macro in | | 1071 | is enabled by defining it as a C pre-processor macro in |
1074 | .In machine/pmap.h . | | 1072 | .In machine/pmap.h . |
1075 | If | | 1073 | If |
1076 | .Fn PMAP_MAP_POOLPAGE | | 1074 | .Fn PMAP_MAP_POOLPAGE |
1077 | is defined, | | 1075 | is defined, |
1078 | .Fn PMAP_UNMAP_POOLPAGE | | 1076 | .Fn PMAP_UNMAP_POOLPAGE |
1079 | must also be defined. | | 1077 | must also be defined. |
1080 | .Pp | | 1078 | .Pp |
1081 | The following is an example of how to define | | 1079 | The following is an example of how to define |
1082 | .Fn PMAP_MAP_POOLPAGE : | | 1080 | .Fn PMAP_MAP_POOLPAGE : |
1083 | .Bd -literal -offset indent | | 1081 | .Bd -literal -offset indent |
1084 | #define PMAP_MAP_POOLPAGE(pa) MIPS_PHYS_TO_KSEG0((pa)) | | 1082 | #define PMAP_MAP_POOLPAGE(pa) MIPS_PHYS_TO_KSEG0((pa)) |
1085 | .Ed | | 1083 | .Ed |
1086 | .Pp | | 1084 | .Pp |
1087 | This takes the physical address of a page and returns the KSEG0 | | 1085 | This takes the physical address of a page and returns the KSEG0 |
1088 | address of that page on a MIPS processor. | | 1086 | address of that page on a MIPS processor. |
1089 | .It paddr_t Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va" | | 1087 | .It paddr_t Fn "PMAP_UNMAP_POOLPAGE" "vaddr_t va" |
1090 | This function is the inverse of | | 1088 | This function is the inverse of |
1091 | .Fn PMAP_MAP_POOLPAGE . | | 1089 | .Fn PMAP_MAP_POOLPAGE . |
1092 | .Pp | | 1090 | .Pp |
1093 | .Fn PMAP_UNMAP_POOLPAGE | | 1091 | .Fn PMAP_UNMAP_POOLPAGE |
1094 | returns the physical address of the page corresponding to the | | 1092 | returns the physical address of the page corresponding to the |
1095 | provided kernel-accessible address. | | 1093 | provided kernel-accessible address. |
1096 | .Pp | | 1094 | .Pp |
1097 | The use of | | 1095 | The use of |
1098 | .Fn PMAP_UNMAP_POOLPAGE | | 1096 | .Fn PMAP_UNMAP_POOLPAGE |
1099 | is enabled by defining it as a C pre-processor macro in | | 1097 | is enabled by defining it as a C pre-processor macro in |
1100 | .In machine/pmap.h . | | 1098 | .In machine/pmap.h . |
1101 | If | | 1099 | If |
1102 | .Fn PMAP_UNMAP_POOLPAGE | | 1100 | .Fn PMAP_UNMAP_POOLPAGE |
1103 | is defined, | | 1101 | is defined, |
1104 | .Fn PMAP_MAP_POOLPAGE | | 1102 | .Fn PMAP_MAP_POOLPAGE |
1105 | must also be defined. | | 1103 | must also be defined. |
1106 | .Pp | | 1104 | .Pp |
1107 | The following is an example of how to define | | 1105 | The following is an example of how to define |
1108 | .Fn PMAP_UNMAP_POOLPAGE : | | 1106 | .Fn PMAP_UNMAP_POOLPAGE : |
1109 | .Bd -literal -offset indent | | 1107 | .Bd -literal -offset indent |
1110 | #define PMAP_UNMAP_POOLPAGE(va) MIPS_KSEG0_TO_PHYS((va)) | | 1108 | #define PMAP_UNMAP_POOLPAGE(va) MIPS_KSEG0_TO_PHYS((va))
1111 | .Ed | | 1109 | .Ed |
1112 | .Pp | | 1110 | .Pp |
1113 | This takes the KSEG0 address of a previously-mapped pool page | | 1111 | This takes the KSEG0 address of a previously-mapped pool page |
1114 | and returns the physical address of that page on a MIPS processor. | | 1112 | and returns the physical address of that page on a MIPS processor. |
1115 | .It void Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td" | | 1113 | .It void Fn "PMAP_PREFER" "vaddr_t hint" "vaddr_t *vap" "vsize_t sz" "int td" |
1116 | This function is used by | | 1114 | This function is used by |
1117 | .Xr uvm_map 9 | | 1115 | .Xr uvm_map 9 |
1118 | to adjust a virtual address being allocated in order to avoid | | 1116 | to adjust a virtual address being allocated in order to avoid |
1119 | cache alias problems. | | 1117 | cache alias problems. |
1120 | If necessary, the virtual address pointed to by | | 1118 | If necessary, the virtual address pointed to by
1121 | .Fa vap | | 1119 | .Fa vap |
1122 | will be advanced. | | 1120 | will be advanced. |
1123 | .Fa hint | | 1121 | .Fa hint |
1124 | is an object offset which will be mapped into the resulting virtual address, and | | 1122 | is an object offset which will be mapped into the resulting virtual address, and |
1125 | .Fa sz | | 1123 | .Fa sz |
1126 | is the size of the region being mapped in bytes. | | 1124 | is the size of the region being mapped in bytes.
1127 | .Fa td | | 1125 | .Fa td |
1128 | indicates if the machine dependent pmap uses the topdown VM. | | 1126 | indicates if the machine dependent pmap uses the topdown VM. |
1129 | .Pp | | 1127 | .Pp |
1130 | The use of | | 1128 | The use of |
1131 | .Fn PMAP_PREFER | | 1129 | .Fn PMAP_PREFER |
1132 | is enabled by defining it as a C pre-processor macro in | | 1130 | is enabled by defining it as a C pre-processor macro in |
1133 | .In machine/pmap.h . | | 1131 | .In machine/pmap.h . |
1134 | .It void Fn "pmap_procwr" "struct proc *p" "vaddr_t va" "vsize_t size" | | 1132 | .It void Fn "pmap_procwr" "struct proc *p" "vaddr_t va" "vsize_t size" |
1135 | Synchronize CPU instruction caches of the specified range. | | 1133 | Synchronize CPU instruction caches of the specified range. |
1136 | The address space is designated by | | 1134 | The address space is designated by |
1137 | .Fa p . | | 1135 | .Fa p . |
1138 | This function is typically used to flush instruction caches | | 1136 | This function is typically used to flush instruction caches |
1139 | after code modification. | | 1137 | after code modification. |
1140 | .Pp | | 1138 | .Pp |
1141 | The use of | | 1139 | The use of |
1142 | .Fn pmap_procwr | | 1140 | .Fn pmap_procwr |
1143 | is enabled by defining a C pre-processor macro | | 1141 | is enabled by defining a C pre-processor macro |
1144 | .Dv PMAP_NEED_PROCWR | | 1142 | .Dv PMAP_NEED_PROCWR |
1145 | in | | 1143 | in |
1146 | .In machine/pmap.h . | | 1144 | .In machine/pmap.h . |
1147 | .El | | 1145 | .El |
1148 | .Sh SEE ALSO | | 1146 | .Sh SEE ALSO |
1149 | .Xr uvm 9 | | 1147 | .Xr uvm 9 |
1150 | .Sh HISTORY | | 1148 | .Sh HISTORY |
1151 | The | | 1149 | The |
1152 | .Nm | | 1150 | .Nm |
1153 | module was originally part of the design of the virtual memory system | | 1151 | module was originally part of the design of the virtual memory system |
1154 | in the Mach Operating System. | | 1152 | in the Mach Operating System. |
1155 | The goal was to provide a clean separation between the machine-independent | | 1153 | The goal was to provide a clean separation between the machine-independent |
1156 | and the machine-dependent portions of the virtual memory system, in | | 1154 | and the machine-dependent portions of the virtual memory system, in |
1157 | stark contrast to the original | | 1155 | stark contrast to the original |
1158 | .Bx 3 | | 1156 | .Bx 3 |
1159 | virtual memory system, which was specific to the VAX. | | 1157 | virtual memory system, which was specific to the VAX. |
1160 | .Pp | | 1158 | .Pp |
1161 | Between | | 1159 | Between |
1162 | .Bx 4.3 | | 1160 | .Bx 4.3 |
1163 | and | | 1161 | and |
1164 | .Bx 4.4 , | | 1162 | .Bx 4.4 , |
1165 | the Mach virtual memory system, including the | | 1163 | the Mach virtual memory system, including the |
1166 | .Nm | | 1164 | .Nm |
1167 | API, was ported to | | 1165 | API, was ported to |
1168 | .Bx | | 1166 | .Bx |
1169 | and included in the | | 1167 | and included in the |
1170 | .Bx 4.4 | | 1168 | .Bx 4.4 |
1171 | release. | | 1169 | release. |
1172 | .Pp | | 1170 | .Pp |
1173 | .Nx | | 1171 | .Nx |
1174 | inherited the | | 1172 | inherited the |
1175 | .Bx | | 1173 | .Bx |
1176 | version of the Mach virtual memory system. | | 1174 | version of the Mach virtual memory system. |
1177 | .Nx 1.4 | | 1175 | .Nx 1.4 |
1178 | was the first | | 1176 | was the first |
1179 | .Nx | | 1177 | .Nx |
1180 | release with the new | | 1178 | release with the new |
1181 | .Xr uvm 9 | | 1179 | .Xr uvm 9 |
1182 | virtual memory system, which included several changes to the | | 1180 | virtual memory system, which included several changes to the |
1183 | .Nm | | 1181 | .Nm |
1184 | API. | | 1182 | API. |
1185 | Since the introduction of | | 1183 | Since the introduction of |
1186 | .Xr uvm 9 , | | 1184 | .Xr uvm 9 , |
1187 | the | | 1185 | the |
1188 | .Nm | | 1186 | .Nm |
1189 | API has evolved further. | | 1187 | API has evolved further. |
1190 | .Sh AUTHORS | | 1188 | .Sh AUTHORS |
1191 | The original Mach VAX | | 1189 | The original Mach VAX |
1192 | .Nm | | 1190 | .Nm |
1193 | module was written by | | 1191 | module was written by |
1194 | .An Avadis Tevanian, Jr. | | 1192 | .An Avadis Tevanian, Jr. |
1195 | and | | 1193 | and |
1196 | .An Michael Wayne Young . | | 1194 | .An Michael Wayne Young . |
1197 | .Pp | | 1195 | .Pp |
1198 | .An Mike Hibler | | 1196 | .An Mike Hibler |
1199 | did the integration of the Mach virtual memory system into | | 1197 | did the integration of the Mach virtual memory system into |
1200 | .Bx 4.4 | | 1198 | .Bx 4.4 |
1201 | and implemented a | | 1199 | and implemented a |
1202 | .Nm | | 1200 | .Nm |
1203 | module for the Motorola 68020+68851/68030/68040. | | 1201 | module for the Motorola 68020+68851/68030/68040. |
1204 | .Pp | | 1202 | .Pp |
1205 | The | | 1203 | The |
1206 | .Nm | | 1204 | .Nm |
1207 | API as it exists in | | 1205 | API as it exists in |
1208 | .Nx | | 1206 | .Nx |
1209 | is derived from | | 1207 | is derived from |
1210 | .Bx 4.4 , | | 1208 | .Bx 4.4 , |
1211 | and has been modified by | | 1209 | and has been modified by |
1212 | .An Chuck Cranor , | | 1210 | .An Chuck Cranor , |
1213 | .An Charles M. Hannum , | | 1211 | .An Charles M. Hannum , |
1214 | .An Chuck Silvers , | | 1212 | .An Chuck Silvers , |
1215 | .An Wolfgang Solfrank , | | 1213 | .An Wolfgang Solfrank , |
1216 | .An Bill Sommerfeld , | | 1214 | .An Bill Sommerfeld , |
1217 | and | | 1215 | and |
1218 | .An Jason R. Thorpe . | | 1216 | .An Jason R. Thorpe . |
1219 | .Pp | | 1217 | .Pp |
1220 | The author of this document is | | 1218 | The author of this document is |
1221 | .An Jason R. Thorpe | | 1219 | .An Jason R. Thorpe |
1222 | .Aq thorpej@NetBSD.org . | | 1220 | .Aq thorpej@NetBSD.org . |
1223 | .Sh BUGS | | 1221 | .Sh BUGS |
1224 | The use and definition of | | 1222 | The use and definition of |
1225 | .Fn pmap_activate | | 1223 | .Fn pmap_activate |
1226 | and | | 1224 | and |
1227 | .Fn pmap_deactivate | | 1225 | .Fn pmap_deactivate |
1228 | needs to be reexamined. | | 1226 | needs to be reexamined. |
1229 | .Pp | | 1227 | .Pp |
1230 | The use of | | 1228 | The use of |
1231 | .Fn pmap_copy | | 1229 | .Fn pmap_copy |
1232 | needs to be reexamined. | | 1230 | needs to be reexamined. |
1233 | Empirical evidence suggests that performance of the system suffers when | | 1231 | Empirical evidence suggests that performance of the system suffers when |
1234 | .Fn pmap_copy | | 1232 | .Fn pmap_copy |
1235 | actually performs its defined function. | | 1233 | actually performs its defined function. |
1236 | This is largely due to the fact that the copy of the virtual-to-physical | | 1234 | This is largely due to the fact that the copy of the virtual-to-physical |
1237 | mappings is wasted if the process calls | | 1235 | mappings is wasted if the process calls |
1238 | .Xr execve 2 | | 1236 | .Xr execve 2 |
1239 | after | | 1237 | after |
1240 | .Xr fork 2 . | | 1238 | .Xr fork 2 . |
1241 | For this reason, it is recommended that | | 1239 | For this reason, it is recommended that |
1242 | .Nm | | 1240 | .Nm |
1243 | implementations leave the body of the | | 1241 | implementations leave the body of the |
1244 | .Fn pmap_copy | | 1242 | .Fn pmap_copy |
1245 | function empty for now. | | 1243 | function empty for now. |