| @@ -1,1574 +1,1574 @@ | | | @@ -1,1574 +1,1574 @@ |
1 | /* $NetBSD: uvm_map.c,v 1.284 2009/11/07 07:27:49 cegger Exp $ */ | | 1 | /* $NetBSD: uvm_map.c,v 1.285 2009/12/14 21:19:47 matt Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. | | 4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. |
5 | * Copyright (c) 1991, 1993, The Regents of the University of California. | | 5 | * Copyright (c) 1991, 1993, The Regents of the University of California. |
6 | * | | 6 | * |
7 | * All rights reserved. | | 7 | * All rights reserved. |
8 | * | | 8 | * |
9 | * This code is derived from software contributed to Berkeley by | | 9 | * This code is derived from software contributed to Berkeley by |
10 | * The Mach Operating System project at Carnegie-Mellon University. | | 10 | * The Mach Operating System project at Carnegie-Mellon University. |
11 | * | | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | | 12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions | | 13 | * modification, are permitted provided that the following conditions |
14 | * are met: | | 14 | * are met: |
15 | * 1. Redistributions of source code must retain the above copyright | | 15 | * 1. Redistributions of source code must retain the above copyright |
16 | * notice, this list of conditions and the following disclaimer. | | 16 | * notice, this list of conditions and the following disclaimer. |
17 | * 2. Redistributions in binary form must reproduce the above copyright | | 17 | * 2. Redistributions in binary form must reproduce the above copyright |
18 | * notice, this list of conditions and the following disclaimer in the | | 18 | * notice, this list of conditions and the following disclaimer in the |
19 | * documentation and/or other materials provided with the distribution. | | 19 | * documentation and/or other materials provided with the distribution. |
20 | * 3. All advertising materials mentioning features or use of this software | | 20 | * 3. All advertising materials mentioning features or use of this software |
21 | * must display the following acknowledgement: | | 21 | * must display the following acknowledgement: |
22 | * This product includes software developed by Charles D. Cranor, | | 22 | * This product includes software developed by Charles D. Cranor, |
23 | * Washington University, the University of California, Berkeley and | | 23 | * Washington University, the University of California, Berkeley and |
24 | * its contributors. | | 24 | * its contributors. |
25 | * 4. Neither the name of the University nor the names of its contributors | | 25 | * 4. Neither the name of the University nor the names of its contributors |
26 | * may be used to endorse or promote products derived from this software | | 26 | * may be used to endorse or promote products derived from this software |
27 | * without specific prior written permission. | | 27 | * without specific prior written permission. |
28 | * | | 28 | * |
29 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 29 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
30 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 30 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
33 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 33 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
34 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 34 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
35 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 35 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
36 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 36 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
37 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 37 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
38 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 38 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
39 | * SUCH DAMAGE. | | 39 | * SUCH DAMAGE. |
40 | * | | 40 | * |
41 | * @(#)vm_map.c 8.3 (Berkeley) 1/12/94 | | 41 | * @(#)vm_map.c 8.3 (Berkeley) 1/12/94 |
42 | * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp | | 42 | * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp |
43 | * | | 43 | * |
44 | * | | 44 | * |
45 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. | | 45 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. |
46 | * All rights reserved. | | 46 | * All rights reserved. |
47 | * | | 47 | * |
48 | * Permission to use, copy, modify and distribute this software and | | 48 | * Permission to use, copy, modify and distribute this software and |
49 | * its documentation is hereby granted, provided that both the copyright | | 49 | * its documentation is hereby granted, provided that both the copyright |
50 | * notice and this permission notice appear in all copies of the | | 50 | * notice and this permission notice appear in all copies of the |
51 | * software, derivative works or modified versions, and any portions | | 51 | * software, derivative works or modified versions, and any portions |
52 | * thereof, and that both notices appear in supporting documentation. | | 52 | * thereof, and that both notices appear in supporting documentation. |
53 | * | | 53 | * |
54 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | | 54 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
55 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | | 55 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
56 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | | 56 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
57 | * | | 57 | * |
58 | * Carnegie Mellon requests users of this software to return to | | 58 | * Carnegie Mellon requests users of this software to return to |
59 | * | | 59 | * |
60 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | | 60 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
61 | * School of Computer Science | | 61 | * School of Computer Science |
62 | * Carnegie Mellon University | | 62 | * Carnegie Mellon University |
63 | * Pittsburgh PA 15213-3890 | | 63 | * Pittsburgh PA 15213-3890 |
64 | * | | 64 | * |
65 | * any improvements or extensions that they make and grant Carnegie the | | 65 | * any improvements or extensions that they make and grant Carnegie the |
66 | * rights to redistribute these changes. | | 66 | * rights to redistribute these changes. |
67 | */ | | 67 | */ |
68 | | | 68 | |
69 | /* | | 69 | /* |
70 | * uvm_map.c: uvm map operations | | 70 | * uvm_map.c: uvm map operations |
71 | */ | | 71 | */ |
72 | | | 72 | |
73 | #include <sys/cdefs.h> | | 73 | #include <sys/cdefs.h> |
74 | __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.284 2009/11/07 07:27:49 cegger Exp $"); | | 74 | __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.285 2009/12/14 21:19:47 matt Exp $"); |
75 | | | 75 | |
76 | #include "opt_ddb.h" | | 76 | #include "opt_ddb.h" |
77 | #include "opt_uvmhist.h" | | 77 | #include "opt_uvmhist.h" |
78 | #include "opt_uvm.h" | | 78 | #include "opt_uvm.h" |
79 | #include "opt_sysv.h" | | 79 | #include "opt_sysv.h" |
80 | | | 80 | |
81 | #include <sys/param.h> | | 81 | #include <sys/param.h> |
82 | #include <sys/systm.h> | | 82 | #include <sys/systm.h> |
83 | #include <sys/mman.h> | | 83 | #include <sys/mman.h> |
84 | #include <sys/proc.h> | | 84 | #include <sys/proc.h> |
85 | #include <sys/malloc.h> | | 85 | #include <sys/malloc.h> |
86 | #include <sys/pool.h> | | 86 | #include <sys/pool.h> |
87 | #include <sys/kernel.h> | | 87 | #include <sys/kernel.h> |
88 | #include <sys/mount.h> | | 88 | #include <sys/mount.h> |
89 | #include <sys/vnode.h> | | 89 | #include <sys/vnode.h> |
90 | #include <sys/lockdebug.h> | | 90 | #include <sys/lockdebug.h> |
91 | #include <sys/atomic.h> | | 91 | #include <sys/atomic.h> |
92 | | | 92 | |
93 | #ifdef SYSVSHM | | 93 | #ifdef SYSVSHM |
94 | #include <sys/shm.h> | | 94 | #include <sys/shm.h> |
95 | #endif | | 95 | #endif |
96 | | | 96 | |
97 | #include <uvm/uvm.h> | | 97 | #include <uvm/uvm.h> |
98 | #include <uvm/uvm_readahead.h> | | 98 | #include <uvm/uvm_readahead.h> |
99 | | | 99 | |
100 | #if defined(DDB) || defined(DEBUGPRINT) | | 100 | #if defined(DDB) || defined(DEBUGPRINT) |
101 | #include <uvm/uvm_ddb.h> | | 101 | #include <uvm/uvm_ddb.h> |
102 | #endif | | 102 | #endif |
103 | | | 103 | |
104 | #if !defined(UVMMAP_COUNTERS) | | 104 | #if !defined(UVMMAP_COUNTERS) |
105 | | | 105 | |
106 | #define UVMMAP_EVCNT_DEFINE(name) /* nothing */ | | 106 | #define UVMMAP_EVCNT_DEFINE(name) /* nothing */ |
107 | #define UVMMAP_EVCNT_INCR(ev) /* nothing */ | | 107 | #define UVMMAP_EVCNT_INCR(ev) /* nothing */ |
108 | #define UVMMAP_EVCNT_DECR(ev) /* nothing */ | | 108 | #define UVMMAP_EVCNT_DECR(ev) /* nothing */ |
109 | | | 109 | |
110 | #else /* !defined(UVMMAP_COUNTERS) */ | | 110 | #else /* !defined(UVMMAP_COUNTERS) */ |
111 | | | 111 | |
112 | #include <sys/evcnt.h> | | 112 | #include <sys/evcnt.h> |
113 | #define UVMMAP_EVCNT_DEFINE(name) \ | | 113 | #define UVMMAP_EVCNT_DEFINE(name) \ |
114 | struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \ | | 114 | struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \ |
115 | "uvmmap", #name); \ | | 115 | "uvmmap", #name); \ |
116 | EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name); | | 116 | EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name); |
117 | #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++ | | 117 | #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++ |
118 | #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count-- | | 118 | #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count-- |
119 | | | 119 | |
120 | #endif /* defined(UVMMAP_NOCOUNTERS) */ | | 120 | #endif /* defined(UVMMAP_NOCOUNTERS) */ |
121 | | | 121 | |
122 | UVMMAP_EVCNT_DEFINE(ubackmerge) | | 122 | UVMMAP_EVCNT_DEFINE(ubackmerge) |
123 | UVMMAP_EVCNT_DEFINE(uforwmerge) | | 123 | UVMMAP_EVCNT_DEFINE(uforwmerge) |
124 | UVMMAP_EVCNT_DEFINE(ubimerge) | | 124 | UVMMAP_EVCNT_DEFINE(ubimerge) |
125 | UVMMAP_EVCNT_DEFINE(unomerge) | | 125 | UVMMAP_EVCNT_DEFINE(unomerge) |
126 | UVMMAP_EVCNT_DEFINE(kbackmerge) | | 126 | UVMMAP_EVCNT_DEFINE(kbackmerge) |
127 | UVMMAP_EVCNT_DEFINE(kforwmerge) | | 127 | UVMMAP_EVCNT_DEFINE(kforwmerge) |
128 | UVMMAP_EVCNT_DEFINE(kbimerge) | | 128 | UVMMAP_EVCNT_DEFINE(kbimerge) |
129 | UVMMAP_EVCNT_DEFINE(knomerge) | | 129 | UVMMAP_EVCNT_DEFINE(knomerge) |
130 | UVMMAP_EVCNT_DEFINE(map_call) | | 130 | UVMMAP_EVCNT_DEFINE(map_call) |
131 | UVMMAP_EVCNT_DEFINE(mlk_call) | | 131 | UVMMAP_EVCNT_DEFINE(mlk_call) |
132 | UVMMAP_EVCNT_DEFINE(mlk_hint) | | 132 | UVMMAP_EVCNT_DEFINE(mlk_hint) |
133 | UVMMAP_EVCNT_DEFINE(mlk_list) | | 133 | UVMMAP_EVCNT_DEFINE(mlk_list) |
134 | UVMMAP_EVCNT_DEFINE(mlk_tree) | | 134 | UVMMAP_EVCNT_DEFINE(mlk_tree) |
135 | UVMMAP_EVCNT_DEFINE(mlk_treeloop) | | 135 | UVMMAP_EVCNT_DEFINE(mlk_treeloop) |
136 | UVMMAP_EVCNT_DEFINE(mlk_listloop) | | 136 | UVMMAP_EVCNT_DEFINE(mlk_listloop) |
137 | | | 137 | |
138 | UVMMAP_EVCNT_DEFINE(uke_alloc) | | 138 | UVMMAP_EVCNT_DEFINE(uke_alloc) |
139 | UVMMAP_EVCNT_DEFINE(uke_free) | | 139 | UVMMAP_EVCNT_DEFINE(uke_free) |
140 | UVMMAP_EVCNT_DEFINE(ukh_alloc) | | 140 | UVMMAP_EVCNT_DEFINE(ukh_alloc) |
141 | UVMMAP_EVCNT_DEFINE(ukh_free) | | 141 | UVMMAP_EVCNT_DEFINE(ukh_free) |
142 | | | 142 | |
143 | const char vmmapbsy[] = "vmmapbsy"; | | 143 | const char vmmapbsy[] = "vmmapbsy"; |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * cache for vmspace structures. | | 146 | * cache for vmspace structures. |
147 | */ | | 147 | */ |
148 | | | 148 | |
149 | static struct pool_cache uvm_vmspace_cache; | | 149 | static struct pool_cache uvm_vmspace_cache; |
150 | | | 150 | |
151 | /* | | 151 | /* |
152 | * cache for dynamically-allocated map entries. | | 152 | * cache for dynamically-allocated map entries. |
153 | */ | | 153 | */ |
154 | | | 154 | |
155 | static struct pool_cache uvm_map_entry_cache; | | 155 | static struct pool_cache uvm_map_entry_cache; |
156 | | | 156 | |
157 | MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures"); | | 157 | MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures"); |
158 | MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap"); | | 158 | MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap"); |
159 | | | 159 | |
160 | #ifdef PMAP_GROWKERNEL | | 160 | #ifdef PMAP_GROWKERNEL |
161 | /* | | 161 | /* |
162 | * This global represents the end of the kernel virtual address | | 162 | * This global represents the end of the kernel virtual address |
163 | * space. If we want to exceed this, we must grow the kernel | | 163 | * space. If we want to exceed this, we must grow the kernel |
164 | * virtual address space dynamically. | | 164 | * virtual address space dynamically. |
165 | * | | 165 | * |
166 | * Note, this variable is locked by kernel_map's lock. | | 166 | * Note, this variable is locked by kernel_map's lock. |
167 | */ | | 167 | */ |
168 | vaddr_t uvm_maxkaddr; | | 168 | vaddr_t uvm_maxkaddr; |
169 | #endif | | 169 | #endif |
170 | | | 170 | |
171 | /* | | 171 | /* |
172 | * macros | | 172 | * macros |
173 | */ | | 173 | */ |
174 | | | 174 | |
175 | /* | | 175 | /* |
176 | * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used | | 176 | * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used |
177 | * for the vm_map. | | 177 | * for the vm_map. |
178 | */ | | 178 | */ |
179 | extern struct vm_map *pager_map; /* XXX */ | | 179 | extern struct vm_map *pager_map; /* XXX */ |
180 | #define VM_MAP_USE_KMAPENT_FLAGS(flags) \ | | 180 | #define VM_MAP_USE_KMAPENT_FLAGS(flags) \ |
181 | (((flags) & VM_MAP_INTRSAFE) != 0) | | 181 | (((flags) & VM_MAP_INTRSAFE) != 0) |
182 | #define VM_MAP_USE_KMAPENT(map) \ | | 182 | #define VM_MAP_USE_KMAPENT(map) \ |
183 | (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map) | | 183 | (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map) |
184 | | | 184 | |
185 | /* | | 185 | /* |
186 | * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging | | 186 | * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging |
187 | */ | | 187 | */ |
188 | | | 188 | |
189 | #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \ | | 189 | #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \ |
190 | prot, maxprot, inh, adv, wire) \ | | 190 | prot, maxprot, inh, adv, wire) \ |
191 | ((ent)->etype == (type) && \ | | 191 | ((ent)->etype == (type) && \ |
192 | (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \ | | 192 | (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \ |
193 | == 0 && \ | | 193 | == 0 && \ |
194 | (ent)->object.uvm_obj == (uobj) && \ | | 194 | (ent)->object.uvm_obj == (uobj) && \ |
195 | (ent)->protection == (prot) && \ | | 195 | (ent)->protection == (prot) && \ |
196 | (ent)->max_protection == (maxprot) && \ | | 196 | (ent)->max_protection == (maxprot) && \ |
197 | (ent)->inheritance == (inh) && \ | | 197 | (ent)->inheritance == (inh) && \ |
198 | (ent)->advice == (adv) && \ | | 198 | (ent)->advice == (adv) && \ |
199 | (ent)->wired_count == (wire)) | | 199 | (ent)->wired_count == (wire)) |
200 | | | 200 | |
201 | /* | | 201 | /* |
202 | * uvm_map_entry_link: insert entry into a map | | 202 | * uvm_map_entry_link: insert entry into a map |
203 | * | | 203 | * |
204 | * => map must be locked | | 204 | * => map must be locked |
205 | */ | | 205 | */ |
206 | #define uvm_map_entry_link(map, after_where, entry) do { \ | | 206 | #define uvm_map_entry_link(map, after_where, entry) do { \ |
207 | uvm_mapent_check(entry); \ | | 207 | uvm_mapent_check(entry); \ |
208 | (map)->nentries++; \ | | 208 | (map)->nentries++; \ |
209 | (entry)->prev = (after_where); \ | | 209 | (entry)->prev = (after_where); \ |
210 | (entry)->next = (after_where)->next; \ | | 210 | (entry)->next = (after_where)->next; \ |
211 | (entry)->prev->next = (entry); \ | | 211 | (entry)->prev->next = (entry); \ |
212 | (entry)->next->prev = (entry); \ | | 212 | (entry)->next->prev = (entry); \ |
213 | uvm_rb_insert((map), (entry)); \ | | 213 | uvm_rb_insert((map), (entry)); \ |
214 | } while (/*CONSTCOND*/ 0) | | 214 | } while (/*CONSTCOND*/ 0) |
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * uvm_map_entry_unlink: remove entry from a map | | 217 | * uvm_map_entry_unlink: remove entry from a map |
218 | * | | 218 | * |
219 | * => map must be locked | | 219 | * => map must be locked |
220 | */ | | 220 | */ |
221 | #define uvm_map_entry_unlink(map, entry) do { \ | | 221 | #define uvm_map_entry_unlink(map, entry) do { \ |
222 | KASSERT((entry) != (map)->first_free); \ | | 222 | KASSERT((entry) != (map)->first_free); \ |
223 | KASSERT((entry) != (map)->hint); \ | | 223 | KASSERT((entry) != (map)->hint); \ |
224 | uvm_mapent_check(entry); \ | | 224 | uvm_mapent_check(entry); \ |
225 | (map)->nentries--; \ | | 225 | (map)->nentries--; \ |
226 | (entry)->next->prev = (entry)->prev; \ | | 226 | (entry)->next->prev = (entry)->prev; \ |
227 | (entry)->prev->next = (entry)->next; \ | | 227 | (entry)->prev->next = (entry)->next; \ |
228 | uvm_rb_remove((map), (entry)); \ | | 228 | uvm_rb_remove((map), (entry)); \ |
229 | } while (/*CONSTCOND*/ 0) | | 229 | } while (/*CONSTCOND*/ 0) |
230 | | | 230 | |
231 | /* | | 231 | /* |
232 | * SAVE_HINT: saves the specified entry as the hint for future lookups. | | 232 | * SAVE_HINT: saves the specified entry as the hint for future lookups. |
233 | * | | 233 | * |
234 | * => map need not be locked. | | 234 | * => map need not be locked. |
235 | */ | | 235 | */ |
236 | #define SAVE_HINT(map, check, value) do { \ | | 236 | #define SAVE_HINT(map, check, value) do { \ |
237 | if ((map)->hint == (check)) \ | | 237 | if ((map)->hint == (check)) \ |
238 | (map)->hint = (value); \ | | 238 | (map)->hint = (value); \ |
239 | } while (/*CONSTCOND*/ 0) | | 239 | } while (/*CONSTCOND*/ 0) |
240 | | | 240 | |
241 | /* | | 241 | /* |
242 | * clear_hints: ensure that hints don't point to the entry. | | 242 | * clear_hints: ensure that hints don't point to the entry. |
243 | * | | 243 | * |
244 | * => map must be write-locked. | | 244 | * => map must be write-locked. |
245 | */ | | 245 | */ |
246 | static void | | 246 | static void |
247 | clear_hints(struct vm_map *map, struct vm_map_entry *ent) | | 247 | clear_hints(struct vm_map *map, struct vm_map_entry *ent) |
248 | { | | 248 | { |
249 | | | 249 | |
250 | SAVE_HINT(map, ent, ent->prev); | | 250 | SAVE_HINT(map, ent, ent->prev); |
251 | if (map->first_free == ent) { | | 251 | if (map->first_free == ent) { |
252 | map->first_free = ent->prev; | | 252 | map->first_free = ent->prev; |
253 | } | | 253 | } |
254 | } | | 254 | } |
255 | | | 255 | |
256 | /* | | 256 | /* |
257 | * VM_MAP_RANGE_CHECK: check and correct range | | 257 | * VM_MAP_RANGE_CHECK: check and correct range |
258 | * | | 258 | * |
259 | * => map must at least be read locked | | 259 | * => map must at least be read locked |
260 | */ | | 260 | */ |
261 | | | 261 | |
262 | #define VM_MAP_RANGE_CHECK(map, start, end) do { \ | | 262 | #define VM_MAP_RANGE_CHECK(map, start, end) do { \ |
263 | if (start < vm_map_min(map)) \ | | 263 | if (start < vm_map_min(map)) \ |
264 | start = vm_map_min(map); \ | | 264 | start = vm_map_min(map); \ |
265 | if (end > vm_map_max(map)) \ | | 265 | if (end > vm_map_max(map)) \ |
266 | end = vm_map_max(map); \ | | 266 | end = vm_map_max(map); \ |
267 | if (start > end) \ | | 267 | if (start > end) \ |
268 | start = end; \ | | 268 | start = end; \ |
269 | } while (/*CONSTCOND*/ 0) | | 269 | } while (/*CONSTCOND*/ 0) |
270 | | | 270 | |
271 | /* | | 271 | /* |
272 | * local prototypes | | 272 | * local prototypes |
273 | */ | | 273 | */ |
274 | | | 274 | |
275 | static struct vm_map_entry * | | 275 | static struct vm_map_entry * |
276 | uvm_mapent_alloc(struct vm_map *, int); | | 276 | uvm_mapent_alloc(struct vm_map *, int); |
277 | static struct vm_map_entry * | | 277 | static struct vm_map_entry * |
278 | uvm_mapent_alloc_split(struct vm_map *, | | 278 | uvm_mapent_alloc_split(struct vm_map *, |
279 | const struct vm_map_entry *, int, | | 279 | const struct vm_map_entry *, int, |
280 | struct uvm_mapent_reservation *); | | 280 | struct uvm_mapent_reservation *); |
281 | static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *); | | 281 | static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *); |
282 | static void uvm_mapent_free(struct vm_map_entry *); | | 282 | static void uvm_mapent_free(struct vm_map_entry *); |
283 | #if defined(DEBUG) | | 283 | #if defined(DEBUG) |
284 | static void _uvm_mapent_check(const struct vm_map_entry *, const char *, | | 284 | static void _uvm_mapent_check(const struct vm_map_entry *, const char *, |
285 | int); | | 285 | int); |
286 | #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__) | | 286 | #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__) |
287 | #else /* defined(DEBUG) */ | | 287 | #else /* defined(DEBUG) */ |
288 | #define uvm_mapent_check(e) /* nothing */ | | 288 | #define uvm_mapent_check(e) /* nothing */ |
289 | #endif /* defined(DEBUG) */ | | 289 | #endif /* defined(DEBUG) */ |
290 | static struct vm_map_entry * | | 290 | static struct vm_map_entry * |
291 | uvm_kmapent_alloc(struct vm_map *, int); | | 291 | uvm_kmapent_alloc(struct vm_map *, int); |
292 | static void uvm_kmapent_free(struct vm_map_entry *); | | 292 | static void uvm_kmapent_free(struct vm_map_entry *); |
293 | static vsize_t uvm_kmapent_overhead(vsize_t); | | 293 | static vsize_t uvm_kmapent_overhead(vsize_t); |
294 | | | 294 | |
295 | static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *); | | 295 | static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *); |
296 | static void uvm_map_reference_amap(struct vm_map_entry *, int); | | 296 | static void uvm_map_reference_amap(struct vm_map_entry *, int); |
297 | static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int, | | 297 | static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int, |
298 | struct vm_map_entry *); | | 298 | struct vm_map_entry *); |
299 | static void uvm_map_unreference_amap(struct vm_map_entry *, int); | | 299 | static void uvm_map_unreference_amap(struct vm_map_entry *, int); |
300 | | | 300 | |
301 | int _uvm_map_sanity(struct vm_map *); | | 301 | int _uvm_map_sanity(struct vm_map *); |
302 | int _uvm_tree_sanity(struct vm_map *); | | 302 | int _uvm_tree_sanity(struct vm_map *); |
303 | static vsize_t uvm_rb_maxgap(const struct vm_map_entry *); | | 303 | static vsize_t uvm_rb_maxgap(const struct vm_map_entry *); |
304 | | | 304 | |
305 | CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0); | | 305 | CTASSERT(offsetof(struct vm_map_entry, rb_node) == 0); |
306 | #define ROOT_ENTRY(map) ((struct vm_map_entry *)(map)->rb_tree.rbt_root) | | 306 | #define ROOT_ENTRY(map) ((struct vm_map_entry *)(map)->rb_tree.rbt_root) |
307 | #define LEFT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_left) | | 307 | #define LEFT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_left) |
308 | #define RIGHT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_right) | | 308 | #define RIGHT_ENTRY(entry) ((struct vm_map_entry *)(entry)->rb_node.rb_right) |
309 | #define PARENT_ENTRY(map, entry) \ | | 309 | #define PARENT_ENTRY(map, entry) \ |
310 | (ROOT_ENTRY(map) == (entry) \ | | 310 | (ROOT_ENTRY(map) == (entry) \ |
311 | ? NULL \ | | 311 | ? NULL \ |
312 | : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node)) | | 312 | : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node)) |
313 | | | 313 | |
314 | static int | | 314 | static int |
315 | uvm_map_compare_nodes(const struct rb_node *nparent, | | 315 | uvm_map_compare_nodes(const struct rb_node *nparent, |
316 | const struct rb_node *nkey) | | 316 | const struct rb_node *nkey) |
317 | { | | 317 | { |
318 | const struct vm_map_entry *eparent = (const void *) nparent; | | 318 | const struct vm_map_entry *eparent = (const void *) nparent; |
319 | const struct vm_map_entry *ekey = (const void *) nkey; | | 319 | const struct vm_map_entry *ekey = (const void *) nkey; |
320 | | | 320 | |
321 | KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end); | | 321 | KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end); |
322 | KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end); | | 322 | KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end); |
323 | | | 323 | |
324 | if (ekey->start < eparent->start) | | 324 | if (ekey->start < eparent->start) |
325 | return -1; | | 325 | return -1; |
326 | if (ekey->start >= eparent->end) | | 326 | if (ekey->start >= eparent->end) |
327 | return 1; | | 327 | return 1; |
328 | return 0; | | 328 | return 0; |
329 | } | | 329 | } |
330 | | | 330 | |
331 | static int | | 331 | static int |
332 | uvm_map_compare_key(const struct rb_node *nparent, const void *vkey) | | 332 | uvm_map_compare_key(const struct rb_node *nparent, const void *vkey) |
333 | { | | 333 | { |
334 | const struct vm_map_entry *eparent = (const void *) nparent; | | 334 | const struct vm_map_entry *eparent = (const void *) nparent; |
335 | const vaddr_t va = *(const vaddr_t *) vkey; | | 335 | const vaddr_t va = *(const vaddr_t *) vkey; |
336 | | | 336 | |
337 | if (va < eparent->start) | | 337 | if (va < eparent->start) |
338 | return -1; | | 338 | return -1; |
339 | if (va >= eparent->end) | | 339 | if (va >= eparent->end) |
340 | return 1; | | 340 | return 1; |
341 | return 0; | | 341 | return 0; |
342 | } | | 342 | } |
343 | | | 343 | |
344 | static const struct rb_tree_ops uvm_map_tree_ops = { | | 344 | static const struct rb_tree_ops uvm_map_tree_ops = { |
345 | .rbto_compare_nodes = uvm_map_compare_nodes, | | 345 | .rbto_compare_nodes = uvm_map_compare_nodes, |
346 | .rbto_compare_key = uvm_map_compare_key, | | 346 | .rbto_compare_key = uvm_map_compare_key, |
347 | }; | | 347 | }; |
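The two comparators above define the tree's lookup semantics: a key address matches an entry when it falls inside that entry's [start, end) range, and otherwise the sign of the return value steers the search left or right. A minimal standalone model of that three-way comparison (plain C, not the kernel code; the struct and helper names are invented for illustration):

    #include <stddef.h>

    struct range { unsigned long start, end; };     /* models an entry's [start, end) */

    /* Same convention as uvm_map_compare_key(): <0 search left, >0 right, 0 match. */
    static int
    range_compare_key(const struct range *r, unsigned long va)
    {
            if (va < r->start)
                    return -1;
            if (va >= r->end)
                    return 1;
            return 0;
    }

    /* A sorted, non-overlapping array stands in for the red-black tree. */
    static const struct range *
    range_lookup(const struct range *v, size_t n, unsigned long va)
    {
            size_t lo = 0, hi = n;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;
                    int c = range_compare_key(&v[mid], va);

                    if (c == 0)
                            return &v[mid];
                    if (c < 0)
                            hi = mid;       /* address lies before this entry */
                    else
                            lo = mid + 1;   /* address lies at or beyond its end */
            }
            return NULL;
    }

In the kernel code, rb_tree_find_node(&map->rb_tree, &va) performs the same containment test against the live tree.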
348 | | | 348 | |
349 | static inline vsize_t | | 349 | static inline vsize_t |
350 | uvm_rb_gap(const struct vm_map_entry *entry) | | 350 | uvm_rb_gap(const struct vm_map_entry *entry) |
351 | { | | 351 | { |
352 | KASSERT(entry->next != NULL); | | 352 | KASSERT(entry->next != NULL); |
353 | return entry->next->start - entry->end; | | 353 | return entry->next->start - entry->end; |
354 | } | | 354 | } |
355 | | | 355 | |
356 | static vsize_t | | 356 | static vsize_t |
357 | uvm_rb_maxgap(const struct vm_map_entry *entry) | | 357 | uvm_rb_maxgap(const struct vm_map_entry *entry) |
358 | { | | 358 | { |
359 | struct vm_map_entry *child; | | 359 | struct vm_map_entry *child; |
360 | vsize_t maxgap = entry->gap; | | 360 | vsize_t maxgap = entry->gap; |
361 | | | 361 | |
362 | /* | | 362 | /* |
363 | * We need maxgap to be the largest gap of us or any of our | | 363 | * We need maxgap to be the largest gap of us or any of our |
364 | * descendants. Since each of our children's maxgap is the | | 364 | * descendants. Since each of our children's maxgap is the |
365 | * cached value of their largest gap of themselves or their | | 365 | * cached value of their largest gap of themselves or their |
366 | * descendants, we can just use that value and avoid recursing | | 366 | * descendants, we can just use that value and avoid recursing |
367 | * down the tree to calculate it. | | 367 | * down the tree to calculate it. |
368 | */ | | 368 | */ |
369 | if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap) | | 369 | if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap) |
370 | maxgap = child->maxgap; | | 370 | maxgap = child->maxgap; |
371 | | | 371 | |
372 | if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap) | | 372 | if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap) |
373 | maxgap = child->maxgap; | | 373 | maxgap = child->maxgap; |
374 | | | 374 | |
375 | return maxgap; | | 375 | return maxgap; |
376 | } | | 376 | } |
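The cached maxgap is what makes free-space searches cheap: a whole subtree can be skipped as soon as its root's maxgap is smaller than the allocation being placed. A simplified standalone sketch of that pruning idea, assuming a toy node layout (this is not the actual uvm_map_findspace() code):

    #include <stddef.h>

    struct node {                   /* toy stand-in for vm_map_entry */
            unsigned long end;      /* end of this mapping */
            unsigned long gap;      /* free space between this mapping and the next */
            unsigned long maxgap;   /* largest gap in this subtree (cached) */
            struct node *left, *right;
    };

    /* Find the lowest-addressed node whose own gap can hold 'sz', or NULL. */
    static struct node *
    find_gap(struct node *n, unsigned long sz)
    {
            if (n == NULL || n->maxgap < sz)
                    return NULL;            /* prune: nothing in this subtree fits */
            struct node *hit = find_gap(n->left, sz);
            if (hit != NULL)
                    return hit;
            if (n->gap >= sz)
                    return n;               /* free space starts at n->end */
            return find_gap(n->right, sz);
    }

An in-order walk like this visits candidate gaps from the lowest address upward, which matches first-fit placement.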
377 | | | 377 | |
378 | static void | | 378 | static void |
379 | uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry) | | 379 | uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry) |
380 | { | | 380 | { |
381 | struct vm_map_entry *parent; | | 381 | struct vm_map_entry *parent; |
382 | | | 382 | |
383 | KASSERT(entry->gap == uvm_rb_gap(entry)); | | 383 | KASSERT(entry->gap == uvm_rb_gap(entry)); |
384 | entry->maxgap = uvm_rb_maxgap(entry); | | 384 | entry->maxgap = uvm_rb_maxgap(entry); |
385 | | | 385 | |
386 | while ((parent = PARENT_ENTRY(map, entry)) != NULL) { | | 386 | while ((parent = PARENT_ENTRY(map, entry)) != NULL) { |
387 | struct vm_map_entry *brother; | | 387 | struct vm_map_entry *brother; |
388 | vsize_t maxgap = parent->gap; | | 388 | vsize_t maxgap = parent->gap; |
389 | | | 389 | |
390 | KDASSERT(parent->gap == uvm_rb_gap(parent)); | | 390 | KDASSERT(parent->gap == uvm_rb_gap(parent)); |
391 | if (maxgap < entry->maxgap) | | 391 | if (maxgap < entry->maxgap) |
392 | maxgap = entry->maxgap; | | 392 | maxgap = entry->maxgap; |
393 | /* | | 393 | /* |
394 | * Since we work our way towards the root, we know entry's maxgap | | 394 | * Since we work our way towards the root, we know entry's maxgap |
395 | * value is ok but its brothers may now be out-of-date due to | | 395 | * value is ok but its brothers may now be out-of-date due to |
396 | * rebalancing. So refresh it. | | 396 | * rebalancing. So refresh it. |
397 | */ | | 397 | */ |
398 | brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER]; | | 398 | brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER]; |
399 | if (brother != NULL) { | | 399 | if (brother != NULL) { |
400 | KDASSERT(brother->gap == uvm_rb_gap(brother)); | | 400 | KDASSERT(brother->gap == uvm_rb_gap(brother)); |
401 | brother->maxgap = uvm_rb_maxgap(brother); | | 401 | brother->maxgap = uvm_rb_maxgap(brother); |
402 | if (maxgap < brother->maxgap) | | 402 | if (maxgap < brother->maxgap) |
403 | maxgap = brother->maxgap; | | 403 | maxgap = brother->maxgap; |
404 | } | | 404 | } |
405 | | | 405 | |
406 | parent->maxgap = maxgap; | | 406 | parent->maxgap = maxgap; |
407 | entry = parent; | | 407 | entry = parent; |
408 | } | | 408 | } |
409 | } | | 409 | } |
410 | | | 410 | |
411 | static void | | 411 | static void |
412 | uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry) | | 412 | uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry) |
413 | { | | 413 | { |
414 | entry->gap = entry->maxgap = uvm_rb_gap(entry); | | 414 | entry->gap = entry->maxgap = uvm_rb_gap(entry); |
415 | if (entry->prev != &map->header) | | 415 | if (entry->prev != &map->header) |
416 | entry->prev->gap = uvm_rb_gap(entry->prev); | | 416 | entry->prev->gap = uvm_rb_gap(entry->prev); |
417 | | | 417 | |
418 | if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node)) | | 418 | if (!rb_tree_insert_node(&map->rb_tree, &entry->rb_node)) |
419 | panic("uvm_rb_insert: map %p: duplicate entry?", map); | | 419 | panic("uvm_rb_insert: map %p: duplicate entry?", map); |
420 | | | 420 | |
421 | /* | | 421 | /* |
422 | * If the previous entry is not our immediate left child, then it's an | | 422 | * If the previous entry is not our immediate left child, then it's an |
423 | * ancestor and will be fixed up on the way to the root. We don't | | 423 | * ancestor and will be fixed up on the way to the root. We don't |
424 | * have to check entry->prev against &map->header since &map->header | | 424 | * have to check entry->prev against &map->header since &map->header |
425 | * will never be in the tree. | | 425 | * will never be in the tree. |
426 | */ | | 426 | */ |
427 | uvm_rb_fixup(map, | | 427 | uvm_rb_fixup(map, |
428 | LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry); | | 428 | LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry); |
429 | } | | 429 | } |
430 | | | 430 | |
431 | static void | | 431 | static void |
432 | uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry) | | 432 | uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry) |
433 | { | | 433 | { |
434 | struct vm_map_entry *prev_parent = NULL, *next_parent = NULL; | | 434 | struct vm_map_entry *prev_parent = NULL, *next_parent = NULL; |
435 | | | 435 | |
436 | /* | | 436 | /* |
437 | * If we are removing an interior node, then an adjacent node will | | 437 | * If we are removing an interior node, then an adjacent node will |
438 | * be used to replace its position in the tree. Therefore we will | | 438 | * be used to replace its position in the tree. Therefore we will |
439 | * need to fixup the tree starting at the parent of the replacement | | 439 | * need to fixup the tree starting at the parent of the replacement |
440 | * node. So record their parents for later use. | | 440 | * node. So record their parents for later use. |
441 | */ | | 441 | */ |
442 | if (entry->prev != &map->header) | | 442 | if (entry->prev != &map->header) |
443 | prev_parent = PARENT_ENTRY(map, entry->prev); | | 443 | prev_parent = PARENT_ENTRY(map, entry->prev); |
444 | if (entry->next != &map->header) | | 444 | if (entry->next != &map->header) |
445 | next_parent = PARENT_ENTRY(map, entry->next); | | 445 | next_parent = PARENT_ENTRY(map, entry->next); |
446 | | | 446 | |
447 | rb_tree_remove_node(&map->rb_tree, &entry->rb_node); | | 447 | rb_tree_remove_node(&map->rb_tree, &entry->rb_node); |
448 | | | 448 | |
449 | /* | | 449 | /* |
450 | * If the previous node has a new parent, fixup the tree starting | | 450 | * If the previous node has a new parent, fixup the tree starting |
451 | * at the previous node's old parent. | | 451 | * at the previous node's old parent. |
452 | */ | | 452 | */ |
453 | if (entry->prev != &map->header) { | | 453 | if (entry->prev != &map->header) { |
454 | /* | | 454 | /* |
455 | * Update the previous entry's gap due to our absence. | | 455 | * Update the previous entry's gap due to our absence. |
456 | */ | | 456 | */ |
457 | entry->prev->gap = uvm_rb_gap(entry->prev); | | 457 | entry->prev->gap = uvm_rb_gap(entry->prev); |
458 | uvm_rb_fixup(map, entry->prev); | | 458 | uvm_rb_fixup(map, entry->prev); |
459 | if (prev_parent != NULL | | 459 | if (prev_parent != NULL |
460 | && prev_parent != entry | | 460 | && prev_parent != entry |
461 | && prev_parent != PARENT_ENTRY(map, entry->prev)) | | 461 | && prev_parent != PARENT_ENTRY(map, entry->prev)) |
462 | uvm_rb_fixup(map, prev_parent); | | 462 | uvm_rb_fixup(map, prev_parent); |
463 | } | | 463 | } |
464 | | | 464 | |
465 | /* | | 465 | /* |
466 | * If the next node has a new parent, fixup the tree starting | | 466 | * If the next node has a new parent, fixup the tree starting |
467 | * at the next node's old parent. | | 467 | * at the next node's old parent. |
468 | */ | | 468 | */ |
469 | if (entry->next != &map->header) { | | 469 | if (entry->next != &map->header) { |
470 | uvm_rb_fixup(map, entry->next); | | 470 | uvm_rb_fixup(map, entry->next); |
471 | if (next_parent != NULL | | 471 | if (next_parent != NULL |
472 | && next_parent != entry | | 472 | && next_parent != entry |
473 | && next_parent != PARENT_ENTRY(map, entry->next)) | | 473 | && next_parent != PARENT_ENTRY(map, entry->next)) |
474 | uvm_rb_fixup(map, next_parent); | | 474 | uvm_rb_fixup(map, next_parent); |
475 | } | | 475 | } |
476 | } | | 476 | } |
477 | | | 477 | |
478 | #if defined(DEBUG) | | 478 | #if defined(DEBUG) |
479 | int uvm_debug_check_map = 0; | | 479 | int uvm_debug_check_map = 0; |
480 | int uvm_debug_check_rbtree = 0; | | 480 | int uvm_debug_check_rbtree = 0; |
481 | #define uvm_map_check(map, name) \ | | 481 | #define uvm_map_check(map, name) \ |
482 | _uvm_map_check((map), (name), __FILE__, __LINE__) | | 482 | _uvm_map_check((map), (name), __FILE__, __LINE__) |
483 | static void | | 483 | static void |
484 | _uvm_map_check(struct vm_map *map, const char *name, | | 484 | _uvm_map_check(struct vm_map *map, const char *name, |
485 | const char *file, int line) | | 485 | const char *file, int line) |
486 | { | | 486 | { |
487 | | | 487 | |
488 | if ((uvm_debug_check_map && _uvm_map_sanity(map)) || | | 488 | if ((uvm_debug_check_map && _uvm_map_sanity(map)) || |
489 | (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) { | | 489 | (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) { |
490 | panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)", | | 490 | panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)", |
491 | name, map, file, line); | | 491 | name, map, file, line); |
492 | } | | 492 | } |
493 | } | | 493 | } |
494 | #else /* defined(DEBUG) */ | | 494 | #else /* defined(DEBUG) */ |
495 | #define uvm_map_check(map, name) /* nothing */ | | 495 | #define uvm_map_check(map, name) /* nothing */ |
496 | #endif /* defined(DEBUG) */ | | 496 | #endif /* defined(DEBUG) */ |
497 | | | 497 | |
498 | #if defined(DEBUG) || defined(DDB) | | 498 | #if defined(DEBUG) || defined(DDB) |
499 | int | | 499 | int |
500 | _uvm_map_sanity(struct vm_map *map) | | 500 | _uvm_map_sanity(struct vm_map *map) |
501 | { | | 501 | { |
502 | bool first_free_found = false; | | 502 | bool first_free_found = false; |
503 | bool hint_found = false; | | 503 | bool hint_found = false; |
504 | const struct vm_map_entry *e; | | 504 | const struct vm_map_entry *e; |
505 | struct vm_map_entry *hint = map->hint; | | 505 | struct vm_map_entry *hint = map->hint; |
506 | | | 506 | |
507 | e = &map->header; | | 507 | e = &map->header; |
508 | for (;;) { | | 508 | for (;;) { |
509 | if (map->first_free == e) { | | 509 | if (map->first_free == e) { |
510 | first_free_found = true; | | 510 | first_free_found = true; |
511 | } else if (!first_free_found && e->next->start > e->end) { | | 511 | } else if (!first_free_found && e->next->start > e->end) { |
512 | printf("first_free %p should be %p\n", | | 512 | printf("first_free %p should be %p\n", |
513 | map->first_free, e); | | 513 | map->first_free, e); |
514 | return -1; | | 514 | return -1; |
515 | } | | 515 | } |
516 | if (hint == e) { | | 516 | if (hint == e) { |
517 | hint_found = true; | | 517 | hint_found = true; |
518 | } | | 518 | } |
519 | | | 519 | |
520 | e = e->next; | | 520 | e = e->next; |
521 | if (e == &map->header) { | | 521 | if (e == &map->header) { |
522 | break; | | 522 | break; |
523 | } | | 523 | } |
524 | } | | 524 | } |
525 | if (!first_free_found) { | | 525 | if (!first_free_found) { |
526 | printf("stale first_free\n"); | | 526 | printf("stale first_free\n"); |
527 | return -1; | | 527 | return -1; |
528 | } | | 528 | } |
529 | if (!hint_found) { | | 529 | if (!hint_found) { |
530 | printf("stale hint\n"); | | 530 | printf("stale hint\n"); |
531 | return -1; | | 531 | return -1; |
532 | } | | 532 | } |
533 | return 0; | | 533 | return 0; |
534 | } | | 534 | } |
535 | | | 535 | |
536 | int | | 536 | int |
537 | _uvm_tree_sanity(struct vm_map *map) | | 537 | _uvm_tree_sanity(struct vm_map *map) |
538 | { | | 538 | { |
539 | struct vm_map_entry *tmp, *trtmp; | | 539 | struct vm_map_entry *tmp, *trtmp; |
540 | int n = 0, i = 1; | | 540 | int n = 0, i = 1; |
541 | | | 541 | |
542 | for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { | | 542 | for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { |
543 | if (tmp->gap != uvm_rb_gap(tmp)) { | | 543 | if (tmp->gap != uvm_rb_gap(tmp)) { |
544 | printf("%d/%d gap %lx != %lx %s\n", | | 544 | printf("%d/%d gap %lx != %lx %s\n", |
545 | n + 1, map->nentries, | | 545 | n + 1, map->nentries, |
546 | (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp), | | 546 | (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp), |
547 | tmp->next == &map->header ? "(last)" : ""); | | 547 | tmp->next == &map->header ? "(last)" : ""); |
548 | goto error; | | 548 | goto error; |
549 | } | | 549 | } |
550 | /* | | 550 | /* |
551 | * If any entries are out of order, tmp->gap wraps around (it is | | 551 | * If any entries are out of order, tmp->gap wraps around (it is |
552 | * unsigned) and will likely exceed the size of the map. | | 552 | * unsigned) and will likely exceed the size of the map. |
553 | */ | | 553 | */ |
554 | if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) { | | 554 | if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) { |
555 | printf("too large gap %zu\n", (size_t)tmp->gap); | | 555 | printf("too large gap %zu\n", (size_t)tmp->gap); |
556 | goto error; | | 556 | goto error; |
557 | } | | 557 | } |
558 | n++; | | 558 | n++; |
559 | } | | 559 | } |
560 | | | 560 | |
561 | if (n != map->nentries) { | | 561 | if (n != map->nentries) { |
562 | printf("nentries: %d vs %d\n", n, map->nentries); | | 562 | printf("nentries: %d vs %d\n", n, map->nentries); |
563 | goto error; | | 563 | goto error; |
564 | } | | 564 | } |
565 | | | 565 | |
566 | trtmp = NULL; | | 566 | trtmp = NULL; |
567 | for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { | | 567 | for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { |
568 | if (tmp->maxgap != uvm_rb_maxgap(tmp)) { | | 568 | if (tmp->maxgap != uvm_rb_maxgap(tmp)) { |
569 | printf("maxgap %lx != %lx\n", | | 569 | printf("maxgap %lx != %lx\n", |
570 | (ulong)tmp->maxgap, | | 570 | (ulong)tmp->maxgap, |
571 | (ulong)uvm_rb_maxgap(tmp)); | | 571 | (ulong)uvm_rb_maxgap(tmp)); |
572 | goto error; | | 572 | goto error; |
573 | } | | 573 | } |
574 | if (trtmp != NULL && trtmp->start >= tmp->start) { | | 574 | if (trtmp != NULL && trtmp->start >= tmp->start) { |
575 | printf("corrupt: 0x%lx >= 0x%lx\n", | | 575 | printf("corrupt: 0x%"PRIxVADDR"x >= 0x%"PRIxVADDR"x\n", |
576 | trtmp->start, tmp->start); | | 576 | trtmp->start, tmp->start); |
577 | goto error; | | 577 | goto error; |
578 | } | | 578 | } |
579 | | | 579 | |
580 | trtmp = tmp; | | 580 | trtmp = tmp; |
581 | } | | 581 | } |
582 | | | 582 | |
583 | for (tmp = map->header.next; tmp != &map->header; | | 583 | for (tmp = map->header.next; tmp != &map->header; |
584 | tmp = tmp->next, i++) { | | 584 | tmp = tmp->next, i++) { |
585 | trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node, | | 585 | trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node, |
586 | RB_DIR_LEFT); | | 586 | RB_DIR_LEFT); |
587 | if (trtmp == NULL) | | 587 | if (trtmp == NULL) |
588 | trtmp = &map->header; | | 588 | trtmp = &map->header; |
589 | if (tmp->prev != trtmp) { | | 589 | if (tmp->prev != trtmp) { |
590 | printf("lookup: %d: %p->prev=%p: %p\n", | | 590 | printf("lookup: %d: %p->prev=%p: %p\n", |
591 | i, tmp, tmp->prev, trtmp); | | 591 | i, tmp, tmp->prev, trtmp); |
592 | goto error; | | 592 | goto error; |
593 | } | | 593 | } |
594 | trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node, | | 594 | trtmp = (void *) rb_tree_iterate(&map->rb_tree, &tmp->rb_node, |
595 | RB_DIR_RIGHT); | | 595 | RB_DIR_RIGHT); |
596 | if (trtmp == NULL) | | 596 | if (trtmp == NULL) |
597 | trtmp = &map->header; | | 597 | trtmp = &map->header; |
598 | if (tmp->next != trtmp) { | | 598 | if (tmp->next != trtmp) { |
599 | printf("lookup: %d: %p->next=%p: %p\n", | | 599 | printf("lookup: %d: %p->next=%p: %p\n", |
600 | i, tmp, tmp->next, trtmp); | | 600 | i, tmp, tmp->next, trtmp); |
601 | goto error; | | 601 | goto error; |
602 | } | | 602 | } |
603 | trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start); | | 603 | trtmp = (void *)rb_tree_find_node(&map->rb_tree, &tmp->start); |
604 | if (trtmp != tmp) { | | 604 | if (trtmp != tmp) { |
605 | printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp, | | 605 | printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp, |
606 | PARENT_ENTRY(map, tmp)); | | 606 | PARENT_ENTRY(map, tmp)); |
607 | goto error; | | 607 | goto error; |
608 | } | | 608 | } |
609 | } | | 609 | } |
610 | | | 610 | |
611 | return (0); | | 611 | return (0); |
612 | error: | | 612 | error: |
613 | return (-1); | | 613 | return (-1); |
614 | } | | 614 | } |
615 | #endif /* defined(DEBUG) || defined(DDB) */ | | 615 | #endif /* defined(DEBUG) || defined(DDB) */ |
616 | | | 616 | |
617 | #ifdef DIAGNOSTIC | | 617 | #ifdef DIAGNOSTIC |
618 | static struct vm_map *uvm_kmapent_map(struct vm_map_entry *); | | 618 | static struct vm_map *uvm_kmapent_map(struct vm_map_entry *); |
619 | #endif | | 619 | #endif |
620 | | | 620 | |
621 | /* | | 621 | /* |
622 | * vm_map_lock: acquire an exclusive (write) lock on a map. | | 622 | * vm_map_lock: acquire an exclusive (write) lock on a map. |
623 | * | | 623 | * |
624 | * => Note that "intrsafe" maps use only exclusive, spin locks. | | 624 | * => Note that "intrsafe" maps use only exclusive, spin locks. |
625 | * | | 625 | * |
626 | * => The locking protocol provides for guaranteed upgrade from shared -> | | 626 | * => The locking protocol provides for guaranteed upgrade from shared -> |
627 | * exclusive by whichever thread currently has the map marked busy. | | 627 | * exclusive by whichever thread currently has the map marked busy. |
628 | * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among | | 628 | * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among |
629 | * other problems, it defeats any fairness guarantees provided by RW | | 629 | * other problems, it defeats any fairness guarantees provided by RW |
630 | * locks. | | 630 | * locks. |
631 | */ | | 631 | */ |
632 | | | 632 | |
633 | void | | 633 | void |
634 | vm_map_lock(struct vm_map *map) | | 634 | vm_map_lock(struct vm_map *map) |
635 | { | | 635 | { |
636 | | | 636 | |
637 | if ((map->flags & VM_MAP_INTRSAFE) != 0) { | | 637 | if ((map->flags & VM_MAP_INTRSAFE) != 0) { |
638 | mutex_spin_enter(&map->mutex); | | 638 | mutex_spin_enter(&map->mutex); |
639 | return; | | 639 | return; |
640 | } | | 640 | } |
641 | | | 641 | |
642 | for (;;) { | | 642 | for (;;) { |
643 | rw_enter(&map->lock, RW_WRITER); | | 643 | rw_enter(&map->lock, RW_WRITER); |
644 | if (map->busy == NULL) | | 644 | if (map->busy == NULL) |
645 | break; | | 645 | break; |
646 | if (map->busy == curlwp) | | 646 | if (map->busy == curlwp) |
647 | break; | | 647 | break; |
648 | mutex_enter(&map->misc_lock); | | 648 | mutex_enter(&map->misc_lock); |
649 | rw_exit(&map->lock); | | 649 | rw_exit(&map->lock); |
650 | if (map->busy != NULL) | | 650 | if (map->busy != NULL) |
651 | cv_wait(&map->cv, &map->misc_lock); | | 651 | cv_wait(&map->cv, &map->misc_lock); |
652 | mutex_exit(&map->misc_lock); | | 652 | mutex_exit(&map->misc_lock); |
653 | } | | 653 | } |
654 | | | 654 | |
655 | map->timestamp++; | | 655 | map->timestamp++; |
656 | } | | 656 | } |
657 | | | 657 | |
658 | /* | | 658 | /* |
659 | * vm_map_lock_try: try to lock a map, failing if it is already locked. | | 659 | * vm_map_lock_try: try to lock a map, failing if it is already locked. |
660 | */ | | 660 | */ |
661 | | | 661 | |
662 | bool | | 662 | bool |
663 | vm_map_lock_try(struct vm_map *map) | | 663 | vm_map_lock_try(struct vm_map *map) |
664 | { | | 664 | { |
665 | | | 665 | |
666 | if ((map->flags & VM_MAP_INTRSAFE) != 0) | | 666 | if ((map->flags & VM_MAP_INTRSAFE) != 0) |
667 | return mutex_tryenter(&map->mutex); | | 667 | return mutex_tryenter(&map->mutex); |
668 | if (!rw_tryenter(&map->lock, RW_WRITER)) | | 668 | if (!rw_tryenter(&map->lock, RW_WRITER)) |
669 | return false; | | 669 | return false; |
670 | if (map->busy != NULL) { | | 670 | if (map->busy != NULL) { |
671 | rw_exit(&map->lock); | | 671 | rw_exit(&map->lock); |
672 | return false; | | 672 | return false; |
673 | } | | 673 | } |
674 | | | 674 | |
675 | map->timestamp++; | | 675 | map->timestamp++; |
676 | return true; | | 676 | return true; |
677 | } | | 677 | } |
678 | | | 678 | |
679 | /* | | 679 | /* |
680 | * vm_map_unlock: release an exclusive lock on a map. | | 680 | * vm_map_unlock: release an exclusive lock on a map. |
681 | */ | | 681 | */ |
682 | | | 682 | |
683 | void | | 683 | void |
684 | vm_map_unlock(struct vm_map *map) | | 684 | vm_map_unlock(struct vm_map *map) |
685 | { | | 685 | { |
686 | | | 686 | |
687 | if ((map->flags & VM_MAP_INTRSAFE) != 0) | | 687 | if ((map->flags & VM_MAP_INTRSAFE) != 0) |
688 | mutex_spin_exit(&map->mutex); | | 688 | mutex_spin_exit(&map->mutex); |
689 | else { | | 689 | else { |
690 | KASSERT(rw_write_held(&map->lock)); | | 690 | KASSERT(rw_write_held(&map->lock)); |
691 | KASSERT(map->busy == NULL || map->busy == curlwp); | | 691 | KASSERT(map->busy == NULL || map->busy == curlwp); |
692 | rw_exit(&map->lock); | | 692 | rw_exit(&map->lock); |
693 | } | | 693 | } |
694 | } | | 694 | } |
695 | | | 695 | |
696 | /* | | 696 | /* |
697 | * vm_map_unbusy: mark the map as unbusy, and wake any waiters that | | 697 | * vm_map_unbusy: mark the map as unbusy, and wake any waiters that |
698 | * want an exclusive lock. | | 698 | * want an exclusive lock. |
699 | */ | | 699 | */ |
700 | | | 700 | |
701 | void | | 701 | void |
702 | vm_map_unbusy(struct vm_map *map) | | 702 | vm_map_unbusy(struct vm_map *map) |
703 | { | | 703 | { |
704 | | | 704 | |
705 | KASSERT(map->busy == curlwp); | | 705 | KASSERT(map->busy == curlwp); |
706 | | | 706 | |
707 | /* | | 707 | /* |
708 | * Safe to clear 'busy' and 'waiters' with only a read lock held: | | 708 | * Safe to clear 'busy' and 'waiters' with only a read lock held: |
709 | * | | 709 | * |
710 | * o they can only be set with a write lock held | | 710 | * o they can only be set with a write lock held |
711 | * o writers are blocked out with a read or write hold | | 711 | * o writers are blocked out with a read or write hold |
712 | * o at any time, only one thread owns the set of values | | 712 | * o at any time, only one thread owns the set of values |
713 | */ | | 713 | */ |
714 | mutex_enter(&map->misc_lock); | | 714 | mutex_enter(&map->misc_lock); |
715 | map->busy = NULL; | | 715 | map->busy = NULL; |
716 | cv_broadcast(&map->cv); | | 716 | cv_broadcast(&map->cv); |
717 | mutex_exit(&map->misc_lock); | | 717 | mutex_exit(&map->misc_lock); |
718 | } | | 718 | } |
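vm_map_busy() and vm_map_unbusy() bracket work that has to keep other writers out of the map while the map lock itself is dropped. A hedged usage sketch, assuming a caller structured along these lines (illustrative only; the real callers and the "LOCKING PROTOCOL NOTES" in uvm_map.h are authoritative):

    /* Hypothetical caller: hold off other writers across a slow operation. */
    static void
    example_slow_operation(struct vm_map *map)
    {
            vm_map_lock(map);       /* exclusive lock, required to set 'busy' */
            vm_map_busy(map);       /* this lwp now owns the busy mark */
            vm_map_unlock(map);     /* drop the lock for the slow part */

            /* ... long-running work that must not race other map writers ... */

            vm_map_lock(map);       /* re-acquire; vm_map_lock() admits the busy owner */
            vm_map_unbusy(map);     /* clear busy, wake threads waiting in vm_map_lock() */
            vm_map_unlock(map);
    }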
719 | | | 719 | |
720 | /* | | 720 | /* |
721 | * vm_map_lock_read: acquire a shared (read) lock on a map. | | 721 | * vm_map_lock_read: acquire a shared (read) lock on a map. |
722 | */ | | 722 | */ |
723 | | | 723 | |
724 | void | | 724 | void |
725 | vm_map_lock_read(struct vm_map *map) | | 725 | vm_map_lock_read(struct vm_map *map) |
726 | { | | 726 | { |
727 | | | 727 | |
728 | KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); | | 728 | KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); |
729 | | | 729 | |
730 | rw_enter(&map->lock, RW_READER); | | 730 | rw_enter(&map->lock, RW_READER); |
731 | } | | 731 | } |
732 | | | 732 | |
733 | /* | | 733 | /* |
734 | * vm_map_unlock_read: release a shared lock on a map. | | 734 | * vm_map_unlock_read: release a shared lock on a map. |
735 | */ | | 735 | */ |
736 | | | 736 | |
737 | void | | 737 | void |
738 | vm_map_unlock_read(struct vm_map *map) | | 738 | vm_map_unlock_read(struct vm_map *map) |
739 | { | | 739 | { |
740 | | | 740 | |
741 | KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); | | 741 | KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); |
742 | | | 742 | |
743 | rw_exit(&map->lock); | | 743 | rw_exit(&map->lock); |
744 | } | | 744 | } |
745 | | | 745 | |
746 | /* | | 746 | /* |
747 | * vm_map_busy: mark a map as busy. | | 747 | * vm_map_busy: mark a map as busy. |
748 | * | | 748 | * |
749 | * => the caller must hold the map write locked | | 749 | * => the caller must hold the map write locked |
750 | */ | | 750 | */ |
751 | | | 751 | |
752 | void | | 752 | void |
753 | vm_map_busy(struct vm_map *map) | | 753 | vm_map_busy(struct vm_map *map) |
754 | { | | 754 | { |
755 | | | 755 | |
756 | KASSERT(rw_write_held(&map->lock)); | | 756 | KASSERT(rw_write_held(&map->lock)); |
757 | KASSERT(map->busy == NULL); | | 757 | KASSERT(map->busy == NULL); |
758 | | | 758 | |
759 | map->busy = curlwp; | | 759 | map->busy = curlwp; |
760 | } | | 760 | } |
761 | | | 761 | |
762 | /* | | 762 | /* |
763 | * vm_map_locked_p: return true if the map is write locked. | | 763 | * vm_map_locked_p: return true if the map is write locked. |
764 | * | | 764 | * |
765 | * => only for debug purposes like KASSERTs. | | 765 | * => only for debug purposes like KASSERTs. |
766 | * => should not be used to verify that a map is not locked. | | 766 | * => should not be used to verify that a map is not locked. |
767 | */ | | 767 | */ |
768 | | | 768 | |
769 | bool | | 769 | bool |
770 | vm_map_locked_p(struct vm_map *map) | | 770 | vm_map_locked_p(struct vm_map *map) |
771 | { | | 771 | { |
772 | | | 772 | |
773 | if ((map->flags & VM_MAP_INTRSAFE) != 0) { | | 773 | if ((map->flags & VM_MAP_INTRSAFE) != 0) { |
774 | return mutex_owned(&map->mutex); | | 774 | return mutex_owned(&map->mutex); |
775 | } else { | | 775 | } else { |
776 | return rw_write_held(&map->lock); | | 776 | return rw_write_held(&map->lock); |
777 | } | | 777 | } |
778 | } | | 778 | } |
779 | | | 779 | |
780 | /* | | 780 | /* |
781 | * uvm_mapent_alloc: allocate a map entry | | 781 | * uvm_mapent_alloc: allocate a map entry |
782 | */ | | 782 | */ |
783 | | | 783 | |
784 | static struct vm_map_entry * | | 784 | static struct vm_map_entry * |
785 | uvm_mapent_alloc(struct vm_map *map, int flags) | | 785 | uvm_mapent_alloc(struct vm_map *map, int flags) |
786 | { | | 786 | { |
787 | struct vm_map_entry *me; | | 787 | struct vm_map_entry *me; |
788 | int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK; | | 788 | int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK; |
789 | UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist); | | 789 | UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist); |
790 | | | 790 | |
791 | if (VM_MAP_USE_KMAPENT(map)) { | | 791 | if (VM_MAP_USE_KMAPENT(map)) { |
792 | me = uvm_kmapent_alloc(map, flags); | | 792 | me = uvm_kmapent_alloc(map, flags); |
793 | } else { | | 793 | } else { |
794 | me = pool_cache_get(&uvm_map_entry_cache, pflags); | | 794 | me = pool_cache_get(&uvm_map_entry_cache, pflags); |
795 | if (__predict_false(me == NULL)) | | 795 | if (__predict_false(me == NULL)) |
796 | return NULL; | | 796 | return NULL; |
797 | me->flags = 0; | | 797 | me->flags = 0; |
798 | } | | 798 | } |
799 | | | 799 | |
800 | UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me, | | 800 | UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me, |
801 | ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0); | | 801 | ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0); |
802 | return (me); | | 802 | return (me); |
803 | } | | 803 | } |
804 | | | 804 | |
805 | /* | | 805 | /* |
806 | * uvm_mapent_alloc_split: allocate a map entry for clipping. | | 806 | * uvm_mapent_alloc_split: allocate a map entry for clipping. |
807 | * | | 807 | * |
808 | * => map must be locked by caller if UVM_MAP_QUANTUM is set. | | 808 | * => map must be locked by caller if UVM_MAP_QUANTUM is set. |
809 | */ | | 809 | */ |
810 | | | 810 | |
811 | static struct vm_map_entry * | | 811 | static struct vm_map_entry * |
812 | uvm_mapent_alloc_split(struct vm_map *map, | | 812 | uvm_mapent_alloc_split(struct vm_map *map, |
813 | const struct vm_map_entry *old_entry, int flags, | | 813 | const struct vm_map_entry *old_entry, int flags, |
814 | struct uvm_mapent_reservation *umr) | | 814 | struct uvm_mapent_reservation *umr) |
815 | { | | 815 | { |
816 | struct vm_map_entry *me; | | 816 | struct vm_map_entry *me; |
817 | | | 817 | |
818 | KASSERT(!VM_MAP_USE_KMAPENT(map) || | | 818 | KASSERT(!VM_MAP_USE_KMAPENT(map) || |
819 | (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr)); | | 819 | (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr)); |
820 | | | 820 | |
821 | if (old_entry->flags & UVM_MAP_QUANTUM) { | | 821 | if (old_entry->flags & UVM_MAP_QUANTUM) { |
822 | struct vm_map_kernel *vmk = vm_map_to_kernel(map); | | 822 | struct vm_map_kernel *vmk = vm_map_to_kernel(map); |
823 | | | 823 | |
824 | KASSERT(vm_map_locked_p(map)); | | 824 | KASSERT(vm_map_locked_p(map)); |
825 | me = vmk->vmk_merged_entries; | | 825 | me = vmk->vmk_merged_entries; |
826 | KASSERT(me); | | 826 | KASSERT(me); |
827 | vmk->vmk_merged_entries = me->next; | | 827 | vmk->vmk_merged_entries = me->next; |
828 | KASSERT(me->flags & UVM_MAP_QUANTUM); | | 828 | KASSERT(me->flags & UVM_MAP_QUANTUM); |
829 | } else { | | 829 | } else { |
830 | me = uvm_mapent_alloc(map, flags); | | 830 | me = uvm_mapent_alloc(map, flags); |
831 | } | | 831 | } |
832 | | | 832 | |
833 | return me; | | 833 | return me; |
834 | } | | 834 | } |
835 | | | 835 | |
836 | /* | | 836 | /* |
837 | * uvm_mapent_free: free map entry | | 837 | * uvm_mapent_free: free map entry |
838 | */ | | 838 | */ |
839 | | | 839 | |
840 | static void | | 840 | static void |
841 | uvm_mapent_free(struct vm_map_entry *me) | | 841 | uvm_mapent_free(struct vm_map_entry *me) |
842 | { | | 842 | { |
843 | UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist); | | 843 | UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist); |
844 | | | 844 | |
845 | UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", | | 845 | UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", |
846 | me, me->flags, 0, 0); | | 846 | me, me->flags, 0, 0); |
847 | if (me->flags & UVM_MAP_KERNEL) { | | 847 | if (me->flags & UVM_MAP_KERNEL) { |
848 | uvm_kmapent_free(me); | | 848 | uvm_kmapent_free(me); |
849 | } else { | | 849 | } else { |
850 | pool_cache_put(&uvm_map_entry_cache, me); | | 850 | pool_cache_put(&uvm_map_entry_cache, me); |
851 | } | | 851 | } |
852 | } | | 852 | } |
853 | | | 853 | |
854 | /* | | 854 | /* |
855 | * uvm_mapent_free_merged: free merged map entry | | 855 | * uvm_mapent_free_merged: free merged map entry |
856 | * | | 856 | * |
857 | * => keep the entry if needed. | | 857 | * => keep the entry if needed. |
858 | * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true. | | 858 | * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true. |
859 | * => map should be locked if UVM_MAP_QUANTUM is set. | | 859 | * => map should be locked if UVM_MAP_QUANTUM is set. |
860 | */ | | 860 | */ |
861 | | | 861 | |
862 | static void | | 862 | static void |
863 | uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me) | | 863 | uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me) |
864 | { | | 864 | { |
865 | | | 865 | |
866 | KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map); | | 866 | KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map); |
867 | | | 867 | |
868 | if (me->flags & UVM_MAP_QUANTUM) { | | 868 | if (me->flags & UVM_MAP_QUANTUM) { |
869 | /* | | 869 | /* |
870 | * keep this entry for later splitting. | | 870 | * keep this entry for later splitting. |
871 | */ | | 871 | */ |
872 | struct vm_map_kernel *vmk; | | 872 | struct vm_map_kernel *vmk; |
873 | | | 873 | |
874 | KASSERT(vm_map_locked_p(map)); | | 874 | KASSERT(vm_map_locked_p(map)); |
875 | KASSERT(VM_MAP_IS_KERNEL(map)); | | 875 | KASSERT(VM_MAP_IS_KERNEL(map)); |
876 | KASSERT(!VM_MAP_USE_KMAPENT(map) || | | 876 | KASSERT(!VM_MAP_USE_KMAPENT(map) || |
877 | (me->flags & UVM_MAP_KERNEL)); | | 877 | (me->flags & UVM_MAP_KERNEL)); |
878 | | | 878 | |
879 | vmk = vm_map_to_kernel(map); | | 879 | vmk = vm_map_to_kernel(map); |
880 | me->next = vmk->vmk_merged_entries; | | 880 | me->next = vmk->vmk_merged_entries; |
881 | vmk->vmk_merged_entries = me; | | 881 | vmk->vmk_merged_entries = me; |
882 | } else { | | 882 | } else { |
883 | uvm_mapent_free(me); | | 883 | uvm_mapent_free(me); |
884 | } | | 884 | } |
885 | } | | 885 | } |
886 | | | 886 | |
887 | /* | | 887 | /* |
888 | * uvm_mapent_copy: copy a map entry, preserving flags | | 888 | * uvm_mapent_copy: copy a map entry, preserving flags |
889 | */ | | 889 | */ |
890 | | | 890 | |
891 | static inline void | | 891 | static inline void |
892 | uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst) | | 892 | uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst) |
893 | { | | 893 | { |
894 | | | 894 | |
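	/*
	 * note: the copy deliberately stops at uvm_map_entry_stop_copy,
	 * so dst->flags (and any fields declared after that marker in
	 * struct vm_map_entry) keep their existing values -- that is what
	 * "preserving flags" above refers to.
	 */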
895 | memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) - | | 895 | memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) - |
896 | ((char *)src)); | | 896 | ((char *)src)); |
897 | } | | 897 | } |
898 | | | 898 | |
899 | /* | | 899 | /* |
900 | * uvm_mapent_overhead: calculate maximum kva overhead necessary for | | 900 | * uvm_mapent_overhead: calculate maximum kva overhead necessary for |
901 | * map entries. | | 901 | * map entries. |
902 | * | | 902 | * |
903 |  *	=> size and flags are the same as those of uvm_km_suballoc. | | 903 |  *	=> size and flags are the same as those of uvm_km_suballoc. |
904 | */ | | 904 | */ |
905 | | | 905 | |
906 | vsize_t | | 906 | vsize_t |
907 | uvm_mapent_overhead(vsize_t size, int flags) | | 907 | uvm_mapent_overhead(vsize_t size, int flags) |
908 | { | | 908 | { |
909 | | | 909 | |
910 | if (VM_MAP_USE_KMAPENT_FLAGS(flags)) { | | 910 | if (VM_MAP_USE_KMAPENT_FLAGS(flags)) { |
911 | return uvm_kmapent_overhead(size); | | 911 | return uvm_kmapent_overhead(size); |
912 | } | | 912 | } |
913 | return 0; | | 913 | return 0; |
914 | } | | 914 | } |
915 | | | 915 | |
916 | #if defined(DEBUG) | | 916 | #if defined(DEBUG) |
917 | static void | | 917 | static void |
918 | _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line) | | 918 | _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line) |
919 | { | | 919 | { |
920 | | | 920 | |
921 | if (entry->start >= entry->end) { | | 921 | if (entry->start >= entry->end) { |
922 | goto bad; | | 922 | goto bad; |
923 | } | | 923 | } |
924 | if (UVM_ET_ISOBJ(entry)) { | | 924 | if (UVM_ET_ISOBJ(entry)) { |
925 | if (entry->object.uvm_obj == NULL) { | | 925 | if (entry->object.uvm_obj == NULL) { |
926 | goto bad; | | 926 | goto bad; |
927 | } | | 927 | } |
928 | } else if (UVM_ET_ISSUBMAP(entry)) { | | 928 | } else if (UVM_ET_ISSUBMAP(entry)) { |
929 | if (entry->object.sub_map == NULL) { | | 929 | if (entry->object.sub_map == NULL) { |
930 | goto bad; | | 930 | goto bad; |
931 | } | | 931 | } |
932 | } else { | | 932 | } else { |
933 | if (entry->object.uvm_obj != NULL || | | 933 | if (entry->object.uvm_obj != NULL || |
934 | entry->object.sub_map != NULL) { | | 934 | entry->object.sub_map != NULL) { |
935 | goto bad; | | 935 | goto bad; |
936 | } | | 936 | } |
937 | } | | 937 | } |
938 | if (!UVM_ET_ISOBJ(entry)) { | | 938 | if (!UVM_ET_ISOBJ(entry)) { |
939 | if (entry->offset != 0) { | | 939 | if (entry->offset != 0) { |
940 | goto bad; | | 940 | goto bad; |
941 | } | | 941 | } |
942 | } | | 942 | } |
943 | | | 943 | |
944 | return; | | 944 | return; |
945 | | | 945 | |
946 | bad: | | 946 | bad: |
947 | panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line); | | 947 | panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line); |
948 | } | | 948 | } |
949 | #endif /* defined(DEBUG) */ | | 949 | #endif /* defined(DEBUG) */ |
950 | | | 950 | |
951 | /* | | 951 | /* |
952 | * uvm_map_entry_unwire: unwire a map entry | | 952 | * uvm_map_entry_unwire: unwire a map entry |
953 | * | | 953 | * |
954 | * => map should be locked by caller | | 954 | * => map should be locked by caller |
955 | */ | | 955 | */ |
956 | | | 956 | |
957 | static inline void | | 957 | static inline void |
958 | uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry) | | 958 | uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry) |
959 | { | | 959 | { |
960 | | | 960 | |
961 | entry->wired_count = 0; | | 961 | entry->wired_count = 0; |
962 | uvm_fault_unwire_locked(map, entry->start, entry->end); | | 962 | uvm_fault_unwire_locked(map, entry->start, entry->end); |
963 | } | | 963 | } |
964 | | | 964 | |
965 | | | 965 | |
966 | /* | | 966 | /* |
967 | * wrapper for calling amap_ref() | | 967 | * wrapper for calling amap_ref() |
968 | */ | | 968 | */ |
969 | static inline void | | 969 | static inline void |
970 | uvm_map_reference_amap(struct vm_map_entry *entry, int flags) | | 970 | uvm_map_reference_amap(struct vm_map_entry *entry, int flags) |
971 | { | | 971 | { |
972 | | | 972 | |
973 | amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff, | | 973 | amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff, |
974 | (entry->end - entry->start) >> PAGE_SHIFT, flags); | | 974 | (entry->end - entry->start) >> PAGE_SHIFT, flags); |
975 | } | | 975 | } |
976 | | | 976 | |
977 | | | 977 | |
978 | /* | | 978 | /* |
979 | * wrapper for calling amap_unref() | | 979 | * wrapper for calling amap_unref() |
980 | */ | | 980 | */ |
981 | static inline void | | 981 | static inline void |
982 | uvm_map_unreference_amap(struct vm_map_entry *entry, int flags) | | 982 | uvm_map_unreference_amap(struct vm_map_entry *entry, int flags) |
983 | { | | 983 | { |
984 | | | 984 | |
985 | amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff, | | 985 | amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff, |
986 | (entry->end - entry->start) >> PAGE_SHIFT, flags); | | 986 | (entry->end - entry->start) >> PAGE_SHIFT, flags); |
987 | } | | 987 | } |
988 | | | 988 | |
989 | | | 989 | |
990 | /* | | 990 | /* |
991 | * uvm_map_init: init mapping system at boot time. | | 991 | * uvm_map_init: init mapping system at boot time. |
992 | */ | | 992 | */ |
993 | | | 993 | |
994 | void | | 994 | void |
995 | uvm_map_init(void) | | 995 | uvm_map_init(void) |
996 | { | | 996 | { |
997 | #if defined(UVMHIST) | | 997 | #if defined(UVMHIST) |
998 | static struct uvm_history_ent maphistbuf[100]; | | 998 | static struct uvm_history_ent maphistbuf[100]; |
999 | static struct uvm_history_ent pdhistbuf[100]; | | 999 | static struct uvm_history_ent pdhistbuf[100]; |
1000 | #endif | | 1000 | #endif |
1001 | | | 1001 | |
1002 | /* | | 1002 | /* |
1003 | * first, init logging system. | | 1003 | * first, init logging system. |
1004 | */ | | 1004 | */ |
1005 | | | 1005 | |
1006 | UVMHIST_FUNC("uvm_map_init"); | | 1006 | UVMHIST_FUNC("uvm_map_init"); |
1007 | UVMHIST_INIT_STATIC(maphist, maphistbuf); | | 1007 | UVMHIST_INIT_STATIC(maphist, maphistbuf); |
1008 | UVMHIST_INIT_STATIC(pdhist, pdhistbuf); | | 1008 | UVMHIST_INIT_STATIC(pdhist, pdhistbuf); |
1009 | UVMHIST_CALLED(maphist); | | 1009 | UVMHIST_CALLED(maphist); |
1010 | UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0); | | 1010 | UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0); |
1011 | | | 1011 | |
1012 | /* | | 1012 | /* |
1013 | * initialize the global lock for kernel map entry. | | 1013 | * initialize the global lock for kernel map entry. |
1014 | */ | | 1014 | */ |
1015 | | | 1015 | |
1016 | mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM); | | 1016 | mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM); |
1017 | | | 1017 | |
1018 | /* | | 1018 | /* |
1019 | * initialize caches. | | 1019 | * initialize caches. |
1020 | */ | | 1020 | */ |
1021 | | | 1021 | |
1022 | pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry), | | 1022 | pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry), |
1023 | 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL); | | 1023 | 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL); |
1024 | pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace), | | 1024 | pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace), |
1025 | 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL); | | 1025 | 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL); |
1026 | } | | 1026 | } |
1027 | | | 1027 | |
1028 | /* | | 1028 | /* |
1029 | * clippers | | 1029 | * clippers |
1030 | */ | | 1030 | */ |
1031 | | | 1031 | |
1032 | /* | | 1032 | /* |
1033 | * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy. | | 1033 | * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy. |
1034 | */ | | 1034 | */ |
1035 | | | 1035 | |
1036 | static void | | 1036 | static void |
1037 | uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2, | | 1037 | uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2, |
1038 | vaddr_t splitat) | | 1038 | vaddr_t splitat) |
1039 | { | | 1039 | { |
1040 | vaddr_t adj; | | 1040 | vaddr_t adj; |
1041 | | | 1041 | |
1042 | KASSERT(entry1->start < splitat); | | 1042 | KASSERT(entry1->start < splitat); |
1043 | KASSERT(splitat < entry1->end); | | 1043 | KASSERT(splitat < entry1->end); |
1044 | | | 1044 | |
1045 | adj = splitat - entry1->start; | | 1045 | adj = splitat - entry1->start; |
1046 | entry1->end = entry2->start = splitat; | | 1046 | entry1->end = entry2->start = splitat; |
1047 | | | 1047 | |
1048 | if (entry1->aref.ar_amap) { | | 1048 | if (entry1->aref.ar_amap) { |
1049 | amap_splitref(&entry1->aref, &entry2->aref, adj); | | 1049 | amap_splitref(&entry1->aref, &entry2->aref, adj); |
1050 | } | | 1050 | } |
1051 | if (UVM_ET_ISSUBMAP(entry1)) { | | 1051 | if (UVM_ET_ISSUBMAP(entry1)) { |
1052 | /* ... unlikely to happen, but play it safe */ | | 1052 | /* ... unlikely to happen, but play it safe */ |
1053 | uvm_map_reference(entry1->object.sub_map); | | 1053 | uvm_map_reference(entry1->object.sub_map); |
1054 | } else if (UVM_ET_ISOBJ(entry1)) { | | 1054 | } else if (UVM_ET_ISOBJ(entry1)) { |
1055 | KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */ | | 1055 | KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */ |
1056 | entry2->offset += adj; | | 1056 | entry2->offset += adj; |
1057 | if (entry1->object.uvm_obj->pgops && | | 1057 | if (entry1->object.uvm_obj->pgops && |
1058 | entry1->object.uvm_obj->pgops->pgo_reference) | | 1058 | entry1->object.uvm_obj->pgops->pgo_reference) |
1059 | entry1->object.uvm_obj->pgops->pgo_reference( | | 1059 | entry1->object.uvm_obj->pgops->pgo_reference( |
1060 | entry1->object.uvm_obj); | | 1060 | entry1->object.uvm_obj); |
1061 | } | | 1061 | } |
1062 | } | | 1062 | } |
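
/*
 * A small worked example of the adjustment above (illustrative numbers
 * only): suppose entry1 initially covers [0x1000, 0x4000) with an object
 * offset of 0, and splitat is 0x2000.  Then adj = 0x2000 - 0x1000 = 0x1000,
 * entry1 becomes [0x1000, 0x2000) and entry2 becomes [0x2000, 0x4000) with
 * entry2->offset advanced by adj to 0x1000, so both halves keep referring
 * to the same backing pages.  Any amap reference is split at the same
 * relative offset by amap_splitref(), and an object or submap backing
 * gains an extra reference for the new entry.
 */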
1063 | | | 1063 | |
1064 | /* | | 1064 | /* |
1065 | * uvm_map_clip_start: ensure that the entry begins at or after | | 1065 | * uvm_map_clip_start: ensure that the entry begins at or after |
1066 | * the starting address, if it doesn't we split the entry. | | 1066 | * the starting address, if it doesn't we split the entry. |
1067 | * | | 1067 | * |
1068 | * => caller should use UVM_MAP_CLIP_START macro rather than calling | | 1068 | * => caller should use UVM_MAP_CLIP_START macro rather than calling |
1069 | * this directly | | 1069 | * this directly |
1070 | * => map must be locked by caller | | 1070 | * => map must be locked by caller |
1071 | */ | | 1071 | */ |
1072 | | | 1072 | |
1073 | void | | 1073 | void |
1074 | uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, | | 1074 | uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, |
1075 | vaddr_t start, struct uvm_mapent_reservation *umr) | | 1075 | vaddr_t start, struct uvm_mapent_reservation *umr) |
1076 | { | | 1076 | { |
1077 | struct vm_map_entry *new_entry; | | 1077 | struct vm_map_entry *new_entry; |
1078 | | | 1078 | |
1079 | /* uvm_map_simplify_entry(map, entry); */ /* XXX */ | | 1079 | /* uvm_map_simplify_entry(map, entry); */ /* XXX */ |
1080 | | | 1080 | |
1081 | uvm_map_check(map, "clip_start entry"); | | 1081 | uvm_map_check(map, "clip_start entry"); |
1082 | uvm_mapent_check(entry); | | 1082 | uvm_mapent_check(entry); |
1083 | | | 1083 | |
1084 | /* | | 1084 | /* |
1085 | * Split off the front portion. note that we must insert the new | | 1085 | * Split off the front portion. note that we must insert the new |
1086 | * entry BEFORE this one, so that this entry has the specified | | 1086 | * entry BEFORE this one, so that this entry has the specified |
1087 | * starting address. | | 1087 | * starting address. |
1088 | */ | | 1088 | */ |
1089 | new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); | | 1089 | new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
1090 | uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ | | 1090 | uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
1091 | uvm_mapent_splitadj(new_entry, entry, start); | | 1091 | uvm_mapent_splitadj(new_entry, entry, start); |
1092 | uvm_map_entry_link(map, entry->prev, new_entry); | | 1092 | uvm_map_entry_link(map, entry->prev, new_entry); |
1093 | | | 1093 | |
1094 | uvm_map_check(map, "clip_start leave"); | | 1094 | uvm_map_check(map, "clip_start leave"); |
1095 | } | | 1095 | } |
1096 | | | 1096 | |
1097 | /* | | 1097 | /* |
1098 | * uvm_map_clip_end: ensure that the entry ends at or before | | 1098 | * uvm_map_clip_end: ensure that the entry ends at or before |
1099 |  *	the ending address, if it doesn't we split the entry | | 1099 |  *	the ending address, if it doesn't we split the entry |
1100 | * | | 1100 | * |
1101 | * => caller should use UVM_MAP_CLIP_END macro rather than calling | | 1101 | * => caller should use UVM_MAP_CLIP_END macro rather than calling |
1102 | * this directly | | 1102 | * this directly |
1103 | * => map must be locked by caller | | 1103 | * => map must be locked by caller |
1104 | */ | | 1104 | */ |
1105 | | | 1105 | |
1106 | void | | 1106 | void |
1107 | uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end, | | 1107 | uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end, |
1108 | struct uvm_mapent_reservation *umr) | | 1108 | struct uvm_mapent_reservation *umr) |
1109 | { | | 1109 | { |
1110 | struct vm_map_entry *new_entry; | | 1110 | struct vm_map_entry *new_entry; |
1111 | | | 1111 | |
1112 | uvm_map_check(map, "clip_end entry"); | | 1112 | uvm_map_check(map, "clip_end entry"); |
1113 | uvm_mapent_check(entry); | | 1113 | uvm_mapent_check(entry); |
1114 | | | 1114 | |
1115 | /* | | 1115 | /* |
1116 | * Create a new entry and insert it | | 1116 | * Create a new entry and insert it |
1117 | * AFTER the specified entry | | 1117 | * AFTER the specified entry |
1118 | */ | | 1118 | */ |
1119 | new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); | | 1119 | new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
1120 | uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ | | 1120 | uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
1121 | uvm_mapent_splitadj(entry, new_entry, end); | | 1121 | uvm_mapent_splitadj(entry, new_entry, end); |
1122 | uvm_map_entry_link(map, entry, new_entry); | | 1122 | uvm_map_entry_link(map, entry, new_entry); |
1123 | | | 1123 | |
1124 | uvm_map_check(map, "clip_end leave"); | | 1124 | uvm_map_check(map, "clip_end leave"); |
1125 | } | | 1125 | } |
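
/*
 * A hedged usage sketch (the loop below is illustrative and not code from
 * this file): callers that operate on an arbitrary address range
 * [start, end) normally go through the UVM_MAP_CLIP_START/UVM_MAP_CLIP_END
 * macros mentioned above, which only split an entry when the boundary
 * really falls inside it.  With the map locked, such a caller might look
 * roughly like:
 *
 *	UVM_MAP_CLIP_START(map, entry, start, umr);
 *	while (entry != &map->header && entry->start < end) {
 *		UVM_MAP_CLIP_END(map, entry, end, umr);
 *		... operate on [entry->start, entry->end) ...
 *		entry = entry->next;
 *	}
 *
 * The macro argument lists are assumed here to mirror the functions above.
 */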
1126 | | | 1126 | |
1127 | static void | | 1127 | static void |
1128 | vm_map_drain(struct vm_map *map, uvm_flag_t flags) | | 1128 | vm_map_drain(struct vm_map *map, uvm_flag_t flags) |
1129 | { | | 1129 | { |
1130 | | | 1130 | |
1131 | if (!VM_MAP_IS_KERNEL(map)) { | | 1131 | if (!VM_MAP_IS_KERNEL(map)) { |
1132 | return; | | 1132 | return; |
1133 | } | | 1133 | } |
1134 | | | 1134 | |
1135 | uvm_km_va_drain(map, flags); | | 1135 | uvm_km_va_drain(map, flags); |
1136 | } | | 1136 | } |
1137 | | | 1137 | |
1138 | /* | | 1138 | /* |
1139 | * M A P - m a i n e n t r y p o i n t | | 1139 | * M A P - m a i n e n t r y p o i n t |
1140 | */ | | 1140 | */ |
1141 | /* | | 1141 | /* |
1142 | * uvm_map: establish a valid mapping in a map | | 1142 | * uvm_map: establish a valid mapping in a map |
1143 | * | | 1143 | * |
1144 | * => assume startp is page aligned. | | 1144 | * => assume startp is page aligned. |
1145 | * => assume size is a multiple of PAGE_SIZE. | | 1145 | * => assume size is a multiple of PAGE_SIZE. |
1146 | * => assume sys_mmap provides enough of a "hint" to have us skip | | 1146 | * => assume sys_mmap provides enough of a "hint" to have us skip |
1147 | * over text/data/bss area. | | 1147 | * over text/data/bss area. |
1148 | * => map must be unlocked (we will lock it) | | 1148 | * => map must be unlocked (we will lock it) |
1149 | * => <uobj,uoffset> value meanings (4 cases): | | 1149 | * => <uobj,uoffset> value meanings (4 cases): |
1150 | * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER | | 1150 | * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER |
1151 | * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER | | 1151 | * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER |
1152 | * [3] <uobj,uoffset> == normal mapping | | 1152 | * [3] <uobj,uoffset> == normal mapping |
1153 | * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA | | 1153 | * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA |
1154 | * | | 1154 | * |
1155 | * case [4] is for kernel mappings where we don't know the offset until | | 1155 | * case [4] is for kernel mappings where we don't know the offset until |
1156 | * we've found a virtual address. note that kernel object offsets are | | 1156 | * we've found a virtual address. note that kernel object offsets are |
1157 | * always relative to vm_map_min(kernel_map). | | 1157 | * always relative to vm_map_min(kernel_map). |
1158 | * | | 1158 | * |
1159 | * => if `align' is non-zero, we align the virtual address to the specified | | 1159 | * => if `align' is non-zero, we align the virtual address to the specified |
1160 | * alignment. | | 1160 | * alignment. |
1161 | * this is provided as a mechanism for large pages. | | 1161 | * this is provided as a mechanism for large pages. |
1162 | * | | 1162 | * |
1163 | * => XXXCDC: need way to map in external amap? | | 1163 | * => XXXCDC: need way to map in external amap? |
1164 | */ | | 1164 | */ |
1165 | | | 1165 | |
1166 | int | | 1166 | int |
1167 | uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size, | | 1167 | uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size, |
1168 | struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags) | | 1168 | struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags) |
1169 | { | | 1169 | { |
1170 | struct uvm_map_args args; | | 1170 | struct uvm_map_args args; |
1171 | struct vm_map_entry *new_entry; | | 1171 | struct vm_map_entry *new_entry; |
1172 | int error; | | 1172 | int error; |
1173 | | | 1173 | |
1174 | KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map)); | | 1174 | KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map)); |
1175 | KASSERT((size & PAGE_MASK) == 0); | | 1175 | KASSERT((size & PAGE_MASK) == 0); |
1176 | | | 1176 | |
1177 | /* | | 1177 | /* |
1178 | * for pager_map, allocate the new entry first to avoid sleeping | | 1178 | * for pager_map, allocate the new entry first to avoid sleeping |
1179 | * for memory while we have the map locked. | | 1179 | * for memory while we have the map locked. |
1180 | * | | 1180 | * |
1181 | * Also, because we allocate entries for in-kernel maps | | 1181 | * Also, because we allocate entries for in-kernel maps |
1182 | * a bit differently (cf. uvm_kmapent_alloc/free), we need to | | 1182 | * a bit differently (cf. uvm_kmapent_alloc/free), we need to |
1183 | * allocate them before locking the map. | | 1183 | * allocate them before locking the map. |
1184 | */ | | 1184 | */ |
1185 | | | 1185 | |
1186 | new_entry = NULL; | | 1186 | new_entry = NULL; |
1187 | if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) || | | 1187 | if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) || |
1188 | map == pager_map) { | | 1188 | map == pager_map) { |
1189 | new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT)); | | 1189 | new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT)); |
1190 | if (__predict_false(new_entry == NULL)) | | 1190 | if (__predict_false(new_entry == NULL)) |
1191 | return ENOMEM; | | 1191 | return ENOMEM; |
1192 | if (flags & UVM_FLAG_QUANTUM) | | 1192 | if (flags & UVM_FLAG_QUANTUM) |
1193 | new_entry->flags |= UVM_MAP_QUANTUM; | | 1193 | new_entry->flags |= UVM_MAP_QUANTUM; |
1194 | } | | 1194 | } |
1195 | if (map == pager_map) | | 1195 | if (map == pager_map) |
1196 | flags |= UVM_FLAG_NOMERGE; | | 1196 | flags |= UVM_FLAG_NOMERGE; |
1197 | | | 1197 | |
1198 | error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align, | | 1198 | error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align, |
1199 | flags, &args); | | 1199 | flags, &args); |
1200 | if (!error) { | | 1200 | if (!error) { |
1201 | error = uvm_map_enter(map, &args, new_entry); | | 1201 | error = uvm_map_enter(map, &args, new_entry); |
1202 | *startp = args.uma_start; | | 1202 | *startp = args.uma_start; |
1203 | } else if (new_entry) { | | 1203 | } else if (new_entry) { |
1204 | uvm_mapent_free(new_entry); | | 1204 | uvm_mapent_free(new_entry); |
1205 | } | | 1205 | } |
1206 | | | 1206 | |
1207 | #if defined(DEBUG) | | 1207 | #if defined(DEBUG) |
1208 | if (!error && VM_MAP_IS_KERNEL(map)) { | | 1208 | if (!error && VM_MAP_IS_KERNEL(map)) { |
1209 | uvm_km_check_empty(map, *startp, *startp + size); | | 1209 | uvm_km_check_empty(map, *startp, *startp + size); |
1210 | } | | 1210 | } |
1211 | #endif /* defined(DEBUG) */ | | 1211 | #endif /* defined(DEBUG) */ |
1212 | | | 1212 | |
1213 | return error; | | 1213 | return error; |
1214 | } | | 1214 | } |
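
/*
 * A hedged usage sketch (illustrative only; the constants and macros used
 * come from the wider UVM headers, not this file): a plain anonymous
 * kernel mapping, i.e. case [2] in the comment above, might be requested
 * roughly as
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *	int error = uvm_map(kernel_map, &va, round_page(len), NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));
 *
 * On success the chosen virtual address is written back through the
 * startp argument (&va here); on failure an errno such as ENOMEM is
 * returned and nothing has been entered in the map.
 */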
1215 | | | 1215 | |
1216 | int | | 1216 | int |
1217 | uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size, | | 1217 | uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size, |
1218 | struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags, | | 1218 | struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags, |
1219 | struct uvm_map_args *args) | | 1219 | struct uvm_map_args *args) |
1220 | { | | 1220 | { |
1221 | struct vm_map_entry *prev_entry; | | 1221 | struct vm_map_entry *prev_entry; |
1222 | vm_prot_t prot = UVM_PROTECTION(flags); | | 1222 | vm_prot_t prot = UVM_PROTECTION(flags); |
1223 | vm_prot_t maxprot = UVM_MAXPROTECTION(flags); | | 1223 | vm_prot_t maxprot = UVM_MAXPROTECTION(flags); |
1224 | | | 1224 | |
1225 | UVMHIST_FUNC("uvm_map_prepare"); | | 1225 | UVMHIST_FUNC("uvm_map_prepare"); |
1226 | UVMHIST_CALLED(maphist); | | 1226 | UVMHIST_CALLED(maphist); |
1227 | | | 1227 | |
1228 | UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", | | 1228 | UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", |
1229 | map, start, size, flags); | | 1229 | map, start, size, flags); |
1230 | UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); | | 1230 | UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); |
1231 | | | 1231 | |
1232 | /* | | 1232 | /* |
1233 | 	 * detect a common device driver bug. | | 1233 | 	 * detect a common device driver bug. |
1234 | */ | | 1234 | */ |
1235 | | | 1235 | |
1236 | KASSERT(doing_shutdown || curlwp != NULL || | | 1236 | KASSERT(doing_shutdown || curlwp != NULL || |
1237 | (map->flags & VM_MAP_INTRSAFE)); | | 1237 | (map->flags & VM_MAP_INTRSAFE)); |
1238 | | | 1238 | |
1239 | /* | | 1239 | /* |
1240 | * zero-sized mapping doesn't make any sense. | | 1240 | * zero-sized mapping doesn't make any sense. |
1241 | */ | | 1241 | */ |
1242 | KASSERT(size > 0); | | 1242 | KASSERT(size > 0); |
1243 | | | 1243 | |
1244 | KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0); | | 1244 | KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0); |
1245 | | | 1245 | |
1246 | uvm_map_check(map, "map entry"); | | 1246 | uvm_map_check(map, "map entry"); |
1247 | | | 1247 | |
1248 | /* | | 1248 | /* |
1249 | * check sanity of protection code | | 1249 | * check sanity of protection code |
1250 | */ | | 1250 | */ |
1251 | | | 1251 | |
1252 | if ((prot & maxprot) != prot) { | | 1252 | if ((prot & maxprot) != prot) { |
1253 | UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x", | | 1253 | UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x", |
1254 | prot, maxprot,0,0); | | 1254 | prot, maxprot,0,0); |
1255 | return EACCES; | | 1255 | return EACCES; |
1256 | } | | 1256 | } |
1257 | | | 1257 | |
1258 | /* | | 1258 | /* |
1259 | * figure out where to put new VM range | | 1259 | * figure out where to put new VM range |
1260 | */ | | 1260 | */ |
1261 | | | 1261 | |
1262 | retry: | | 1262 | retry: |
1263 | if (vm_map_lock_try(map) == false) { | | 1263 | if (vm_map_lock_try(map) == false) { |
1264 | if ((flags & UVM_FLAG_TRYLOCK) != 0 && | | 1264 | if ((flags & UVM_FLAG_TRYLOCK) != 0 && |
1265 | (map->flags & VM_MAP_INTRSAFE) == 0) { | | 1265 | (map->flags & VM_MAP_INTRSAFE) == 0) { |
1266 | return EAGAIN; | | 1266 | return EAGAIN; |
1267 | } | | 1267 | } |
1268 | vm_map_lock(map); /* could sleep here */ | | 1268 | vm_map_lock(map); /* could sleep here */ |
1269 | } | | 1269 | } |
1270 | prev_entry = uvm_map_findspace(map, start, size, &start, | | 1270 | prev_entry = uvm_map_findspace(map, start, size, &start, |
1271 | uobj, uoffset, align, flags); | | 1271 | uobj, uoffset, align, flags); |
1272 | if (prev_entry == NULL) { | | 1272 | if (prev_entry == NULL) { |
1273 | unsigned int timestamp; | | 1273 | unsigned int timestamp; |
1274 | | | 1274 | |
1275 | timestamp = map->timestamp; | | 1275 | timestamp = map->timestamp; |
1276 | UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", | | 1276 | UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", |
1277 | timestamp,0,0,0); | | 1277 | timestamp,0,0,0); |
1278 | map->flags |= VM_MAP_WANTVA; | | 1278 | map->flags |= VM_MAP_WANTVA; |
1279 | vm_map_unlock(map); | | 1279 | vm_map_unlock(map); |
1280 | | | 1280 | |
1281 | /* | | 1281 | /* |
1282 | 		 * try to reclaim kva and wait until someone unmaps. | | 1282 | 		 * try to reclaim kva and wait until someone unmaps. |
1283 | * fragile locking here, so we awaken every second to | | 1283 | * fragile locking here, so we awaken every second to |
1284 | * recheck the condition. | | 1284 | * recheck the condition. |
1285 | */ | | 1285 | */ |
1286 | | | 1286 | |
1287 | vm_map_drain(map, flags); | | 1287 | vm_map_drain(map, flags); |
1288 | | | 1288 | |
1289 | mutex_enter(&map->misc_lock); | | 1289 | mutex_enter(&map->misc_lock); |
1290 | while ((map->flags & VM_MAP_WANTVA) != 0 && | | 1290 | while ((map->flags & VM_MAP_WANTVA) != 0 && |
1291 | map->timestamp == timestamp) { | | 1291 | map->timestamp == timestamp) { |
1292 | if ((flags & UVM_FLAG_WAITVA) == 0) { | | 1292 | if ((flags & UVM_FLAG_WAITVA) == 0) { |
1293 | mutex_exit(&map->misc_lock); | | 1293 | mutex_exit(&map->misc_lock); |
1294 | UVMHIST_LOG(maphist, | | 1294 | UVMHIST_LOG(maphist, |
1295 | "<- uvm_map_findspace failed!", 0,0,0,0); | | 1295 | "<- uvm_map_findspace failed!", 0,0,0,0); |
1296 | return ENOMEM; | | 1296 | return ENOMEM; |
1297 | } else { | | 1297 | } else { |
1298 | cv_timedwait(&map->cv, &map->misc_lock, hz); | | 1298 | cv_timedwait(&map->cv, &map->misc_lock, hz); |
1299 | } | | 1299 | } |
1300 | } | | 1300 | } |
1301 | mutex_exit(&map->misc_lock); | | 1301 | mutex_exit(&map->misc_lock); |
1302 | goto retry; | | 1302 | goto retry; |
1303 | } | | 1303 | } |
1304 | | | 1304 | |
1305 | #ifdef PMAP_GROWKERNEL | | 1305 | #ifdef PMAP_GROWKERNEL |
1306 | /* | | 1306 | /* |
1307 | * If the kernel pmap can't map the requested space, | | 1307 | * If the kernel pmap can't map the requested space, |
1308 | * then allocate more resources for it. | | 1308 | * then allocate more resources for it. |
1309 | */ | | 1309 | */ |
1310 | if (map == kernel_map && uvm_maxkaddr < (start + size)) | | 1310 | if (map == kernel_map && uvm_maxkaddr < (start + size)) |
1311 | uvm_maxkaddr = pmap_growkernel(start + size); | | 1311 | uvm_maxkaddr = pmap_growkernel(start + size); |
1312 | #endif | | 1312 | #endif |
1313 | | | 1313 | |
1314 | UVMMAP_EVCNT_INCR(map_call); | | 1314 | UVMMAP_EVCNT_INCR(map_call); |
1315 | | | 1315 | |
1316 | /* | | 1316 | /* |
1317 | * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER | | 1317 | * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER |
1318 | * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in | | 1318 | * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in |
1319 | * either case we want to zero it before storing it in the map entry | | 1319 | * either case we want to zero it before storing it in the map entry |
1320 | * (because it looks strange and confusing when debugging...) | | 1320 | * (because it looks strange and confusing when debugging...) |
1321 | * | | 1321 | * |
1322 | * if uobj is not null | | 1322 | * if uobj is not null |
1323 | * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping | | 1323 | * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping |
1324 | * and we do not need to change uoffset. | | 1324 | * and we do not need to change uoffset. |
1325 | * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset | | 1325 | * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset |
1326 | * now (based on the starting address of the map). this case is | | 1326 | * now (based on the starting address of the map). this case is |
1327 | * for kernel object mappings where we don't know the offset until | | 1327 | * for kernel object mappings where we don't know the offset until |
1328 | * the virtual address is found (with uvm_map_findspace). the | | 1328 | * the virtual address is found (with uvm_map_findspace). the |
1329 | * offset is the distance we are from the start of the map. | | 1329 | * offset is the distance we are from the start of the map. |
1330 | */ | | 1330 | */ |
1331 | | | 1331 | |
1332 | if (uobj == NULL) { | | 1332 | if (uobj == NULL) { |
1333 | uoffset = 0; | | 1333 | uoffset = 0; |
1334 | } else { | | 1334 | } else { |
1335 | if (uoffset == UVM_UNKNOWN_OFFSET) { | | 1335 | if (uoffset == UVM_UNKNOWN_OFFSET) { |
1336 | KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj)); | | 1336 | KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj)); |
1337 | uoffset = start - vm_map_min(kernel_map); | | 1337 | uoffset = start - vm_map_min(kernel_map); |
1338 | } | | 1338 | } |
1339 | } | | 1339 | } |
1340 | | | 1340 | |
1341 | args->uma_flags = flags; | | 1341 | args->uma_flags = flags; |
1342 | args->uma_prev = prev_entry; | | 1342 | args->uma_prev = prev_entry; |
1343 | args->uma_start = start; | | 1343 | args->uma_start = start; |
1344 | args->uma_size = size; | | 1344 | args->uma_size = size; |
1345 | args->uma_uobj = uobj; | | 1345 | args->uma_uobj = uobj; |
1346 | args->uma_uoffset = uoffset; | | 1346 | args->uma_uoffset = uoffset; |
1347 | | | 1347 | |
1348 | UVMHIST_LOG(maphist, "<- done!", 0,0,0,0); | | 1348 | UVMHIST_LOG(maphist, "<- done!", 0,0,0,0); |
1349 | return 0; | | 1349 | return 0; |
1350 | } | | 1350 | } |
1351 | | | 1351 | |
1352 | int | | 1352 | int |
1353 | uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args, | | 1353 | uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args, |
1354 | struct vm_map_entry *new_entry) | | 1354 | struct vm_map_entry *new_entry) |
1355 | { | | 1355 | { |
1356 | struct vm_map_entry *prev_entry = args->uma_prev; | | 1356 | struct vm_map_entry *prev_entry = args->uma_prev; |
1357 | struct vm_map_entry *dead = NULL; | | 1357 | struct vm_map_entry *dead = NULL; |
1358 | | | 1358 | |
1359 | const uvm_flag_t flags = args->uma_flags; | | 1359 | const uvm_flag_t flags = args->uma_flags; |
1360 | const vm_prot_t prot = UVM_PROTECTION(flags); | | 1360 | const vm_prot_t prot = UVM_PROTECTION(flags); |
1361 | const vm_prot_t maxprot = UVM_MAXPROTECTION(flags); | | 1361 | const vm_prot_t maxprot = UVM_MAXPROTECTION(flags); |
1362 | const vm_inherit_t inherit = UVM_INHERIT(flags); | | 1362 | const vm_inherit_t inherit = UVM_INHERIT(flags); |
1363 | const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ? | | 1363 | const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ? |
1364 | AMAP_EXTEND_NOWAIT : 0; | | 1364 | AMAP_EXTEND_NOWAIT : 0; |
1365 | const int advice = UVM_ADVICE(flags); | | 1365 | const int advice = UVM_ADVICE(flags); |
1366 | const int meflagval = (flags & UVM_FLAG_QUANTUM) ? | | 1366 | const int meflagval = (flags & UVM_FLAG_QUANTUM) ? |
1367 | UVM_MAP_QUANTUM : 0; | | 1367 | UVM_MAP_QUANTUM : 0; |
1368 | | | 1368 | |
1369 | vaddr_t start = args->uma_start; | | 1369 | vaddr_t start = args->uma_start; |
1370 | vsize_t size = args->uma_size; | | 1370 | vsize_t size = args->uma_size; |
1371 | struct uvm_object *uobj = args->uma_uobj; | | 1371 | struct uvm_object *uobj = args->uma_uobj; |
1372 | voff_t uoffset = args->uma_uoffset; | | 1372 | voff_t uoffset = args->uma_uoffset; |
1373 | | | 1373 | |
1374 | const int kmap = (vm_map_pmap(map) == pmap_kernel()); | | 1374 | const int kmap = (vm_map_pmap(map) == pmap_kernel()); |
1375 | int merged = 0; | | 1375 | int merged = 0; |
1376 | int error; | | 1376 | int error; |
1377 | int newetype; | | 1377 | int newetype; |
1378 | | | 1378 | |
1379 | UVMHIST_FUNC("uvm_map_enter"); | | 1379 | UVMHIST_FUNC("uvm_map_enter"); |
1380 | UVMHIST_CALLED(maphist); | | 1380 | UVMHIST_CALLED(maphist); |
1381 | | | 1381 | |
1382 | UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", | | 1382 | UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", |
1383 | map, start, size, flags); | | 1383 | map, start, size, flags); |
1384 | UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); | | 1384 | UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); |
1385 | | | 1385 | |
1386 | KASSERT(map->hint == prev_entry); /* bimerge case assumes this */ | | 1386 | KASSERT(map->hint == prev_entry); /* bimerge case assumes this */ |
1387 | | | 1387 | |
1388 | if (flags & UVM_FLAG_QUANTUM) { | | 1388 | if (flags & UVM_FLAG_QUANTUM) { |
1389 | KASSERT(new_entry); | | 1389 | KASSERT(new_entry); |
1390 | KASSERT(new_entry->flags & UVM_MAP_QUANTUM); | | 1390 | KASSERT(new_entry->flags & UVM_MAP_QUANTUM); |
1391 | } | | 1391 | } |
1392 | | | 1392 | |
1393 | if (uobj) | | 1393 | if (uobj) |
1394 | newetype = UVM_ET_OBJ; | | 1394 | newetype = UVM_ET_OBJ; |
1395 | else | | 1395 | else |
1396 | newetype = 0; | | 1396 | newetype = 0; |
1397 | | | 1397 | |
1398 | if (flags & UVM_FLAG_COPYONW) { | | 1398 | if (flags & UVM_FLAG_COPYONW) { |
1399 | newetype |= UVM_ET_COPYONWRITE; | | 1399 | newetype |= UVM_ET_COPYONWRITE; |
1400 | if ((flags & UVM_FLAG_OVERLAY) == 0) | | 1400 | if ((flags & UVM_FLAG_OVERLAY) == 0) |
1401 | newetype |= UVM_ET_NEEDSCOPY; | | 1401 | newetype |= UVM_ET_NEEDSCOPY; |
1402 | } | | 1402 | } |
1403 | | | 1403 | |
1404 | /* | | 1404 | /* |
1405 | 	 * try to insert in map by extending the previous entry, if possible. | | 1405 | 	 * try to insert in map by extending the previous entry, if possible. |
1406 | 	 * XXX: we don't try to pull back the next entry. might be useful | | 1406 | 	 * XXX: we don't try to pull back the next entry. might be useful |
1407 | * for a stack, but we are currently allocating our stack in advance. | | 1407 | * for a stack, but we are currently allocating our stack in advance. |
1408 | */ | | 1408 | */ |
1409 | | | 1409 | |
1410 | if (flags & UVM_FLAG_NOMERGE) | | 1410 | if (flags & UVM_FLAG_NOMERGE) |
1411 | goto nomerge; | | 1411 | goto nomerge; |
1412 | | | 1412 | |
1413 | if (prev_entry->end == start && | | 1413 | if (prev_entry->end == start && |
1414 | prev_entry != &map->header && | | 1414 | prev_entry != &map->header && |
1415 | UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval, | | 1415 | UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval, |
1416 | prot, maxprot, inherit, advice, 0)) { | | 1416 | prot, maxprot, inherit, advice, 0)) { |
1417 | | | 1417 | |
1418 | if (uobj && prev_entry->offset + | | 1418 | if (uobj && prev_entry->offset + |
1419 | (prev_entry->end - prev_entry->start) != uoffset) | | 1419 | (prev_entry->end - prev_entry->start) != uoffset) |
1420 | goto forwardmerge; | | 1420 | goto forwardmerge; |
1421 | | | 1421 | |
1422 | /* | | 1422 | /* |
1423 | * can't extend a shared amap. note: no need to lock amap to | | 1423 | * can't extend a shared amap. note: no need to lock amap to |
1424 | * look at refs since we don't care about its exact value. | | 1424 | * look at refs since we don't care about its exact value. |
1425 | 		 * if it is one (i.e. we hold the only reference) it will stay there | | 1425 | 		 * if it is one (i.e. we hold the only reference) it will stay there |
1426 | */ | | 1426 | */ |
1427 | | | 1427 | |
1428 | if (prev_entry->aref.ar_amap && | | 1428 | if (prev_entry->aref.ar_amap && |
1429 | amap_refs(prev_entry->aref.ar_amap) != 1) { | | 1429 | amap_refs(prev_entry->aref.ar_amap) != 1) { |
1430 | goto forwardmerge; | | 1430 | goto forwardmerge; |
1431 | } | | 1431 | } |
1432 | | | 1432 | |
1433 | if (prev_entry->aref.ar_amap) { | | 1433 | if (prev_entry->aref.ar_amap) { |
1434 | error = amap_extend(prev_entry, size, | | 1434 | error = amap_extend(prev_entry, size, |
1435 | amapwaitflag | AMAP_EXTEND_FORWARDS); | | 1435 | amapwaitflag | AMAP_EXTEND_FORWARDS); |
1436 | if (error) | | 1436 | if (error) |
1437 | goto nomerge; | | 1437 | goto nomerge; |
1438 | } | | 1438 | } |
1439 | | | 1439 | |
1440 | if (kmap) { | | 1440 | if (kmap) { |
1441 | UVMMAP_EVCNT_INCR(kbackmerge); | | 1441 | UVMMAP_EVCNT_INCR(kbackmerge); |
1442 | } else { | | 1442 | } else { |
1443 | UVMMAP_EVCNT_INCR(ubackmerge); | | 1443 | UVMMAP_EVCNT_INCR(ubackmerge); |
1444 | } | | 1444 | } |
1445 | UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0); | | 1445 | UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0); |
1446 | | | 1446 | |
1447 | /* | | 1447 | /* |
1448 | * drop our reference to uobj since we are extending a reference | | 1448 | * drop our reference to uobj since we are extending a reference |
1449 | * that we already have (the ref count can not drop to zero). | | 1449 | * that we already have (the ref count can not drop to zero). |
1450 | */ | | 1450 | */ |
1451 | | | 1451 | |
1452 | if (uobj && uobj->pgops->pgo_detach) | | 1452 | if (uobj && uobj->pgops->pgo_detach) |
1453 | uobj->pgops->pgo_detach(uobj); | | 1453 | uobj->pgops->pgo_detach(uobj); |
1454 | | | 1454 | |
1455 | /* | | 1455 | /* |
1456 | * Now that we've merged the entries, note that we've grown | | 1456 | * Now that we've merged the entries, note that we've grown |
1457 | * and our gap has shrunk. Then fix the tree. | | 1457 | * and our gap has shrunk. Then fix the tree. |
1458 | */ | | 1458 | */ |
1459 | prev_entry->end += size; | | 1459 | prev_entry->end += size; |
1460 | prev_entry->gap -= size; | | 1460 | prev_entry->gap -= size; |
1461 | uvm_rb_fixup(map, prev_entry); | | 1461 | uvm_rb_fixup(map, prev_entry); |
1462 | | | 1462 | |
1463 | uvm_map_check(map, "map backmerged"); | | 1463 | uvm_map_check(map, "map backmerged"); |
1464 | | | 1464 | |
1465 | UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); | | 1465 | UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); |
1466 | merged++; | | 1466 | merged++; |
1467 | } | | 1467 | } |
1468 | | | 1468 | |
1469 | forwardmerge: | | 1469 | forwardmerge: |
1470 | if (prev_entry->next->start == (start + size) && | | 1470 | if (prev_entry->next->start == (start + size) && |
1471 | prev_entry->next != &map->header && | | 1471 | prev_entry->next != &map->header && |
1472 | UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval, | | 1472 | UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval, |
1473 | prot, maxprot, inherit, advice, 0)) { | | 1473 | prot, maxprot, inherit, advice, 0)) { |
1474 | | | 1474 | |
1475 | if (uobj && prev_entry->next->offset != uoffset + size) | | 1475 | if (uobj && prev_entry->next->offset != uoffset + size) |
1476 | goto nomerge; | | 1476 | goto nomerge; |
1477 | | | 1477 | |
1478 | /* | | 1478 | /* |
1479 | * can't extend a shared amap. note: no need to lock amap to | | 1479 | * can't extend a shared amap. note: no need to lock amap to |
1480 | * look at refs since we don't care about its exact value. | | 1480 | * look at refs since we don't care about its exact value. |
1481 | 		 * if it is one (i.e. we hold the only reference) it will stay there. | | 1481 | 		 * if it is one (i.e. we hold the only reference) it will stay there. |
1482 | * | | 1482 | * |
1483 | * note that we also can't merge two amaps, so if we | | 1483 | * note that we also can't merge two amaps, so if we |
1484 | * merged with the previous entry which has an amap, | | 1484 | * merged with the previous entry which has an amap, |
1485 | * and the next entry also has an amap, we give up. | | 1485 | * and the next entry also has an amap, we give up. |
1486 | * | | 1486 | * |
1487 | * Interesting cases: | | 1487 | * Interesting cases: |
1488 | * amap, new, amap -> give up second merge (single fwd extend) | | 1488 | * amap, new, amap -> give up second merge (single fwd extend) |
1489 | * amap, new, none -> double forward extend (extend again here) | | 1489 | * amap, new, none -> double forward extend (extend again here) |
1490 | * none, new, amap -> double backward extend (done here) | | 1490 | * none, new, amap -> double backward extend (done here) |
1491 | * uobj, new, amap -> single backward extend (done here) | | 1491 | * uobj, new, amap -> single backward extend (done here) |
1492 | * | | 1492 | * |
1493 | * XXX should we attempt to deal with someone refilling | | 1493 | * XXX should we attempt to deal with someone refilling |
1494 | * the deallocated region between two entries that are | | 1494 | * the deallocated region between two entries that are |
1495 | * backed by the same amap (ie, arefs is 2, "prev" and | | 1495 | * backed by the same amap (ie, arefs is 2, "prev" and |
1496 | * "next" refer to it, and adding this allocation will | | 1496 | * "next" refer to it, and adding this allocation will |
1497 | * close the hole, thus restoring arefs to 1 and | | 1497 | * close the hole, thus restoring arefs to 1 and |
1498 | * deallocating the "next" vm_map_entry)? -- @@@ | | 1498 | * deallocating the "next" vm_map_entry)? -- @@@ |
1499 | */ | | 1499 | */ |
1500 | | | 1500 | |
1501 | if (prev_entry->next->aref.ar_amap && | | 1501 | if (prev_entry->next->aref.ar_amap && |
1502 | (amap_refs(prev_entry->next->aref.ar_amap) != 1 || | | 1502 | (amap_refs(prev_entry->next->aref.ar_amap) != 1 || |
1503 | (merged && prev_entry->aref.ar_amap))) { | | 1503 | (merged && prev_entry->aref.ar_amap))) { |
1504 | goto nomerge; | | 1504 | goto nomerge; |
1505 | } | | 1505 | } |
1506 | | | 1506 | |
1507 | if (merged) { | | 1507 | if (merged) { |
1508 | /* | | 1508 | /* |
1509 | * Try to extend the amap of the previous entry to | | 1509 | * Try to extend the amap of the previous entry to |
1510 | * cover the next entry as well. If it doesn't work | | 1510 | * cover the next entry as well. If it doesn't work |
1511 | * just skip on, don't actually give up, since we've | | 1511 | * just skip on, don't actually give up, since we've |
1512 | * already completed the back merge. | | 1512 | * already completed the back merge. |
1513 | */ | | 1513 | */ |
1514 | if (prev_entry->aref.ar_amap) { | | 1514 | if (prev_entry->aref.ar_amap) { |
1515 | if (amap_extend(prev_entry, | | 1515 | if (amap_extend(prev_entry, |
1516 | prev_entry->next->end - | | 1516 | prev_entry->next->end - |
1517 | prev_entry->next->start, | | 1517 | prev_entry->next->start, |
1518 | amapwaitflag | AMAP_EXTEND_FORWARDS)) | | 1518 | amapwaitflag | AMAP_EXTEND_FORWARDS)) |
1519 | goto nomerge; | | 1519 | goto nomerge; |
1520 | } | | 1520 | } |
1521 | | | 1521 | |
1522 | /* | | 1522 | /* |
1523 | * Try to extend the amap of the *next* entry | | 1523 | * Try to extend the amap of the *next* entry |
1524 | * back to cover the new allocation *and* the | | 1524 | * back to cover the new allocation *and* the |
1525 | * previous entry as well (the previous merge | | 1525 | * previous entry as well (the previous merge |
1526 | * didn't have an amap already otherwise we | | 1526 | * didn't have an amap already otherwise we |
1527 | * wouldn't be checking here for an amap). If | | 1527 | * wouldn't be checking here for an amap). If |
1528 | * it doesn't work just skip on, again, don't | | 1528 | * it doesn't work just skip on, again, don't |
1529 | * actually give up, since we've already | | 1529 | * actually give up, since we've already |
1530 | * completed the back merge. | | 1530 | * completed the back merge. |
1531 | */ | | 1531 | */ |
1532 | else if (prev_entry->next->aref.ar_amap) { | | 1532 | else if (prev_entry->next->aref.ar_amap) { |
1533 | if (amap_extend(prev_entry->next, | | 1533 | if (amap_extend(prev_entry->next, |
1534 | prev_entry->end - | | 1534 | prev_entry->end - |
1535 | prev_entry->start, | | 1535 | prev_entry->start, |
1536 | amapwaitflag | AMAP_EXTEND_BACKWARDS)) | | 1536 | amapwaitflag | AMAP_EXTEND_BACKWARDS)) |
1537 | goto nomerge; | | 1537 | goto nomerge; |
1538 | } | | 1538 | } |
1539 | } else { | | 1539 | } else { |
1540 | /* | | 1540 | /* |
1541 | * Pull the next entry's amap backwards to cover this | | 1541 | * Pull the next entry's amap backwards to cover this |
1542 | * new allocation. | | 1542 | * new allocation. |
1543 | */ | | 1543 | */ |
1544 | if (prev_entry->next->aref.ar_amap) { | | 1544 | if (prev_entry->next->aref.ar_amap) { |
1545 | error = amap_extend(prev_entry->next, size, | | 1545 | error = amap_extend(prev_entry->next, size, |
1546 | amapwaitflag | AMAP_EXTEND_BACKWARDS); | | 1546 | amapwaitflag | AMAP_EXTEND_BACKWARDS); |
1547 | if (error) | | 1547 | if (error) |
1548 | goto nomerge; | | 1548 | goto nomerge; |
1549 | } | | 1549 | } |
1550 | } | | 1550 | } |
1551 | | | 1551 | |
1552 | if (merged) { | | 1552 | if (merged) { |
1553 | if (kmap) { | | 1553 | if (kmap) { |
1554 | UVMMAP_EVCNT_DECR(kbackmerge); | | 1554 | UVMMAP_EVCNT_DECR(kbackmerge); |
1555 | UVMMAP_EVCNT_INCR(kbimerge); | | 1555 | UVMMAP_EVCNT_INCR(kbimerge); |
1556 | } else { | | 1556 | } else { |
1557 | UVMMAP_EVCNT_DECR(ubackmerge); | | 1557 | UVMMAP_EVCNT_DECR(ubackmerge); |
1558 | UVMMAP_EVCNT_INCR(ubimerge); | | 1558 | UVMMAP_EVCNT_INCR(ubimerge); |
1559 | } | | 1559 | } |
1560 | } else { | | 1560 | } else { |
1561 | if (kmap) { | | 1561 | if (kmap) { |
1562 | UVMMAP_EVCNT_INCR(kforwmerge); | | 1562 | UVMMAP_EVCNT_INCR(kforwmerge); |
1563 | } else { | | 1563 | } else { |
1564 | UVMMAP_EVCNT_INCR(uforwmerge); | | 1564 | UVMMAP_EVCNT_INCR(uforwmerge); |
1565 | } | | 1565 | } |
1566 | } | | 1566 | } |
1567 | UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0); | | 1567 | UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0); |
1568 | | | 1568 | |
1569 | /* | | 1569 | /* |
1570 | * drop our reference to uobj since we are extending a reference | | 1570 | * drop our reference to uobj since we are extending a reference |
1571 | * that we already have (the ref count can not drop to zero). | | 1571 | * that we already have (the ref count can not drop to zero). |
1572 | * (if merged, we've already detached) | | 1572 | * (if merged, we've already detached) |
1573 | */ | | 1573 | */ |
1574 | if (uobj && uobj->pgops->pgo_detach && !merged) | | 1574 | if (uobj && uobj->pgops->pgo_detach && !merged) |