| @@ -1,573 +1,583 @@ | | | @@ -1,573 +1,583 @@ |
1 | /* $NetBSD: uvm_pglist.c,v 1.54 2011/01/21 19:27:09 matt Exp $ */ | | 1 | /* $NetBSD: uvm_pglist.c,v 1.55 2011/01/22 01:36:27 matt Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1997 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1997 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center. | | 9 | * NASA Ames Research Center. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * uvm_pglist.c: pglist functions | | 34 | * uvm_pglist.c: pglist functions |
35 | */ | | 35 | */ |
36 | | | 36 | |
37 | #include <sys/cdefs.h> | | 37 | #include <sys/cdefs.h> |
38 | __KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.54 2011/01/21 19:27:09 matt Exp $"); | | 38 | __KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.55 2011/01/22 01:36:27 matt Exp $"); |
39 | | | 39 | |
40 | #include <sys/param.h> | | 40 | #include <sys/param.h> |
41 | #include <sys/systm.h> | | 41 | #include <sys/systm.h> |
42 | #include <sys/malloc.h> | | 42 | #include <sys/malloc.h> |
43 | #include <sys/proc.h> | | 43 | #include <sys/proc.h> |
44 | | | 44 | |
45 | #include <uvm/uvm.h> | | 45 | #include <uvm/uvm.h> |
46 | #include <uvm/uvm_pdpolicy.h> | | 46 | #include <uvm/uvm_pdpolicy.h> |
47 | | | 47 | |
48 | #ifdef VM_PAGE_ALLOC_MEMORY_STATS | | 48 | #ifdef VM_PAGE_ALLOC_MEMORY_STATS |
49 | #define STAT_INCR(v) (v)++ | | 49 | #define STAT_INCR(v) (v)++ |
50 | #define STAT_DECR(v) do { \ | | 50 | #define STAT_DECR(v) do { \ |
51 | if ((v) == 0) \ | | 51 | if ((v) == 0) \ |
52 | printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \ | | 52 | printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \ |
53 | else \ | | 53 | else \ |
54 | (v)--; \ | | 54 | (v)--; \ |
55 | } while (/*CONSTCOND*/ 0) | | 55 | } while (/*CONSTCOND*/ 0) |
56 | u_long uvm_pglistalloc_npages; | | 56 | u_long uvm_pglistalloc_npages; |
57 | #else | | 57 | #else |
58 | #define STAT_INCR(v) | | 58 | #define STAT_INCR(v) |
59 | #define STAT_DECR(v) | | 59 | #define STAT_DECR(v) |
60 | #endif | | 60 | #endif |
61 | | | 61 | |
62 | /* | | 62 | /* |
63 | * uvm_pglistalloc: allocate a list of pages | | 63 | * uvm_pglistalloc: allocate a list of pages |
64 | * | | 64 | * |
65 | * => allocated pages are placed onto an rlist. rlist is | | 65 | * => allocated pages are placed onto an rlist. rlist is |
66 | * initialized by uvm_pglistalloc. | | 66 | * initialized by uvm_pglistalloc. |
67 | * => returns 0 on success or errno on failure | | 67 | * => returns 0 on success or errno on failure |
68 | * => implementation allocates a single segment if any constraints are | | 68 | * => implementation allocates a single segment if any constraints are |
69 | * imposed by call arguments. | | 69 | * imposed by call arguments. |
70 | * => doesn't take into account clean non-busy pages on inactive list | | 70 | * => doesn't take into account clean non-busy pages on inactive list |
71 | * that could be used(?) | | 71 | * that could be used(?) |
72 | * => params: | | 72 | * => params: |
73 | * size the size of the allocation, rounded to page size. | | 73 | * size the size of the allocation, rounded to page size. |
74 | * low the low address of the allowed allocation range. | | 74 | * low the low address of the allowed allocation range. |
75 | * high the high address of the allowed allocation range. | | 75 | * high the high address of the allowed allocation range. |
76 | * alignment memory must be aligned to this power-of-two boundary. | | 76 | * alignment memory must be aligned to this power-of-two boundary. |
77 | * boundary no segment in the allocation may cross this | | 77 | * boundary no segment in the allocation may cross this |
78 | * power-of-two boundary (relative to zero). | | 78 | * power-of-two boundary (relative to zero). |
79 | */ | | 79 | */ |
80 | | | 80 | |
81 | static void | | 81 | static void |
82 | uvm_pglist_add(struct vm_page *pg, struct pglist *rlist) | | 82 | uvm_pglist_add(struct vm_page *pg, struct pglist *rlist) |
83 | { | | 83 | { |
84 | int free_list, color, pgflidx; | | 84 | int free_list, color, pgflidx; |
85 | | | 85 | |
86 | KASSERT(mutex_owned(&uvm_fpageqlock)); | | 86 | KASSERT(mutex_owned(&uvm_fpageqlock)); |
87 | | | 87 | |
88 | #if PGFL_NQUEUES != 2 | | 88 | #if PGFL_NQUEUES != 2 |
89 | #error uvm_pglistalloc needs to be updated | | 89 | #error uvm_pglistalloc needs to be updated |
90 | #endif | | 90 | #endif |
91 | | | 91 | |
92 | free_list = uvm_page_lookup_freelist(pg); | | 92 | free_list = uvm_page_lookup_freelist(pg); |
93 | color = VM_PGCOLOR_BUCKET(pg); | | 93 | color = VM_PGCOLOR_BUCKET(pg); |
94 | pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN; | | 94 | pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN; |
95 | #ifdef NOT_DEBUG | | 95 | #ifdef NOT_DEBUG |
96 | struct vm_page *tp; | | 96 | struct vm_page *tp; |
97 | LIST_FOREACH(tp, | | 97 | LIST_FOREACH(tp, |
98 | &uvm.page_free[free_list].pgfl_buckets[color].pgfl_queues[pgflidx], | | 98 | &uvm.page_free[free_list].pgfl_buckets[color].pgfl_queues[pgflidx], |
99 | pageq.list) { | | 99 | pageq.list) { |
100 | if (tp == pg) | | 100 | if (tp == pg) |
101 | break; | | 101 | break; |
102 | } | | 102 | } |
103 | if (tp == NULL) | | 103 | if (tp == NULL) |
104 | panic("uvm_pglistalloc: page not on freelist"); | | 104 | panic("uvm_pglistalloc: page not on freelist"); |
105 | #endif | | 105 | #endif |
106 | LIST_REMOVE(pg, pageq.list); /* global */ | | 106 | LIST_REMOVE(pg, pageq.list); /* global */ |
107 | LIST_REMOVE(pg, listq.list); /* cpu */ | | 107 | LIST_REMOVE(pg, listq.list); /* cpu */ |
108 | uvmexp.free--; | | 108 | uvmexp.free--; |
109 | if (pg->flags & PG_ZERO) | | 109 | if (pg->flags & PG_ZERO) |
110 | uvmexp.zeropages--; | | 110 | uvmexp.zeropages--; |
111 | VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--; | | 111 | VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--; |
112 | pg->flags = PG_CLEAN; | | 112 | pg->flags = PG_CLEAN; |
113 | pg->pqflags = 0; | | 113 | pg->pqflags = 0; |
114 | pg->uobject = NULL; | | 114 | pg->uobject = NULL; |
115 | pg->uanon = NULL; | | 115 | pg->uanon = NULL; |
116 | TAILQ_INSERT_TAIL(rlist, pg, pageq.queue); | | 116 | TAILQ_INSERT_TAIL(rlist, pg, pageq.queue); |
117 | STAT_INCR(uvm_pglistalloc_npages); | | 117 | STAT_INCR(uvm_pglistalloc_npages); |
118 | } | | 118 | } |
119 | | | 119 | |
120 | static int | | 120 | static int |
121 | uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high, | | 121 | uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high, |
122 | paddr_t alignment, paddr_t boundary, struct pglist *rlist) | | 122 | paddr_t alignment, paddr_t boundary, struct pglist *rlist) |
123 | { | | 123 | { |
124 | signed int try, limit, tryidx, end, idx, skip; | | 124 | signed int try, limit, tryidx, end, idx, skip; |
125 | struct vm_page *pgs; | | 125 | struct vm_page *pgs; |
126 | int pagemask; | | 126 | int pagemask; |
127 | bool second_pass; | | 127 | bool second_pass; |
128 | #ifdef DEBUG | | 128 | #ifdef DEBUG |
129 | paddr_t idxpa, lastidxpa; | | 129 | paddr_t idxpa, lastidxpa; |
130 | int cidx = 0; /* XXX: GCC */ | | 130 | int cidx = 0; /* XXX: GCC */ |
131 | #endif | | 131 | #endif |
132 | #ifdef PGALLOC_VERBOSE | | 132 | #ifdef PGALLOC_VERBOSE |
133 | printf("pgalloc: contig %d pgs from psi %ld\n", num, | | 133 | printf("pgalloc: contig %d pgs from psi %ld\n", num, |
134 | (long)(ps - vm_physmem)); | | 134 | (long)(ps - vm_physmem)); |
135 | #endif | | 135 | #endif |
136 | | | 136 | |
137 | KASSERT(mutex_owned(&uvm_fpageqlock)); | | 137 | KASSERT(mutex_owned(&uvm_fpageqlock)); |
138 | | | 138 | |
139 | low = atop(low); | | 139 | low = atop(low); |
140 | high = atop(high); | | 140 | high = atop(high); |
141 | alignment = atop(alignment); | | 141 | alignment = atop(alignment); |
142 | | | 142 | |
143 | /* | | 143 | /* |
144 | * We start our search just after where the last allocation | | 144 | * We start our search just after where the last allocation |
145 | * succeeded. | | 145 | * succeeded. |
146 | */ | | 146 | */ |
147 | try = roundup2(max(low, ps->avail_start + ps->start_hint), alignment); | | 147 | try = roundup2(max(low, ps->avail_start + ps->start_hint), alignment); |
148 | limit = min(high, ps->avail_end); | | 148 | limit = min(high, ps->avail_end); |
149 | pagemask = ~((boundary >> PAGE_SHIFT) - 1); | | 149 | pagemask = ~((boundary >> PAGE_SHIFT) - 1); |
150 | skip = 0; | | 150 | skip = 0; |
151 | second_pass = false; | | 151 | second_pass = false; |
152 | pgs = ps->pgs; | | 152 | pgs = ps->pgs; |
153 | | | 153 | |
154 | for (;;) { | | 154 | for (;;) { |
155 | bool ok = true; | | 155 | bool ok = true; |
156 | signed int cnt; | | 156 | signed int cnt; |
157 | | | 157 | |
158 | if (try + num > limit) { | | 158 | if (try + num > limit) { |
159 | if (ps->start_hint == 0 || second_pass) { | | 159 | if (ps->start_hint == 0 || second_pass) { |
160 | /* | | 160 | /* |
161 | * We've run past the allowable range. | | 161 | * We've run past the allowable range. |
162 | */ | | 162 | */ |
163 | return 0; /* FAIL = 0 pages*/ | | 163 | return 0; /* FAIL = 0 pages*/ |
164 | } | | 164 | } |
165 | /* | | 165 | /* |
166 | * We've wrapped around the end of this segment | | 166 | * We've wrapped around the end of this segment |
167 | * so restart at the beginning but now our limit | | 167 | * so restart at the beginning but now our limit |
168 | * is where we started. | | 168 | * is where we started. |
169 | */ | | 169 | */ |
170 | second_pass = true; | | 170 | second_pass = true; |
171 | try = roundup2(max(low, ps->avail_start), alignment); | | 171 | try = roundup2(max(low, ps->avail_start), alignment); |
172 | limit = min(high, ps->avail_start + ps->start_hint); | | 172 | limit = min(high, ps->avail_start + ps->start_hint); |
173 | skip = 0; | | 173 | skip = 0; |
174 | continue; | | 174 | continue; |
175 | } | | 175 | } |
176 | if (boundary != 0 && | | 176 | if (boundary != 0 && |
177 | ((try ^ (try + num - 1)) & pagemask) != 0) { | | 177 | ((try ^ (try + num - 1)) & pagemask) != 0) { |
178 | /* | | 178 | /* |
179 | * Region crosses boundary. Jump to the boundary | | 179 | * Region crosses boundary. Jump to the boundary |
180 | * just crossed and ensure alignment. | | 180 | * just crossed and ensure alignment. |
181 | */ | | 181 | */ |
182 | try = (try + num - 1) & pagemask; | | 182 | try = (try + num - 1) & pagemask; |
183 | try = roundup2(try, alignment); | | 183 | try = roundup2(try, alignment); |
184 | skip = 0; | | 184 | skip = 0; |
185 | continue; | | 185 | continue; |
186 | } | | 186 | } |
187 | #ifdef DEBUG | | 187 | #ifdef DEBUG |
188 | /* | | 188 | /* |
189 | * Make sure this is a managed physical page. | | 189 | * Make sure this is a managed physical page. |
190 | */ | | 190 | */ |
191 | | | 191 | |
192 | if (vm_physseg_find(try, &cidx) != ps - vm_physmem) | | 192 | if (vm_physseg_find(try, &cidx) != ps - vm_physmem) |
193 | panic("pgalloc contig: botch1"); | | 193 | panic("pgalloc contig: botch1"); |
194 | if (cidx != try - ps->start) | | 194 | if (cidx != try - ps->start) |
195 | panic("pgalloc contig: botch2"); | | 195 | panic("pgalloc contig: botch2"); |
196 | if (vm_physseg_find(try + num - 1, &cidx) != ps - vm_physmem) | | 196 | if (vm_physseg_find(try + num - 1, &cidx) != ps - vm_physmem) |
197 | panic("pgalloc contig: botch3"); | | 197 | panic("pgalloc contig: botch3"); |
198 | if (cidx != try - ps->start + num - 1) | | 198 | if (cidx != try - ps->start + num - 1) |
199 | panic("pgalloc contig: botch4"); | | 199 | panic("pgalloc contig: botch4"); |
200 | #endif | | 200 | #endif |
201 | tryidx = try - ps->start; | | 201 | tryidx = try - ps->start; |
202 | end = tryidx + num; | | 202 | end = tryidx + num; |
203 | | | 203 | |
204 | /* | | 204 | /* |
205 | * Found a suitable starting page. See if the range is free. | | 205 | * Found a suitable starting page. See if the range is free. |
206 | */ | | 206 | */ |
207 | #ifdef PGALLOC_VERBOSE | | 207 | #ifdef PGALLOC_VERBOSE |
208 | printf("%s: ps=%p try=%#x end=%#x skip=%#x, align=%#"PRIxPADDR, | | 208 | printf("%s: ps=%p try=%#x end=%#x skip=%#x, align=%#"PRIxPADDR, |
209 | __func__, ps, tryidx, end, skip, alignment); | | 209 | __func__, ps, tryidx, end, skip, alignment); |
210 | #endif | | 210 | #endif |
211 | /* | | 211 | /* |
212 | * We start at the end and work backwards since if we find a | | 212 | * We start at the end and work backwards since if we find a |
213 | * non-free page, it makes no sense to continue. | | 213 | * non-free page, it makes no sense to continue. |
214 | * | | 214 | * |
215 | * But on the plus side we have "vetted" some number of free | | 215 | * But on the plus side we have "vetted" some number of free |
216 | * pages. If this iteration fails, we may be able to skip | | 216 | * pages. If this iteration fails, we may be able to skip |
217 | * testing most of those pages again in the next pass. | | 217 | * testing most of those pages again in the next pass. |
218 | */ | | 218 | */ |
219 | for (idx = end - 1; idx >= tryidx + skip; idx--) { | | 219 | for (idx = end - 1; idx >= tryidx + skip; idx--) { |
220 | if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) { | | 220 | if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) { |
221 | ok = false; | | 221 | ok = false; |
222 | break; | | 222 | break; |
223 | } | | 223 | } |
224 | | | 224 | |
225 | #ifdef DEBUG | | 225 | #ifdef DEBUG |
226 | if (idx > tryidx) { | | 226 | if (idx > tryidx) { |
227 | idxpa = VM_PAGE_TO_PHYS(&pgs[idx]); | | 227 | idxpa = VM_PAGE_TO_PHYS(&pgs[idx]); |
228 | lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]); | | 228 | lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]); |
229 | if ((lastidxpa + PAGE_SIZE) != idxpa) { | | 229 | if ((lastidxpa + PAGE_SIZE) != idxpa) { |
230 | /* | | 230 | /* |
231 | * Region not contiguous. | | 231 | * Region not contiguous. |
232 | */ | | 232 | */ |
233 | panic("pgalloc contig: botch5"); | | 233 | panic("pgalloc contig: botch5"); |
234 | } | | 234 | } |
235 | if (boundary != 0 && | | 235 | if (boundary != 0 && |
236 | ((lastidxpa ^ idxpa) & ~(boundary - 1)) | | 236 | ((lastidxpa ^ idxpa) & ~(boundary - 1)) |
237 | != 0) { | | 237 | != 0) { |
238 | /* | | 238 | /* |
239 | * Region crosses boundary. | | 239 | * Region crosses boundary. |
240 | */ | | 240 | */ |
241 | panic("pgalloc contig: botch6"); | | 241 | panic("pgalloc contig: botch6"); |
242 | } | | 242 | } |
243 | } | | 243 | } |
244 | #endif | | 244 | #endif |
245 | } | | 245 | } |
246 | | | 246 | |
247 | if (ok) { | | 247 | if (ok) { |
248 | while (skip-- > 0) { | | 248 | while (skip-- > 0) { |
249 | KDASSERT(VM_PAGE_IS_FREE(&pgs[tryidx + skip])); | | 249 | KDASSERT(VM_PAGE_IS_FREE(&pgs[tryidx + skip])); |
250 | } | | 250 | } |
251 | #ifdef PGALLOC_VERBOSE | | 251 | #ifdef PGALLOC_VERBOSE |
252 | printf(": ok\n"); | | 252 | printf(": ok\n"); |
253 | #endif | | 253 | #endif |
254 | break; | | 254 | break; |
255 | } | | 255 | } |
256 | | | 256 | |
257 | #ifdef PGALLOC_VERBOSE | | 257 | #ifdef PGALLOC_VERBOSE |
258 | printf(": non-free at %#x\n", idx - tryidx); | | 258 | printf(": non-free at %#x\n", idx - tryidx); |
259 | #endif | | 259 | #endif |
260 | /* | | 260 | /* |
261 | * count the number of pages we can advance | | 261 | * count the number of pages we can advance |
262 | * since we know they aren't all free. | | 262 | * since we know they aren't all free. |
263 | */ | | 263 | */ |
264 | cnt = idx + 1 - tryidx; | | 264 | cnt = idx + 1 - tryidx; |
265 | /* | | 265 | /* |
266 | * now round up that to the needed alignment. | | 266 | * now round up that to the needed alignment. |
267 | */ | | 267 | */ |
268 | cnt = roundup2(cnt, alignment); | | 268 | cnt = roundup2(cnt, alignment); |
269 | /* | | 269 | /* |
270 | * The number of pages we can skip checking | | 270 | * The number of pages we can skip checking |
271 | * (might be 0 if cnt > num). | | 271 | * (might be 0 if cnt > num). |
272 | */ | | 272 | */ |
273 | skip = max(num - cnt, 0); | | 273 | skip = max(num - cnt, 0); |
274 | try += cnt; | | 274 | try += cnt; |
275 | } | | 275 | } |
276 | | | 276 | |
277 | /* | | 277 | /* |
278 | * we have a chunk of memory that conforms to the requested constraints. | | 278 | * we have a chunk of memory that conforms to the requested constraints. |
279 | */ | | 279 | */ |
280 | for (idx = tryidx, pgs += idx; idx < end; idx++, pgs++) | | 280 | for (idx = tryidx, pgs += idx; idx < end; idx++, pgs++) |
281 | uvm_pglist_add(pgs, rlist); | | 281 | uvm_pglist_add(pgs, rlist); |
282 | | | 282 | |
283 | /* | | 283 | /* |
284 | * the next time we need to search this segment, start after this | | 284 | * the next time we need to search this segment, start after this |
285 | * chunk of pages we just allocated. | | 285 | * chunk of pages we just allocated. |
286 | */ | | 286 | */ |
287 | ps->start_hint = tryidx + num; | | 287 | ps->start_hint = try + num - ps->avail_start; |
| | | 288 | KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start, |
| | | 289 | ("%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")", |
| | | 290 | try + num, |
| | | 291 | ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start, |
| | | 292 | ps->avail_end - ps->avail_start)); |
288 | | | 293 | |
289 | #ifdef PGALLOC_VERBOSE | | 294 | #ifdef PGALLOC_VERBOSE |
290 | printf("got %d pgs\n", num); | | 295 | printf("got %d pgs\n", num); |
291 | #endif | | 296 | #endif |
292 | return num; /* number of pages allocated */ | | 297 | return num; /* number of pages allocated */ |
293 | } | | 298 | } |
294 | | | 299 | |
295 | static int | | 300 | static int |
296 | uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment, | | 301 | uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment, |
297 | paddr_t boundary, struct pglist *rlist) | | 302 | paddr_t boundary, struct pglist *rlist) |
298 | { | | 303 | { |
299 | int fl, psi; | | 304 | int fl, psi; |
300 | struct vm_physseg *ps; | | 305 | struct vm_physseg *ps; |
301 | int error; | | 306 | int error; |
302 | | | 307 | |
303 | /* Default to "lose". */ | | 308 | /* Default to "lose". */ |
304 | error = ENOMEM; | | 309 | error = ENOMEM; |
305 | | | 310 | |
306 | /* | | 311 | /* |
307 | * Block all memory allocation and lock the free list. | | 312 | * Block all memory allocation and lock the free list. |
308 | */ | | 313 | */ |
309 | mutex_spin_enter(&uvm_fpageqlock); | | 314 | mutex_spin_enter(&uvm_fpageqlock); |
310 | | | 315 | |
311 | /* Are there even any free pages? */ | | 316 | /* Are there even any free pages? */ |
312 | if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel)) | | 317 | if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel)) |
313 | goto out; | | 318 | goto out; |
314 | | | 319 | |
315 | for (fl = 0; fl < VM_NFREELIST; fl++) { | | 320 | for (fl = 0; fl < VM_NFREELIST; fl++) { |
316 | #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) | | 321 | #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) |
317 | for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--) | | 322 | for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--) |
318 | #else | | 323 | #else |
319 | for (psi = 0 ; psi < vm_nphysseg ; psi++) | | 324 | for (psi = 0 ; psi < vm_nphysseg ; psi++) |
320 | #endif | | 325 | #endif |
321 | { | | 326 | { |
322 | ps = &vm_physmem[psi]; | | 327 | ps = &vm_physmem[psi]; |
323 | | | 328 | |
324 | if (ps->free_list != fl) | | 329 | if (ps->free_list != fl) |
325 | continue; | | 330 | continue; |
326 | | | 331 | |
327 | num -= uvm_pglistalloc_c_ps(ps, num, low, high, | | 332 | num -= uvm_pglistalloc_c_ps(ps, num, low, high, |
328 | alignment, boundary, rlist); | | 333 | alignment, boundary, rlist); |
329 | if (num == 0) { | | 334 | if (num == 0) { |
330 | #ifdef PGALLOC_VERBOSE | | 335 | #ifdef PGALLOC_VERBOSE |
331 | printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n", | | 336 | printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n", |
332 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)), | | 337 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)), |
333 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist))); | | 338 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist))); |
334 | #endif | | 339 | #endif |
335 | error = 0; | | 340 | error = 0; |
336 | goto out; | | 341 | goto out; |
337 | } | | 342 | } |
338 | } | | 343 | } |
339 | } | | 344 | } |
340 | | | 345 | |
341 | out: | | 346 | out: |
342 | /* | | 347 | /* |
343 | * check to see if we need to generate some free pages waking | | 348 | * check to see if we need to generate some free pages waking |
344 | * the pagedaemon. | | 349 | * the pagedaemon. |
345 | */ | | 350 | */ |
346 | | | 351 | |
347 | uvm_kick_pdaemon(); | | 352 | uvm_kick_pdaemon(); |
348 | mutex_spin_exit(&uvm_fpageqlock); | | 353 | mutex_spin_exit(&uvm_fpageqlock); |
349 | return (error); | | 354 | return (error); |
350 | } | | 355 | } |
351 | | | 356 | |
352 | static int | | 357 | static int |
353 | uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high, | | 358 | uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high, |
354 | struct pglist *rlist) | | 359 | struct pglist *rlist) |
355 | { | | 360 | { |
356 | int todo, limit, try; | | 361 | int todo, limit, try; |
357 | struct vm_page *pg; | | 362 | struct vm_page *pg; |
358 | bool second_pass; | | 363 | bool second_pass; |
359 | #ifdef PGALLOC_VERBOSE | | 364 | #ifdef PGALLOC_VERBOSE |
360 | printf("pgalloc: simple %d pgs from psi %ld\n", num, | | 365 | printf("pgalloc: simple %d pgs from psi %ld\n", num, |
361 | (long)(ps - vm_physmem)); | | 366 | (long)(ps - vm_physmem)); |
362 | #endif | | 367 | #endif |
363 | | | 368 | |
364 | KASSERT(mutex_owned(&uvm_fpageqlock)); | | 369 | KASSERT(mutex_owned(&uvm_fpageqlock)); |
365 | KASSERT(ps->start <= ps->avail_start); | | 370 | KASSERT(ps->start <= ps->avail_start); |
366 | KASSERT(ps->start <= ps->avail_end); | | 371 | KASSERT(ps->start <= ps->avail_end); |
367 | KASSERT(ps->avail_start <= ps->end); | | 372 | KASSERT(ps->avail_start <= ps->end); |
368 | KASSERT(ps->avail_end <= ps->end); | | 373 | KASSERT(ps->avail_end <= ps->end); |
369 | | | 374 | |
370 | low = atop(low); | | 375 | low = atop(low); |
371 | high = atop(high); | | 376 | high = atop(high); |
372 | todo = num; | | 377 | todo = num; |
373 | try = max(low, ps->avail_start + ps->start_hint); | | 378 | try = max(low, ps->avail_start + ps->start_hint); |
374 | limit = min(high, ps->avail_end); | | 379 | limit = min(high, ps->avail_end); |
375 | pg = &ps->pgs[try - ps->start]; | | 380 | pg = &ps->pgs[try - ps->start]; |
376 | second_pass = false; | | 381 | second_pass = false; |
377 | | | 382 | |
378 | for (;; try++, pg++) { | | 383 | for (;; try++, pg++) { |
379 | if (try >= limit) { | | 384 | if (try >= limit) { |
380 | if (ps->start_hint == 0 || second_pass) | | 385 | if (ps->start_hint == 0 || second_pass) |
381 | break; | | 386 | break; |
382 | second_pass = true; | | 387 | second_pass = true; |
383 | try = max(low, ps->avail_start); | | 388 | try = max(low, ps->avail_start); |
384 | limit = min(high, ps->avail_start + ps->start_hint); | | 389 | limit = min(high, ps->avail_start + ps->start_hint); |
385 | pg = &ps->pgs[try - ps->start]; | | 390 | pg = &ps->pgs[try - ps->start]; |
386 | continue; | | 391 | continue; |
387 | } | | 392 | } |
388 | #ifdef DEBUG | | 393 | #ifdef DEBUG |
389 | { | | 394 | { |
390 | int cidx = 0; | | 395 | int cidx = 0; |
391 | const int bank = vm_physseg_find(try, &cidx); | | 396 | const int bank = vm_physseg_find(try, &cidx); |
392 | KASSERTMSG(bank == ps - vm_physmem, | | 397 | KASSERTMSG(bank == ps - vm_physmem, |
393 | ("vm_physseg_find(%#x) (%d) != ps %zd", | | 398 | ("vm_physseg_find(%#x) (%d) != ps %zd", |
394 | try, bank, ps - vm_physmem)); | | 399 | try, bank, ps - vm_physmem)); |
395 | KASSERTMSG(cidx == try - ps->start, | | 400 | KASSERTMSG(cidx == try - ps->start, |
396 | ("vm_physseg_find(%#x): %#x != off %"PRIxPADDR, | | 401 | ("vm_physseg_find(%#x): %#x != off %"PRIxPADDR, |
397 | try, cidx, try - ps->start)); | | 402 | try, cidx, try - ps->start)); |
398 | } | | 403 | } |
399 | #endif | | 404 | #endif |
400 | if (VM_PAGE_IS_FREE(pg) == 0) | | 405 | if (VM_PAGE_IS_FREE(pg) == 0) |
401 | continue; | | 406 | continue; |
402 | | | 407 | |
403 | uvm_pglist_add(pg, rlist); | | 408 | uvm_pglist_add(pg, rlist); |
404 | if (--todo == 0) { | | 409 | if (--todo == 0) { |
405 | break; | | 410 | break; |
406 | } | | 411 | } |
407 | } | | 412 | } |
408 | | | 413 | |
409 | /* | | 414 | /* |
410 | * The next time we need to search this segment, | | 415 | * The next time we need to search this segment, |
411 | * start just after the pages we just allocated. | | 416 | * start just after the pages we just allocated. |
412 | */ | | 417 | */ |
413 | ps->start_hint = try + 1 - ps->start; | | 418 | ps->start_hint = try + 1 - ps->avail_start; |
| | | 419 | KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start, |
| | | 420 | ("%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")", |
| | | 421 | try + 1, |
| | | 422 | ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start, |
| | | 423 | ps->avail_end - ps->avail_start)); |
414 | | | 424 | |
415 | #ifdef PGALLOC_VERBOSE | | 425 | #ifdef PGALLOC_VERBOSE |
416 | printf("got %d pgs\n", num - todo); | | 426 | printf("got %d pgs\n", num - todo); |
417 | #endif | | 427 | #endif |
418 | return (num - todo); /* number of pages allocated */ | | 428 | return (num - todo); /* number of pages allocated */ |
419 | } | | 429 | } |
420 | | | 430 | |
421 | static int | | 431 | static int |
422 | uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high, | | 432 | uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high, |
423 | struct pglist *rlist, int waitok) | | 433 | struct pglist *rlist, int waitok) |
424 | { | | 434 | { |
425 | int fl, psi, error; | | 435 | int fl, psi, error; |
426 | struct vm_physseg *ps; | | 436 | struct vm_physseg *ps; |
427 | | | 437 | |
428 | /* Default to "lose". */ | | 438 | /* Default to "lose". */ |
429 | error = ENOMEM; | | 439 | error = ENOMEM; |
430 | | | 440 | |
431 | again: | | 441 | again: |
432 | /* | | 442 | /* |
433 | * Block all memory allocation and lock the free list. | | 443 | * Block all memory allocation and lock the free list. |
434 | */ | | 444 | */ |
435 | mutex_spin_enter(&uvm_fpageqlock); | | 445 | mutex_spin_enter(&uvm_fpageqlock); |
436 | | | 446 | |
437 | /* Are there even any free pages? */ | | 447 | /* Are there even any free pages? */ |
438 | if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel)) | | 448 | if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel)) |
439 | goto out; | | 449 | goto out; |
440 | | | 450 | |
441 | for (fl = 0; fl < VM_NFREELIST; fl++) { | | 451 | for (fl = 0; fl < VM_NFREELIST; fl++) { |
442 | #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) | | 452 | #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) |
443 | for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--) | | 453 | for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--) |
444 | #else | | 454 | #else |
445 | for (psi = 0 ; psi < vm_nphysseg ; psi++) | | 455 | for (psi = 0 ; psi < vm_nphysseg ; psi++) |
446 | #endif | | 456 | #endif |
447 | { | | 457 | { |
448 | ps = &vm_physmem[psi]; | | 458 | ps = &vm_physmem[psi]; |
449 | | | 459 | |
450 | if (ps->free_list != fl) | | 460 | if (ps->free_list != fl) |
451 | continue; | | 461 | continue; |
452 | | | 462 | |
453 | num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist); | | 463 | num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist); |
454 | if (num == 0) { | | 464 | if (num == 0) { |
455 | error = 0; | | 465 | error = 0; |
456 | goto out; | | 466 | goto out; |
457 | } | | 467 | } |
458 | } | | 468 | } |
459 | | | 469 | |
460 | } | | 470 | } |
461 | | | 471 | |
462 | out: | | 472 | out: |
463 | /* | | 473 | /* |
464 | * check to see if we need to generate some free pages waking | | 474 | * check to see if we need to generate some free pages waking |
465 | * the pagedaemon. | | 475 | * the pagedaemon. |
466 | */ | | 476 | */ |
467 | | | 477 | |
468 | uvm_kick_pdaemon(); | | 478 | uvm_kick_pdaemon(); |
469 | mutex_spin_exit(&uvm_fpageqlock); | | 479 | mutex_spin_exit(&uvm_fpageqlock); |
470 | | | 480 | |
471 | if (error) { | | 481 | if (error) { |
472 | if (waitok) { | | 482 | if (waitok) { |
473 | /* XXX perhaps some time limitation? */ | | 483 | /* XXX perhaps some time limitation? */ |
474 | #ifdef DEBUG | | 484 | #ifdef DEBUG |
475 | printf("pglistalloc waiting\n"); | | 485 | printf("pglistalloc waiting\n"); |
476 | #endif | | 486 | #endif |
477 | uvm_wait("pglalloc"); | | 487 | uvm_wait("pglalloc"); |
478 | goto again; | | 488 | goto again; |
479 | } else | | 489 | } else |
480 | uvm_pglistfree(rlist); | | 490 | uvm_pglistfree(rlist); |
481 | } | | 491 | } |
482 | #ifdef PGALLOC_VERBOSE | | 492 | #ifdef PGALLOC_VERBOSE |
483 | if (!error) | | 493 | if (!error) |
484 | printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n", | | 494 | printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n", |
485 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)), | | 495 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)), |
486 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist))); | | 496 | (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist))); |
487 | #endif | | 497 | #endif |
488 | return (error); | | 498 | return (error); |
489 | } | | 499 | } |
490 | | | 500 | |
491 | int | | 501 | int |
492 | uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment, | | 502 | uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment, |
493 | paddr_t boundary, struct pglist *rlist, int nsegs, int waitok) | | 503 | paddr_t boundary, struct pglist *rlist, int nsegs, int waitok) |
494 | { | | 504 | { |
495 | int num, res; | | 505 | int num, res; |
496 | | | 506 | |
497 | KASSERT((alignment & (alignment - 1)) == 0); | | 507 | KASSERT((alignment & (alignment - 1)) == 0); |
498 | KASSERT((boundary & (boundary - 1)) == 0); | | 508 | KASSERT((boundary & (boundary - 1)) == 0); |
499 | | | 509 | |
500 | /* | | 510 | /* |
501 | * Our allocations are always page granularity, so our alignment | | 511 | * Our allocations are always page granularity, so our alignment |
502 | * must be, too. | | 512 | * must be, too. |
503 | */ | | 513 | */ |
504 | if (alignment < PAGE_SIZE) | | 514 | if (alignment < PAGE_SIZE) |
505 | alignment = PAGE_SIZE; | | 515 | alignment = PAGE_SIZE; |
506 | if (boundary != 0 && boundary < size) | | 516 | if (boundary != 0 && boundary < size) |
507 | return (EINVAL); | | 517 | return (EINVAL); |
508 | num = atop(round_page(size)); | | 518 | num = atop(round_page(size)); |
509 | low = roundup2(low, alignment); | | 519 | low = roundup2(low, alignment); |
510 | | | 520 | |
511 | TAILQ_INIT(rlist); | | 521 | TAILQ_INIT(rlist); |
512 | | | 522 | |
513 | if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) || | | 523 | if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) || |
514 | (boundary != 0)) | | 524 | (boundary != 0)) |
515 | res = uvm_pglistalloc_contig(num, low, high, alignment, | | 525 | res = uvm_pglistalloc_contig(num, low, high, alignment, |
516 | boundary, rlist); | | 526 | boundary, rlist); |
517 | else | | 527 | else |
518 | res = uvm_pglistalloc_simple(num, low, high, rlist, waitok); | | 528 | res = uvm_pglistalloc_simple(num, low, high, rlist, waitok); |
519 | | | 529 | |
520 | return (res); | | 530 | return (res); |
521 | } | | 531 | } |
522 | | | 532 | |
523 | /* | | 533 | /* |
524 | * uvm_pglistfree: free a list of pages | | 534 | * uvm_pglistfree: free a list of pages |
525 | * | | 535 | * |
526 | * => pages should already be unmapped | | 536 | * => pages should already be unmapped |
527 | */ | | 537 | */ |
528 | | | 538 | |
void
uvm_pglistfree(struct pglist *list)
{
	struct uvm_cpu *ucpu;
	struct vm_page *pg;
	int index, color, queue;	/* free list index, color bucket, zero/unknown queue */
	bool iszero;

	/*
	 * Lock the free list and free each page.
	 * uvm_fpageqlock is held across the entire loop; every page is
	 * inserted on both the global and the per-CPU free queues.
	 */

	mutex_spin_enter(&uvm_fpageqlock);
	ucpu = curcpu()->ci_data.cpu_uvm;
	while ((pg = TAILQ_FIRST(list)) != NULL) {
		/* page must not still be on a pagedaemon queue */
		KASSERT(!uvmpdpol_pageisqueued_p(pg));
		TAILQ_REMOVE(list, pg, pageq.queue);
		/* remember whether the page is known-zeroed before resetting flags */
		iszero = (pg->flags & PG_ZERO);
		pg->pqflags = PQ_FREE;
#ifdef DEBUG
		/* poison owner pointers to catch use-after-free */
		pg->uobject = (void *)0xdeadbeef;
		pg->uanon = (void *)0xdeadbeef;
#endif /* DEBUG */
#ifdef DEBUG
		/* verify that a PG_ZERO page really is all zeroes */
		if (iszero)
			uvm_pagezerocheck(pg);
#endif /* DEBUG */
		index = uvm_page_lookup_freelist(pg);
		color = VM_PGCOLOR_BUCKET(pg);
		/* zeroed pages go on the zero queue so they can be reused as-is */
		queue = iszero ? PGFL_ZEROS : PGFL_UNKNOWN;
		/* offset is reused here to record the freeing cpu's uvm_cpu */
		pg->offset = (uintptr_t)ucpu;
		LIST_INSERT_HEAD(&uvm.page_free[index].pgfl_buckets[color].
		    pgfl_queues[queue], pg, pageq.list);
		LIST_INSERT_HEAD(&ucpu->page_free[index].pgfl_buckets[color].
		    pgfl_queues[queue], pg, listq.list);
		uvmexp.free++;
		if (iszero)
			uvmexp.zeropages++;
		ucpu->pages[queue]++;
		STAT_DECR(uvm_pglistalloc_npages);
	}
	/*
	 * If this cpu now has fewer zeroed pages than unknown ones,
	 * (re)arm idle-loop zeroing (subject to vm_page_zero_enable).
	 */
	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN])
		ucpu->page_idle_zero = vm_page_zero_enable;
	mutex_spin_exit(&uvm_fpageqlock);
}