| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: subr_kmem.c,v 1.39 2012/01/27 19:48:40 para Exp $ */ | | 1 | /* $NetBSD: subr_kmem.c,v 1.40 2012/01/28 23:09:06 rmind Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2009 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2009 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. | | 8 | * by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -51,48 +51,46 @@ | | | @@ -51,48 +51,46 @@ |
51 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 51 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
52 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 52 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
53 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 53 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
54 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 54 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
55 | * SUCH DAMAGE. | | 55 | * SUCH DAMAGE. |
56 | */ | | 56 | */ |
57 | | | 57 | |
58 | /* | | 58 | /* |
59 | * allocator of kernel wired memory. | | 59 | * allocator of kernel wired memory. |
60 | * | | 60 | * |
61 | */ | | 61 | */ |
62 | | | 62 | |
63 | #include <sys/cdefs.h> | | 63 | #include <sys/cdefs.h> |
64 | __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.39 2012/01/27 19:48:40 para Exp $"); | | 64 | __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.40 2012/01/28 23:09:06 rmind Exp $"); |
65 | | | 65 | |
66 | #include <sys/param.h> | | 66 | #include <sys/param.h> |
67 | #include <sys/callback.h> | | 67 | #include <sys/callback.h> |
68 | #include <sys/kmem.h> | | 68 | #include <sys/kmem.h> |
69 | #include <sys/pool.h> | | 69 | #include <sys/pool.h> |
70 | #include <sys/debug.h> | | 70 | #include <sys/debug.h> |
71 | #include <sys/lockdebug.h> | | 71 | #include <sys/lockdebug.h> |
72 | #include <sys/cpu.h> | | 72 | #include <sys/cpu.h> |
73 | | | 73 | |
74 | #include <uvm/uvm_extern.h> | | 74 | #include <uvm/uvm_extern.h> |
75 | #include <uvm/uvm_map.h> | | 75 | #include <uvm/uvm_map.h> |
76 | #include <uvm/uvm_kmguard.h> | | 76 | #include <uvm/uvm_kmguard.h> |
77 | | | 77 | |
78 | #include <lib/libkern/libkern.h> | | 78 | #include <lib/libkern/libkern.h> |
79 | | | 79 | |
80 | struct kmem_cache_info { | | 80 | static const struct kmem_cache_info { |
81 | int kc_size; | | 81 | size_t kc_size; |
82 | const char *kc_name; | | 82 | const char * kc_name; |
83 | }; | | 83 | } kmem_cache_sizes[] = { |
84 | | | | |
85 | static const struct kmem_cache_info kmem_cache_sizes[] = { | | | |
86 | { 8, "kmem-8" }, | | 84 | { 8, "kmem-8" }, |
87 | { 16, "kmem-16" }, | | 85 | { 16, "kmem-16" }, |
88 | { 24, "kmem-24" }, | | 86 | { 24, "kmem-24" }, |
89 | { 32, "kmem-32" }, | | 87 | { 32, "kmem-32" }, |
90 | { 40, "kmem-40" }, | | 88 | { 40, "kmem-40" }, |
91 | { 48, "kmem-48" }, | | 89 | { 48, "kmem-48" }, |
92 | { 56, "kmem-56" }, | | 90 | { 56, "kmem-56" }, |
93 | { 64, "kmem-64" }, | | 91 | { 64, "kmem-64" }, |
94 | { 80, "kmem-80" }, | | 92 | { 80, "kmem-80" }, |
95 | { 96, "kmem-96" }, | | 93 | { 96, "kmem-96" }, |
96 | { 112, "kmem-112" }, | | 94 | { 112, "kmem-112" }, |
97 | { 128, "kmem-128" }, | | 95 | { 128, "kmem-128" }, |
98 | { 160, "kmem-160" }, | | 96 | { 160, "kmem-160" }, |
| @@ -101,270 +99,261 @@ static const struct kmem_cache_info kmem | | | @@ -101,270 +99,261 @@ static const struct kmem_cache_info kmem |
101 | { 256, "kmem-256" }, | | 99 | { 256, "kmem-256" }, |
102 | { 320, "kmem-320" }, | | 100 | { 320, "kmem-320" }, |
103 | { 384, "kmem-384" }, | | 101 | { 384, "kmem-384" }, |
104 | { 448, "kmem-448" }, | | 102 | { 448, "kmem-448" }, |
105 | { 512, "kmem-512" }, | | 103 | { 512, "kmem-512" }, |
106 | { 768, "kmem-768" }, | | 104 | { 768, "kmem-768" }, |
107 | { 1024, "kmem-1024" }, | | 105 | { 1024, "kmem-1024" }, |
108 | { 2048, "kmem-2048" }, | | 106 | { 2048, "kmem-2048" }, |
109 | { 4096, "kmem-4096" }, | | 107 | { 4096, "kmem-4096" }, |
110 | { 0, NULL } | | 108 | { 0, NULL } |
111 | }; | | 109 | }; |
112 | | | 110 | |
113 | /* | | 111 | /* |
114 | * KMEM_ALIGN is the smalles guaranteed alignment and | | 112 | * KMEM_ALIGN is the smallest guaranteed alignment and also the |
115 | * also the smallest allocateable quanta. | | 113 | * smallest allocatable quantum. Every cache size that is |
116 | * Every cache size which is a multiply of CACHE_LINE_SIZE | | 114 | * a multiple of CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment. |
117 | * gets CACHE_LINE_SIZE alignment. | | | |
118 | */ | | 115 | */ |
119 | #define KMEM_ALIGN 8 | | 116 | #define KMEM_ALIGN 8 |
120 | #define KMEM_SHIFT 3 | | 117 | #define KMEM_SHIFT 3 |
121 | #define KMEM_MAXSIZE 4096 | | 118 | #define KMEM_MAXSIZE 4096 |
| | | 119 | #define KMEM_CACHE_COUNT (KMEM_MAXSIZE >> KMEM_SHIFT) |
122 | | | 120 | |
123 | static pool_cache_t kmem_cache[KMEM_MAXSIZE >> KMEM_SHIFT]; | | 121 | static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned; |
124 | static size_t kmem_cache_max; | | 122 | static size_t kmem_cache_maxidx __read_mostly; |
125 | | | 123 | |
126 | #if defined(DEBUG) | | 124 | #if defined(DEBUG) |
127 | int kmem_guard_depth = 0; | | 125 | int kmem_guard_depth = 0; |
128 | size_t kmem_guard_size; | | 126 | size_t kmem_guard_size; |
129 | static struct uvm_kmguard kmem_guard; | | 127 | static struct uvm_kmguard kmem_guard; |
130 | static void *kmem_freecheck; | | 128 | static void *kmem_freecheck; |
131 | #define KMEM_POISON | | 129 | #define KMEM_POISON |
132 | #define KMEM_REDZONE | | 130 | #define KMEM_REDZONE |
133 | #define KMEM_SIZE | | 131 | #define KMEM_SIZE |
134 | #define KMEM_GUARD | | 132 | #define KMEM_GUARD |
135 | #endif /* defined(DEBUG) */ | | 133 | #endif /* defined(DEBUG) */ |
136 | | | 134 | |
137 | #if defined(KMEM_POISON) | | 135 | #if defined(KMEM_POISON) |
138 | static int kmem_poison_ctor(void *, void *, int); | | 136 | static int kmem_poison_ctor(void *, void *, int); |
139 | static void kmem_poison_fill(void *, size_t); | | 137 | static void kmem_poison_fill(void *, size_t); |
140 | static void kmem_poison_check(void *, size_t); | | 138 | static void kmem_poison_check(void *, size_t); |
141 | #else /* defined(KMEM_POISON) */ | | 139 | #else /* defined(KMEM_POISON) */ |
142 | #define kmem_poison_fill(p, sz) /* nothing */ | | 140 | #define kmem_poison_fill(p, sz) /* nothing */ |
143 | #define kmem_poison_check(p, sz) /* nothing */ | | 141 | #define kmem_poison_check(p, sz) /* nothing */ |
144 | #endif /* defined(KMEM_POISON) */ | | 142 | #endif /* defined(KMEM_POISON) */ |
145 | | | 143 | |
146 | #if defined(KMEM_REDZONE) | | 144 | #if defined(KMEM_REDZONE) |
147 | #define REDZONE_SIZE 1 | | 145 | #define REDZONE_SIZE 1 |
148 | #else /* defined(KMEM_REDZONE) */ | | 146 | #else /* defined(KMEM_REDZONE) */ |
149 | #define REDZONE_SIZE 0 | | 147 | #define REDZONE_SIZE 0 |
150 | #endif /* defined(KMEM_REDZONE) */ | | 148 | #endif /* defined(KMEM_REDZONE) */ |
151 | | | 149 | |
152 | #if defined(KMEM_SIZE) | | 150 | #if defined(KMEM_SIZE) |
153 | #define SIZE_SIZE (max(KMEM_ALIGN, sizeof(size_t))) | | 151 | #define SIZE_SIZE (MAX(KMEM_ALIGN, sizeof(size_t))) |
154 | static void kmem_size_set(void *, size_t); | | 152 | static void kmem_size_set(void *, size_t); |
155 | static void kmem_size_check(void *, size_t); | | 153 | static void kmem_size_check(void *, size_t); |
156 | #else | | 154 | #else |
157 | #define SIZE_SIZE 0 | | 155 | #define SIZE_SIZE 0 |
158 | #define kmem_size_set(p, sz) /* nothing */ | | 156 | #define kmem_size_set(p, sz) /* nothing */ |
159 | #define kmem_size_check(p, sz) /* nothing */ | | 157 | #define kmem_size_check(p, sz) /* nothing */ |
160 | #endif | | 158 | #endif |
161 | | | 159 | |
162 | CTASSERT(KM_SLEEP == PR_WAITOK); | | 160 | CTASSERT(KM_SLEEP == PR_WAITOK); |
163 | CTASSERT(KM_NOSLEEP == PR_NOWAIT); | | 161 | CTASSERT(KM_NOSLEEP == PR_NOWAIT); |
164 | | | 162 | |
165 | void * kmem_intr_alloc(size_t size, km_flag_t kmflags); | | | |
166 | void * kmem_intr_zalloc(size_t size, km_flag_t kmflags); | | | |
167 | void kmem_intr_free(void *, size_t size); | | | |
168 | | | | |
169 | void * | | 163 | void * |
170 | kmem_intr_alloc(size_t size, km_flag_t kmflags) | | 164 | kmem_intr_alloc(size_t size, km_flag_t kmflags) |
171 | { | | 165 | { |
172 | size_t index; | | 166 | size_t allocsz, index; |
173 | size_t allocsz; | | | |
174 | pool_cache_t pc; | | 167 | pool_cache_t pc; |
175 | uint8_t *p; | | 168 | uint8_t *p; |
176 | | | 169 | |
177 | KASSERT(size > 0); | | 170 | KASSERT(size > 0); |
178 | | | 171 | |
179 | #ifdef KMEM_GUARD | | 172 | #ifdef KMEM_GUARD |
180 | if (size <= kmem_guard_size) { | | 173 | if (size <= kmem_guard_size) { |
181 | return uvm_kmguard_alloc(&kmem_guard, size, | | 174 | return uvm_kmguard_alloc(&kmem_guard, size, |
182 | (kmflags & KM_SLEEP) != 0); | | 175 | (kmflags & KM_SLEEP) != 0); |
183 | } | | 176 | } |
184 | #endif | | 177 | #endif |
185 | | | | |
186 | allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE; | | 178 | allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE; |
187 | if ((index = ((allocsz - 1) >> KMEM_SHIFT)) | | 179 | index = (allocsz - 1) >> KMEM_SHIFT; |
188 | < kmem_cache_max >> KMEM_SHIFT) { | | 180 | |
189 | pc = kmem_cache[index]; | | 181 | if (index >= kmem_cache_maxidx) { |
190 | } else { | | 182 | int ret = uvm_km_kmem_alloc(kmem_va_arena, |
191 | int rc; | | | |
192 | rc = uvm_km_kmem_alloc(kmem_va_arena, | | | |
193 | (vsize_t)round_page(allocsz), | | 183 | (vsize_t)round_page(allocsz), |
194 | ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP) | | 184 | ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP) |
195 | | VM_INSTANTFIT, (vmem_addr_t *)&p); | | 185 | | VM_INSTANTFIT, (vmem_addr_t *)&p); |
196 | return (rc != 0) ? NULL : p; | | 186 | return ret ? NULL : p; |
197 | } | | 187 | } |
198 | | | 188 | |
| | | 189 | pc = kmem_cache[index]; |
199 | p = pool_cache_get(pc, kmflags); | | 190 | p = pool_cache_get(pc, kmflags); |
200 | | | 191 | |
201 | if (__predict_true(p != NULL)) { | | 192 | if (__predict_true(p != NULL)) { |
202 | kmem_poison_check(p, kmem_roundup_size(size)); | | 193 | kmem_poison_check(p, kmem_roundup_size(size)); |
203 | FREECHECK_OUT(&kmem_freecheck, p); | | 194 | FREECHECK_OUT(&kmem_freecheck, p); |
204 | kmem_size_set(p, allocsz); | | 195 | kmem_size_set(p, allocsz); |
205 | } | | 196 | } |
206 | return p; | | 197 | return p; |
207 | } | | 198 | } |
208 | | | 199 | |
209 | void * | | 200 | void * |
210 | kmem_intr_zalloc(size_t size, km_flag_t kmflags) | | 201 | kmem_intr_zalloc(size_t size, km_flag_t kmflags) |
211 | { | | 202 | { |
212 | void *p; | | 203 | void *p; |
213 | | | 204 | |
214 | p = kmem_intr_alloc(size, kmflags); | | 205 | p = kmem_intr_alloc(size, kmflags); |
215 | if (p != NULL) { | | 206 | if (p != NULL) { |
216 | memset(p, 0, size); | | 207 | memset(p, 0, size); |
217 | } | | 208 | } |
218 | return p; | | 209 | return p; |
219 | } | | 210 | } |
220 | | | 211 | |
221 | void | | 212 | void |
222 | kmem_intr_free(void *p, size_t size) | | 213 | kmem_intr_free(void *p, size_t size) |
223 | { | | 214 | { |
224 | size_t index; | | 215 | size_t allocsz, index; |
225 | size_t allocsz; | | | |
226 | pool_cache_t pc; | | 216 | pool_cache_t pc; |
227 | | | 217 | |
228 | KASSERT(p != NULL); | | 218 | KASSERT(p != NULL); |
229 | KASSERT(size > 0); | | 219 | KASSERT(size > 0); |
230 | | | 220 | |
231 | #ifdef KMEM_GUARD | | 221 | #ifdef KMEM_GUARD |
232 | if (size <= kmem_guard_size) { | | 222 | if (size <= kmem_guard_size) { |
233 | uvm_kmguard_free(&kmem_guard, size, p); | | 223 | uvm_kmguard_free(&kmem_guard, size, p); |
234 | return; | | 224 | return; |
235 | } | | 225 | } |
236 | #endif | | 226 | #endif |
237 | | | | |
238 | allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE; | | 227 | allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE; |
239 | if ((index = ((allocsz - 1) >> KMEM_SHIFT)) | | 228 | index = (allocsz - 1) >> KMEM_SHIFT; |
240 | < kmem_cache_max >> KMEM_SHIFT) { | | 229 | |
241 | pc = kmem_cache[index]; | | 230 | if (index >= kmem_cache_maxidx) { |
242 | } else { | | | |
243 | uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p, | | 231 | uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p, |
244 | round_page(allocsz)); | | 232 | round_page(allocsz)); |
245 | return; | | 233 | return; |
246 | } | | 234 | } |
247 | | | 235 | |
248 | kmem_size_check(p, allocsz); | | 236 | kmem_size_check(p, allocsz); |
249 | FREECHECK_IN(&kmem_freecheck, p); | | 237 | FREECHECK_IN(&kmem_freecheck, p); |
250 | LOCKDEBUG_MEM_CHECK(p, allocsz - (REDZONE_SIZE + SIZE_SIZE)); | | 238 | LOCKDEBUG_MEM_CHECK(p, allocsz - (REDZONE_SIZE + SIZE_SIZE)); |
251 | kmem_poison_check((uint8_t *)p + size, allocsz - size - SIZE_SIZE); | | 239 | kmem_poison_check((uint8_t *)p + size, allocsz - size - SIZE_SIZE); |
252 | kmem_poison_fill(p, allocsz); | | 240 | kmem_poison_fill(p, allocsz); |
253 | | | 241 | |
| | | 242 | pc = kmem_cache[index]; |
254 | pool_cache_put(pc, p); | | 243 | pool_cache_put(pc, p); |
255 | } | | 244 | } |
256 | | | 245 | |
257 | | | | |
258 | /* ---- kmem API */ | | 246 | /* ---- kmem API */ |
259 | | | 247 | |
260 | /* | | 248 | /* |
261 | * kmem_alloc: allocate wired memory. | | 249 | * kmem_alloc: allocate wired memory. |
262 | * => must not be called from interrupt context. | | 250 | * => must not be called from interrupt context. |
263 | */ | | 251 | */ |
264 | | | 252 | |
265 | void * | | 253 | void * |
266 | kmem_alloc(size_t size, km_flag_t kmflags) | | 254 | kmem_alloc(size_t size, km_flag_t kmflags) |
267 | { | | 255 | { |
268 | | | 256 | |
269 | KASSERT(!cpu_intr_p()); | | 257 | KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()), |
270 | KASSERT(!cpu_softintr_p()); | | 258 | "kmem(9) should not be used from the interrupt context"); |
271 | return kmem_intr_alloc(size, kmflags); | | 259 | return kmem_intr_alloc(size, kmflags); |
272 | } | | 260 | } |
273 | | | 261 | |
274 | /* | | 262 | /* |
275 | * kmem_zalloc: allocate zeroed wired memory. | | 263 | * kmem_zalloc: allocate zeroed wired memory. |
276 | * => must not be called from interrupt context. | | 264 | * => must not be called from interrupt context. |
277 | */ | | 265 | */ |
278 | | | 266 | |
279 | void * | | 267 | void * |
280 | kmem_zalloc(size_t size, km_flag_t kmflags) | | 268 | kmem_zalloc(size_t size, km_flag_t kmflags) |
281 | { | | 269 | { |
282 | | | 270 | |
283 | KASSERT(!cpu_intr_p()); | | 271 | KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()), |
284 | KASSERT(!cpu_softintr_p()); | | 272 | "kmem(9) should not be used from the interrupt context"); |
285 | return kmem_intr_zalloc(size, kmflags); | | 273 | return kmem_intr_zalloc(size, kmflags); |
286 | } | | 274 | } |
287 | | | 275 | |
288 | /* | | 276 | /* |
289 | * kmem_free: free wired memory allocated by kmem_alloc. | | 277 | * kmem_free: free wired memory allocated by kmem_alloc. |
290 | * => must not be called from interrupt context. | | 278 | * => must not be called from interrupt context. |
291 | */ | | 279 | */ |
292 | | | 280 | |
293 | void | | 281 | void |
294 | kmem_free(void *p, size_t size) | | 282 | kmem_free(void *p, size_t size) |
295 | { | | 283 | { |
296 | | | 284 | |
297 | KASSERT(!cpu_intr_p()); | | 285 | KASSERT(!cpu_intr_p()); |
298 | KASSERT(!cpu_softintr_p()); | | 286 | KASSERT(!cpu_softintr_p()); |
299 | kmem_intr_free(p, size); | | 287 | kmem_intr_free(p, size); |
300 | } | | 288 | } |
301 | | | 289 | |
302 | static void | | 290 | static void |
303 | kmem_create_caches(const struct kmem_cache_info *array, | | 291 | kmem_create_caches(const struct kmem_cache_info *array, |
304 | pool_cache_t alloc_table[], size_t maxsize) | | 292 | pool_cache_t alloc_table[], size_t maxsize) |
305 | { | | 293 | { |
306 | size_t table_unit = (1 << KMEM_SHIFT); | | 294 | size_t table_unit = (1 << KMEM_SHIFT); |
307 | size_t size = table_unit; | | 295 | size_t size = table_unit; |
308 | int i; | | 296 | int i; |
309 | | | 297 | |
310 | for (i = 0; array[i].kc_size != 0 ; i++) { | | 298 | for (i = 0; array[i].kc_size != 0 ; i++) { |
| | | 299 | const char *name = array[i].kc_name; |
311 | size_t cache_size = array[i].kc_size; | | 300 | size_t cache_size = array[i].kc_size; |
| | | 301 | int flags = PR_NOALIGN; |
| | | 302 | pool_cache_t pc; |
312 | size_t align; | | 303 | size_t align; |
313 | | | 304 | |
314 | if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0) | | 305 | if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0) |
315 | align = CACHE_LINE_SIZE; | | 306 | align = CACHE_LINE_SIZE; |
316 | else if ((cache_size & (PAGE_SIZE - 1)) == 0) | | 307 | else if ((cache_size & (PAGE_SIZE - 1)) == 0) |
317 | align = PAGE_SIZE; | | 308 | align = PAGE_SIZE; |
318 | else | | 309 | else |
319 | align = KMEM_ALIGN; | | 310 | align = KMEM_ALIGN; |
320 | | | 311 | |
321 | const char *name = array[i].kc_name; | | | |
322 | pool_cache_t pc; | | | |
323 | int flags = PR_NOALIGN; | | | |
324 | if (cache_size < CACHE_LINE_SIZE) | | 312 | if (cache_size < CACHE_LINE_SIZE) |
325 | flags |= PR_NOTOUCH; | | 313 | flags |= PR_NOTOUCH; |
326 | | | 314 | |
327 | /* check if we reached the requested size */ | | 315 | /* check if we reached the requested size */ |
328 | if (cache_size > maxsize) | | 316 | if (cache_size > maxsize) { |
329 | break; | | 317 | break; |
330 | | | 318 | } |
331 | kmem_cache_max = cache_size; | | 319 | if ((cache_size >> KMEM_SHIFT) > kmem_cache_maxidx) { |
| | | 320 | kmem_cache_maxidx = cache_size >> KMEM_SHIFT; |
| | | 321 | } |
332 | | | 322 | |
333 | #if defined(KMEM_POISON) | | 323 | #if defined(KMEM_POISON) |
334 | pc = pool_cache_init(cache_size, align, 0, flags, | | 324 | pc = pool_cache_init(cache_size, align, 0, flags, |
335 | name, &pool_allocator_kmem, IPL_VM, kmem_poison_ctor, | | 325 | name, &pool_allocator_kmem, IPL_VM, kmem_poison_ctor, |
336 | NULL, (void *)cache_size); | | 326 | NULL, (void *)cache_size); |
337 | #else /* defined(KMEM_POISON) */ | | 327 | #else /* defined(KMEM_POISON) */ |
338 | pc = pool_cache_init(cache_size, align, 0, flags, | | 328 | pc = pool_cache_init(cache_size, align, 0, flags, |
339 | name, &pool_allocator_kmem, IPL_VM, NULL, NULL, NULL); | | 329 | name, &pool_allocator_kmem, IPL_VM, NULL, NULL, NULL); |
340 | #endif /* defined(KMEM_POISON) */ | | 330 | #endif /* defined(KMEM_POISON) */ |
341 | | | 331 | |
342 | while (size <= cache_size) { | | 332 | while (size <= cache_size) { |
343 | alloc_table[(size - 1) >> KMEM_SHIFT] = pc; | | 333 | alloc_table[(size - 1) >> KMEM_SHIFT] = pc; |
344 | size += table_unit; | | 334 | size += table_unit; |
345 | } | | 335 | } |
346 | } | | 336 | } |
347 | } | | 337 | } |
348 | | | 338 | |
349 | void | | 339 | void |
350 | kmem_init(void) | | 340 | kmem_init(void) |
351 | { | | 341 | { |
352 | | | 342 | |
353 | #ifdef KMEM_GUARD | | 343 | #ifdef KMEM_GUARD |
354 | uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size, | | 344 | uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size, |
355 | kernel_map); | | 345 | kernel_map); |
356 | #endif | | 346 | #endif |
357 | | | | |
358 | kmem_create_caches(kmem_cache_sizes, kmem_cache, KMEM_MAXSIZE); | | 347 | kmem_create_caches(kmem_cache_sizes, kmem_cache, KMEM_MAXSIZE); |
359 | } | | 348 | } |
360 | | | 349 | |
361 | size_t | | 350 | size_t |
362 | kmem_roundup_size(size_t size) | | 351 | kmem_roundup_size(size_t size) |
363 | { | | 352 | { |
364 | | | 353 | |
365 | return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1); | | 354 | return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1); |
366 | } | | 355 | } |
367 | | | 356 | |
368 | /* ---- debug */ | | 357 | /* ---- debug */ |
369 | | | 358 | |
370 | #if defined(KMEM_POISON) | | 359 | #if defined(KMEM_POISON) |