Wed Jan 19 09:02:53 2011 UTC
buildfix: uvm_extern.h -> uvm.h


(cegger)

cvs diff -r1.24 -r1.25 src/sys/kern/Attic/kern_malloc_debug.c (unified diff)

--- src/sys/kern/Attic/kern_malloc_debug.c 2011/01/17 07:13:31 1.24
+++ src/sys/kern/Attic/kern_malloc_debug.c 2011/01/19 09:02:52 1.25
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_malloc_debug.c,v 1.24 2011/01/17 07:13:31 uebayasi Exp $ */
+/* $NetBSD: kern_malloc_debug.c,v 1.25 2011/01/19 09:02:52 cegger Exp $ */
 
 /*
  * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.24 2011/01/17 07:13:31 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.25 2011/01/19 09:02:52 cegger Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -65,7 +65,7 @@
 #include <sys/systm.h>
 #include <sys/pool.h>
 
-#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
 
 /*
  * debug_malloc_type and debug_malloc_size define the type and size of
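
The change itself is a one-line include fix (uvm_extern.h -> uvm.h). The file it touches implements the guard-page debugging malloc described in its header comment: every allocation reserves two virtual pages, leaves the second one unmapped so an overflow faults immediately, aligns the returned pointer so the buffer ends at the page boundary, and free() unmaps the page again to catch dangling writes. The following is a minimal userland sketch of that idea, assuming POSIX mmap(2)/mprotect(2); the guard_alloc() helper is hypothetical and only illustrates the scheme, it is not the kernel code.

/*
 * Userland illustration (hypothetical): reserve two pages, make the second
 * one inaccessible, and return a pointer whose buffer ends at the guard
 * page, mirroring debug_malloc()'s roundup(size, sizeof(long)) alignment.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
guard_alloc(size_t size)
{
	size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
	char *base;

	/* Same limitation as the kernel code: one page at most. */
	if (size == 0 || size > pagesize)
		return NULL;

	/* Two pages: the first holds the buffer, the second is the guard. */
	base = mmap(NULL, 2 * pagesize, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* Make the second page inaccessible (the kernel leaves it unmapped). */
	if (mprotect(base + pagesize, pagesize, PROT_NONE) == -1) {
		(void)munmap(base, 2 * pagesize);
		return NULL;
	}

	/* Round up so the buffer ends at (or very close to) the boundary. */
	size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
	return base + pagesize - size;
}

int
main(void)
{
	char *p = guard_alloc(24);

	if (p == NULL)
		return 1;
	memset(p, 0, 24);	/* in bounds: fine */
	/* p[24] = 'x'; */	/* one past the end: faults on the guard page */
	printf("guarded buffer at %p\n", (void *)p);
	return 0;
}

A write one byte past the returned buffer lands on the PROT_NONE page and faults at the offending store, which is the same immediate diagnostic the kernel version gets from its unmapped second page.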