Tue Jun 24 07:28:23 2014 UTC ()
KMEM_REDZONE+KMEM_POISON is supposed to detect buffer overflows. However, it
only poisons memory beyond kmem_roundup_size(), which means that an overflow
occurring within the page padding will not be detected.

Fix this by making KMEM_REDZONE independent from KMEM_POISON and making it
put a 2-byte pattern at the end of each requested buffer, and check it when
freeing memory to ensure the caller hasn't written outside the requested area.

Not enabled on DIAGNOSTIC for the moment.


(maxv)
diff -r1.53 -r1.54 src/sys/kern/subr_kmem.c

cvs diff -r1.53 -r1.54 src/sys/kern/subr_kmem.c (expand / switch to unified diff)

--- src/sys/kern/subr_kmem.c 2014/06/23 17:43:42 1.53
+++ src/sys/kern/subr_kmem.c 2014/06/24 07:28:23 1.54
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_kmem.c,v 1.53 2014/06/23 17:43:42 maxv Exp $ */ 1/* $NetBSD: subr_kmem.c,v 1.54 2014/06/24 07:28:23 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -62,28 +62,28 @@ @@ -62,28 +62,28 @@
62/* 62/*
63 * This allocator has some debug features enabled with "option DEBUG" and 63 * This allocator has some debug features enabled with "option DEBUG" and
64 * "option DIAGNOSTIC". 64 * "option DIAGNOSTIC".
65 * 65 *
66 * KMEM_POISON 66 * KMEM_POISON
67 * Try to detect modify-after-free bugs. 67 * Try to detect modify-after-free bugs.
68 * 68 *
69 * Fill freed (in the sense of kmem_free) memory with a garbage pattern. 69 * Fill freed (in the sense of kmem_free) memory with a garbage pattern.
70 * Check the pattern on allocation. 70 * Check the pattern on allocation.
71 * 71 *
72 * KMEM_REDZONE 72 * KMEM_REDZONE
73 * Try to detect overrun bugs. 73 * Try to detect overrun bugs.
74 * 74 *
75 * Allocate some more bytes for each allocation. 75 * Add a 2-byte pattern (allocate some more bytes if needed) at the end
76 * The extra bytes are checked by KMEM_POISON on kmem_free. 76 * of each allocated buffer. Check this pattern on kmem_free.
77 * 77 *
78 * KMEM_SIZE 78 * KMEM_SIZE
79 * Try to detect alloc/free size mismatch bugs. 79 * Try to detect alloc/free size mismatch bugs.
80 * 80 *
81 * Prefix each allocations with a fixed-sized header and record 81 * Prefix each allocations with a fixed-sized header and record
82 * the exact user-requested allocation size in it. 82 * the exact user-requested allocation size in it.
83 * When freeing, compare it with kmem_free's "size" argument. 83 * When freeing, compare it with kmem_free's "size" argument.
84 * 84 *
85 * KMEM_GUARD 85 * KMEM_GUARD
86 * See the below "kmguard" section. 86 * See the below "kmguard" section.
87 */ 87 */
88 88
89/* 89/*
@@ -93,27 +93,27 @@ @@ -93,27 +93,27 @@
93 * See the comment in uvm/uvm_kmguard.c for what kind of bugs it tries to 93 * See the comment in uvm/uvm_kmguard.c for what kind of bugs it tries to
94 * detect. Even if compiled in, it's disabled by default because it's very 94 * detect. Even if compiled in, it's disabled by default because it's very
95 * expensive. You can enable it on boot by: 95 * expensive. You can enable it on boot by:
96 * 96 *
97 * boot -d 97 * boot -d
98 * db> w kmem_guard_depth 0t30000 98 * db> w kmem_guard_depth 0t30000
99 * db> c 99 * db> c
100 * 100 *
101 * The default value of kmem_guard_depth is 0, which means disabled. 101 * The default value of kmem_guard_depth is 0, which means disabled.
102 * It can be changed by KMEM_GUARD_DEPTH kernel config option. 102 * It can be changed by KMEM_GUARD_DEPTH kernel config option.
103 */ 103 */
104 104
105#include <sys/cdefs.h> 105#include <sys/cdefs.h>
106__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.53 2014/06/23 17:43:42 maxv Exp $"); 106__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.54 2014/06/24 07:28:23 maxv Exp $");
107 107
108#include <sys/param.h> 108#include <sys/param.h>
109#include <sys/callback.h> 109#include <sys/callback.h>
110#include <sys/kmem.h> 110#include <sys/kmem.h>
111#include <sys/pool.h> 111#include <sys/pool.h>
112#include <sys/debug.h> 112#include <sys/debug.h>
113#include <sys/lockdebug.h> 113#include <sys/lockdebug.h>
114#include <sys/cpu.h> 114#include <sys/cpu.h>
115 115
116#include <uvm/uvm_extern.h> 116#include <uvm/uvm_extern.h>
117#include <uvm/uvm_map.h> 117#include <uvm/uvm_map.h>
118#include <uvm/uvm_kmguard.h> 118#include <uvm/uvm_kmguard.h>
119 119
@@ -189,29 +189,33 @@ static size_t kmem_cache_big_maxidx __re @@ -189,29 +189,33 @@ static size_t kmem_cache_big_maxidx __re
189#define KMEM_GUARD 189#define KMEM_GUARD
190#endif /* defined(DEBUG) */ 190#endif /* defined(DEBUG) */
191 191
192#if defined(KMEM_POISON) 192#if defined(KMEM_POISON)
193static int kmem_poison_ctor(void *, void *, int); 193static int kmem_poison_ctor(void *, void *, int);
194static void kmem_poison_fill(void *, size_t); 194static void kmem_poison_fill(void *, size_t);
195static void kmem_poison_check(void *, size_t); 195static void kmem_poison_check(void *, size_t);
196#else /* defined(KMEM_POISON) */ 196#else /* defined(KMEM_POISON) */
197#define kmem_poison_fill(p, sz) /* nothing */ 197#define kmem_poison_fill(p, sz) /* nothing */
198#define kmem_poison_check(p, sz) /* nothing */ 198#define kmem_poison_check(p, sz) /* nothing */
199#endif /* defined(KMEM_POISON) */ 199#endif /* defined(KMEM_POISON) */
200 200
201#if defined(KMEM_REDZONE) 201#if defined(KMEM_REDZONE)
202#define REDZONE_SIZE 1 202#define REDZONE_SIZE 2
 203static void kmem_redzone_fill(void *p, size_t sz);
 204static void kmem_redzone_check(void *p, size_t sz);
203#else /* defined(KMEM_REDZONE) */ 205#else /* defined(KMEM_REDZONE) */
204#define REDZONE_SIZE 0 206#define REDZONE_SIZE 0
 207#define kmem_redzone_fill(p, sz) /* nothing */
 208#define kmem_redzone_check(p, sz) /* nothing */
205#endif /* defined(KMEM_REDZONE) */ 209#endif /* defined(KMEM_REDZONE) */
206 210
207#if defined(KMEM_SIZE) 211#if defined(KMEM_SIZE)
208#define SIZE_SIZE (MAX(KMEM_ALIGN, sizeof(size_t))) 212#define SIZE_SIZE (MAX(KMEM_ALIGN, sizeof(size_t)))
209static void kmem_size_set(void *, size_t); 213static void kmem_size_set(void *, size_t);
210static void kmem_size_check(void *, size_t); 214static void kmem_size_check(void *, size_t);
211#else 215#else
212#define SIZE_SIZE 0 216#define SIZE_SIZE 0
213#define kmem_size_set(p, sz) /* nothing */ 217#define kmem_size_set(p, sz) /* nothing */
214#define kmem_size_check(p, sz) /* nothing */ 218#define kmem_size_check(p, sz) /* nothing */
215#endif 219#endif
216 220
217#if defined(KMEM_GUARD) 221#if defined(KMEM_GUARD)
@@ -238,52 +242,61 @@ kmem_intr_alloc(size_t requested_size, k @@ -238,52 +242,61 @@ kmem_intr_alloc(size_t requested_size, k
238 size_t size; 242 size_t size;
239 pool_cache_t pc; 243 pool_cache_t pc;
240 uint8_t *p; 244 uint8_t *p;
241 245
242 KASSERT(requested_size > 0); 246 KASSERT(requested_size > 0);
243 247
244#ifdef KMEM_GUARD 248#ifdef KMEM_GUARD
245 if (requested_size <= kmem_guard_size) { 249 if (requested_size <= kmem_guard_size) {
246 return uvm_kmguard_alloc(&kmem_guard, requested_size, 250 return uvm_kmguard_alloc(&kmem_guard, requested_size,
247 (kmflags & KM_SLEEP) != 0); 251 (kmflags & KM_SLEEP) != 0);
248 } 252 }
249#endif 253#endif
250 size = kmem_roundup_size(requested_size); 254 size = kmem_roundup_size(requested_size);
251 allocsz = size + REDZONE_SIZE + SIZE_SIZE; 255 allocsz = size + SIZE_SIZE;
 256
 257#ifdef KMEM_REDZONE
 258 if (size - requested_size < REDZONE_SIZE) {
 259 /* If there isn't enough space in the page padding,
 260 * allocate two more bytes for the red zone. */
 261 allocsz += REDZONE_SIZE;
 262 }
 263#endif
252 264
253 if ((index = ((allocsz -1) >> KMEM_SHIFT)) 265 if ((index = ((allocsz -1) >> KMEM_SHIFT))
254 < kmem_cache_maxidx) { 266 < kmem_cache_maxidx) {
255 pc = kmem_cache[index]; 267 pc = kmem_cache[index];
256 } else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT)) 268 } else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
257 < kmem_cache_big_maxidx) { 269 < kmem_cache_big_maxidx) {
258 pc = kmem_cache_big[index]; 270 pc = kmem_cache_big[index];
259 } else { 271 } else {
260 int ret = uvm_km_kmem_alloc(kmem_va_arena, 272 int ret = uvm_km_kmem_alloc(kmem_va_arena,
261 (vsize_t)round_page(size), 273 (vsize_t)round_page(size),
262 ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP) 274 ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
263 | VM_INSTANTFIT, (vmem_addr_t *)&p); 275 | VM_INSTANTFIT, (vmem_addr_t *)&p);
264 if (ret) { 276 if (ret) {
265 return NULL; 277 return NULL;
266 } 278 }
267 FREECHECK_OUT(&kmem_freecheck, p); 279 FREECHECK_OUT(&kmem_freecheck, p);
268 return p; 280 return p;
269 } 281 }
270 282
271 p = pool_cache_get(pc, kmflags); 283 p = pool_cache_get(pc, kmflags);
272 284
273 if (__predict_true(p != NULL)) { 285 if (__predict_true(p != NULL)) {
274 kmem_poison_check(p, size); 286 kmem_poison_check(p, size);
275 FREECHECK_OUT(&kmem_freecheck, p); 287 FREECHECK_OUT(&kmem_freecheck, p);
276 kmem_size_set(p, requested_size); 288 kmem_size_set(p, requested_size);
 289 kmem_redzone_fill(p, requested_size + SIZE_SIZE);
277 290
278 return p + SIZE_SIZE; 291 return p + SIZE_SIZE;
279 } 292 }
280 return p; 293 return p;
281} 294}
282 295
283/* 296/*
284 * kmem_intr_zalloc: allocate zeroed wired memory. 297 * kmem_intr_zalloc: allocate zeroed wired memory.
285 */ 298 */
286 299
287void * 300void *
288kmem_intr_zalloc(size_t size, km_flag_t kmflags) 301kmem_intr_zalloc(size_t size, km_flag_t kmflags)
289{ 302{
@@ -306,48 +319,54 @@ kmem_intr_free(void *p, size_t requested @@ -306,48 +319,54 @@ kmem_intr_free(void *p, size_t requested
306 size_t allocsz, index; 319 size_t allocsz, index;
307 size_t size; 320 size_t size;
308 pool_cache_t pc; 321 pool_cache_t pc;
309 322
310 KASSERT(p != NULL); 323 KASSERT(p != NULL);
311 KASSERT(requested_size > 0); 324 KASSERT(requested_size > 0);
312 325
313#ifdef KMEM_GUARD 326#ifdef KMEM_GUARD
314 if (requested_size <= kmem_guard_size) { 327 if (requested_size <= kmem_guard_size) {
315 uvm_kmguard_free(&kmem_guard, requested_size, p); 328 uvm_kmguard_free(&kmem_guard, requested_size, p);
316 return; 329 return;
317 } 330 }
318#endif 331#endif
 332
319 size = kmem_roundup_size(requested_size); 333 size = kmem_roundup_size(requested_size);
320 allocsz = size + REDZONE_SIZE + SIZE_SIZE; 334 allocsz = size + SIZE_SIZE;
 335
 336#ifdef KMEM_REDZONE
 337 if (size - requested_size < REDZONE_SIZE) {
 338 allocsz += REDZONE_SIZE;
 339 }
 340#endif
321 341
322 if ((index = ((allocsz -1) >> KMEM_SHIFT)) 342 if ((index = ((allocsz -1) >> KMEM_SHIFT))
323 < kmem_cache_maxidx) { 343 < kmem_cache_maxidx) {
324 pc = kmem_cache[index]; 344 pc = kmem_cache[index];
325 } else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT)) 345 } else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
326 < kmem_cache_big_maxidx) { 346 < kmem_cache_big_maxidx) {
327 pc = kmem_cache_big[index]; 347 pc = kmem_cache_big[index];
328 } else { 348 } else {
329 FREECHECK_IN(&kmem_freecheck, p); 349 FREECHECK_IN(&kmem_freecheck, p);
330 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p, 350 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
331 round_page(size)); 351 round_page(size));
332 return; 352 return;
333 } 353 }
334 354
335 p = (uint8_t *)p - SIZE_SIZE; 355 p = (uint8_t *)p - SIZE_SIZE;
336 kmem_size_check(p, requested_size); 356 kmem_size_check(p, requested_size);
 357 kmem_redzone_check(p, requested_size + SIZE_SIZE);
337 FREECHECK_IN(&kmem_freecheck, p); 358 FREECHECK_IN(&kmem_freecheck, p);
338 LOCKDEBUG_MEM_CHECK(p, size); 359 LOCKDEBUG_MEM_CHECK(p, size);
339 kmem_poison_check((uint8_t *)p + SIZE_SIZE + size, 
340 allocsz - (SIZE_SIZE + size)); 
341 kmem_poison_fill(p, allocsz); 360 kmem_poison_fill(p, allocsz);
342 361
343 pool_cache_put(pc, p); 362 pool_cache_put(pc, p);
344} 363}
345 364
346/* ---- kmem API */ 365/* ---- kmem API */
347 366
348/* 367/*
349 * kmem_alloc: allocate wired memory. 368 * kmem_alloc: allocate wired memory.
350 * => must not be called from interrupt context. 369 * => must not be called from interrupt context.
351 */ 370 */
352 371
353void * 372void *
@@ -455,40 +474,40 @@ kmem_init(void) @@ -455,40 +474,40 @@ kmem_init(void)
455 kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes, 474 kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
456 kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM); 475 kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
457 kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes, 476 kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
458 kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM); 477 kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
459} 478}
460 479
461size_t 480size_t
462kmem_roundup_size(size_t size) 481kmem_roundup_size(size_t size)
463{ 482{
464 483
465 return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1); 484 return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
466} 485}
467 486
468/* ---- debug */ 487/* ------------------ DEBUG / DIAGNOSTIC ------------------ */
469 
470#if defined(KMEM_POISON) 
471 488
 489#if defined(KMEM_POISON) || defined(KMEM_REDZONE)
472#if defined(_LP64) 490#if defined(_LP64)
473#define PRIME 0x9e37fffffffc0000UL 491#define PRIME 0x9e37fffffffc0000UL
474#else /* defined(_LP64) */ 492#else /* defined(_LP64) */
475#define PRIME 0x9e3779b1 493#define PRIME 0x9e3779b1
476#endif /* defined(_LP64) */ 494#endif /* defined(_LP64) */
 495#endif /* defined(KMEM_POISON) || defined(KMEM_REDZONE) */
477 496
 497#if defined(KMEM_POISON)
478static inline uint8_t 498static inline uint8_t
479kmem_poison_pattern(const void *p) 499kmem_poison_pattern(const void *p)
480{ 500{
481 
482 return (uint8_t)(((uintptr_t)p) * PRIME 501 return (uint8_t)(((uintptr_t)p) * PRIME
483 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT); 502 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
484} 503}
485 504
486static int 505static int
487kmem_poison_ctor(void *arg, void *obj, int flag) 506kmem_poison_ctor(void *arg, void *obj, int flag)
488{ 507{
489 size_t sz = (size_t)arg; 508 size_t sz = (size_t)arg;
490 509
491 kmem_poison_fill(obj, sz); 510 kmem_poison_fill(obj, sz);
492 511
493 return 0; 512 return 0;
494} 513}
@@ -515,49 +534,90 @@ kmem_poison_check(void *p, size_t sz) @@ -515,49 +534,90 @@ kmem_poison_check(void *p, size_t sz)
515 534
516 cp = p; 535 cp = p;
517 ep = cp + sz; 536 ep = cp + sz;
518 while (cp < ep) { 537 while (cp < ep) {
519 const uint8_t expected = kmem_poison_pattern(cp); 538 const uint8_t expected = kmem_poison_pattern(cp);
520 539
521 if (*cp != expected) { 540 if (*cp != expected) {
522 panic("%s: %p: 0x%02x != 0x%02x\n", 541 panic("%s: %p: 0x%02x != 0x%02x\n",
523 __func__, cp, *cp, expected); 542 __func__, cp, *cp, expected);
524 } 543 }
525 cp++; 544 cp++;
526 } 545 }
527} 546}
528 
529#endif /* defined(KMEM_POISON) */ 547#endif /* defined(KMEM_POISON) */
530 548
531#if defined(KMEM_SIZE) 549#if defined(KMEM_SIZE)
532static void 550static void
533kmem_size_set(void *p, size_t sz) 551kmem_size_set(void *p, size_t sz)
534{ 552{
535 
536 memcpy(p, &sz, sizeof(sz)); 553 memcpy(p, &sz, sizeof(sz));
537} 554}
538 555
539static void 556static void
540kmem_size_check(void *p, size_t sz) 557kmem_size_check(void *p, size_t sz)
541{ 558{
542 size_t psz; 559 size_t psz;
543 560
544 memcpy(&psz, p, sizeof(psz)); 561 memcpy(&psz, p, sizeof(psz));
545 if (psz != sz) { 562 if (psz != sz) {
546 panic("kmem_free(%p, %zu) != allocated size %zu", 563 panic("kmem_free(%p, %zu) != allocated size %zu",
547 (const uint8_t *)p + SIZE_SIZE, sz, psz); 564 (const uint8_t *)p + SIZE_SIZE, sz, psz);
548 } 565 }
549} 566}
550#endif /* defined(KMEM_SIZE) */ 567#endif /* defined(KMEM_SIZE) */
 568
 569#if defined(KMEM_REDZONE)
 570static inline uint8_t
 571kmem_redzone_pattern(const void *p)
 572{
 573 return (uint8_t)(((uintptr_t)p) * PRIME
 574 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
 575}
 576
 577static void
 578kmem_redzone_fill(void *p, size_t sz)
 579{
 580 uint8_t *cp;
 581 const uint8_t *ep;
 582
 583 cp = (uint8_t *)p + sz;
 584 ep = cp + REDZONE_SIZE;
 585 while (cp < ep) {
 586 *cp = kmem_redzone_pattern(cp);
 587 cp++;
 588 }
 589}
 590
 591static void
 592kmem_redzone_check(void *p, size_t sz)
 593{
 594 uint8_t *cp;
 595 const uint8_t *ep;
 596
 597 cp = (uint8_t *)p + sz;
 598 ep = (uint8_t *)p + sz + REDZONE_SIZE;
 599 while (cp < ep) {
 600 const uint8_t expected = kmem_redzone_pattern(cp);
 601
 602 if (*cp != expected) {
 603 panic("%s: %p: 0x%02x != 0x%02x\n",
 604 __func__, cp, *cp, expected);
 605 }
 606 cp++;
 607 }
 608}
 609#endif /* defined(KMEM_REDZONE) */
 610
551 611
552/* 612/*
553 * Used to dynamically allocate string with kmem accordingly to format. 613 * Used to dynamically allocate string with kmem accordingly to format.
554 */ 614 */
555char * 615char *
556kmem_asprintf(const char *fmt, ...) 616kmem_asprintf(const char *fmt, ...)
557{ 617{
558 int size __diagused, len; 618 int size __diagused, len;
559 va_list va; 619 va_list va;
560 char *str; 620 char *str;
561 621
562 va_start(va, fmt); 622 va_start(va, fmt);
563 len = vsnprintf(NULL, 0, fmt, va); 623 len = vsnprintf(NULL, 0, fmt, va);