Sun Feb 27 14:24:11 2022 UTC
vmem(9): Assert addresses are quantum-aligned.


(riastradh)
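The new assertions enforce the arena's quantum-alignment invariant: every
address vmem(9) hands out, and every address a caller passes back via
vmem_free() or vmem_xfree(), must be a multiple of the arena's quantum.
Since the quantum is a power of two, the check is a single mask test
against vm_quantum_mask (quantum - 1). A minimal userland sketch of the
same test (the names below are illustrative, not from the kernel source):

	/* Quantum alignment: for a power-of-two quantum, an address is
	 * aligned iff its low bits under (quantum - 1) are all zero. */
	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		uintmax_t quantum = 4096;	/* e.g. a page-sized quantum */
		uintmax_t mask = quantum - 1;	/* analogue of vm->vm_quantum_mask */
		uintmax_t addr = 0x7f000;	/* candidate: low 12 bits zero */

		assert((addr & mask) == 0);	/* what the new KASSERTMSGs verify */
		return 0;
	}

When the in-kernel assertion fires, KASSERTMSG additionally prints the
arena name, the quantum mask, and the offending address, so misaligned
callers are easy to identify from the panic message.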
diff -r1.106 -r1.107 src/sys/kern/subr_vmem.c


--- src/sys/kern/subr_vmem.c 2021/08/17 22:00:32 1.106
+++ src/sys/kern/subr_vmem.c 2022/02/27 14:24:11 1.107
@@ -1,14 +1,14 @@
-/*	$NetBSD: subr_vmem.c,v 1.106 2021/08/17 22:00:32 andvar Exp $	*/
+/*	$NetBSD: subr_vmem.c,v 1.107 2022/02/27 14:24:11 riastradh Exp $	*/
 
 /*-
  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -36,27 +36,27 @@
  * - A pool(9) is used for vmem boundary tags
  * - During a pool get call the global vmem_btag_refill_lock is taken,
  *   to serialize access to the allocation reserve, but no other
  *   vmem arena locks.
  * - During pool_put calls no vmem mutexes are locked.
  * - pool_drain doesn't hold the pool's mutex while releasing memory to
  *   its backing therefore no interferance with any vmem mutexes.
  * - The boundary tag pool is forced to put page headers into pool pages
  *   (PR_PHINPAGE) and not off page to avoid pool recursion.
  *   (due to sizeof(bt_t) it should be the case anyway)
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.106 2021/08/17 22:00:32 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.107 2022/02/27 14:24:11 riastradh Exp $");
 
 #if defined(_KERNEL) && defined(_KERNEL_OPT)
 #include "opt_ddb.h"
 #endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */
 
 #include <sys/param.h>
 #include <sys/hash.h>
 #include <sys/queue.h>
 #include <sys/bitops.h>
 
 #if defined(_KERNEL)
 #include <sys/systm.h>
 #include <sys/kernel.h>	/* hz */
@@ -1085,26 +1085,30 @@ vmem_alloc(vmem_t *vm, vmem_size_t size,
 		qcache_t *qc = vm->vm_qcache[qidx - 1];
 
 		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
 		if (addrp != NULL)
 			*addrp = (vmem_addr_t)p;
 		error = (p == NULL) ? ENOMEM : 0;
 		goto out;
 	}
 #endif /* defined(QCACHE) */
 
 	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
 	    flags, addrp);
 out:
+	KASSERTMSG(error || addrp == NULL ||
+	    (*addrp & vm->vm_quantum_mask) == 0,
+	    "vmem %s mask=0x%jx addr=0x%jx",
+	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
 	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
 	return error;
 }
 
 int
 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
     const vmem_size_t phase, const vmem_size_t nocross,
     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
     vmem_addr_t *addrp)
 {
 	struct vmem_freelist *list;
 	struct vmem_freelist *first;
 	struct vmem_freelist *end;
@@ -1275,63 +1279,74 @@ gotit:
 		bt_insbusy(vm, bt);
 		vmem_check(vm);
 		bt_free(vm, btnew);
 		btnew = bt;
 	}
 	if (btnew2 != NULL) {
 		bt_free(vm, btnew2);
 	}
 	KASSERT(btnew->bt_size >= size);
 	btnew->bt_type = BT_TYPE_BUSY;
 	if (addrp != NULL)
 		*addrp = btnew->bt_start;
 	VMEM_UNLOCK(vm);
+	KASSERTMSG(addrp == NULL ||
+	    (*addrp & vm->vm_quantum_mask) == 0,
+	    "vmem %s mask=0x%jx addr=0x%jx",
+	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
 	return 0;
 }
 
 /*
  * vmem_free: free the resource to the arena.
  */
 
 void
 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
 {
 
 	KASSERT(size > 0);
+	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
+	    "vmem %s mask=0x%jx addr=0x%jx",
+	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);
 
 #if defined(QCACHE)
 	if (size <= vm->vm_qcache_max) {
 		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
 		qcache_t *qc = vm->vm_qcache[qidx - 1];
 
 		pool_cache_put(qc->qc_cache, (void *)addr);
 		return;
 	}
 #endif /* defined(QCACHE) */
 
 	vmem_xfree(vm, addr, size);
 }
 
 void
 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
 {
 	bt_t *bt;
 
 	KASSERT(size > 0);
+	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
+	    "vmem %s mask=0x%jx addr=0x%jx",
+	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);
 
 	VMEM_LOCK(vm);
 
 	bt = bt_lookupbusy(vm, addr);
-	KASSERT(bt != NULL);
+	KASSERTMSG(bt != NULL, "vmem %s addr 0x%jx size 0x%jx",
+	    vm->vm_name, (uintmax_t)addr, (uintmax_t)size);
 	KASSERT(bt->bt_start == addr);
 	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
 
 	/* vmem_xfree_bt() drops the lock. */
 	vmem_xfree_bt(vm, bt);
 }
 
 void
 vmem_xfreeall(vmem_t *vm)
 {
 	bt_t *bt;
 