Sun Jan 29 13:38:15 2012 UTC ()
Move the condvar calls under interlock protection.
Call uvm_kick_pdaemon() when the caller may sleep and there is no space left in the arena.


(para)
diff -r1.67 -r1.68 src/sys/kern/subr_vmem.c

cvs diff -r1.67 -r1.68 src/sys/kern/subr_vmem.c (side-by-side view; expand / switch to unified diff)

--- src/sys/kern/subr_vmem.c 2012/01/28 23:05:48 1.67
+++ src/sys/kern/subr_vmem.c 2012/01/29 13:38:15 1.68
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_vmem.c,v 1.67 2012/01/28 23:05:48 rmind Exp $ */ 1/* $NetBSD: subr_vmem.c,v 1.68 2012/01/29 13:38:15 para Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi, 4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -24,27 +24,27 @@ @@ -24,27 +24,27 @@
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE. 26 * SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * reference: 30 * reference:
31 * - Magazines and Vmem: Extending the Slab Allocator 31 * - Magazines and Vmem: Extending the Slab Allocator
32 * to Many CPUs and Arbitrary Resources 32 * to Many CPUs and Arbitrary Resources
33 * http://www.usenix.org/event/usenix01/bonwick.html 33 * http://www.usenix.org/event/usenix01/bonwick.html
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.67 2012/01/28 23:05:48 rmind Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.68 2012/01/29 13:38:15 para Exp $");
38 38
39#if defined(_KERNEL) 39#if defined(_KERNEL)
40#include "opt_ddb.h" 40#include "opt_ddb.h"
41#define QCACHE 41#define QCACHE
42#endif /* defined(_KERNEL) */ 42#endif /* defined(_KERNEL) */
43 43
44#include <sys/param.h> 44#include <sys/param.h>
45#include <sys/hash.h> 45#include <sys/hash.h>
46#include <sys/queue.h> 46#include <sys/queue.h>
47#include <sys/bitops.h> 47#include <sys/bitops.h>
48 48
49#if defined(_KERNEL) 49#if defined(_KERNEL)
50#include <sys/systm.h> 50#include <sys/systm.h>
@@ -1237,26 +1237,33 @@ retry: @@ -1237,26 +1237,33 @@ retry:
1237 /* 1237 /*
1238 * XXX should try to import a region large enough to 1238 * XXX should try to import a region large enough to
1239 * satisfy restrictions? 1239 * satisfy restrictions?
1240 */ 1240 */
1241 1241
1242 goto fail; 1242 goto fail;
1243 } 1243 }
1244 /* XXX eeek, minaddr & maxaddr not respected */ 1244 /* XXX eeek, minaddr & maxaddr not respected */
1245 if (vmem_import(vm, size, flags) == 0) { 1245 if (vmem_import(vm, size, flags) == 0) {
1246 goto retry; 1246 goto retry;
1247 } 1247 }
1248 /* XXX */ 1248 /* XXX */
1249 1249
 1250 if ((flags & VM_SLEEP) != 0) {
 1251 uvm_kick_pdaemon();
 1252 VMEM_LOCK(vm);
 1253 VMEM_CONDVAR_WAIT(vm);
 1254 VMEM_UNLOCK(vm);
 1255 goto retry;
 1256 }
1250fail: 1257fail:
1251 bt_free(vm, btnew); 1258 bt_free(vm, btnew);
1252 bt_free(vm, btnew2); 1259 bt_free(vm, btnew2);
1253 return ENOMEM; 1260 return ENOMEM;
1254 1261
1255gotit: 1262gotit:
1256 KASSERT(bt->bt_type == BT_TYPE_FREE); 1263 KASSERT(bt->bt_type == BT_TYPE_FREE);
1257 KASSERT(bt->bt_size >= size); 1264 KASSERT(bt->bt_size >= size);
1258 bt_remfree(vm, bt); 1265 bt_remfree(vm, bt);
1259 vmem_check(vm); 1266 vmem_check(vm);
1260 if (bt->bt_start != start) { 1267 if (bt->bt_start != start) {
1261 btnew2->bt_type = BT_TYPE_FREE; 1268 btnew2->bt_type = BT_TYPE_FREE;
1262 btnew2->bt_start = bt->bt_start; 1269 btnew2->bt_start = bt->bt_start;
@@ -1375,41 +1382,44 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr, @@ -1375,41 +1382,44 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
1375 if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN && 1382 if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1376 t->bt_size == bt->bt_size) { 1383 t->bt_size == bt->bt_size) {
1377 vmem_addr_t spanaddr; 1384 vmem_addr_t spanaddr;
1378 vmem_size_t spansize; 1385 vmem_size_t spansize;
1379 1386
1380 KASSERT(t->bt_start == bt->bt_start); 1387 KASSERT(t->bt_start == bt->bt_start);
1381 spanaddr = bt->bt_start; 1388 spanaddr = bt->bt_start;
1382 spansize = bt->bt_size; 1389 spansize = bt->bt_size;
1383 bt_remseg(vm, bt); 1390 bt_remseg(vm, bt);
1384 LIST_INSERT_HEAD(&tofree, bt, bt_freelist); 1391 LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
1385 bt_remseg(vm, t); 1392 bt_remseg(vm, t);
1386 LIST_INSERT_HEAD(&tofree, t, bt_freelist); 1393 LIST_INSERT_HEAD(&tofree, t, bt_freelist);
1387 vm->vm_size -= spansize; 1394 vm->vm_size -= spansize;
 1395#if defined(_KERNEL)
 1396 VMEM_CONDVAR_BROADCAST(vm);
 1397#endif /* defined(_KERNEL) */
1388 VMEM_UNLOCK(vm); 1398 VMEM_UNLOCK(vm);
1389 (*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize); 1399 (*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
1390 } else { 1400 } else {
1391 bt_insfree(vm, bt); 1401 bt_insfree(vm, bt);
 1402#if defined(_KERNEL)
 1403 VMEM_CONDVAR_BROADCAST(vm);
 1404#endif /* defined(_KERNEL) */
1392 VMEM_UNLOCK(vm); 1405 VMEM_UNLOCK(vm);
1393 } 1406 }
1394 1407
1395 while (!LIST_EMPTY(&tofree)) { 1408 while (!LIST_EMPTY(&tofree)) {
1396 t = LIST_FIRST(&tofree); 1409 t = LIST_FIRST(&tofree);
1397 LIST_REMOVE(t, bt_freelist); 1410 LIST_REMOVE(t, bt_freelist);
1398 bt_free(vm, t); 1411 bt_free(vm, t);
1399 } 1412 }
1400#if defined(_KERNEL) 
1401 VMEM_CONDVAR_BROADCAST(vm); 
1402#endif /* defined(_KERNEL) */ 
1403} 1413}
1404 1414
1405/* 1415/*
1406 * vmem_add: 1416 * vmem_add:
1407 * 1417 *
1408 * => caller must ensure appropriate spl, 1418 * => caller must ensure appropriate spl,
1409 * if the arena can be accessed from interrupt context. 1419 * if the arena can be accessed from interrupt context.
1410 */ 1420 */
1411 1421
1412int 1422int
1413vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags) 1423vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
1414{ 1424{
1415 1425
@@ -1461,27 +1471,26 @@ vmem_rehash_all(struct work *wk, void *d @@ -1461,27 +1471,26 @@ vmem_rehash_all(struct work *wk, void *d
1461 } 1471 }
1462 desired = vm->vm_nbusytag; 1472 desired = vm->vm_nbusytag;
1463 current = vm->vm_hashsize; 1473 current = vm->vm_hashsize;
1464 VMEM_UNLOCK(vm); 1474 VMEM_UNLOCK(vm);
1465 1475
1466 if (desired > VMEM_HASHSIZE_MAX) { 1476 if (desired > VMEM_HASHSIZE_MAX) {
1467 desired = VMEM_HASHSIZE_MAX; 1477 desired = VMEM_HASHSIZE_MAX;
1468 } else if (desired < VMEM_HASHSIZE_MIN) { 1478 } else if (desired < VMEM_HASHSIZE_MIN) {
1469 desired = VMEM_HASHSIZE_MIN; 1479 desired = VMEM_HASHSIZE_MIN;
1470 } 1480 }
1471 if (desired > current * 2 || desired * 2 < current) { 1481 if (desired > current * 2 || desired * 2 < current) {
1472 vmem_rehash(vm, desired, VM_NOSLEEP); 1482 vmem_rehash(vm, desired, VM_NOSLEEP);
1473 } 1483 }
1474 VMEM_CONDVAR_BROADCAST(vm); 
1475 } 1484 }
1476 mutex_exit(&vmem_list_lock); 1485 mutex_exit(&vmem_list_lock);
1477 1486
1478 callout_schedule(&vmem_rehash_ch, vmem_rehash_interval); 1487 callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1479} 1488}
1480 1489
1481static void 1490static void
1482vmem_rehash_all_kick(void *dummy) 1491vmem_rehash_all_kick(void *dummy)
1483{ 1492{
1484 1493
1485 workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL); 1494 workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1486} 1495}
1487 1496