Mon Mar 17 08:27:51 2008 UTC ()
- simplify ASSERT_SLEEPABLE.
- move it from proc.h to systm.h.
- add some more checks.
- make it a little more LKM-friendly.


(yamt)
diff -r1.134 -r1.135 src/sys/kern/kern_lock.c
diff -r1.117 -r1.118 src/sys/kern/kern_malloc.c
diff -r1.182 -r1.183 src/sys/kern/kern_subr.c
diff -r1.69 -r1.70 src/sys/kern/subr_extent.c
diff -r1.2 -r1.3 src/sys/kern/subr_percpu.c
diff -r1.153 -r1.154 src/sys/kern/subr_pool.c
diff -r1.11 -r1.12 src/sys/kern/subr_specificdata.c
diff -r1.41 -r1.42 src/sys/kern/subr_vmem.c
diff -r1.17 -r1.18 src/sys/kern/vfs_trans.c
diff -r1.270 -r1.271 src/sys/sys/proc.h
diff -r1.214 -r1.215 src/sys/sys/systm.h

cvs diff -r1.134 -r1.135 src/sys/kern/kern_lock.c (expand / switch to unified diff)

--- src/sys/kern/kern_lock.c 2008/01/30 14:54:26 1.134
+++ src/sys/kern/kern_lock.c 2008/03/17 08:27:50 1.135
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $ */ 1/* $NetBSD: kern_lock.c,v 1.135 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran. 9 * NASA Ames Research Center, and by Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -28,65 +28,83 @@ @@ -28,65 +28,83 @@
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE. 37 * POSSIBILITY OF SUCH DAMAGE.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.135 2008/03/17 08:27:50 yamt Exp $");
42 42
43#include "opt_multiprocessor.h" 43#include "opt_multiprocessor.h"
44 44
45#include <sys/param.h> 45#include <sys/param.h>
46#include <sys/proc.h> 46#include <sys/proc.h>
47#include <sys/lock.h> 47#include <sys/lock.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/kernel.h> 49#include <sys/kernel.h>
50#include <sys/lockdebug.h> 50#include <sys/lockdebug.h>
51#include <sys/cpu.h> 51#include <sys/cpu.h>
52#include <sys/syslog.h> 52#include <sys/syslog.h>
53#include <sys/atomic.h> 53#include <sys/atomic.h>
54 54
55#include <machine/stdarg.h> 55#include <machine/stdarg.h>
56#include <machine/lock.h> 56#include <machine/lock.h>
57 57
58#include <dev/lockstat.h> 58#include <dev/lockstat.h>
59 59
60#define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0) 60#define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0)
61 61
62bool kernel_lock_dodebug; 62bool kernel_lock_dodebug;
63 63
64__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)] 64__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
65 __aligned(CACHE_LINE_SIZE); 65 __aligned(CACHE_LINE_SIZE);
66 66
67#if defined(LOCKDEBUG) 67#if defined(DEBUG) || defined(LKM)
68void 68void
69assert_sleepable(struct simplelock *interlock, const char *msg) 69assert_sleepable(void)
70{ 70{
 71#if !defined(_RUMPKERNEL)
 72 const char *reason;
71 73
72 if (panicstr != NULL) 74 if (panicstr != NULL) {
73 return; 75 return;
 76 }
 77
74 LOCKDEBUG_BARRIER(kernel_lock, 1); 78 LOCKDEBUG_BARRIER(kernel_lock, 1);
 79
 80 reason = NULL;
75 if (CURCPU_IDLE_P() && !cold) { 81 if (CURCPU_IDLE_P() && !cold) {
76 panic("assert_sleepable: idle"); 82 reason = "idle";
 83 }
 84 if (cpu_intr_p()) {
 85 reason = "interrupt";
77 } 86 }
 87 if ((curlwp->l_pflag & LP_INTR) != 0) {
 88 reason = "softint";
 89 }
 90
 91 if (reason) {
 92 panic("%s: %s caller=%p", __func__, reason,
 93 (void *)RETURN_ADDRESS);
 94 }
 95#endif /* !defined(_RUMPKERNEL) */
78} 96}
79#endif 97#endif /* defined(DEBUG) || defined(LKM) */
80 98
81/* 99/*
82 * rump doesn't need the kernel lock so force it out. We cannot 100 * rump doesn't need the kernel lock so force it out. We cannot
83 * currently easily include it for compilation because of 101 * currently easily include it for compilation because of
84 * a) SPINLOCK_* b) membar_producer(). They are defined in different 102 * a) SPINLOCK_* b) membar_producer(). They are defined in different
85 * places / way for each arch, so just simply do not bother to 103 * places / way for each arch, so just simply do not bother to
86 * fight a lot for no gain (i.e. pain but still no gain). 104 * fight a lot for no gain (i.e. pain but still no gain).
87 */ 105 */
88#ifndef _RUMPKERNEL 106#ifndef _RUMPKERNEL
89/* 107/*
90 * Functions for manipulating the kernel_lock. We put them here 108 * Functions for manipulating the kernel_lock. We put them here
91 * so that they show up in profiles. 109 * so that they show up in profiles.
92 */ 110 */

cvs diff -r1.117 -r1.118 src/sys/kern/kern_malloc.c (expand / switch to unified diff)

--- src/sys/kern/kern_malloc.c 2008/01/03 01:21:08 1.117
+++ src/sys/kern/kern_malloc.c 2008/03/17 08:27:50 1.118
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_malloc.c,v 1.117 2008/01/03 01:21:08 yamt Exp $ */ 1/* $NetBSD: kern_malloc.c,v 1.118 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1987, 1991, 1993 4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -56,27 +56,27 @@ @@ -56,27 +56,27 @@
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95 65 * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
66 */ 66 */
67 67
68#include <sys/cdefs.h> 68#include <sys/cdefs.h>
69__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.117 2008/01/03 01:21:08 yamt Exp $"); 69__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.118 2008/03/17 08:27:50 yamt Exp $");
70 70
71#include <sys/param.h> 71#include <sys/param.h>
72#include <sys/proc.h> 72#include <sys/proc.h>
73#include <sys/kernel.h> 73#include <sys/kernel.h>
74#include <sys/malloc.h> 74#include <sys/malloc.h>
75#include <sys/systm.h> 75#include <sys/systm.h>
76#include <sys/debug.h> 76#include <sys/debug.h>
77#include <sys/mutex.h> 77#include <sys/mutex.h>
78#include <sys/lockdebug.h> 78#include <sys/lockdebug.h>
79 79
80#include <uvm/uvm_extern.h> 80#include <uvm/uvm_extern.h>
81 81
82static struct vm_map_kernel kmem_map_store; 82static struct vm_map_kernel kmem_map_store;
@@ -318,27 +318,27 @@ malloc(unsigned long size, struct malloc @@ -318,27 +318,27 @@ malloc(unsigned long size, struct malloc
318{ 318{
319 struct kmembuckets *kbp; 319 struct kmembuckets *kbp;
320 struct kmemusage *kup; 320 struct kmemusage *kup;
321 struct freelist *freep; 321 struct freelist *freep;
322 long indx, npg, allocsize; 322 long indx, npg, allocsize;
323 char *va, *cp, *savedlist; 323 char *va, *cp, *savedlist;
324#ifdef DIAGNOSTIC 324#ifdef DIAGNOSTIC
325 uint32_t *end, *lp; 325 uint32_t *end, *lp;
326 int copysize; 326 int copysize;
327#endif 327#endif
328 328
329#ifdef LOCKDEBUG 329#ifdef LOCKDEBUG
330 if ((flags & M_NOWAIT) == 0) 330 if ((flags & M_NOWAIT) == 0)
331 ASSERT_SLEEPABLE(NULL, "malloc"); 331 ASSERT_SLEEPABLE();
332#endif 332#endif
333#ifdef MALLOC_DEBUG 333#ifdef MALLOC_DEBUG
334 if (debug_malloc(size, ksp, flags, (void *) &va)) { 334 if (debug_malloc(size, ksp, flags, (void *) &va)) {
335 if (va != 0) 335 if (va != 0)
336 FREECHECK_OUT(&malloc_freecheck, (void *)va); 336 FREECHECK_OUT(&malloc_freecheck, (void *)va);
337 return ((void *) va); 337 return ((void *) va);
338 } 338 }
339#endif 339#endif
340 indx = BUCKETINDX(size); 340 indx = BUCKETINDX(size);
341 kbp = &kmembuckets[indx]; 341 kbp = &kmembuckets[indx];
342 mutex_spin_enter(&malloc_lock); 342 mutex_spin_enter(&malloc_lock);
343#ifdef KMEMSTATS 343#ifdef KMEMSTATS
344 while (ksp->ks_memuse >= ksp->ks_limit) { 344 while (ksp->ks_memuse >= ksp->ks_limit) {
@@ -679,27 +679,27 @@ realloc(void *curaddr, unsigned long new @@ -679,27 +679,27 @@ realloc(void *curaddr, unsigned long new
679 if (curaddr == NULL) 679 if (curaddr == NULL)
680 return (malloc(newsize, ksp, flags)); 680 return (malloc(newsize, ksp, flags));
681 681
682 /* 682 /*
683 * realloc() with zero size is the same as free(). 683 * realloc() with zero size is the same as free().
684 */ 684 */
685 if (newsize == 0) { 685 if (newsize == 0) {
686 free(curaddr, ksp); 686 free(curaddr, ksp);
687 return (NULL); 687 return (NULL);
688 } 688 }
689 689
690#ifdef LOCKDEBUG 690#ifdef LOCKDEBUG
691 if ((flags & M_NOWAIT) == 0) 691 if ((flags & M_NOWAIT) == 0)
692 ASSERT_SLEEPABLE(NULL, "realloc"); 692 ASSERT_SLEEPABLE();
693#endif 693#endif
694 694
695 /* 695 /*
696 * Find out how large the old allocation was (and do some 696 * Find out how large the old allocation was (and do some
697 * sanity checking). 697 * sanity checking).
698 */ 698 */
699 kup = btokup(curaddr); 699 kup = btokup(curaddr);
700 cursize = 1 << kup->ku_indx; 700 cursize = 1 << kup->ku_indx;
701 701
702#ifdef DIAGNOSTIC 702#ifdef DIAGNOSTIC
703 /* 703 /*
704 * Check for returns of data that do not point to the 704 * Check for returns of data that do not point to the
705 * beginning of the allocation. 705 * beginning of the allocation.

cvs diff -r1.182 -r1.183 src/sys/kern/kern_subr.c (expand / switch to unified diff)

--- src/sys/kern/kern_subr.c 2008/02/28 14:25:12 1.182
+++ src/sys/kern/kern_subr.c 2008/03/17 08:27:50 1.183
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_subr.c,v 1.182 2008/02/28 14:25:12 drochner Exp $ */ 1/* $NetBSD: kern_subr.c,v 1.183 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2006 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2006 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Luke Mewburn. 9 * NASA Ames Research Center, and by Luke Mewburn.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -76,27 +76,27 @@ @@ -76,27 +76,27 @@
76 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 76 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83 * SUCH DAMAGE. 83 * SUCH DAMAGE.
84 * 84 *
85 * @(#)kern_subr.c 8.4 (Berkeley) 2/14/95 85 * @(#)kern_subr.c 8.4 (Berkeley) 2/14/95
86 */ 86 */
87 87
88#include <sys/cdefs.h> 88#include <sys/cdefs.h>
89__KERNEL_RCSID(0, "$NetBSD: kern_subr.c,v 1.182 2008/02/28 14:25:12 drochner Exp $"); 89__KERNEL_RCSID(0, "$NetBSD: kern_subr.c,v 1.183 2008/03/17 08:27:50 yamt Exp $");
90 90
91#include "opt_ddb.h" 91#include "opt_ddb.h"
92#include "opt_md.h" 92#include "opt_md.h"
93#include "opt_syscall_debug.h" 93#include "opt_syscall_debug.h"
94#include "opt_ktrace.h" 94#include "opt_ktrace.h"
95#include "opt_ptrace.h" 95#include "opt_ptrace.h"
96#include "opt_powerhook.h" 96#include "opt_powerhook.h"
97#include "opt_tftproot.h" 97#include "opt_tftproot.h"
98 98
99#include <sys/param.h> 99#include <sys/param.h>
100#include <sys/systm.h> 100#include <sys/systm.h>
101#include <sys/proc.h> 101#include <sys/proc.h>
102#include <sys/malloc.h> 102#include <sys/malloc.h>
@@ -150,27 +150,27 @@ uio_setup_sysspace(struct uio *uio) @@ -150,27 +150,27 @@ uio_setup_sysspace(struct uio *uio)
150 150
151 uio->uio_vmspace = vmspace_kernel(); 151 uio->uio_vmspace = vmspace_kernel();
152} 152}
153 153
154int 154int
155uiomove(void *buf, size_t n, struct uio *uio) 155uiomove(void *buf, size_t n, struct uio *uio)
156{ 156{
157 struct vmspace *vm = uio->uio_vmspace; 157 struct vmspace *vm = uio->uio_vmspace;
158 struct iovec *iov; 158 struct iovec *iov;
159 size_t cnt; 159 size_t cnt;
160 int error = 0; 160 int error = 0;
161 char *cp = buf; 161 char *cp = buf;
162 162
163 ASSERT_SLEEPABLE(NULL, "uiomove"); 163 ASSERT_SLEEPABLE();
164 164
165#ifdef DIAGNOSTIC 165#ifdef DIAGNOSTIC
166 if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) 166 if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
167 panic("uiomove: mode"); 167 panic("uiomove: mode");
168#endif 168#endif
169 while (n > 0 && uio->uio_resid) { 169 while (n > 0 && uio->uio_resid) {
170 iov = uio->uio_iov; 170 iov = uio->uio_iov;
171 cnt = iov->iov_len; 171 cnt = iov->iov_len;
172 if (cnt == 0) { 172 if (cnt == 0) {
173 KASSERT(uio->uio_iovcnt > 0); 173 KASSERT(uio->uio_iovcnt > 0);
174 uio->uio_iov++; 174 uio->uio_iov++;
175 uio->uio_iovcnt--; 175 uio->uio_iovcnt--;
176 continue; 176 continue;

cvs diff -r1.69 -r1.70 src/sys/kern/subr_extent.c (expand / switch to unified diff)

--- src/sys/kern/subr_extent.c 2007/12/05 07:06:54 1.69
+++ src/sys/kern/subr_extent.c 2008/03/17 08:27:50 1.70
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_extent.c,v 1.69 2007/12/05 07:06:54 ad Exp $ */ 1/* $NetBSD: subr_extent.c,v 1.70 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Matthias Drochner. 8 * by Jason R. Thorpe and Matthias Drochner.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -31,27 +31,27 @@ @@ -31,27 +31,27 @@
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE. 36 * POSSIBILITY OF SUCH DAMAGE.
37 */ 37 */
38 38
39/* 39/*
40 * General purpose extent manager. 40 * General purpose extent manager.
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.69 2007/12/05 07:06:54 ad Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.70 2008/03/17 08:27:50 yamt Exp $");
45 45
46#ifdef _KERNEL 46#ifdef _KERNEL
47#include "opt_lockdebug.h" 47#include "opt_lockdebug.h"
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/extent.h> 50#include <sys/extent.h>
51#include <sys/malloc.h> 51#include <sys/malloc.h>
52#include <sys/pool.h> 52#include <sys/pool.h>
53#include <sys/time.h> 53#include <sys/time.h>
54#include <sys/systm.h> 54#include <sys/systm.h>
55#include <sys/proc.h> 55#include <sys/proc.h>
56 56
57#include <uvm/uvm_extern.h> 57#include <uvm/uvm_extern.h>
@@ -479,27 +479,27 @@ extent_alloc_region(struct extent *ex, u @@ -479,27 +479,27 @@ extent_alloc_region(struct extent *ex, u
479 printf("extent_alloc_region: extent `%s', size 0x%lx\n", 479 printf("extent_alloc_region: extent `%s', size 0x%lx\n",
480 ex->ex_name, size); 480 ex->ex_name, size);
481 panic("extent_alloc_region: bad size"); 481 panic("extent_alloc_region: bad size");
482 } 482 }
483 if (end < start) { 483 if (end < start) {
484 printf( 484 printf(
485 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n", 485 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
486 ex->ex_name, start, size); 486 ex->ex_name, start, size);
487 panic("extent_alloc_region: overflow"); 487 panic("extent_alloc_region: overflow");
488 } 488 }
489#endif 489#endif
490#ifdef LOCKDEBUG 490#ifdef LOCKDEBUG
491 if (flags & EX_WAITSPACE) 491 if (flags & EX_WAITSPACE)
492 ASSERT_SLEEPABLE(NULL, "extent_alloc_region(EX_WAITSPACE)"); 492 ASSERT_SLEEPABLE();
493#endif 493#endif
494 494
495 /* 495 /*
496 * Make sure the requested region lies within the 496 * Make sure the requested region lies within the
497 * extent. 497 * extent.
498 * 498 *
499 * We don't lock to check the range, because those values 499 * We don't lock to check the range, because those values
500 * are never modified, and if another thread deletes the 500 * are never modified, and if another thread deletes the
501 * extent, we're screwed anyway. 501 * extent, we're screwed anyway.
502 */ 502 */
503 if ((start < ex->ex_start) || (end > ex->ex_end)) { 503 if ((start < ex->ex_start) || (end > ex->ex_end)) {
504#ifdef DIAGNOSTIC 504#ifdef DIAGNOSTIC
505 printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n", 505 printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
@@ -651,27 +651,27 @@ extent_alloc_subregion1(struct extent *e @@ -651,27 +651,27 @@ extent_alloc_subregion1(struct extent *e
651 panic("extent_alloc_subregion: bad size"); 651 panic("extent_alloc_subregion: bad size");
652 } 652 }
653 if (alignment == 0) 653 if (alignment == 0)
654 panic("extent_alloc_subregion: bad alignment"); 654 panic("extent_alloc_subregion: bad alignment");
655 if (boundary && (boundary < size)) { 655 if (boundary && (boundary < size)) {
656 printf( 656 printf(
657 "extent_alloc_subregion: extent `%s', size 0x%lx, " 657 "extent_alloc_subregion: extent `%s', size 0x%lx, "
658 "boundary 0x%lx\n", ex->ex_name, size, boundary); 658 "boundary 0x%lx\n", ex->ex_name, size, boundary);
659 panic("extent_alloc_subregion: bad boundary"); 659 panic("extent_alloc_subregion: bad boundary");
660 } 660 }
661#endif 661#endif
662#ifdef LOCKDEBUG 662#ifdef LOCKDEBUG
663 if (flags & EX_WAITSPACE) 663 if (flags & EX_WAITSPACE)
664 ASSERT_SLEEPABLE(NULL, "extent_alloc_subregion1(EX_WAITSPACE)"); 664 ASSERT_SLEEPABLE();
665#endif 665#endif
666 666
667 /* 667 /*
668 * Allocate the region descriptor. It will be freed later 668 * Allocate the region descriptor. It will be freed later
669 * if we can coalesce with another region. Don't lock before 669 * if we can coalesce with another region. Don't lock before
670 * here! This could block. 670 * here! This could block.
671 */ 671 */
672 myrp = extent_alloc_region_descriptor(ex, flags); 672 myrp = extent_alloc_region_descriptor(ex, flags);
673 if (myrp == NULL) { 673 if (myrp == NULL) {
674#ifdef DIAGNOSTIC 674#ifdef DIAGNOSTIC
675 printf( 675 printf(
676 "extent_alloc_subregion: can't allocate region descriptor\n"); 676 "extent_alloc_subregion: can't allocate region descriptor\n");
677#endif 677#endif

cvs diff -r1.2 -r1.3 src/sys/kern/subr_percpu.c (expand / switch to unified diff)

--- src/sys/kern/subr_percpu.c 2008/01/17 09:01:57 1.2
+++ src/sys/kern/subr_percpu.c 2008/03/17 08:27:50 1.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_percpu.c,v 1.2 2008/01/17 09:01:57 yamt Exp $ */ 1/* $NetBSD: subr_percpu.c,v 1.3 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2007,2008 YAMAMOTO Takashi, 4 * Copyright (c)2007,2008 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -21,27 +21,27 @@ @@ -21,27 +21,27 @@
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE. 26 * SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * per-cpu storage. 30 * per-cpu storage.
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.2 2008/01/17 09:01:57 yamt Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.3 2008/03/17 08:27:50 yamt Exp $");
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/cpu.h> 37#include <sys/cpu.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/mutex.h> 40#include <sys/mutex.h>
41#include <sys/percpu.h> 41#include <sys/percpu.h>
42#include <sys/rwlock.h> 42#include <sys/rwlock.h>
43#include <sys/vmem.h> 43#include <sys/vmem.h>
44#include <sys/xcall.h> 44#include <sys/xcall.h>
45 45
46#include <uvm/uvm_extern.h> 46#include <uvm/uvm_extern.h>
47 47
@@ -149,27 +149,27 @@ percpu_cpu_enlarge(size_t size) @@ -149,27 +149,27 @@ percpu_cpu_enlarge(size_t size)
149} 149}
150 150
151/* 151/*
152 * percpu_backend_alloc: vmem import callback for percpu_offset_arena 152 * percpu_backend_alloc: vmem import callback for percpu_offset_arena
153 */ 153 */
154 154
155static vmem_addr_t 155static vmem_addr_t
156percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize, 156percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
157 vm_flag_t vmflags) 157 vm_flag_t vmflags)
158{ 158{
159 unsigned int offset; 159 unsigned int offset;
160 unsigned int nextoff; 160 unsigned int nextoff;
161 161
162 ASSERT_SLEEPABLE(NULL, __func__); 162 ASSERT_SLEEPABLE();
163 KASSERT(dummy == NULL); 163 KASSERT(dummy == NULL);
164 164
165 if ((vmflags & VM_NOSLEEP) != 0) 165 if ((vmflags & VM_NOSLEEP) != 0)
166 return VMEM_ADDR_NULL; 166 return VMEM_ADDR_NULL;
167 167
168 size = roundup(size, PERCPU_IMPORT_SIZE); 168 size = roundup(size, PERCPU_IMPORT_SIZE);
169 mutex_enter(&percpu_allocation_lock); 169 mutex_enter(&percpu_allocation_lock);
170 offset = percpu_nextoff; 170 offset = percpu_nextoff;
171 percpu_nextoff = nextoff = percpu_nextoff + size; 171 percpu_nextoff = nextoff = percpu_nextoff + size;
172 mutex_exit(&percpu_allocation_lock); 172 mutex_exit(&percpu_allocation_lock);
173 173
174 percpu_cpu_enlarge(nextoff); 174 percpu_cpu_enlarge(nextoff);
175 175
@@ -194,87 +194,87 @@ percpu_zero(percpu_t *pc, size_t sz) @@ -194,87 +194,87 @@ percpu_zero(percpu_t *pc, size_t sz)
194{ 194{
195 195
196 percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz); 196 percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
197} 197}
198 198
199/* 199/*
200 * percpu_init: subsystem initialization 200 * percpu_init: subsystem initialization
201 */ 201 */
202 202
203void 203void
204percpu_init(void) 204percpu_init(void)
205{ 205{
206 206
207 ASSERT_SLEEPABLE(NULL, __func__); 207 ASSERT_SLEEPABLE();
208 rw_init(&percpu_swap_lock); 208 rw_init(&percpu_swap_lock);
209 mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE); 209 mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);
210 210
211 percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE, 211 percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
212 percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP, 212 percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
213 IPL_NONE); 213 IPL_NONE);
214} 214}
215 215
216/* 216/*
217 * percpu_init_cpu: cpu initialization 217 * percpu_init_cpu: cpu initialization
218 * 218 *
219 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH. 219 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH.
220 */ 220 */
221 221
222void 222void
223percpu_init_cpu(struct cpu_info *ci) 223percpu_init_cpu(struct cpu_info *ci)
224{ 224{
225 percpu_cpu_t * const pcc = cpu_percpu(ci); 225 percpu_cpu_t * const pcc = cpu_percpu(ci);
226 size_t size = percpu_nextoff; /* XXX racy */ 226 size_t size = percpu_nextoff; /* XXX racy */
227 227
228 ASSERT_SLEEPABLE(NULL, __func__); 228 ASSERT_SLEEPABLE();
229 pcc->pcc_size = size; 229 pcc->pcc_size = size;
230 if (size) { 230 if (size) {
231 pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP); 231 pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP);
232 } 232 }
233} 233}
234 234
235/* 235/*
236 * percpu_alloc: allocate percpu storage 236 * percpu_alloc: allocate percpu storage
237 * 237 *
238 * => called in thread context. 238 * => called in thread context.
239 * => considered as an expensive and rare operation. 239 * => considered as an expensive and rare operation.
240 * => allocated storage is initialized with zeros. 240 * => allocated storage is initialized with zeros.
241 */ 241 */
242 242
243percpu_t * 243percpu_t *
244percpu_alloc(size_t size) 244percpu_alloc(size_t size)
245{ 245{
246 unsigned int offset; 246 unsigned int offset;
247 percpu_t *pc; 247 percpu_t *pc;
248 248
249 ASSERT_SLEEPABLE(NULL, __func__); 249 ASSERT_SLEEPABLE();
250 offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT); 250 offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT);
251 pc = (percpu_t *)(uintptr_t)offset; 251 pc = (percpu_t *)(uintptr_t)offset;
252 percpu_zero(pc, size); 252 percpu_zero(pc, size);
253 return pc; 253 return pc;
254} 254}
255 255
256/* 256/*
257 * percpu_alloc: free percpu storage 257 * percpu_alloc: free percpu storage
258 * 258 *
259 * => called in thread context. 259 * => called in thread context.
260 * => considered as an expensive and rare operation. 260 * => considered as an expensive and rare operation.
261 */ 261 */
262 262
263void 263void
264percpu_free(percpu_t *pc, size_t size) 264percpu_free(percpu_t *pc, size_t size)
265{ 265{
266 266
267 ASSERT_SLEEPABLE(NULL, __func__); 267 ASSERT_SLEEPABLE();
268 vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size); 268 vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
269} 269}
270 270
271/* 271/*
272 * percpu_getptr: 272 * percpu_getptr:
273 * 273 *
274 * => called with preemption disabled 274 * => called with preemption disabled
275 * => safe to be used in either thread or interrupt context 275 * => safe to be used in either thread or interrupt context
276 */ 276 */
277 277
278void * 278void *
279percpu_getptr(percpu_t *pc) 279percpu_getptr(percpu_t *pc)
280{ 280{
@@ -293,27 +293,27 @@ percpu_getptr(percpu_t *pc) @@ -293,27 +293,27 @@ percpu_getptr(percpu_t *pc)
293 * sum = 0; 293 * sum = 0;
294 * percpu_traverse_enter(); 294 * percpu_traverse_enter();
295 * for (CPU_INFO_FOREACH(cii, ci)) { 295 * for (CPU_INFO_FOREACH(cii, ci)) {
296 * unsigned int *p = percpu_getptr_remote(pc, ci); 296 * unsigned int *p = percpu_getptr_remote(pc, ci);
297 * sum += *p; 297 * sum += *p;
298 * } 298 * }
299 * percpu_traverse_exit(); 299 * percpu_traverse_exit();
300 */ 300 */
301 301
302void 302void
303percpu_traverse_enter(void) 303percpu_traverse_enter(void)
304{ 304{
305 305
306 ASSERT_SLEEPABLE(NULL, __func__); 306 ASSERT_SLEEPABLE();
307 rw_enter(&percpu_swap_lock, RW_READER); 307 rw_enter(&percpu_swap_lock, RW_READER);
308} 308}
309 309
310void 310void
311percpu_traverse_exit(void) 311percpu_traverse_exit(void)
312{ 312{
313 313
314 rw_exit(&percpu_swap_lock); 314 rw_exit(&percpu_swap_lock);
315} 315}
316 316
317void * 317void *
318percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci) 318percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci)
319{ 319{

cvs diff -r1.153 -r1.154 src/sys/kern/subr_pool.c (expand / switch to unified diff)

--- src/sys/kern/subr_pool.c 2008/03/10 22:20:14 1.153
+++ src/sys/kern/subr_pool.c 2008/03/17 08:27:50 1.154
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_pool.c,v 1.153 2008/03/10 22:20:14 martin Exp $ */ 1/* $NetBSD: subr_pool.c,v 1.154 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace 8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran. 9 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -28,27 +28,27 @@ @@ -28,27 +28,27 @@
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE. 37 * POSSIBILITY OF SUCH DAMAGE.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.153 2008/03/10 22:20:14 martin Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.154 2008/03/17 08:27:50 yamt Exp $");
42 42
43#include "opt_ddb.h" 43#include "opt_ddb.h"
44#include "opt_pool.h" 44#include "opt_pool.h"
45#include "opt_poollog.h" 45#include "opt_poollog.h"
46#include "opt_lockdebug.h" 46#include "opt_lockdebug.h"
47 47
48#include <sys/param.h> 48#include <sys/param.h>
49#include <sys/systm.h> 49#include <sys/systm.h>
50#include <sys/bitops.h> 50#include <sys/bitops.h>
51#include <sys/proc.h> 51#include <sys/proc.h>
52#include <sys/errno.h> 52#include <sys/errno.h>
53#include <sys/kernel.h> 53#include <sys/kernel.h>
54#include <sys/malloc.h> 54#include <sys/malloc.h>
@@ -986,27 +986,27 @@ pool_get(struct pool *pp, int flags) @@ -986,27 +986,27 @@ pool_get(struct pool *pp, int flags)
986 void *v; 986 void *v;
987 987
988#ifdef DIAGNOSTIC 988#ifdef DIAGNOSTIC
989 if (__predict_false(pp->pr_itemsperpage == 0)) 989 if (__predict_false(pp->pr_itemsperpage == 0))
990 panic("pool_get: pool %p: pr_itemsperpage is zero, " 990 panic("pool_get: pool %p: pr_itemsperpage is zero, "
991 "pool not initialized?", pp); 991 "pool not initialized?", pp);
992 if (__predict_false(curlwp == NULL && doing_shutdown == 0 && 992 if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
993 (flags & PR_WAITOK) != 0)) 993 (flags & PR_WAITOK) != 0))
994 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan); 994 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
995 995
996#endif /* DIAGNOSTIC */ 996#endif /* DIAGNOSTIC */
997#ifdef LOCKDEBUG 997#ifdef LOCKDEBUG
998 if (flags & PR_WAITOK) 998 if (flags & PR_WAITOK)
999 ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)"); 999 ASSERT_SLEEPABLE();
1000#endif 1000#endif
1001 1001
1002 mutex_enter(&pp->pr_lock); 1002 mutex_enter(&pp->pr_lock);
1003 pr_enter(pp, file, line); 1003 pr_enter(pp, file, line);
1004 1004
1005 startover: 1005 startover:
1006 /* 1006 /*
1007 * Check to see if we've reached the hard limit. If we have, 1007 * Check to see if we've reached the hard limit. If we have,
1008 * and we can wait, then wait until an item has been returned to 1008 * and we can wait, then wait until an item has been returned to
1009 * the pool. 1009 * the pool.
1010 */ 1010 */
1011#ifdef DIAGNOSTIC 1011#ifdef DIAGNOSTIC
1012 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { 1012 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
@@ -2512,27 +2512,27 @@ pool_cache_get_slow(pool_cache_cpu_t *cc @@ -2512,27 +2512,27 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
2512 * Get an object from a pool cache (optionally returning 2512 * Get an object from a pool cache (optionally returning
2513 * the physical address of the object). 2513 * the physical address of the object).
2514 */ 2514 */
2515void * 2515void *
2516pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) 2516pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2517{ 2517{
2518 pool_cache_cpu_t *cc; 2518 pool_cache_cpu_t *cc;
2519 pcg_t *pcg; 2519 pcg_t *pcg;
2520 void *object; 2520 void *object;
2521 int s; 2521 int s;
2522 2522
2523#ifdef LOCKDEBUG 2523#ifdef LOCKDEBUG
2524 if (flags & PR_WAITOK) 2524 if (flags & PR_WAITOK)
2525 ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); 2525 ASSERT_SLEEPABLE();
2526#endif 2526#endif
2527 2527
2528 cc = pool_cache_cpu_enter(pc, &s); 2528 cc = pool_cache_cpu_enter(pc, &s);
2529 do { 2529 do {
2530 /* Try and allocate an object from the current group. */ 2530 /* Try and allocate an object from the current group. */
2531 pcg = cc->cc_current; 2531 pcg = cc->cc_current;
2532 if (pcg != NULL && pcg->pcg_avail > 0) { 2532 if (pcg != NULL && pcg->pcg_avail > 0) {
2533 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; 2533 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2534 if (pap != NULL) 2534 if (pap != NULL)
2535 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; 2535 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2536#if defined(DIAGNOSTIC) 2536#if defined(DIAGNOSTIC)
2537 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; 2537 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2538#endif /* defined(DIAGNOSTIC) */ 2538#endif /* defined(DIAGNOSTIC) */

cvs diff -r1.11 -r1.12 src/sys/kern/subr_specificdata.c (expand / switch to unified diff)

--- src/sys/kern/subr_specificdata.c 2007/11/07 00:23:23 1.11
+++ src/sys/kern/subr_specificdata.c 2008/03/17 08:27:50 1.12
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_specificdata.c,v 1.11 2007/11/07 00:23:23 ad Exp $ */ 1/* $NetBSD: subr_specificdata.c,v 1.12 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -53,31 +53,30 @@ @@ -53,31 +53,30 @@
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE. 62 * SUCH DAMAGE.
63 */ 63 */
64 64
65#include <sys/cdefs.h> 65#include <sys/cdefs.h>
66__KERNEL_RCSID(0, "$NetBSD: subr_specificdata.c,v 1.11 2007/11/07 00:23:23 ad Exp $"); 66__KERNEL_RCSID(0, "$NetBSD: subr_specificdata.c,v 1.12 2008/03/17 08:27:50 yamt Exp $");
67 67
68#include <sys/param.h> 68#include <sys/param.h>
69#include <sys/kmem.h> 69#include <sys/kmem.h>
70#include <sys/proc.h> 
71#include <sys/specificdata.h> 70#include <sys/specificdata.h>
72#include <sys/queue.h> 71#include <sys/queue.h>
73#include <sys/mutex.h> 72#include <sys/mutex.h>
74 73
75/* 74/*
76 * Locking notes: 75 * Locking notes:
77 * 76 *
78 * The specdataref_container pointer in the specificdata_reference 77 * The specdataref_container pointer in the specificdata_reference
79 * is volatile. To read it, you must hold EITHER the domain lock 78 * is volatile. To read it, you must hold EITHER the domain lock
80 * or the ref lock. To write it, you must hold BOTH the domain lock 79 * or the ref lock. To write it, you must hold BOTH the domain lock
81 * and the ref lock. The locks must be acquired in the following 80 * and the ref lock. The locks must be acquired in the following
82 * order: 81 * order:
83 * domain -> ref 82 * domain -> ref
@@ -183,27 +182,27 @@ specificdata_domain_delete(specificdata_ @@ -183,27 +182,27 @@ specificdata_domain_delete(specificdata_
183 * specificdata_key_create -- 182 * specificdata_key_create --
184 * Create a specificdata key for a domain. 183 * Create a specificdata key for a domain.
185 * 184 *
186 * Note: This is a rare operation. 185 * Note: This is a rare operation.
187 */ 186 */
188int 187int
189specificdata_key_create(specificdata_domain_t sd, specificdata_key_t *keyp, 188specificdata_key_create(specificdata_domain_t sd, specificdata_key_t *keyp,
190 specificdata_dtor_t dtor) 189 specificdata_dtor_t dtor)
191{ 190{
192 specificdata_key_impl *newkeys; 191 specificdata_key_impl *newkeys;
193 specificdata_key_t key = 0; 192 specificdata_key_t key = 0;
194 size_t nsz; 193 size_t nsz;
195 194
196 ASSERT_SLEEPABLE(NULL, __func__); 195 ASSERT_SLEEPABLE();
197 196
198 if (dtor == NULL) 197 if (dtor == NULL)
199 dtor = specificdata_noop_dtor; 198 dtor = specificdata_noop_dtor;
200  199
201 mutex_enter(&sd->sd_lock); 200 mutex_enter(&sd->sd_lock);
202 201
203 if (sd->sd_keys == NULL) 202 if (sd->sd_keys == NULL)
204 goto needalloc; 203 goto needalloc;
205 204
206 for (; key < sd->sd_nkey; key++) { 205 for (; key < sd->sd_nkey; key++) {
207 if (sd->sd_keys[key].ski_dtor == NULL) 206 if (sd->sd_keys[key].ski_dtor == NULL)
208 goto gotit; 207 goto gotit;
209 } 208 }
@@ -279,27 +278,27 @@ specificdata_init(specificdata_domain_t  @@ -279,27 +278,27 @@ specificdata_init(specificdata_domain_t
279} 278}
280 279
281/* 280/*
282 * specificdata_fini -- 281 * specificdata_fini --
283 * Destroy a specificdata container. We destroy all of the datums 282 * Destroy a specificdata container. We destroy all of the datums
284 * stuffed into the container just as if the key were destroyed. 283 * stuffed into the container just as if the key were destroyed.
285 */ 284 */
286void 285void
287specificdata_fini(specificdata_domain_t sd, specificdata_reference *ref) 286specificdata_fini(specificdata_domain_t sd, specificdata_reference *ref)
288{ 287{
289 specificdata_container_t sc; 288 specificdata_container_t sc;
290 specificdata_key_t key; 289 specificdata_key_t key;
291 290
292 ASSERT_SLEEPABLE(NULL, __func__); 291 ASSERT_SLEEPABLE();
293 292
294 mutex_destroy(&ref->specdataref_lock); 293 mutex_destroy(&ref->specdataref_lock);
295 294
296 sc = ref->specdataref_container; 295 sc = ref->specdataref_container;
297 if (sc == NULL) 296 if (sc == NULL)
298 return; 297 return;
299 ref->specdataref_container = NULL; 298 ref->specdataref_container = NULL;
300  299
301 mutex_enter(&sd->sd_lock); 300 mutex_enter(&sd->sd_lock);
302 301
303 specificdata_container_unlink(sd, sc); 302 specificdata_container_unlink(sd, sc);
304 for (key = 0; key < sc->sc_nkey; key++) { 303 for (key = 0; key < sc->sc_nkey; key++) {
305 specificdata_destroy_datum(sd, sc, key); 304 specificdata_destroy_datum(sd, sc, key);
@@ -357,27 +356,27 @@ specificdata_getspecific_unlocked(specif @@ -357,27 +356,27 @@ specificdata_getspecific_unlocked(specif
357 356
358/* 357/*
359 * specificdata_setspecific -- 358 * specificdata_setspecific --
360 * Put a datum into a container. 359 * Put a datum into a container.
361 */ 360 */
362void 361void
363specificdata_setspecific(specificdata_domain_t sd, 362specificdata_setspecific(specificdata_domain_t sd,
364 specificdata_reference *ref, 363 specificdata_reference *ref,
365 specificdata_key_t key, void *data) 364 specificdata_key_t key, void *data)
366{ 365{
367 specificdata_container_t sc, newsc; 366 specificdata_container_t sc, newsc;
368 size_t newnkey, sz; 367 size_t newnkey, sz;
369 368
370 ASSERT_SLEEPABLE(NULL, __func__); 369 ASSERT_SLEEPABLE();
371 370
372 mutex_enter(&ref->specdataref_lock); 371 mutex_enter(&ref->specdataref_lock);
373 372
374 sc = ref->specdataref_container; 373 sc = ref->specdataref_container;
375 if (__predict_true(sc != NULL && key < sc->sc_nkey)) { 374 if (__predict_true(sc != NULL && key < sc->sc_nkey)) {
376 sc->sc_data[key] = data; 375 sc->sc_data[key] = data;
377 mutex_exit(&ref->specdataref_lock); 376 mutex_exit(&ref->specdataref_lock);
378 return; 377 return;
379 } 378 }
380 379
381 mutex_exit(&ref->specdataref_lock); 380 mutex_exit(&ref->specdataref_lock);
382 381
383 /* 382 /*

cvs diff -r1.41 -r1.42 src/sys/kern/subr_vmem.c (expand / switch to unified diff)

--- src/sys/kern/subr_vmem.c 2008/01/24 13:57:52 1.41
+++ src/sys/kern/subr_vmem.c 2008/03/17 08:27:50 1.42
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_vmem.c,v 1.41 2008/01/24 13:57:52 ad Exp $ */ 1/* $NetBSD: subr_vmem.c,v 1.42 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2006 YAMAMOTO Takashi, 4 * Copyright (c)2006 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -28,67 +28,66 @@ @@ -28,67 +28,66 @@
28 28
29/* 29/*
30 * reference: 30 * reference:
31 * - Magazines and Vmem: Extending the Slab Allocator 31 * - Magazines and Vmem: Extending the Slab Allocator
32 * to Many CPUs and Arbitrary Resources 32 * to Many CPUs and Arbitrary Resources
33 * http://www.usenix.org/event/usenix01/bonwick.html 33 * http://www.usenix.org/event/usenix01/bonwick.html
34 * 34 *
35 * todo: 35 * todo:
36 * - decide how to import segments for vmem_xalloc. 36 * - decide how to import segments for vmem_xalloc.
37 * - don't rely on malloc(9). 37 * - don't rely on malloc(9).
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.41 2008/01/24 13:57:52 ad Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.42 2008/03/17 08:27:50 yamt Exp $");
42 42
43#define VMEM_DEBUG 43#define VMEM_DEBUG
44#if defined(_KERNEL) 44#if defined(_KERNEL)
45#include "opt_ddb.h" 45#include "opt_ddb.h"
46#define QCACHE 46#define QCACHE
47#endif /* defined(_KERNEL) */ 47#endif /* defined(_KERNEL) */
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/hash.h> 50#include <sys/hash.h>
51#include <sys/queue.h> 51#include <sys/queue.h>
52 52
53#if defined(_KERNEL) 53#if defined(_KERNEL)
54#include <sys/systm.h> 54#include <sys/systm.h>
55#include <sys/kernel.h> /* hz */ 55#include <sys/kernel.h> /* hz */
56#include <sys/callout.h> 56#include <sys/callout.h>
57#include <sys/malloc.h> 57#include <sys/malloc.h>
58#include <sys/once.h> 58#include <sys/once.h>
59#include <sys/pool.h> 59#include <sys/pool.h>
60#include <sys/proc.h> 
61#include <sys/vmem.h> 60#include <sys/vmem.h>
62#include <sys/workqueue.h> 61#include <sys/workqueue.h>
63#else /* defined(_KERNEL) */ 62#else /* defined(_KERNEL) */
64#include "../sys/vmem.h" 63#include "../sys/vmem.h"
65#endif /* defined(_KERNEL) */ 64#endif /* defined(_KERNEL) */
66 65
67#if defined(_KERNEL) 66#if defined(_KERNEL)
68#define LOCK_DECL(name) kmutex_t name 67#define LOCK_DECL(name) kmutex_t name
69#else /* defined(_KERNEL) */ 68#else /* defined(_KERNEL) */
70#include <errno.h> 69#include <errno.h>
71#include <assert.h> 70#include <assert.h>
72#include <stdlib.h> 71#include <stdlib.h>
73 72
74#define KASSERT(a) assert(a) 73#define KASSERT(a) assert(a)
75#define LOCK_DECL(name) /* nothing */ 74#define LOCK_DECL(name) /* nothing */
76#define mutex_init(a, b, c) /* nothing */ 75#define mutex_init(a, b, c) /* nothing */
77#define mutex_destroy(a) /* nothing */ 76#define mutex_destroy(a) /* nothing */
78#define mutex_enter(a) /* nothing */ 77#define mutex_enter(a) /* nothing */
79#define mutex_exit(a) /* nothing */ 78#define mutex_exit(a) /* nothing */
80#define mutex_owned(a) /* nothing */ 79#define mutex_owned(a) /* nothing */
81#define ASSERT_SLEEPABLE(lk, msg) /* nothing */ 80#define ASSERT_SLEEPABLE() /* nothing */
82#define IPL_VM 0 81#define IPL_VM 0
83#endif /* defined(_KERNEL) */ 82#endif /* defined(_KERNEL) */
84 83
85struct vmem; 84struct vmem;
86struct vmem_btag; 85struct vmem_btag;
87 86
88#if defined(VMEM_DEBUG) 87#if defined(VMEM_DEBUG)
89void vmem_dump(const vmem_t *); 88void vmem_dump(const vmem_t *);
90#endif /* defined(VMEM_DEBUG) */ 89#endif /* defined(VMEM_DEBUG) */
91 90
92#define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT) 91#define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT)
93 92
94#define VMEM_HASHSIZE_MIN 1 /* XXX */ 93#define VMEM_HASHSIZE_MIN 1 /* XXX */
@@ -847,27 +846,27 @@ vmem_roundup_size(vmem_t *vm, vmem_size_ @@ -847,27 +846,27 @@ vmem_roundup_size(vmem_t *vm, vmem_size_
847 */ 846 */
848 847
849vmem_addr_t 848vmem_addr_t
850vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags) 849vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
851{ 850{
852 const vm_flag_t strat __unused = flags & VM_FITMASK; 851 const vm_flag_t strat __unused = flags & VM_FITMASK;
853 852
854 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 853 KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
855 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); 854 KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
856 855
857 KASSERT(size > 0); 856 KASSERT(size > 0);
858 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 857 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
859 if ((flags & VM_SLEEP) != 0) { 858 if ((flags & VM_SLEEP) != 0) {
860 ASSERT_SLEEPABLE(NULL, __func__); 859 ASSERT_SLEEPABLE();
861 } 860 }
862 861
863#if defined(QCACHE) 862#if defined(QCACHE)
864 if (size <= vm->vm_qcache_max) { 863 if (size <= vm->vm_qcache_max) {
865 int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; 864 int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
866 qcache_t *qc = vm->vm_qcache[qidx - 1]; 865 qcache_t *qc = vm->vm_qcache[qidx - 1];
867 866
868 return (vmem_addr_t)pool_cache_get(qc->qc_cache, 867 return (vmem_addr_t)pool_cache_get(qc->qc_cache,
869 vmf_to_prf(flags)); 868 vmf_to_prf(flags));
870 } 869 }
871#endif /* defined(QCACHE) */ 870#endif /* defined(QCACHE) */
872 871
873 return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags); 872 return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags);
@@ -882,27 +881,27 @@ vmem_xalloc(vmem_t *vm, vmem_size_t size @@ -882,27 +881,27 @@ vmem_xalloc(vmem_t *vm, vmem_size_t size
882 struct vmem_freelist *first; 881 struct vmem_freelist *first;
883 struct vmem_freelist *end; 882 struct vmem_freelist *end;
884 bt_t *bt; 883 bt_t *bt;
885 bt_t *btnew; 884 bt_t *btnew;
886 bt_t *btnew2; 885 bt_t *btnew2;
887 const vmem_size_t size = vmem_roundup_size(vm, size0); 886 const vmem_size_t size = vmem_roundup_size(vm, size0);
888 vm_flag_t strat = flags & VM_FITMASK; 887 vm_flag_t strat = flags & VM_FITMASK;
889 vmem_addr_t start; 888 vmem_addr_t start;
890 889
891 KASSERT(size0 > 0); 890 KASSERT(size0 > 0);
892 KASSERT(size > 0); 891 KASSERT(size > 0);
893 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 892 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
894 if ((flags & VM_SLEEP) != 0) { 893 if ((flags & VM_SLEEP) != 0) {
895 ASSERT_SLEEPABLE(NULL, __func__); 894 ASSERT_SLEEPABLE();
896 } 895 }
897 KASSERT((align & vm->vm_quantum_mask) == 0); 896 KASSERT((align & vm->vm_quantum_mask) == 0);
898 KASSERT((align & (align - 1)) == 0); 897 KASSERT((align & (align - 1)) == 0);
899 KASSERT((phase & vm->vm_quantum_mask) == 0); 898 KASSERT((phase & vm->vm_quantum_mask) == 0);
900 KASSERT((nocross & vm->vm_quantum_mask) == 0); 899 KASSERT((nocross & vm->vm_quantum_mask) == 0);
901 KASSERT((nocross & (nocross - 1)) == 0); 900 KASSERT((nocross & (nocross - 1)) == 0);
902 KASSERT((align == 0 && phase == 0) || phase < align); 901 KASSERT((align == 0 && phase == 0) || phase < align);
903 KASSERT(nocross == 0 || nocross >= size); 902 KASSERT(nocross == 0 || nocross >= size);
904 KASSERT(maxaddr == 0 || minaddr < maxaddr); 903 KASSERT(maxaddr == 0 || minaddr < maxaddr);
905 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross)); 904 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
906 905
907 if (align == 0) { 906 if (align == 0) {
908 align = vm->vm_quantum_mask + 1; 907 align = vm->vm_quantum_mask + 1;

cvs diff -r1.17 -r1.18 src/sys/kern/vfs_trans.c (expand / switch to unified diff)

--- src/sys/kern/vfs_trans.c 2008/02/02 16:51:34 1.17
+++ src/sys/kern/vfs_trans.c 2008/03/17 08:27:50 1.18
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vfs_trans.c,v 1.17 2008/02/02 16:51:34 hannken Exp $ */ 1/* $NetBSD: vfs_trans.c,v 1.18 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Juergen Hannken-Illjes. 8 * by Juergen Hannken-Illjes.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -27,27 +27,27 @@ @@ -27,27 +27,27 @@
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE. 36 * POSSIBILITY OF SUCH DAMAGE.
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.17 2008/02/02 16:51:34 hannken Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.18 2008/03/17 08:27:50 yamt Exp $");
41 41
42/* 42/*
43 * File system transaction operations. 43 * File system transaction operations.
44 */ 44 */
45 45
46#include "opt_ddb.h" 46#include "opt_ddb.h"
47 47
48#if defined(DDB) 48#if defined(DDB)
49#define _LWP_API_PRIVATE /* Need _lwp_getspecific_by_lwp() */ 49#define _LWP_API_PRIVATE /* Need _lwp_getspecific_by_lwp() */
50#endif 50#endif
51 51
52#include <sys/param.h> 52#include <sys/param.h>
53#include <sys/systm.h> 53#include <sys/systm.h>
@@ -167,27 +167,27 @@ fstrans_unmount(struct mount *mp) @@ -167,27 +167,27 @@ fstrans_unmount(struct mount *mp)
167 * file system increment the reference counter. 167 * file system increment the reference counter.
168 * A thread with an exclusive transaction lock may get a shared or lazy one. 168 * A thread with an exclusive transaction lock may get a shared or lazy one.
169 * A thread with a shared or lazy transaction lock cannot upgrade to an 169 * A thread with a shared or lazy transaction lock cannot upgrade to an
170 * exclusive one yet. 170 * exclusive one yet.
171 */ 171 */
172int 172int
173_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait) 173_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
174{ 174{
175 krwlock_t *lock_p; 175 krwlock_t *lock_p;
176 krw_t lock_op; 176 krw_t lock_op;
177 struct fstrans_lwp_info *fli, *new_fli; 177 struct fstrans_lwp_info *fli, *new_fli;
178 struct fstrans_mount_info *fmi; 178 struct fstrans_mount_info *fmi;
179 179
180 ASSERT_SLEEPABLE(NULL, __func__); 180 ASSERT_SLEEPABLE();
181 181
182 if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) 182 if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
183 return 0; 183 return 0;
184 184
185 new_fli = NULL; 185 new_fli = NULL;
186 for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) { 186 for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
187 if (fli->fli_mount == NULL && new_fli == NULL) 187 if (fli->fli_mount == NULL && new_fli == NULL)
188 new_fli = fli; 188 new_fli = fli;
189 if (fli->fli_mount == mp) { 189 if (fli->fli_mount == mp) {
190 KASSERT(fli->fli_count > 0); 190 KASSERT(fli->fli_count > 0);
191 if (fli->fli_lock_type != FSTRANS_EXCL && 191 if (fli->fli_lock_type != FSTRANS_EXCL &&
192 lock_type == FSTRANS_EXCL) 192 lock_type == FSTRANS_EXCL)
193 panic("fstrans_start: cannot upgrade lock"); 193 panic("fstrans_start: cannot upgrade lock");

cvs diff -r1.270 -r1.271 src/sys/sys/proc.h (expand / switch to unified diff)

--- src/sys/sys/proc.h 2008/02/19 20:27:27 1.270
+++ src/sys/sys/proc.h 2008/03/17 08:27:50 1.271
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: proc.h,v 1.270 2008/02/19 20:27:27 ad Exp $ */ 1/* $NetBSD: proc.h,v 1.271 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -569,33 +569,26 @@ _proclist_skipmarker(struct proc *p0) @@ -569,33 +569,26 @@ _proclist_skipmarker(struct proc *p0)
569{ 569{
570 struct proc *p = p0; 570 struct proc *p = p0;
571 571
572 while (p != NULL && p->p_flag & PK_MARKER) 572 while (p != NULL && p->p_flag & PK_MARKER)
573 p = LIST_NEXT(p, p_list); 573 p = LIST_NEXT(p, p_list);
574 574
575 return p; 575 return p;
576} 576}
577#define PROCLIST_FOREACH(var, head) \ 577#define PROCLIST_FOREACH(var, head) \
578 for ((var) = LIST_FIRST(head); \ 578 for ((var) = LIST_FIRST(head); \
579 ((var) = _proclist_skipmarker(var)) != NULL; \ 579 ((var) = _proclist_skipmarker(var)) != NULL; \
580 (var) = LIST_NEXT(var, p_list)) 580 (var) = LIST_NEXT(var, p_list))
581 581
582#if defined(LOCKDEBUG) 
583void assert_sleepable(struct simplelock *, const char *); 
584#define ASSERT_SLEEPABLE(lk, msg) assert_sleepable((lk), (msg)) 
585#else /* defined(LOCKDEBUG) */ 
586#define ASSERT_SLEEPABLE(lk, msg) /* nothing */ 
587#endif /* defined(LOCKDEBUG) */ 
588 
589/* Compatibility with old, non-interlocked tsleep call */ 582/* Compatibility with old, non-interlocked tsleep call */
590#define tsleep(chan, pri, wmesg, timo) \ 583#define tsleep(chan, pri, wmesg, timo) \
591 ltsleep(chan, pri, wmesg, timo, NULL) 584 ltsleep(chan, pri, wmesg, timo, NULL)
592 585
593#ifdef KSTACK_CHECK_MAGIC 586#ifdef KSTACK_CHECK_MAGIC
594void kstack_setup_magic(const struct lwp *); 587void kstack_setup_magic(const struct lwp *);
595void kstack_check_magic(const struct lwp *); 588void kstack_check_magic(const struct lwp *);
596#endif 589#endif
597 590
598/* 591/*
599 * kernel stack paramaters 592 * kernel stack paramaters
600 * XXX require sizeof(struct user) 593 * XXX require sizeof(struct user)
601 */ 594 */

cvs diff -r1.214 -r1.215 src/sys/sys/systm.h (expand / switch to unified diff)

--- src/sys/sys/systm.h 2008/02/06 22:12:42 1.214
+++ src/sys/sys/systm.h 2008/03/17 08:27:50 1.215
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: systm.h,v 1.214 2008/02/06 22:12:42 dsl Exp $ */ 1/* $NetBSD: systm.h,v 1.215 2008/03/17 08:27:50 yamt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1982, 1988, 1991, 1993 4 * Copyright (c) 1982, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc. 6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed 7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph 8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc. 10 * the permission of UNIX System Laboratories, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -474,14 +474,21 @@ do { \ @@ -474,14 +474,21 @@ do { \
474#else 474#else
475#define KERNEL_LOCK(count, lwp) /* nothing */ 475#define KERNEL_LOCK(count, lwp) /* nothing */
476#define KERNEL_UNLOCK(all, lwp, ptr) /* nothing */ 476#define KERNEL_UNLOCK(all, lwp, ptr) /* nothing */
477#endif 477#endif
478 478
479#define KERNEL_UNLOCK_LAST(l) KERNEL_UNLOCK(-1, (l), NULL) 479#define KERNEL_UNLOCK_LAST(l) KERNEL_UNLOCK(-1, (l), NULL)
480#define KERNEL_UNLOCK_ALL(l, p) KERNEL_UNLOCK(0, (l), (p)) 480#define KERNEL_UNLOCK_ALL(l, p) KERNEL_UNLOCK(0, (l), (p))
481#define KERNEL_UNLOCK_ONE(l) KERNEL_UNLOCK(1, (l), NULL) 481#define KERNEL_UNLOCK_ONE(l) KERNEL_UNLOCK(1, (l), NULL)
482 482
483/* Preemption control. */ 483/* Preemption control. */
484void crit_enter(void); 484void crit_enter(void);
485void crit_exit(void); 485void crit_exit(void);
486 486
 487void assert_sleepable(void);
 488#if defined(DEBUG)
 489#define ASSERT_SLEEPABLE() assert_sleepable()
 490#else /* defined(DEBUG) */
 491#define ASSERT_SLEEPABLE() /* nothing */
 492#endif /* defined(DEBUG) */
 493
487#endif /* !_SYS_SYSTM_H_ */ 494#endif /* !_SYS_SYSTM_H_ */