| @@ -1,205 +1,203 @@ | | | @@ -1,205 +1,203 @@ |
1 | /* $NetBSD: slab.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */ | | 1 | /* $NetBSD: slab.h,v 1.5 2015/03/02 02:26:37 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2013 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2013 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Taylor R. Campbell. | | 8 | * by Taylor R. Campbell. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #ifndef _LINUX_SLAB_H_ | | 32 | #ifndef _LINUX_SLAB_H_ |
33 | #define _LINUX_SLAB_H_ | | 33 | #define _LINUX_SLAB_H_ |
34 | | | 34 | |
35 | #include <sys/kmem.h> | | 35 | #include <sys/kmem.h> |
36 | #include <sys/malloc.h> | | 36 | #include <sys/malloc.h> |
37 | | | 37 | |
38 | #include <machine/limits.h> | | 38 | #include <machine/limits.h> |
39 | | | 39 | |
40 | #include <uvm/uvm_extern.h> /* For PAGE_SIZE. */ | | 40 | #include <uvm/uvm_extern.h> /* For PAGE_SIZE. */ |
41 | | | 41 | |
42 | #include <linux/gfp.h> | | 42 | #include <linux/gfp.h> |
43 | | | 43 | |
44 | /* XXX Should use kmem, but Linux kfree doesn't take the size. */ | | 44 | /* XXX Should use kmem, but Linux kfree doesn't take the size. */ |
45 | | | 45 | |
46 | static inline int | | 46 | static inline int |
47 | linux_gfp_to_malloc(gfp_t gfp) | | 47 | linux_gfp_to_malloc(gfp_t gfp) |
48 | { | | 48 | { |
49 | int flags = 0; | | 49 | int flags = 0; |
50 | | | 50 | |
51 | /* This has no meaning to us. */ | | 51 | /* This has no meaning to us. */ |
52 | gfp &= ~__GFP_NOWARN; | | 52 | gfp &= ~__GFP_NOWARN; |
53 | gfp &= ~__GFP_RECLAIMABLE; | | 53 | gfp &= ~__GFP_RECLAIMABLE; |
54 | | | 54 | |
55 | /* Pretend this was the same as not passing __GFP_WAIT. */ | | 55 | /* Pretend this was the same as not passing __GFP_WAIT. */ |
56 | if (ISSET(gfp, __GFP_NORETRY)) { | | 56 | if (ISSET(gfp, __GFP_NORETRY)) { |
57 | gfp &= ~__GFP_NORETRY; | | 57 | gfp &= ~__GFP_NORETRY; |
58 | gfp &= ~__GFP_WAIT; | | 58 | gfp &= ~__GFP_WAIT; |
59 | } | | 59 | } |
60 | | | 60 | |
61 | if (ISSET(gfp, __GFP_ZERO)) { | | 61 | if (ISSET(gfp, __GFP_ZERO)) { |
62 | flags |= M_ZERO; | | 62 | flags |= M_ZERO; |
63 | gfp &= ~__GFP_ZERO; | | 63 | gfp &= ~__GFP_ZERO; |
64 | } | | 64 | } |
65 | | | 65 | |
66 | /* | | 66 | /* |
67 | * XXX Handle other cases as they arise -- prefer to fail early | | 67 | * XXX Handle other cases as they arise -- prefer to fail early |
68 | * rather than allocate memory without respecting parameters we | | 68 | * rather than allocate memory without respecting parameters we |
69 | * don't understand. | | 69 | * don't understand. |
70 | */ | | 70 | */ |
71 | KASSERT((gfp == GFP_ATOMIC) || | | 71 | KASSERT((gfp == GFP_ATOMIC) || |
72 | ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT))); | | 72 | ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT))); |
73 | | | 73 | |
74 | if (ISSET(gfp, __GFP_WAIT)) { | | 74 | if (ISSET(gfp, __GFP_WAIT)) { |
75 | flags |= M_WAITOK; | | 75 | flags |= M_WAITOK; |
76 | gfp &= ~__GFP_WAIT; | | 76 | gfp &= ~__GFP_WAIT; |
77 | } else { | | 77 | } else { |
78 | flags |= M_NOWAIT; | | 78 | flags |= M_NOWAIT; |
79 | } | | 79 | } |
80 | | | 80 | |
81 | return flags; | | 81 | return flags; |
82 | } | | 82 | } |
83 | | | 83 | |
84 | static inline void * | | 84 | static inline void * |
85 | kmalloc(size_t size, gfp_t gfp) | | 85 | kmalloc(size_t size, gfp_t gfp) |
86 | { | | 86 | { |
87 | return malloc(size, M_TEMP, linux_gfp_to_malloc(gfp)); | | 87 | return malloc(size, M_TEMP, linux_gfp_to_malloc(gfp)); |
88 | } | | 88 | } |
89 | | | 89 | |
90 | static inline void * | | 90 | static inline void * |
91 | kzalloc(size_t size, gfp_t gfp) | | 91 | kzalloc(size_t size, gfp_t gfp) |
92 | { | | 92 | { |
93 | return malloc(size, M_TEMP, (linux_gfp_to_malloc(gfp) | M_ZERO)); | | 93 | return malloc(size, M_TEMP, (linux_gfp_to_malloc(gfp) | M_ZERO)); |
94 | } | | 94 | } |
95 | | | 95 | |
96 | static inline void * | | 96 | static inline void * |
97 | kmalloc_array(size_t n, size_t size, gfp_t gfp) | | 97 | kmalloc_array(size_t n, size_t size, gfp_t gfp) |
98 | { | | 98 | { |
99 | KASSERT(size != 0); | | 99 | if ((size != 0) && (n > (SIZE_MAX / size))) |
100 | KASSERT(n <= (SIZE_MAX / size)); | | 100 | return NULL; |
101 | return malloc((n * size), M_TEMP, linux_gfp_to_malloc(gfp)); | | 101 | return malloc((n * size), M_TEMP, linux_gfp_to_malloc(gfp)); |
102 | } | | 102 | } |
103 | | | 103 | |
104 | static inline void * | | 104 | static inline void * |
105 | kcalloc(size_t n, size_t size, gfp_t gfp) | | 105 | kcalloc(size_t n, size_t size, gfp_t gfp) |
106 | { | | 106 | { |
107 | if ((size == 0) && (n > (SIZE_MAX / size))) | | | |
108 | return NULL; | | | |
109 | return kmalloc_array(n, size, (gfp | __GFP_ZERO)); | | 107 | return kmalloc_array(n, size, (gfp | __GFP_ZERO)); |
110 | } | | 108 | } |
111 | | | 109 | |
112 | static inline void * | | 110 | static inline void * |
113 | krealloc(void *ptr, size_t size, gfp_t gfp) | | 111 | krealloc(void *ptr, size_t size, gfp_t gfp) |
114 | { | | 112 | { |
115 | return realloc(ptr, size, M_TEMP, linux_gfp_to_malloc(gfp)); | | 113 | return realloc(ptr, size, M_TEMP, linux_gfp_to_malloc(gfp)); |
116 | } | | 114 | } |
117 | | | 115 | |
118 | static inline void | | 116 | static inline void |
119 | kfree(void *ptr) | | 117 | kfree(void *ptr) |
120 | { | | 118 | { |
121 | if (ptr != NULL) | | 119 | if (ptr != NULL) |
122 | free(ptr, M_TEMP); | | 120 | free(ptr, M_TEMP); |
123 | } | | 121 | } |
124 | | | 122 | |
125 | #define SLAB_HWCACHE_ALIGN 1 | | 123 | #define SLAB_HWCACHE_ALIGN 1 |
126 | | | 124 | |
/* Linux kmem_cache shim, backed by a pool_cache(9). */
struct kmem_cache {
	pool_cache_t kc_pool_cache;	/* backing pool cache */
	size_t kc_size;		/* object size; used to zero objects on __GFP_ZERO */
	void (*kc_ctor)(void *);	/* Linux-style constructor; may be NULL */
};
132 | | | 130 | |
133 | static int | | 131 | static int |
134 | kmem_cache_ctor(void *cookie, void *ptr, int flags __unused) | | 132 | kmem_cache_ctor(void *cookie, void *ptr, int flags __unused) |
135 | { | | 133 | { |
136 | struct kmem_cache *const kc = cookie; | | 134 | struct kmem_cache *const kc = cookie; |
137 | | | 135 | |
138 | if (kc->kc_ctor) | | 136 | if (kc->kc_ctor) |
139 | (*kc->kc_ctor)(ptr); | | 137 | (*kc->kc_ctor)(ptr); |
140 | | | 138 | |
141 | return 0; | | 139 | return 0; |
142 | } | | 140 | } |
143 | | | 141 | |
144 | static inline struct kmem_cache * | | 142 | static inline struct kmem_cache * |
145 | kmem_cache_create(const char *name, size_t size, size_t align, | | 143 | kmem_cache_create(const char *name, size_t size, size_t align, |
146 | unsigned long flags, void (*ctor)(void *)) | | 144 | unsigned long flags, void (*ctor)(void *)) |
147 | { | | 145 | { |
148 | struct kmem_cache *kc; | | 146 | struct kmem_cache *kc; |
149 | | | 147 | |
150 | if (ISSET(flags, SLAB_HWCACHE_ALIGN)) | | 148 | if (ISSET(flags, SLAB_HWCACHE_ALIGN)) |
151 | align = roundup(MAX(1, align), CACHE_LINE_SIZE); | | 149 | align = roundup(MAX(1, align), CACHE_LINE_SIZE); |
152 | | | 150 | |
153 | kc = kmem_alloc(sizeof(*kc), KM_SLEEP); | | 151 | kc = kmem_alloc(sizeof(*kc), KM_SLEEP); |
154 | kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, NULL, | | 152 | kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, NULL, |
155 | IPL_NONE, &kmem_cache_ctor, NULL, kc); | | 153 | IPL_NONE, &kmem_cache_ctor, NULL, kc); |
156 | kc->kc_size = size; | | 154 | kc->kc_size = size; |
157 | kc->kc_ctor = ctor; | | 155 | kc->kc_ctor = ctor; |
158 | | | 156 | |
159 | return kc; | | 157 | return kc; |
160 | } | | 158 | } |
161 | | | 159 | |
162 | static inline void | | 160 | static inline void |
163 | kmem_cache_destroy(struct kmem_cache *kc) | | 161 | kmem_cache_destroy(struct kmem_cache *kc) |
164 | { | | 162 | { |
165 | | | 163 | |
166 | pool_cache_destroy(kc->kc_pool_cache); | | 164 | pool_cache_destroy(kc->kc_pool_cache); |
167 | kmem_free(kc, sizeof(*kc)); | | 165 | kmem_free(kc, sizeof(*kc)); |
168 | } | | 166 | } |
169 | | | 167 | |
170 | static inline void * | | 168 | static inline void * |
171 | kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp) | | 169 | kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp) |
172 | { | | 170 | { |
173 | int flags = 0; | | 171 | int flags = 0; |
174 | void *ptr; | | 172 | void *ptr; |
175 | | | 173 | |
176 | if (gfp & __GFP_WAIT) | | 174 | if (gfp & __GFP_WAIT) |
177 | flags |= PR_NOWAIT; | | 175 | flags |= PR_NOWAIT; |
178 | else | | 176 | else |
179 | flags |= PR_WAITOK; | | 177 | flags |= PR_WAITOK; |
180 | | | 178 | |
181 | ptr = pool_cache_get(kc->kc_pool_cache, flags); | | 179 | ptr = pool_cache_get(kc->kc_pool_cache, flags); |
182 | if (ptr == NULL) | | 180 | if (ptr == NULL) |
183 | return NULL; | | 181 | return NULL; |
184 | | | 182 | |
185 | if (ISSET(gfp, __GFP_ZERO)) | | 183 | if (ISSET(gfp, __GFP_ZERO)) |
186 | (void)memset(ptr, 0, kc->kc_size); | | 184 | (void)memset(ptr, 0, kc->kc_size); |
187 | | | 185 | |
188 | return ptr; | | 186 | return ptr; |
189 | } | | 187 | } |
190 | | | 188 | |
191 | static inline void * | | 189 | static inline void * |
192 | kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp) | | 190 | kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp) |
193 | { | | 191 | { |
194 | | | 192 | |
195 | return kmem_cache_alloc(kc, (gfp | __GFP_ZERO)); | | 193 | return kmem_cache_alloc(kc, (gfp | __GFP_ZERO)); |
196 | } | | 194 | } |
197 | | | 195 | |
198 | static inline void | | 196 | static inline void |
199 | kmem_cache_free(struct kmem_cache *kc, void *ptr) | | 197 | kmem_cache_free(struct kmem_cache *kc, void *ptr) |
200 | { | | 198 | { |
201 | | | 199 | |
202 | pool_cache_put(kc->kc_pool_cache, ptr); | | 200 | pool_cache_put(kc->kc_pool_cache, ptr); |
203 | } | | 201 | } |
204 | | | 202 | |
205 | #endif /* _LINUX_SLAB_H_ */ | | 203 | #endif /* _LINUX_SLAB_H_ */ |