| @@ -1,1798 +1,1836 @@ | | | @@ -1,1798 +1,1836 @@ |
1 | /* $NetBSD: jemalloc.c,v 1.37 2015/01/20 18:31:25 christos Exp $ */ | | 1 | /* $NetBSD: jemalloc.c,v 1.38 2015/07/26 17:21:55 martin Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>. | | 4 | * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice(s), this list of conditions and the following disclaimer as | | 11 | * notice(s), this list of conditions and the following disclaimer as |
12 | * the first lines of this file unmodified other than the possible | | 12 | * the first lines of this file unmodified other than the possible |
13 | * addition of one or more copyright notices. | | 13 | * addition of one or more copyright notices. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice(s), this list of conditions and the following disclaimer in | | 15 | * notice(s), this list of conditions and the following disclaimer in |
16 | * the documentation and/or other materials provided with the | | 16 | * the documentation and/or other materials provided with the |
17 | * distribution. | | 17 | * distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY | | 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY |
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE |
23 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
26 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | | 26 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
27 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | | 27 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE |
28 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, | | 28 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, |
29 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 29 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
30 | * | | 30 | * |
31 | ******************************************************************************* | | 31 | ******************************************************************************* |
32 | * | | 32 | * |
33 | * This allocator implementation is designed to provide scalable performance | | 33 | * This allocator implementation is designed to provide scalable performance |
34 | * for multi-threaded programs on multi-processor systems. The following | | 34 | * for multi-threaded programs on multi-processor systems. The following |
35 | * features are included for this purpose: | | 35 | * features are included for this purpose: |
36 | * | | 36 | * |
37 | * + Multiple arenas are used if there are multiple CPUs, which reduces lock | | 37 | * + Multiple arenas are used if there are multiple CPUs, which reduces lock |
38 | * contention and cache sloshing. | | 38 | * contention and cache sloshing. |
39 | * | | 39 | * |
40 | * + Cache line sharing between arenas is avoided for internal data | | 40 | * + Cache line sharing between arenas is avoided for internal data |
41 | * structures. | | 41 | * structures. |
42 | * | | 42 | * |
43 | * + Memory is managed in chunks and runs (chunks can be split into runs), | | 43 | * + Memory is managed in chunks and runs (chunks can be split into runs), |
44 | * rather than as individual pages. This provides a constant-time | | 44 | * rather than as individual pages. This provides a constant-time |
45 | * mechanism for associating allocations with particular arenas. | | 45 | * mechanism for associating allocations with particular arenas. |
46 | * | | 46 | * |
47 | * Allocation requests are rounded up to the nearest size class, and no record | | 47 | * Allocation requests are rounded up to the nearest size class, and no record |
48 | * of the original request size is maintained. Allocations are broken into | | 48 | * of the original request size is maintained. Allocations are broken into |
49 | * categories according to size class. Assuming runtime defaults, 4 kB pages | | 49 | * categories according to size class. Assuming runtime defaults, 4 kB pages |
50 | * and a 16 byte quantum, the size classes in each category are as follows: | | 50 | * and a 16 byte quantum, the size classes in each category are as follows: |
51 | * | | 51 | * |
52 | * |=====================================| | | 52 | * |=====================================| |
53 | * | Category | Subcategory | Size | | | 53 | * | Category | Subcategory | Size | |
54 | * |=====================================| | | 54 | * |=====================================| |
55 | * | Small | Tiny | 2 | | | 55 | * | Small | Tiny | 2 | |
56 | * | | | 4 | | | 56 | * | | | 4 | |
57 | * | | | 8 | | | 57 | * | | | 8 | |
58 | * | |----------------+---------| | | 58 | * | |----------------+---------| |
59 | * | | Quantum-spaced | 16 | | | 59 | * | | Quantum-spaced | 16 | |
60 | * | | | 32 | | | 60 | * | | | 32 | |
61 | * | | | 48 | | | 61 | * | | | 48 | |
62 | * | | | ... | | | 62 | * | | | ... | |
63 | * | | | 480 | | | 63 | * | | | 480 | |
64 | * | | | 496 | | | 64 | * | | | 496 | |
65 | * | | | 512 | | | 65 | * | | | 512 | |
66 | * | |----------------+---------| | | 66 | * | |----------------+---------| |
67 | * | | Sub-page | 1 kB | | | 67 | * | | Sub-page | 1 kB | |
68 | * | | | 2 kB | | | 68 | * | | | 2 kB | |
69 | * |=====================================| | | 69 | * |=====================================| |
70 | * | Large | 4 kB | | | 70 | * | Large | 4 kB | |
71 | * | | 8 kB | | | 71 | * | | 8 kB | |
72 | * | | 12 kB | | | 72 | * | | 12 kB | |
73 | * | | ... | | | 73 | * | | ... | |
74 | * | | 1012 kB | | | 74 | * | | 1012 kB | |
75 | * | | 1016 kB | | | 75 | * | | 1016 kB | |
76 | * | | 1020 kB | | | 76 | * | | 1020 kB | |
77 | * |=====================================| | | 77 | * |=====================================| |
78 | * | Huge | 1 MB | | | 78 | * | Huge | 1 MB | |
79 | * | | 2 MB | | | 79 | * | | 2 MB | |
80 | * | | 3 MB | | | 80 | * | | 3 MB | |
81 | * | | ... | | | 81 | * | | ... | |
82 | * |=====================================| | | 82 | * |=====================================| |
83 | * | | 83 | * |
84 | * A different mechanism is used for each category: | | 84 | * A different mechanism is used for each category: |
85 | * | | 85 | * |
86 | * Small : Each size class is segregated into its own set of runs. Each run | | 86 | * Small : Each size class is segregated into its own set of runs. Each run |
87 | * maintains a bitmap of which regions are free/allocated. | | 87 | * maintains a bitmap of which regions are free/allocated. |
88 | * | | 88 | * |
89 | * Large : Each allocation is backed by a dedicated run. Metadata are stored | | 89 | * Large : Each allocation is backed by a dedicated run. Metadata are stored |
90 | * in the associated arena chunk header maps. | | 90 | * in the associated arena chunk header maps. |
91 | * | | 91 | * |
92 | * Huge : Each allocation is backed by a dedicated contiguous set of chunks. | | 92 | * Huge : Each allocation is backed by a dedicated contiguous set of chunks. |
93 | * Metadata are stored in a separate red-black tree. | | 93 | * Metadata are stored in a separate red-black tree. |
94 | * | | 94 | * |
95 | ******************************************************************************* | | 95 | ******************************************************************************* |
96 | */ | | 96 | */ |
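
The table above is easiest to internalize as a rounding function. Below is a minimal sketch of the size-class rounding it describes, assuming the stated runtime defaults (16-byte quantum, 4 kB pages, 1 MB chunks). The real logic lives in arena_malloc() and huge_malloc(), and additionally subtracts chunk-header pages from the largest "large" class, which this sketch ignores:

    #include <stddef.h>

    /* Round up to the next power of two (size >= 1). */
    static size_t
    pow2_ceil_sketch(size_t size)
    {
        size_t c = 1;
        while (c < size)
            c <<= 1;
        return c;
    }

    /* Hypothetical illustration of the size-class table above. */
    static size_t
    size_class_sketch(size_t size)
    {
        const size_t quantum = 16, pagesize = 4096, chunksize = 1 << 20;

        if (size <= 8)                          /* Tiny: 2, 4, 8. */
            return pow2_ceil_sketch(size < 2 ? 2 : size);
        if (size <= 512)                        /* Quantum-spaced. */
            return (size + quantum - 1) & ~(quantum - 1);
        if (size <= pagesize / 2)               /* Sub-page: 1 kB, 2 kB. */
            return pow2_ceil_sketch(size);
        if (size < chunksize)                   /* Large: page multiples. */
            return (size + pagesize - 1) & ~(pagesize - 1);
        return (size + chunksize - 1) & ~(chunksize - 1);   /* Huge. */
    }

For example, size_class_sketch(100) yields 112 (the next quantum multiple) and size_class_sketch(5000) yields 8192 (the next page multiple).
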
97 | | | 97 | |
98 | /* LINTLIBRARY */ | | 98 | /* LINTLIBRARY */ |
99 | | | 99 | |
100 | #ifdef __NetBSD__ | | 100 | #ifdef __NetBSD__ |
101 | # define xutrace(a, b) utrace("malloc", (a), (b)) | | 101 | # define xutrace(a, b) utrace("malloc", (a), (b)) |
102 | # define __DECONST(x, y) ((x)__UNCONST(y)) | | 102 | # define __DECONST(x, y) ((x)__UNCONST(y)) |
103 | # define NO_TLS | | 103 | # define NO_TLS |
104 | #else | | 104 | #else |
105 | # define xutrace(a, b) utrace((a), (b)) | | 105 | # define xutrace(a, b) utrace((a), (b)) |
106 | #endif /* __NetBSD__ */ | | 106 | #endif /* __NetBSD__ */ |
107 | | | 107 | |
108 | /* | | 108 | /* |
109 | * MALLOC_PRODUCTION disables assertions and statistics gathering. It also | | 109 | * MALLOC_PRODUCTION disables assertions and statistics gathering. It also |
110 | * defaults the A and J runtime options to off. These settings are appropriate | | 110 | * defaults the A and J runtime options to off. These settings are appropriate |
111 | * for production systems. | | 111 | * for production systems. |
112 | */ | | 112 | */ |
113 | #define MALLOC_PRODUCTION | | 113 | #define MALLOC_PRODUCTION |
114 | | | 114 | |
115 | #ifndef MALLOC_PRODUCTION | | 115 | #ifndef MALLOC_PRODUCTION |
116 | # define MALLOC_DEBUG | | 116 | # define MALLOC_DEBUG |
117 | #endif | | 117 | #endif |
118 | | | 118 | |
119 | #include <sys/cdefs.h> | | 119 | #include <sys/cdefs.h> |
120 | /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */ | | 120 | /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */ |
121 | __RCSID("$NetBSD: jemalloc.c,v 1.37 2015/01/20 18:31:25 christos Exp $"); | | 121 | __RCSID("$NetBSD: jemalloc.c,v 1.38 2015/07/26 17:21:55 martin Exp $"); |
122 | | | 122 | |
123 | #ifdef __FreeBSD__ | | 123 | #ifdef __FreeBSD__ |
124 | #include "libc_private.h" | | 124 | #include "libc_private.h" |
125 | #ifdef MALLOC_DEBUG | | 125 | #ifdef MALLOC_DEBUG |
126 | # define _LOCK_DEBUG | | 126 | # define _LOCK_DEBUG |
127 | #endif | | 127 | #endif |
128 | #include "spinlock.h" | | 128 | #include "spinlock.h" |
129 | #endif | | 129 | #endif |
130 | #include "namespace.h" | | 130 | #include "namespace.h" |
131 | #include <sys/mman.h> | | 131 | #include <sys/mman.h> |
132 | #include <sys/param.h> | | 132 | #include <sys/param.h> |
133 | #ifdef __FreeBSD__ | | 133 | #ifdef __FreeBSD__ |
134 | #include <sys/stddef.h> | | 134 | #include <sys/stddef.h> |
135 | #endif | | 135 | #endif |
136 | #include <sys/time.h> | | 136 | #include <sys/time.h> |
137 | #include <sys/types.h> | | 137 | #include <sys/types.h> |
138 | #include <sys/sysctl.h> | | 138 | #include <sys/sysctl.h> |
139 | #include <sys/tree.h> | | 139 | #include <sys/tree.h> |
140 | #include <sys/uio.h> | | 140 | #include <sys/uio.h> |
141 | #include <sys/ktrace.h> /* Must come after several other sys/ includes. */ | | 141 | #include <sys/ktrace.h> /* Must come after several other sys/ includes. */ |
142 | | | 142 | |
143 | #ifdef __FreeBSD__ | | 143 | #ifdef __FreeBSD__ |
144 | #include <machine/atomic.h> | | 144 | #include <machine/atomic.h> |
145 | #include <machine/cpufunc.h> | | 145 | #include <machine/cpufunc.h> |
146 | #endif | | 146 | #endif |
147 | #include <machine/vmparam.h> | | 147 | #include <machine/vmparam.h> |
148 | | | 148 | |
149 | #include <errno.h> | | 149 | #include <errno.h> |
150 | #include <limits.h> | | 150 | #include <limits.h> |
151 | #include <pthread.h> | | 151 | #include <pthread.h> |
152 | #include <sched.h> | | 152 | #include <sched.h> |
153 | #include <stdarg.h> | | 153 | #include <stdarg.h> |
154 | #include <stdbool.h> | | 154 | #include <stdbool.h> |
155 | #include <stdio.h> | | 155 | #include <stdio.h> |
156 | #include <stdint.h> | | 156 | #include <stdint.h> |
157 | #include <stdlib.h> | | 157 | #include <stdlib.h> |
158 | #include <string.h> | | 158 | #include <string.h> |
159 | #include <strings.h> | | 159 | #include <strings.h> |
160 | #include <unistd.h> | | 160 | #include <unistd.h> |
161 | | | 161 | |
162 | #ifdef __NetBSD__ | | 162 | #ifdef __NetBSD__ |
163 | # include <reentrant.h> | | 163 | # include <reentrant.h> |
164 | # include "extern.h" | | 164 | # include "extern.h" |
165 | | | 165 | |
166 | #define STRERROR_R(a, b, c) __strerror_r(a, b, c); | | 166 | #define STRERROR_R(a, b, c) __strerror_r(a, b, c); |
167 | /* | | 167 | /* |
168 | * A non-localized version of strerror() that avoids bringing in | | 168 | * A non-localized version of strerror() that avoids bringing in |
169 | * stdio and the locale code. All the malloc messages are in English, | | 169 | * stdio and the locale code. All the malloc messages are in English, |
170 | * so why bother? | | 170 | * so why bother? |
171 | */ | | 171 | */ |
172 | static int | | 172 | static int |
173 | __strerror_r(int e, char *s, size_t l) | | 173 | __strerror_r(int e, char *s, size_t l) |
174 | { | | 174 | { |
175 | int rval; | | 175 | int rval; |
176 | size_t slen; | | 176 | size_t slen; |
177 | | | 177 | |
178 | if (e >= 0 && e < sys_nerr) { | | 178 | if (e >= 0 && e < sys_nerr) { |
179 | slen = strlcpy(s, sys_errlist[e], l); | | 179 | slen = strlcpy(s, sys_errlist[e], l); |
180 | rval = 0; | | 180 | rval = 0; |
181 | } else { | | 181 | } else { |
182 | slen = snprintf_ss(s, l, "Unknown error %u", e); | | 182 | slen = snprintf_ss(s, l, "Unknown error %u", e); |
183 | rval = EINVAL; | | 183 | rval = EINVAL; |
184 | } | | 184 | } |
185 | return slen >= l ? ERANGE : rval; | | 185 | return slen >= l ? ERANGE : rval; |
186 | } | | 186 | } |
187 | #endif | | 187 | #endif |
188 | | | 188 | |
189 | #ifdef __FreeBSD__ | | 189 | #ifdef __FreeBSD__ |
190 | #define STRERROR_R(a, b, c) strerror_r(a, b, c); | | 190 | #define STRERROR_R(a, b, c) strerror_r(a, b, c); |
191 | #include "un-namespace.h" | | 191 | #include "un-namespace.h" |
192 | #endif | | 192 | #endif |
193 | | | 193 | |
194 | /* MALLOC_STATS enables statistics calculation. */ | | 194 | /* MALLOC_STATS enables statistics calculation. */ |
195 | #ifndef MALLOC_PRODUCTION | | 195 | #ifndef MALLOC_PRODUCTION |
196 | # define MALLOC_STATS | | 196 | # define MALLOC_STATS |
197 | #endif | | 197 | #endif |
198 | | | 198 | |
199 | #ifdef MALLOC_DEBUG | | 199 | #ifdef MALLOC_DEBUG |
200 | # ifdef NDEBUG | | 200 | # ifdef NDEBUG |
201 | # undef NDEBUG | | 201 | # undef NDEBUG |
202 | # endif | | 202 | # endif |
203 | #else | | 203 | #else |
204 | # ifndef NDEBUG | | 204 | # ifndef NDEBUG |
205 | # define NDEBUG | | 205 | # define NDEBUG |
206 | # endif | | 206 | # endif |
207 | #endif | | 207 | #endif |
208 | #include <assert.h> | | 208 | #include <assert.h> |
209 | | | 209 | |
210 | #ifdef MALLOC_DEBUG | | 210 | #ifdef MALLOC_DEBUG |
211 | /* Disable inlining to make debugging easier. */ | | 211 | /* Disable inlining to make debugging easier. */ |
212 | # define inline | | 212 | # define inline |
213 | #endif | | 213 | #endif |
214 | | | 214 | |
215 | /* Size of stack-allocated buffer passed to strerror_r(). */ | | 215 | /* Size of stack-allocated buffer passed to strerror_r(). */ |
216 | #define STRERROR_BUF 64 | | 216 | #define STRERROR_BUF 64 |
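
STRERROR_BUF sizes the on-stack buffer that callers pair with the STRERROR_R() macro above. A hedged sketch of the resulting error-reporting pattern (wrtmessage() is the allocator's stdio-free output routine declared at the end of this section; the exact call site shown here is illustrative, not copied from the file):

    char buf[STRERROR_BUF];

    STRERROR_R(ENOMEM, buf, sizeof(buf));
    wrtmessage("jemalloc: ", buf, "\n", "");
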
217 | | | 217 | |
218 | /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */ | | 218 | /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */ |
219 | | | 219 | |
220 | /* | | 220 | /* |
221 | * If you touch the TINY_MIN_2POW definition for any architecture, please | | 221 | * If you touch the TINY_MIN_2POW definition for any architecture, please |
222 | * make sure to adjust the corresponding definition for JEMALLOC_TINY_MIN_2POW | | 222 | * make sure to adjust the corresponding definition for JEMALLOC_TINY_MIN_2POW |
223 | * in the gcc 4.8 tree in dist/gcc/tree-ssa-ccp.c and verify that a native | | 223 | * in the gcc 4.8 tree in dist/gcc/tree-ssa-ccp.c and verify that a native |
224 | * gcc is still buildable! | | 224 | * gcc is still buildable! |
225 | */ | | 225 | */ |
226 | | | 226 | |
227 | #ifdef __i386__ | | 227 | #ifdef __i386__ |
228 | # define QUANTUM_2POW_MIN 4 | | 228 | # define QUANTUM_2POW_MIN 4 |
229 | # define SIZEOF_PTR_2POW 2 | | 229 | # define SIZEOF_PTR_2POW 2 |
230 | # define USE_BRK | | 230 | # define USE_BRK |
231 | #endif | | 231 | #endif |
232 | #ifdef __ia64__ | | 232 | #ifdef __ia64__ |
233 | # define QUANTUM_2POW_MIN 4 | | 233 | # define QUANTUM_2POW_MIN 4 |
234 | # define SIZEOF_PTR_2POW 3 | | 234 | # define SIZEOF_PTR_2POW 3 |
235 | #endif | | 235 | #endif |
236 | #ifdef __aarch64__ | | 236 | #ifdef __aarch64__ |
237 | # define QUANTUM_2POW_MIN 4 | | 237 | # define QUANTUM_2POW_MIN 4 |
238 | # define SIZEOF_PTR_2POW 3 | | 238 | # define SIZEOF_PTR_2POW 3 |
239 | # define NO_TLS | | 239 | # define NO_TLS |
240 | #endif | | 240 | #endif |
241 | #ifdef __alpha__ | | 241 | #ifdef __alpha__ |
242 | # define QUANTUM_2POW_MIN 4 | | 242 | # define QUANTUM_2POW_MIN 4 |
243 | # define SIZEOF_PTR_2POW 3 | | 243 | # define SIZEOF_PTR_2POW 3 |
244 | # define TINY_MIN_2POW 3 | | 244 | # define TINY_MIN_2POW 3 |
245 | # define NO_TLS | | 245 | # define NO_TLS |
246 | #endif | | 246 | #endif |
247 | #ifdef __sparc64__ | | 247 | #ifdef __sparc64__ |
248 | # define QUANTUM_2POW_MIN 4 | | 248 | # define QUANTUM_2POW_MIN 4 |
249 | # define SIZEOF_PTR_2POW 3 | | 249 | # define SIZEOF_PTR_2POW 3 |
250 | # define TINY_MIN_2POW 3 | | 250 | # define TINY_MIN_2POW 3 |
251 | # define NO_TLS | | 251 | # define NO_TLS |
252 | #endif | | 252 | #endif |
253 | #ifdef __amd64__ | | 253 | #ifdef __amd64__ |
254 | # define QUANTUM_2POW_MIN 4 | | 254 | # define QUANTUM_2POW_MIN 4 |
255 | # define SIZEOF_PTR_2POW 3 | | 255 | # define SIZEOF_PTR_2POW 3 |
256 | # define TINY_MIN_2POW 3 | | 256 | # define TINY_MIN_2POW 3 |
257 | #endif | | 257 | #endif |
258 | #ifdef __arm__ | | 258 | #ifdef __arm__ |
259 | # define QUANTUM_2POW_MIN 3 | | 259 | # define QUANTUM_2POW_MIN 3 |
260 | # define SIZEOF_PTR_2POW 2 | | 260 | # define SIZEOF_PTR_2POW 2 |
261 | # define USE_BRK | | 261 | # define USE_BRK |
262 | # ifdef __ARM_EABI__ | | 262 | # ifdef __ARM_EABI__ |
263 | # define TINY_MIN_2POW 3 | | 263 | # define TINY_MIN_2POW 3 |
264 | # endif | | 264 | # endif |
265 | # define NO_TLS | | 265 | # define NO_TLS |
266 | #endif | | 266 | #endif |
267 | #ifdef __powerpc__ | | 267 | #ifdef __powerpc__ |
268 | # define QUANTUM_2POW_MIN 4 | | 268 | # define QUANTUM_2POW_MIN 4 |
269 | # define SIZEOF_PTR_2POW 2 | | 269 | # define SIZEOF_PTR_2POW 2 |
270 | # define USE_BRK | | 270 | # define USE_BRK |
271 | # define TINY_MIN_2POW 3 | | 271 | # define TINY_MIN_2POW 3 |
272 | #endif | | 272 | #endif |
273 | #if defined(__sparc__) && !defined(__sparc64__) | | 273 | #if defined(__sparc__) && !defined(__sparc64__) |
274 | # define QUANTUM_2POW_MIN 4 | | 274 | # define QUANTUM_2POW_MIN 4 |
275 | # define SIZEOF_PTR_2POW 2 | | 275 | # define SIZEOF_PTR_2POW 2 |
276 | # define USE_BRK | | 276 | # define USE_BRK |
277 | #endif | | 277 | #endif |
278 | #ifdef __or1k__ | | 278 | #ifdef __or1k__ |
279 | # define QUANTUM_2POW_MIN 4 | | 279 | # define QUANTUM_2POW_MIN 4 |
280 | # define SIZEOF_PTR_2POW 2 | | 280 | # define SIZEOF_PTR_2POW 2 |
281 | # define USE_BRK | | 281 | # define USE_BRK |
282 | #endif | | 282 | #endif |
283 | #ifdef __vax__ | | 283 | #ifdef __vax__ |
284 | # define QUANTUM_2POW_MIN 4 | | 284 | # define QUANTUM_2POW_MIN 4 |
285 | # define SIZEOF_PTR_2POW 2 | | 285 | # define SIZEOF_PTR_2POW 2 |
286 | # define USE_BRK | | 286 | # define USE_BRK |
287 | #endif | | 287 | #endif |
288 | #ifdef __sh__ | | 288 | #ifdef __sh__ |
289 | # define QUANTUM_2POW_MIN 4 | | 289 | # define QUANTUM_2POW_MIN 4 |
290 | # define SIZEOF_PTR_2POW 2 | | 290 | # define SIZEOF_PTR_2POW 2 |
291 | # define USE_BRK | | 291 | # define USE_BRK |
292 | #endif | | 292 | #endif |
293 | #ifdef __m68k__ | | 293 | #ifdef __m68k__ |
294 | # define QUANTUM_2POW_MIN 4 | | 294 | # define QUANTUM_2POW_MIN 4 |
295 | # define SIZEOF_PTR_2POW 2 | | 295 | # define SIZEOF_PTR_2POW 2 |
296 | # define USE_BRK | | 296 | # define USE_BRK |
297 | #endif | | 297 | #endif |
298 | #if defined(__mips__) || defined(__riscv__) | | 298 | #if defined(__mips__) || defined(__riscv__) |
299 | # ifdef _LP64 | | 299 | # ifdef _LP64 |
300 | # define SIZEOF_PTR_2POW 3 | | 300 | # define SIZEOF_PTR_2POW 3 |
301 | # define TINY_MIN_2POW 3 | | 301 | # define TINY_MIN_2POW 3 |
302 | # else | | 302 | # else |
303 | # define SIZEOF_PTR_2POW 2 | | 303 | # define SIZEOF_PTR_2POW 2 |
304 | # endif | | 304 | # endif |
305 | # define QUANTUM_2POW_MIN 4 | | 305 | # define QUANTUM_2POW_MIN 4 |
306 | # define USE_BRK | | 306 | # define USE_BRK |
307 | #endif | | 307 | #endif |
308 | #ifdef __hppa__ | | 308 | #ifdef __hppa__ |
309 | # define QUANTUM_2POW_MIN 4 | | 309 | # define QUANTUM_2POW_MIN 4 |
310 | # define SIZEOF_PTR_2POW 2 | | 310 | # define SIZEOF_PTR_2POW 2 |
311 | # define USE_BRK | | 311 | # define USE_BRK |
312 | #endif | | 312 | #endif |
313 | | | 313 | |
314 | #define SIZEOF_PTR (1 << SIZEOF_PTR_2POW) | | 314 | #define SIZEOF_PTR (1 << SIZEOF_PTR_2POW) |
315 | | | 315 | |
316 | /* sizeof(int) == (1 << SIZEOF_INT_2POW). */ | | 316 | /* sizeof(int) == (1 << SIZEOF_INT_2POW). */ |
317 | #ifndef SIZEOF_INT_2POW | | 317 | #ifndef SIZEOF_INT_2POW |
318 | # define SIZEOF_INT_2POW 2 | | 318 | # define SIZEOF_INT_2POW 2 |
319 | #endif | | 319 | #endif |
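
Each SIZEOF_*_2POW constant is a log2 of the corresponding type size, so index math can use shifts instead of multiplications. The per-architecture values are taken on faith; a C11-style sanity check (not present in the source) would be:

    #include <assert.h>

    static_assert(sizeof(void *) == SIZEOF_PTR, "SIZEOF_PTR_2POW is wrong");
    static_assert(sizeof(int) == (1 << SIZEOF_INT_2POW),
        "SIZEOF_INT_2POW is wrong");
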
320 | | | 320 | |
321 | /* | | 321 | /* |
322 | * Size and alignment of memory chunks that are allocated by the OS's virtual | | 322 | * Size and alignment of memory chunks that are allocated by the OS's virtual |
323 | * memory system. | | 323 | * memory system. |
324 | */ | | 324 | */ |
325 | #define CHUNK_2POW_DEFAULT 20 | | 325 | #define CHUNK_2POW_DEFAULT 20 |
326 | | | 326 | |
327 | /* | | 327 | /* |
328 | * Maximum size of L1 cache line. This is used to avoid cache line aliasing, | | 328 | * Maximum size of L1 cache line. This is used to avoid cache line aliasing, |
329 | * so over-estimates are okay (up to a point), but under-estimates will | | 329 | * so over-estimates are okay (up to a point), but under-estimates will |
330 | * negatively affect performance. | | 330 | * negatively affect performance. |
331 | */ | | 331 | */ |
332 | #define CACHELINE_2POW 6 | | 332 | #define CACHELINE_2POW 6 |
333 | #define CACHELINE ((size_t)(1 << CACHELINE_2POW)) | | 333 | #define CACHELINE ((size_t)(1 << CACHELINE_2POW)) |
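
Internal (base) allocations are padded out to CACHELINE so that two bookkeeping structures never share a line. Since CACHELINE is a power of two, the padding is the usual mask trick; the file later rounds with a helper along these lines (shown here as a sketch):

    /* Round s up to a multiple of CACHELINE (a power of two). */
    #define CACHELINE_CEILING(s)    (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
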
334 | | | 334 | |
335 | /* Smallest size class to support. */ | | 335 | /* Smallest size class to support. */ |
336 | #ifndef TINY_MIN_2POW | | 336 | #ifndef TINY_MIN_2POW |
337 | #define TINY_MIN_2POW 2 | | 337 | #define TINY_MIN_2POW 2 |
338 | #endif | | 338 | #endif |
339 | | | 339 | |
340 | /* | | 340 | /* |
341 | * Maximum size class that is a multiple of the quantum, but not (necessarily) | | 341 | * Maximum size class that is a multiple of the quantum, but not (necessarily) |
342 | * a power of 2. Above this size, allocations are rounded up to the nearest | | 342 | * a power of 2. Above this size, allocations are rounded up to the nearest |
343 | * power of 2. | | 343 | * power of 2. |
344 | */ | | 344 | */ |
345 | #define SMALL_MAX_2POW_DEFAULT 9 | | 345 | #define SMALL_MAX_2POW_DEFAULT 9 |
346 | #define SMALL_MAX_DEFAULT (1 << SMALL_MAX_2POW_DEFAULT) | | 346 | #define SMALL_MAX_DEFAULT (1 << SMALL_MAX_2POW_DEFAULT) |
347 | | | 347 | |
348 | /* | | 348 | /* |
349 | * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized | | 349 | * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized |
350 | * as small as possible such that this setting is still honored, without | | 350 | * as small as possible such that this setting is still honored, without |
351 | * violating other constraints. The goal is to make runs as small as possible | | 351 | * violating other constraints. The goal is to make runs as small as possible |
352 | * without exceeding a per run external fragmentation threshold. | | 352 | * without exceeding a per run external fragmentation threshold. |
353 | * | | 353 | * |
354 | * We use binary fixed point math for overhead computations, where the binary | | 354 | * We use binary fixed point math for overhead computations, where the binary |
355 | * point is implicitly RUN_BFP bits to the left. | | 355 | * point is implicitly RUN_BFP bits to the left. |
356 | * | | 356 | * |
357 | * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be | | 357 | * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be |
358 | * honored for some/all object sizes, since there is one bit of header overhead | | 358 | * honored for some/all object sizes, since there is one bit of header overhead |
359 | * per object (plus a constant). This constraint is relaxed (ignored) for runs | | 359 | * per object (plus a constant). This constraint is relaxed (ignored) for runs |
360 | * that are so small that the per-region overhead is greater than: | | 360 | * that are so small that the per-region overhead is greater than: |
361 | * | | 361 | * |
362 | * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)) | | 362 | * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)) |
363 | */ | | 363 | */ |
364 | #define RUN_BFP 12 | | 364 | #define RUN_BFP 12 |
365 | /* \/ Implicit binary fixed point. */ | | 365 | /* \/ Implicit binary fixed point. */ |
366 | #define RUN_MAX_OVRHD 0x0000003dU | | 366 | #define RUN_MAX_OVRHD 0x0000003dU |
367 | #define RUN_MAX_OVRHD_RELAX 0x00001800U | | 367 | #define RUN_MAX_OVRHD_RELAX 0x00001800U |
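
Concretely: RUN_MAX_OVRHD = 0x3d = 61 encodes 61/2^12 = 61/4096, i.e. roughly 1.5% maximum header overhead per run, and RUN_MAX_OVRHD_RELAX = 0x1800 = 6144 marks the point (6144/4096 = 1.5) past which the constraint is abandoned for very small regions. The acceptance test is a cross-multiplication, so no division or floating point is needed; with hypothetical names header_size and run_size it amounts to:

    /* Accept run_size when header overhead <= 61/4096 of the run. */
    bool ok = ((uint64_t)header_size << RUN_BFP) <=
        (uint64_t)RUN_MAX_OVRHD * run_size;
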
368 | | | 368 | |
369 | /* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */ | | 369 | /* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */ |
370 | #define RUN_MAX_SMALL_2POW 15 | | 370 | #define RUN_MAX_SMALL_2POW 15 |
371 | #define RUN_MAX_SMALL (1 << RUN_MAX_SMALL_2POW) | | 371 | #define RUN_MAX_SMALL (1 << RUN_MAX_SMALL_2POW) |
372 | | | 372 | |
373 | /******************************************************************************/ | | 373 | /******************************************************************************/ |
374 | | | 374 | |
375 | #ifdef __FreeBSD__ | | 375 | #ifdef __FreeBSD__ |
376 | /* | | 376 | /* |
377 | * Mutexes based on spinlocks. We can't use normal pthread mutexes, because | | 377 | * Mutexes based on spinlocks. We can't use normal pthread mutexes, because |
378 | * they require malloc()ed memory. | | 378 | * they require malloc()ed memory. |
379 | */ | | 379 | */ |
380 | typedef struct { | | 380 | typedef struct { |
381 | spinlock_t lock; | | 381 | spinlock_t lock; |
382 | } malloc_mutex_t; | | 382 | } malloc_mutex_t; |
383 | | | 383 | |
384 | /* Set to true once the allocator has been initialized. */ | | 384 | /* Set to true once the allocator has been initialized. */ |
385 | static bool malloc_initialized = false; | | 385 | static bool malloc_initialized = false; |
386 | | | 386 | |
387 | /* Used to avoid initialization races. */ | | 387 | /* Used to avoid initialization races. */ |
388 | static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER}; | | 388 | static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER}; |
389 | #else | | 389 | #else |
390 | #define malloc_mutex_t mutex_t | | 390 | #define malloc_mutex_t mutex_t |
391 | | | 391 | |
392 | /* Set to true once the allocator has been initialized. */ | | 392 | /* Set to true once the allocator has been initialized. */ |
393 | static bool malloc_initialized = false; | | 393 | static bool malloc_initialized = false; |
394 | | | 394 | |
395 | #ifdef _REENTRANT | | 395 | #ifdef _REENTRANT |
396 | /* Used to avoid initialization races. */ | | 396 | /* Used to avoid initialization races. */ |
397 | static mutex_t init_lock = MUTEX_INITIALIZER; | | 397 | static mutex_t init_lock = MUTEX_INITIALIZER; |
398 | #endif | | 398 | #endif |
399 | #endif | | 399 | #endif |
400 | | | 400 | |
401 | /******************************************************************************/ | | 401 | /******************************************************************************/ |
402 | /* | | 402 | /* |
403 | * Statistics data structures. | | 403 | * Statistics data structures. |
404 | */ | | 404 | */ |
405 | | | 405 | |
406 | #ifdef MALLOC_STATS | | 406 | #ifdef MALLOC_STATS |
407 | | | 407 | |
408 | typedef struct malloc_bin_stats_s malloc_bin_stats_t; | | 408 | typedef struct malloc_bin_stats_s malloc_bin_stats_t; |
409 | struct malloc_bin_stats_s { | | 409 | struct malloc_bin_stats_s { |
410 | /* | | 410 | /* |
411 | * Number of allocation requests that corresponded to the size of this | | 411 | * Number of allocation requests that corresponded to the size of this |
412 | * bin. | | 412 | * bin. |
413 | */ | | 413 | */ |
414 | uint64_t nrequests; | | 414 | uint64_t nrequests; |
415 | | | 415 | |
416 | /* Total number of runs created for this bin's size class. */ | | 416 | /* Total number of runs created for this bin's size class. */ |
417 | uint64_t nruns; | | 417 | uint64_t nruns; |
418 | | | 418 | |
419 | /* | | 419 | /* |
420 | * Total number of runs reused by extracting them from the runs tree for | | 420 | * Total number of runs reused by extracting them from the runs tree for |
421 | * this bin's size class. | | 421 | * this bin's size class. |
422 | */ | | 422 | */ |
423 | uint64_t reruns; | | 423 | uint64_t reruns; |
424 | | | 424 | |
425 | /* High-water mark for this bin. */ | | 425 | /* High-water mark for this bin. */ |
426 | unsigned long highruns; | | 426 | unsigned long highruns; |
427 | | | 427 | |
428 | /* Current number of runs in this bin. */ | | 428 | /* Current number of runs in this bin. */ |
429 | unsigned long curruns; | | 429 | unsigned long curruns; |
430 | }; | | 430 | }; |
431 | | | 431 | |
432 | typedef struct arena_stats_s arena_stats_t; | | 432 | typedef struct arena_stats_s arena_stats_t; |
433 | struct arena_stats_s { | | 433 | struct arena_stats_s { |
434 | /* Number of bytes currently mapped. */ | | 434 | /* Number of bytes currently mapped. */ |
435 | size_t mapped; | | 435 | size_t mapped; |
436 | | | 436 | |
437 | /* Per-size-category statistics. */ | | 437 | /* Per-size-category statistics. */ |
438 | size_t allocated_small; | | 438 | size_t allocated_small; |
439 | uint64_t nmalloc_small; | | 439 | uint64_t nmalloc_small; |
440 | uint64_t ndalloc_small; | | 440 | uint64_t ndalloc_small; |
441 | | | 441 | |
442 | size_t allocated_large; | | 442 | size_t allocated_large; |
443 | uint64_t nmalloc_large; | | 443 | uint64_t nmalloc_large; |
444 | uint64_t ndalloc_large; | | 444 | uint64_t ndalloc_large; |
445 | }; | | 445 | }; |
446 | | | 446 | |
447 | typedef struct chunk_stats_s chunk_stats_t; | | 447 | typedef struct chunk_stats_s chunk_stats_t; |
448 | struct chunk_stats_s { | | 448 | struct chunk_stats_s { |
449 | /* Number of chunks that were allocated. */ | | 449 | /* Number of chunks that were allocated. */ |
450 | uint64_t nchunks; | | 450 | uint64_t nchunks; |
451 | | | 451 | |
452 | /* High-water mark for number of chunks allocated. */ | | 452 | /* High-water mark for number of chunks allocated. */ |
453 | unsigned long highchunks; | | 453 | unsigned long highchunks; |
454 | | | 454 | |
455 | /* | | 455 | /* |
456 | * Current number of chunks allocated. This value isn't maintained for | | 456 | * Current number of chunks allocated. This value isn't maintained for |
457 | * any other purpose, so keep track of it in order to be able to set | | 457 | * any other purpose, so keep track of it in order to be able to set |
458 | * highchunks. | | 458 | * highchunks. |
459 | */ | | 459 | */ |
460 | unsigned long curchunks; | | 460 | unsigned long curchunks; |
461 | }; | | 461 | }; |
462 | | | 462 | |
463 | #endif /* #ifdef MALLOC_STATS */ | | 463 | #endif /* #ifdef MALLOC_STATS */ |
464 | | | 464 | |
465 | /******************************************************************************/ | | 465 | /******************************************************************************/ |
466 | /* | | 466 | /* |
467 | * Chunk data structures. | | 467 | * Chunk data structures. |
468 | */ | | 468 | */ |
469 | | | 469 | |
470 | /* Tree of chunks. */ | | 470 | /* Tree of chunks. */ |
471 | typedef struct chunk_node_s chunk_node_t; | | 471 | typedef struct chunk_node_s chunk_node_t; |
472 | struct chunk_node_s { | | 472 | struct chunk_node_s { |
473 | /* Linkage for the chunk tree. */ | | 473 | /* Linkage for the chunk tree. */ |
474 | RB_ENTRY(chunk_node_s) link; | | 474 | RB_ENTRY(chunk_node_s) link; |
475 | | | 475 | |
476 | /* | | 476 | /* |
477 | * Pointer to the chunk that this tree node is responsible for. In some | | 477 | * Pointer to the chunk that this tree node is responsible for. In some |
478 | * (but certainly not all) cases, this data structure is placed at the | | 478 | * (but certainly not all) cases, this data structure is placed at the |
479 | * beginning of the corresponding chunk, so this field may point to this | | 479 | * beginning of the corresponding chunk, so this field may point to this |
480 | * node. | | 480 | * node. |
481 | */ | | 481 | */ |
482 | void *chunk; | | 482 | void *chunk; |
483 | | | 483 | |
484 | /* Total chunk size. */ | | 484 | /* Total chunk size. */ |
485 | size_t size; | | 485 | size_t size; |
486 | }; | | 486 | }; |
487 | typedef struct chunk_tree_s chunk_tree_t; | | 487 | typedef struct chunk_tree_s chunk_tree_t; |
488 | RB_HEAD(chunk_tree_s, chunk_node_s); | | 488 | RB_HEAD(chunk_tree_s, chunk_node_s); |
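
Chunks are allocated chunksize-aligned, so recovering the owning chunk (and, via its header, the owning arena) from any interior pointer is a single mask of the low address bits; this is the constant-time association promised in the header comment. As a sketch (chunksize_mask is the global declared further down; the file defines an equivalent macro later):

    /* Mask an arbitrary pointer down to its chunk base. */
    #define CHUNK_ADDR2BASE(a)  ((void *)((uintptr_t)(a) & ~chunksize_mask))
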
489 | | | 489 | |
490 | /******************************************************************************/ | | 490 | /******************************************************************************/ |
491 | /* | | 491 | /* |
492 | * Arena data structures. | | 492 | * Arena data structures. |
493 | */ | | 493 | */ |
494 | | | 494 | |
495 | typedef struct arena_s arena_t; | | 495 | typedef struct arena_s arena_t; |
496 | typedef struct arena_bin_s arena_bin_t; | | 496 | typedef struct arena_bin_s arena_bin_t; |
497 | | | 497 | |
498 | typedef struct arena_chunk_map_s arena_chunk_map_t; | | 498 | typedef struct arena_chunk_map_s arena_chunk_map_t; |
499 | struct arena_chunk_map_s { | | 499 | struct arena_chunk_map_s { |
500 | /* Number of pages in run. */ | | 500 | /* Number of pages in run. */ |
501 | uint32_t npages; | | 501 | uint32_t npages; |
502 | /* | | 502 | /* |
503 | * Position within run. For a free run, this is POS_FREE for the first | | 503 | * Position within run. For a free run, this is POS_FREE for the first |
504 | * and last pages. The POS_FREE special value makes it possible to | | 504 | * and last pages. The POS_FREE special value makes it possible to |
505 | * quickly coalesce free runs. | | 505 | * quickly coalesce free runs. |
506 | * | | 506 | * |
507 | * This is the limiting factor for chunksize; there can be at most 2^31 | | 507 | * This is the limiting factor for chunksize; there can be at most 2^31 |
508 | * pages in a run. | | 508 | * pages in a run. |
509 | */ | | 509 | */ |
510 | #define POS_FREE ((uint32_t)0xffffffffU) | | 510 | #define POS_FREE ((uint32_t)0xffffffffU) |
511 | uint32_t pos; | | 511 | uint32_t pos; |
512 | }; | | 512 | }; |
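
Storing POS_FREE in both the first and the last map entry of a free run is what makes coalescing O(1): a run being freed peeks one entry backward to find the end of a preceding free run, and one entry forward to find the start of a following one. A simplified sketch (hypothetical helper; the real logic is in arena_run_dalloc() and also performs the chunk-boundary checks omitted here):

    static void
    coalesce_free_run(arena_chunk_map_t *map, uint32_t ind, uint32_t npages)
    {
        if (ind > 0 && map[ind - 1].pos == POS_FREE) {
            /* map[ind-1] is the *last* page of the preceding free run. */
            uint32_t prev = map[ind - 1].npages;
            ind -= prev;
            npages += prev;
        }
        if (map[ind + npages].pos == POS_FREE) {
            /* map[ind+npages] is the *first* page of the following run. */
            npages += map[ind + npages].npages;
        }
        /* Only the first and last entries of the result are updated. */
        map[ind].npages = npages;
        map[ind].pos = POS_FREE;
        map[ind + npages - 1].npages = npages;
        map[ind + npages - 1].pos = POS_FREE;
    }
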
513 | | | 513 | |
514 | /* Arena chunk header. */ | | 514 | /* Arena chunk header. */ |
515 | typedef struct arena_chunk_s arena_chunk_t; | | 515 | typedef struct arena_chunk_s arena_chunk_t; |
516 | struct arena_chunk_s { | | 516 | struct arena_chunk_s { |
517 | /* Arena that owns the chunk. */ | | 517 | /* Arena that owns the chunk. */ |
518 | arena_t *arena; | | 518 | arena_t *arena; |
519 | | | 519 | |
520 | /* Linkage for the arena's chunk tree. */ | | 520 | /* Linkage for the arena's chunk tree. */ |
521 | RB_ENTRY(arena_chunk_s) link; | | 521 | RB_ENTRY(arena_chunk_s) link; |
522 | | | 522 | |
523 | /* | | 523 | /* |
524 | * Number of pages in use. This is maintained in order to make | | 524 | * Number of pages in use. This is maintained in order to make |
525 | * detection of empty chunks fast. | | 525 | * detection of empty chunks fast. |
526 | */ | | 526 | */ |
527 | uint32_t pages_used; | | 527 | uint32_t pages_used; |
528 | | | 528 | |
529 | /* | | 529 | /* |
530 | * Every time a free run larger than this value is created/coalesced, | | 530 | * Every time a free run larger than this value is created/coalesced, |
531 | * this value is increased. The only way that the value decreases is if | | 531 | * this value is increased. The only way that the value decreases is if |
532 | * arena_run_alloc() fails to find a free run as large as advertised by | | 532 | * arena_run_alloc() fails to find a free run as large as advertised by |
533 | * this value. | | 533 | * this value. |
534 | */ | | 534 | */ |
535 | uint32_t max_frun_npages; | | 535 | uint32_t max_frun_npages; |
536 | | | 536 | |
537 | /* | | 537 | /* |
538 | * Every time a free run that starts at an earlier page than this value | | 538 | * Every time a free run that starts at an earlier page than this value |
539 | * is created/coalesced, this value is decreased. It is reset in a | | 539 | * is created/coalesced, this value is decreased. It is reset in a |
540 | * similar fashion to max_frun_npages. | | 540 | * similar fashion to max_frun_npages. |
541 | */ | | 541 | */ |
542 | uint32_t min_frun_ind; | | 542 | uint32_t min_frun_ind; |
543 | | | 543 | |
544 | /* | | 544 | /* |
545 | * Map of pages within chunk that keeps track of free/large/small. For | | 545 | * Map of pages within chunk that keeps track of free/large/small. For |
546 | * free runs, only the map entries for the first and last pages are | | 546 | * free runs, only the map entries for the first and last pages are |
547 | * kept up to date, so that free runs can be quickly coalesced. | | 547 | * kept up to date, so that free runs can be quickly coalesced. |
548 | */ | | 548 | */ |
549 | arena_chunk_map_t map[1]; /* Dynamically sized. */ | | 549 | arena_chunk_map_t map[1]; /* Dynamically sized. */ |
550 | }; | | 550 | }; |
551 | typedef struct arena_chunk_tree_s arena_chunk_tree_t; | | 551 | typedef struct arena_chunk_tree_s arena_chunk_tree_t; |
552 | RB_HEAD(arena_chunk_tree_s, arena_chunk_s); | | 552 | RB_HEAD(arena_chunk_tree_s, arena_chunk_s); |
553 | | | 553 | |
554 | typedef struct arena_run_s arena_run_t; | | 554 | typedef struct arena_run_s arena_run_t; |
555 | struct arena_run_s { | | 555 | struct arena_run_s { |
556 | /* Linkage for run trees. */ | | 556 | /* Linkage for run trees. */ |
557 | RB_ENTRY(arena_run_s) link; | | 557 | RB_ENTRY(arena_run_s) link; |
558 | | | 558 | |
559 | #ifdef MALLOC_DEBUG | | 559 | #ifdef MALLOC_DEBUG |
560 | uint32_t magic; | | 560 | uint32_t magic; |
561 | # define ARENA_RUN_MAGIC 0x384adf93 | | 561 | # define ARENA_RUN_MAGIC 0x384adf93 |
562 | #endif | | 562 | #endif |
563 | | | 563 | |
564 | /* Bin this run is associated with. */ | | 564 | /* Bin this run is associated with. */ |
565 | arena_bin_t *bin; | | 565 | arena_bin_t *bin; |
566 | | | 566 | |
567 | /* Index of first element that might have a free region. */ | | 567 | /* Index of first element that might have a free region. */ |
568 | unsigned regs_minelm; | | 568 | unsigned regs_minelm; |
569 | | | 569 | |
570 | /* Number of free regions in run. */ | | 570 | /* Number of free regions in run. */ |
571 | unsigned nfree; | | 571 | unsigned nfree; |
572 | | | 572 | |
573 | /* Bitmask of in-use regions (0: in use, 1: free). */ | | 573 | /* Bitmask of in-use regions (0: in use, 1: free). */ |
574 | unsigned regs_mask[1]; /* Dynamically sized. */ | | 574 | unsigned regs_mask[1]; /* Dynamically sized. */ |
575 | }; | | 575 | }; |
576 | typedef struct arena_run_tree_s arena_run_tree_t; | | 576 | typedef struct arena_run_tree_s arena_run_tree_t; |
577 | RB_HEAD(arena_run_tree_s, arena_run_s); | | 577 | RB_HEAD(arena_run_tree_s, arena_run_s); |
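
Because free regions are 1-bits, finding one is a first-set-bit scan that starts at regs_minelm and never rescans exhausted words. A simplified sketch of the allocation side (modeled on arena_run_reg_alloc(); bookkeeping such as the nfree decrement is omitted):

    #include <strings.h>    /* ffs() */

    static void *
    run_reg_alloc_sketch(arena_run_t *run, arena_bin_t *bin)
    {
        for (unsigned i = run->regs_minelm; i < bin->regs_mask_nelms; i++) {
            unsigned mask = run->regs_mask[i];

            if (mask != 0) {
                int bit = ffs((int)mask) - 1;   /* Lowest free region. */
                unsigned regind = (i << (SIZEOF_INT_2POW + 3)) + bit;

                run->regs_mask[i] = mask & ~(1U << bit);    /* Mark in use. */
                return (void *)((uintptr_t)run + bin->reg0_offset +
                    bin->reg_size * regind);
            }
            run->regs_minelm = i + 1;   /* Word exhausted; skip it next time. */
        }
        return NULL;                    /* Run is full. */
    }
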
578 | | | 578 | |
579 | struct arena_bin_s { | | 579 | struct arena_bin_s { |
580 | /* | | 580 | /* |
581 | * Current run being used to service allocations of this bin's size | | 581 | * Current run being used to service allocations of this bin's size |
582 | * class. | | 582 | * class. |
583 | */ | | 583 | */ |
584 | arena_run_t *runcur; | | 584 | arena_run_t *runcur; |
585 | | | 585 | |
586 | /* | | 586 | /* |
587 | * Tree of non-full runs. This tree is used when looking for an | | 587 | * Tree of non-full runs. This tree is used when looking for an |
588 | * existing run when runcur is no longer usable. We choose the | | 588 | * existing run when runcur is no longer usable. We choose the |
589 | * non-full run that is lowest in memory; this policy tends to keep | | 589 | * non-full run that is lowest in memory; this policy tends to keep |
590 | * objects packed well, and it can also help reduce the number of | | 590 | * objects packed well, and it can also help reduce the number of |
591 | * almost-empty chunks. | | 591 | * almost-empty chunks. |
592 | */ | | 592 | */ |
593 | arena_run_tree_t runs; | | 593 | arena_run_tree_t runs; |
594 | | | 594 | |
595 | /* Size of regions in a run for this bin's size class. */ | | 595 | /* Size of regions in a run for this bin's size class. */ |
596 | size_t reg_size; | | 596 | size_t reg_size; |
597 | | | 597 | |
598 | /* Total size of a run for this bin's size class. */ | | 598 | /* Total size of a run for this bin's size class. */ |
599 | size_t run_size; | | 599 | size_t run_size; |
600 | | | 600 | |
601 | /* Total number of regions in a run for this bin's size class. */ | | 601 | /* Total number of regions in a run for this bin's size class. */ |
602 | uint32_t nregs; | | 602 | uint32_t nregs; |
603 | | | 603 | |
604 | /* Number of elements in a run's regs_mask for this bin's size class. */ | | 604 | /* Number of elements in a run's regs_mask for this bin's size class. */ |
605 | uint32_t regs_mask_nelms; | | 605 | uint32_t regs_mask_nelms; |
606 | | | 606 | |
607 | /* Offset of first region in a run for this bin's size class. */ | | 607 | /* Offset of first region in a run for this bin's size class. */ |
608 | uint32_t reg0_offset; | | 608 | uint32_t reg0_offset; |
609 | | | 609 | |
610 | #ifdef MALLOC_STATS | | 610 | #ifdef MALLOC_STATS |
611 | /* Bin statistics. */ | | 611 | /* Bin statistics. */ |
612 | malloc_bin_stats_t stats; | | 612 | malloc_bin_stats_t stats; |
613 | #endif | | 613 | #endif |
614 | }; | | 614 | }; |
615 | | | 615 | |
616 | struct arena_s { | | 616 | struct arena_s { |
617 | #ifdef MALLOC_DEBUG | | 617 | #ifdef MALLOC_DEBUG |
618 | uint32_t magic; | | 618 | uint32_t magic; |
619 | # define ARENA_MAGIC 0x947d3d24 | | 619 | # define ARENA_MAGIC 0x947d3d24 |
620 | #endif | | 620 | #endif |
621 | | | 621 | |
622 | /* All operations on this arena require that mtx be locked. */ | | 622 | /* All operations on this arena require that mtx be locked. */ |
623 | malloc_mutex_t mtx; | | 623 | malloc_mutex_t mtx; |
624 | | | 624 | |
625 | #ifdef MALLOC_STATS | | 625 | #ifdef MALLOC_STATS |
626 | arena_stats_t stats; | | 626 | arena_stats_t stats; |
627 | #endif | | 627 | #endif |
628 | | | 628 | |
629 | /* | | 629 | /* |
630 | * Tree of chunks this arena manages. | | 630 | * Tree of chunks this arena manages. |
631 | */ | | 631 | */ |
632 | arena_chunk_tree_t chunks; | | 632 | arena_chunk_tree_t chunks; |
633 | | | 633 | |
634 | /* | | 634 | /* |
635 | * In order to avoid rapid chunk allocation/deallocation when an arena | | 635 | * In order to avoid rapid chunk allocation/deallocation when an arena |
636 | * oscillates right on the cusp of needing a new chunk, cache the most | | 636 | * oscillates right on the cusp of needing a new chunk, cache the most |
637 | * recently freed chunk. This caching is disabled by opt_hint. | | 637 | * recently freed chunk. This caching is disabled by opt_hint. |
638 | * | | 638 | * |
639 | * There is one spare chunk per arena, rather than one spare total, in | | 639 | * There is one spare chunk per arena, rather than one spare total, in |
640 | * order to avoid interactions between multiple threads that could make | | 640 | * order to avoid interactions between multiple threads that could make |
641 | * a single spare inadequate. | | 641 | * a single spare inadequate. |
642 | */ | | 642 | */ |
643 | arena_chunk_t *spare; | | 643 | arena_chunk_t *spare; |
644 | | | 644 | |
645 | /* | | 645 | /* |
646 | * bins is used to store rings of free regions of the following sizes, | | 646 | * bins is used to store rings of free regions of the following sizes, |
647 | * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS. | | 647 | * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS. |
648 | * | | 648 | * |
649 | * bins[i] | size | | | 649 | * bins[i] | size | |
650 | * --------+------+ | | 650 | * --------+------+ |
651 | * 0 | 2 | | | 651 | * 0 | 2 | |
652 | * 1 | 4 | | | 652 | * 1 | 4 | |
653 | * 2 | 8 | | | 653 | * 2 | 8 | |
654 | * --------+------+ | | 654 | * --------+------+ |
655 | * 3 | 16 | | | 655 | * 3 | 16 | |
656 | * 4 | 32 | | | 656 | * 4 | 32 | |
657 | * 5 | 48 | | | 657 | * 5 | 48 | |
658 | * 6 | 64 | | | 658 | * 6 | 64 | |
659 | * : : | | 659 | * : : |
660 | * : : | | 660 | * : : |
661 | * 33 | 496 | | | 661 | * 33 | 496 | |
662 | * 34 | 512 | | | 662 | * 34 | 512 | |
663 | * --------+------+ | | 663 | * --------+------+ |
664 | * 35 | 1024 | | | 664 | * 35 | 1024 | |
665 | * 36 | 2048 | | | 665 | * 36 | 2048 | |
666 | * --------+------+ | | 666 | * --------+------+ |
667 | */ | | 667 | */ |
668 | arena_bin_t bins[1]; /* Dynamically sized. */ | | 668 | arena_bin_t bins[1]; /* Dynamically sized. */ |
669 | }; | | 669 | }; |
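
Selecting a bin from a request size follows the table directly: tiny bins are indexed by log2, quantum-spaced bins by size/quantum, and sub-page bins by log2 again, using the bin-count globals declared just below (ntbins, nqbins). A simplified sketch of the selection step in arena_malloc() (pow2_ceil() is the power-of-two rounding helper defined elsewhere in the file):

    arena_bin_t *bin;

    if (size < small_min) {                     /* Tiny. */
        size = pow2_ceil(size);
        bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW + 1)))];
    } else if (size <= small_max) {             /* Quantum-spaced. */
        size = (size + quantum_mask) & ~quantum_mask;
        bin = &arena->bins[ntbins + (size >> opt_quantum_2pow) - 1];
    } else {                                    /* Sub-page. */
        size = pow2_ceil(size);
        bin = &arena->bins[ntbins + nqbins +
            (ffs((int)(size >> opt_small_max_2pow)) - 1)];
    }
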
670 | | | 670 | |
671 | /******************************************************************************/ | | 671 | /******************************************************************************/ |
672 | /* | | 672 | /* |
673 | * Data. | | 673 | * Data. |
674 | */ | | 674 | */ |
675 | | | 675 | |
676 | /* Number of CPUs. */ | | 676 | /* Number of CPUs. */ |
677 | static unsigned ncpus; | | 677 | static unsigned ncpus; |
678 | | | 678 | |
679 | /* VM page size. */ | | 679 | /* VM page size. */ |
680 | static size_t pagesize; | | 680 | static size_t pagesize; |
681 | static size_t pagesize_mask; | | 681 | static size_t pagesize_mask; |
682 | static int pagesize_2pow; | | 682 | static int pagesize_2pow; |
683 | | | 683 | |
684 | /* Various bin-related settings. */ | | 684 | /* Various bin-related settings. */ |
685 | static size_t bin_maxclass; /* Max size class for bins. */ | | 685 | static size_t bin_maxclass; /* Max size class for bins. */ |
686 | static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */ | | 686 | static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */ |
687 | static unsigned nqbins; /* Number of quantum-spaced bins. */ | | 687 | static unsigned nqbins; /* Number of quantum-spaced bins. */ |
688 | static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */ | | 688 | static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */ |
689 | static size_t small_min; | | 689 | static size_t small_min; |
690 | static size_t small_max; | | 690 | static size_t small_max; |
691 | | | 691 | |
692 | /* Various quantum-related settings. */ | | 692 | /* Various quantum-related settings. */ |
693 | static size_t quantum; | | 693 | static size_t quantum; |
694 | static size_t quantum_mask; /* (quantum - 1). */ | | 694 | static size_t quantum_mask; /* (quantum - 1). */ |
695 | | | 695 | |
696 | /* Various chunk-related settings. */ | | 696 | /* Various chunk-related settings. */ |
697 | static size_t chunksize; | | 697 | static size_t chunksize; |
698 | static size_t chunksize_mask; /* (chunksize - 1). */ | | 698 | static size_t chunksize_mask; /* (chunksize - 1). */ |
699 | static int chunksize_2pow; | | 699 | static int chunksize_2pow; |
700 | static unsigned chunk_npages; | | 700 | static unsigned chunk_npages; |
701 | static unsigned arena_chunk_header_npages; | | 701 | static unsigned arena_chunk_header_npages; |
702 | static size_t arena_maxclass; /* Max size class for arenas. */ | | 702 | static size_t arena_maxclass; /* Max size class for arenas. */ |
703 | | | 703 | |
704 | /********/ | | 704 | /********/ |
705 | /* | | 705 | /* |
706 | * Chunks. | | 706 | * Chunks. |
707 | */ | | 707 | */ |
708 | | | 708 | |
709 | #ifdef _REENTRANT | | 709 | #ifdef _REENTRANT |
710 | /* Protects chunk-related data structures. */ | | 710 | /* Protects chunk-related data structures. */ |
711 | static malloc_mutex_t chunks_mtx; | | 711 | static malloc_mutex_t chunks_mtx; |
712 | #endif | | 712 | #endif |
713 | | | 713 | |
714 | /* Tree of chunks that are stand-alone huge allocations. */ | | 714 | /* Tree of chunks that are stand-alone huge allocations. */ |
715 | static chunk_tree_t huge; | | 715 | static chunk_tree_t huge; |
716 | | | 716 | |
717 | #ifdef USE_BRK | | 717 | #ifdef USE_BRK |
718 | /* | | 718 | /* |
719 | * Try to use brk for chunk-size allocations, due to address space constraints. | | 719 | * Try to use brk for chunk-size allocations, due to address space constraints. |
720 | */ | | 720 | */ |
721 | /* | | 721 | /* |
722 | * Protects sbrk() calls. This must be separate from chunks_mtx, since | | 722 | * Protects sbrk() calls. This must be separate from chunks_mtx, since |
723 | * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so | | 723 | * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so |
724 | * could cause recursive lock acquisition). | | 724 | * could cause recursive lock acquisition). |
725 | */ | | 725 | */ |
726 | static malloc_mutex_t brk_mtx; | | 726 | static malloc_mutex_t brk_mtx; |
727 | /* Result of first sbrk(0) call. */ | | 727 | /* Result of first sbrk(0) call. */ |
728 | static void *brk_base; | | 728 | static void *brk_base; |
729 | /* Current end of brk, or ((void *)-1) if brk is exhausted. */ | | 729 | /* Current end of brk, or ((void *)-1) if brk is exhausted. */ |
730 | static void *brk_prev; | | 730 | static void *brk_prev; |
731 | /* Current upper limit on brk addresses. */ | | 731 | /* Current upper limit on brk addresses. */ |
732 | static void *brk_max; | | 732 | static void *brk_max; |
733 | #endif | | 733 | #endif |
734 | | | 734 | |
735 | #ifdef MALLOC_STATS | | 735 | #ifdef MALLOC_STATS |
736 | /* Huge allocation statistics. */ | | 736 | /* Huge allocation statistics. */ |
737 | static uint64_t huge_nmalloc; | | 737 | static uint64_t huge_nmalloc; |
738 | static uint64_t huge_ndalloc; | | 738 | static uint64_t huge_ndalloc; |
739 | static uint64_t huge_nralloc; | | 739 | static uint64_t huge_nralloc; |
740 | static size_t huge_allocated; | | 740 | static size_t huge_allocated; |
741 | #endif | | 741 | #endif |
742 | | | 742 | |
743 | /* | | 743 | /* |
744 | * Tree of chunks that were previously allocated. This is used when allocating | | 744 | * Tree of chunks that were previously allocated. This is used when allocating |
745 | * chunks, in an attempt to re-use address space. | | 745 | * chunks, in an attempt to re-use address space. |
746 | */ | | 746 | */ |
747 | static chunk_tree_t old_chunks; | | 747 | static chunk_tree_t old_chunks; |
748 | | | 748 | |
749 | /****************************/ | | 749 | /****************************/ |
750 | /* | | 750 | /* |
751 | * base (internal allocation). | | 751 | * base (internal allocation). |
752 | */ | | 752 | */ |
753 | | | 753 | |
754 | /* | | 754 | /* |
755 | * Current pages that are being used for internal memory allocations. These | | 755 | * Current pages that are being used for internal memory allocations. These |
756 | * pages are carved up in cacheline-size quanta, so that there is no chance of | | 756 | * pages are carved up in cacheline-size quanta, so that there is no chance of |
757 | * false cache line sharing. | | 757 | * false cache line sharing. |
758 | */ | | 758 | */ |
759 | static void *base_pages; | | 759 | static void *base_pages; |
760 | static void *base_next_addr; | | 760 | static void *base_next_addr; |
761 | static void *base_past_addr; /* Addr immediately past base_pages. */ | | 761 | static void *base_past_addr; /* Addr immediately past base_pages. */ |
762 | static chunk_node_t *base_chunk_nodes; /* LIFO cache of chunk nodes. */ | | 762 | static chunk_node_t *base_chunk_nodes; /* LIFO cache of chunk nodes. */ |
763 | #ifdef _REENTRANT | | 763 | #ifdef _REENTRANT |
764 | static malloc_mutex_t base_mtx; | | 764 | static malloc_mutex_t base_mtx; |
765 | #endif | | 765 | #endif |
766 | #ifdef MALLOC_STATS | | 766 | #ifdef MALLOC_STATS |
767 | static size_t base_mapped; | | 767 | static size_t base_mapped; |
768 | #endif | | 768 | #endif |
769 | | | 769 | |
770 | /********/ | | 770 | /********/ |
771 | /* | | 771 | /* |
772 | * Arenas. | | 772 | * Arenas. |
773 | */ | | 773 | */ |
774 | | | 774 | |
775 | /* | | 775 | /* |
776 | * Arenas that are used to service external requests. Not all elements of the | | 776 | * Arenas that are used to service external requests. Not all elements of the |
777 | * arenas array are necessarily used; arenas are created lazily as needed. | | 777 | * arenas array are necessarily used; arenas are created lazily as needed. |
778 | */ | | 778 | */ |
779 | static arena_t **arenas; | | 779 | static arena_t **arenas; |
780 | static unsigned narenas; | | 780 | static unsigned narenas; |
781 | static unsigned next_arena; | | 781 | static unsigned next_arena; |
782 | #ifdef _REENTRANT | | 782 | #ifdef _REENTRANT |
783 | static malloc_mutex_t arenas_mtx; /* Protects arenas initialization. */ | | 783 | static malloc_mutex_t arenas_mtx; /* Protects arenas initialization. */ |
784 | #endif | | 784 | #endif |
785 | | | 785 | |
786 | #ifndef NO_TLS | | | |
787 | /* | | 786 | /* |
788 | * Map of pthread_self() --> arenas[???], used for selecting an arena to use | | 787 | * Map of pthread_self() --> arenas[???], used for selecting an arena to use |
789 | * for allocations. | | 788 | * for allocations. |
790 | */ | | 789 | */ |
791 | static __thread arena_t *arenas_map; | | 790 | #ifndef NO_TLS |
792 | #define get_arenas_map() (arenas_map) | | 791 | static __thread arena_t **arenas_map; |
793 | #define set_arenas_map(x) (arenas_map = x) | | | |
794 | #else | | 792 | #else |
795 | #ifdef _REENTRANT | | 793 | static arena_t **arenas_map; |
796 | static thread_key_t arenas_map_key; | | | |
797 | #endif | | 794 | #endif |
798 | #define get_arenas_map() thr_getspecific(arenas_map_key) | | 795 | |
799 | #define set_arenas_map(x) thr_setspecific(arenas_map_key, x) | | 796 | #if !defined(NO_TLS) || !defined(_REENTRANT) |
| | | 797 | # define get_arenas_map() (arenas_map) |
| | | 798 | # define set_arenas_map(x) (arenas_map = x) |
| | | 799 | #else |
| | | 800 | |
| | | 801 | static thread_key_t arenas_map_key = -1; |
| | | 802 | |
| | | 803 | static inline arena_t ** |
| | | 804 | get_arenas_map(void) |
| | | 805 | { |
| | | 806 | if (!__isthreaded) |
| | | 807 | return arenas_map; |
| | | 808 | |
| | | 809 | if (arenas_map_key == -1) { |
| | | 810 | (void)thr_keycreate(&arenas_map_key, NULL); |
| | | 811 | if (arenas_map != NULL) { |
| | | 812 | thr_setspecific(arenas_map_key, arenas_map); |
| | | 813 | arenas_map = NULL; |
| | | 814 | } |
| | | 815 | } |
| | | 816 | |
| | | 817 | return thr_getspecific(arenas_map_key); |
| | | 818 | } |
| | | 819 | |
| | | 820 | static __inline void |
| | | 821 | set_arenas_map(arena_t **a) |
| | | 822 | { |
| | | 823 | if (!__isthreaded) { |
| | | 824 | arenas_map = a; |
| | | 825 | return; |
| | | 826 | } |
| | | 827 | |
| | | 828 | if (arenas_map_key == -1) { |
| | | 829 | (void)thr_keycreate(&arenas_map_key, NULL); |
| | | 830 | if (arenas_map != NULL) { |
| | | 831 | _DIAGASSERT(arenas_map == a); |
| | | 832 | arenas_map = NULL; |
| | | 833 | } |
| | | 834 | } |
| | | 835 | |
| | | 836 | thr_setspecific(arenas_map_key, a); |
| | | 837 | } |
800 | #endif | | 838 | #endif |
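
The block above is the substantive change in revision 1.38: arenas_map becomes an arena_t ** in every configuration, and on NO_TLS builds the thread-specific key is now created lazily on first use rather than during early initialization. While the process is still single-threaded (!__isthreaded), a plain static carries the value; the first threaded access creates the key and migrates any pre-threading value into it. Callers go through the accessors either way; a minimal caller sketch (hypothetical seeding; the real consumer is choose_arena() later in the file):

    arena_t **map = get_arenas_map();

    if (map == NULL) {
        set_arenas_map(arenas);     /* Hypothetical: adopt the global array. */
        map = get_arenas_map();
    }
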
801 | | | 839 | |
802 | #ifdef MALLOC_STATS | | 840 | #ifdef MALLOC_STATS |
803 | /* Chunk statistics. */ | | 841 | /* Chunk statistics. */ |
804 | static chunk_stats_t stats_chunks; | | 842 | static chunk_stats_t stats_chunks; |
805 | #endif | | 843 | #endif |
806 | | | 844 | |
807 | /*******************************/ | | 845 | /*******************************/ |
808 | /* | | 846 | /* |
809 | * Runtime configuration options. | | 847 | * Runtime configuration options. |
810 | */ | | 848 | */ |
811 | const char *_malloc_options; | | 849 | const char *_malloc_options; |
812 | | | 850 | |
813 | #ifndef MALLOC_PRODUCTION | | 851 | #ifndef MALLOC_PRODUCTION |
814 | static bool opt_abort = true; | | 852 | static bool opt_abort = true; |
815 | static bool opt_junk = true; | | 853 | static bool opt_junk = true; |
816 | #else | | 854 | #else |
817 | static bool opt_abort = false; | | 855 | static bool opt_abort = false; |
818 | static bool opt_junk = false; | | 856 | static bool opt_junk = false; |
819 | #endif | | 857 | #endif |
820 | static bool opt_hint = false; | | 858 | static bool opt_hint = false; |
821 | static bool opt_print_stats = false; | | 859 | static bool opt_print_stats = false; |
822 | static int opt_quantum_2pow = QUANTUM_2POW_MIN; | | 860 | static int opt_quantum_2pow = QUANTUM_2POW_MIN; |
823 | static int opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT; | | 861 | static int opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT; |
824 | static int opt_chunk_2pow = CHUNK_2POW_DEFAULT; | | 862 | static int opt_chunk_2pow = CHUNK_2POW_DEFAULT; |
825 | static bool opt_utrace = false; | | 863 | static bool opt_utrace = false; |
826 | static bool opt_sysv = false; | | 864 | static bool opt_sysv = false; |
827 | static bool opt_xmalloc = false; | | 865 | static bool opt_xmalloc = false; |
828 | static bool opt_zero = false; | | 866 | static bool opt_zero = false; |
829 | static int32_t opt_narenas_lshift = 0; | | 867 | static int32_t opt_narenas_lshift = 0; |
830 | | | 868 | |
831 | typedef struct { | | 869 | typedef struct { |
832 | void *p; | | 870 | void *p; |
833 | size_t s; | | 871 | size_t s; |
834 | void *r; | | 872 | void *r; |
835 | } malloc_utrace_t; | | 873 | } malloc_utrace_t; |
836 | | | 874 | |
837 | #define UTRACE(a, b, c) \ | | 875 | #define UTRACE(a, b, c) \ |
838 | if (opt_utrace) { \ | | 876 | if (opt_utrace) { \ |
839 | malloc_utrace_t ut; \ | | 877 | malloc_utrace_t ut; \ |
840 | ut.p = a; \ | | 878 | ut.p = a; \ |
841 | ut.s = b; \ | | 879 | ut.s = b; \ |
842 | ut.r = c; \ | | 880 | ut.r = c; \ |
843 | xutrace(&ut, sizeof(ut)); \ | | 881 | xutrace(&ut, sizeof(ut)); \ |
844 | } | | 882 | } |
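UTRACE packs a (pointer, size, result) triple into a malloc_utrace_t and hands it to xutrace() whenever opt_utrace is set, so allocator activity can be logged through the kernel's utrace(2) facility. Assuming the conventional jemalloc encoding (the call sites are outside this hunk), the three event shapes would be:

	UTRACE(0, size, ret);	/* malloc(size) returned ret */
	UTRACE(ptr, size, ret);	/* realloc(ptr, size) returned ret */
	UTRACE(ptr, 0, 0);	/* free(ptr) */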
845 | | | 883 | |
846 | /******************************************************************************/ | | 884 | /******************************************************************************/ |
847 | /* | | 885 | /* |
848 | * Begin function prototypes for non-inline static functions. | | 886 | * Begin function prototypes for non-inline static functions. |
849 | */ | | 887 | */ |
850 | | | 888 | |
851 | static void wrtmessage(const char *p1, const char *p2, const char *p3, | | 889 | static void wrtmessage(const char *p1, const char *p2, const char *p3, |
852 | const char *p4); | | 890 | const char *p4); |
853 | #ifdef MALLOC_STATS | | 891 | #ifdef MALLOC_STATS |
854 | static void malloc_printf(const char *format, ...); | | 892 | static void malloc_printf(const char *format, ...); |
855 | #endif | | 893 | #endif |
856 | static char *size_t2s(size_t x, char *s); | | 894 | static char *size_t2s(size_t x, char *s); |
857 | static bool base_pages_alloc(size_t minsize); | | 895 | static bool base_pages_alloc(size_t minsize); |
858 | static void *base_alloc(size_t size); | | 896 | static void *base_alloc(size_t size); |
859 | static chunk_node_t *base_chunk_node_alloc(void); | | 897 | static chunk_node_t *base_chunk_node_alloc(void); |
860 | static void base_chunk_node_dealloc(chunk_node_t *node); | | 898 | static void base_chunk_node_dealloc(chunk_node_t *node); |
861 | #ifdef MALLOC_STATS | | 899 | #ifdef MALLOC_STATS |
862 | static void stats_print(arena_t *arena); | | 900 | static void stats_print(arena_t *arena); |
863 | #endif | | 901 | #endif |
864 | static void *pages_map(void *addr, size_t size); | | 902 | static void *pages_map(void *addr, size_t size); |
865 | static void *pages_map_align(void *addr, size_t size, int align); | | 903 | static void *pages_map_align(void *addr, size_t size, int align); |
866 | static void pages_unmap(void *addr, size_t size); | | 904 | static void pages_unmap(void *addr, size_t size); |
867 | static void *chunk_alloc(size_t size); | | 905 | static void *chunk_alloc(size_t size); |
868 | static void chunk_dealloc(void *chunk, size_t size); | | 906 | static void chunk_dealloc(void *chunk, size_t size); |
869 | static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size); | | 907 | static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size); |
870 | static arena_chunk_t *arena_chunk_alloc(arena_t *arena); | | 908 | static arena_chunk_t *arena_chunk_alloc(arena_t *arena); |
871 | static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); | | 909 | static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); |
872 | static arena_run_t *arena_run_alloc(arena_t *arena, size_t size); | | 910 | static arena_run_t *arena_run_alloc(arena_t *arena, size_t size); |
873 | static void arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size); | | 911 | static void arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size); |
874 | static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); | | 912 | static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); |
875 | static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); | | 913 | static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); |
876 | static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size); | | 914 | static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size); |
877 | static void *arena_malloc(arena_t *arena, size_t size); | | 915 | static void *arena_malloc(arena_t *arena, size_t size); |
878 | static void *arena_palloc(arena_t *arena, size_t alignment, size_t size, | | 916 | static void *arena_palloc(arena_t *arena, size_t alignment, size_t size, |
879 | size_t alloc_size); | | 917 | size_t alloc_size); |
880 | static size_t arena_salloc(const void *ptr); | | 918 | static size_t arena_salloc(const void *ptr); |
881 | static void *arena_ralloc(void *ptr, size_t size, size_t oldsize); | | 919 | static void *arena_ralloc(void *ptr, size_t size, size_t oldsize); |
882 | static void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr); | | 920 | static void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr); |
883 | static bool arena_new(arena_t *arena); | | 921 | static bool arena_new(arena_t *arena); |
884 | static arena_t *arenas_extend(unsigned ind); | | 922 | static arena_t *arenas_extend(unsigned ind); |
885 | static void *huge_malloc(size_t size); | | 923 | static void *huge_malloc(size_t size); |
886 | static void *huge_palloc(size_t alignment, size_t size); | | 924 | static void *huge_palloc(size_t alignment, size_t size); |
887 | static void *huge_ralloc(void *ptr, size_t size, size_t oldsize); | | 925 | static void *huge_ralloc(void *ptr, size_t size, size_t oldsize); |
888 | static void huge_dalloc(void *ptr); | | 926 | static void huge_dalloc(void *ptr); |
889 | static void *imalloc(size_t size); | | 927 | static void *imalloc(size_t size); |
890 | static void *ipalloc(size_t alignment, size_t size); | | 928 | static void *ipalloc(size_t alignment, size_t size); |
891 | static void *icalloc(size_t size); | | 929 | static void *icalloc(size_t size); |
892 | static size_t isalloc(const void *ptr); | | 930 | static size_t isalloc(const void *ptr); |
893 | static void *iralloc(void *ptr, size_t size); | | 931 | static void *iralloc(void *ptr, size_t size); |
894 | static void idalloc(void *ptr); | | 932 | static void idalloc(void *ptr); |
895 | static void malloc_print_stats(void); | | 933 | static void malloc_print_stats(void); |
896 | static bool malloc_init_hard(void); | | 934 | static bool malloc_init_hard(void); |
897 | | | 935 | |
898 | /* | | 936 | /* |
899 | * End function prototypes. | | 937 | * End function prototypes. |
900 | */ | | 938 | */ |
901 | /******************************************************************************/ | | 939 | /******************************************************************************/ |
902 | /* | | 940 | /* |
903 | * Begin mutex. | | 941 | * Begin mutex. |
904 | */ | | 942 | */ |
905 | | | 943 | |
906 | #ifdef __NetBSD__ | | 944 | #ifdef __NetBSD__ |
907 | #define malloc_mutex_init(m) mutex_init(m, NULL) | | 945 | #define malloc_mutex_init(m) mutex_init(m, NULL) |
908 | #define malloc_mutex_lock(m) mutex_lock(m) | | 946 | #define malloc_mutex_lock(m) mutex_lock(m) |
909 | #define malloc_mutex_unlock(m) mutex_unlock(m) | | 947 | #define malloc_mutex_unlock(m) mutex_unlock(m) |
910 | #else /* __NetBSD__ */ | | 948 | #else /* __NetBSD__ */ |
911 | static inline void | | 949 | static inline void |
912 | malloc_mutex_init(malloc_mutex_t *a_mutex) | | 950 | malloc_mutex_init(malloc_mutex_t *a_mutex) |
913 | { | | 951 | { |
914 | static const spinlock_t lock = _SPINLOCK_INITIALIZER; | | 952 | static const spinlock_t lock = _SPINLOCK_INITIALIZER; |
915 | | | 953 | |
916 | a_mutex->lock = lock; | | 954 | a_mutex->lock = lock; |
917 | } | | 955 | } |
918 | | | 956 | |
919 | static inline void | | 957 | static inline void |
920 | malloc_mutex_lock(malloc_mutex_t *a_mutex) | | 958 | malloc_mutex_lock(malloc_mutex_t *a_mutex) |
921 | { | | 959 | { |
922 | | | 960 | |
923 | if (__isthreaded) | | 961 | if (__isthreaded) |
924 | _SPINLOCK(&a_mutex->lock); | | 962 | _SPINLOCK(&a_mutex->lock); |
925 | } | | 963 | } |
926 | | | 964 | |
927 | static inline void | | 965 | static inline void |
928 | malloc_mutex_unlock(malloc_mutex_t *a_mutex) | | 966 | malloc_mutex_unlock(malloc_mutex_t *a_mutex) |
929 | { | | 967 | { |
930 | | | 968 | |
931 | if (__isthreaded) | | 969 | if (__isthreaded) |
932 | _SPINUNLOCK(&a_mutex->lock); | | 970 | _SPINUNLOCK(&a_mutex->lock); |
933 | } | | 971 | } |
934 | #endif /* __NetBSD__ */ | | 972 | #endif /* __NetBSD__ */ |
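Note the asymmetry above: the generic spinlock wrappers test __isthreaded, so single-threaded programs skip the atomic operations entirely, while the NetBSD build maps straight onto libc's mutex_* interface and presumably relies on its unthreaded stubs for the same effect.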
935 | | | 973 | |
936 | /* | | 974 | /* |
937 | * End mutex. | | 975 | * End mutex. |
938 | */ | | 976 | */ |
939 | /******************************************************************************/ | | 977 | /******************************************************************************/ |
940 | /* | | 978 | /* |
941 | * Begin Utility functions/macros. | | 979 | * Begin Utility functions/macros. |
942 | */ | | 980 | */ |
943 | | | 981 | |
944 | /* Return the chunk address for allocation address a. */ | | 982 | /* Return the chunk address for allocation address a. */ |
945 | #define CHUNK_ADDR2BASE(a) \ | | 983 | #define CHUNK_ADDR2BASE(a) \ |
946 | ((void *)((uintptr_t)(a) & ~chunksize_mask)) | | 984 | ((void *)((uintptr_t)(a) & ~chunksize_mask)) |
947 | | | 985 | |
948 | /* Return the chunk offset of address a. */ | | 986 | /* Return the chunk offset of address a. */ |
949 | #define CHUNK_ADDR2OFFSET(a) \ | | 987 | #define CHUNK_ADDR2OFFSET(a) \ |
950 | ((size_t)((uintptr_t)(a) & chunksize_mask)) | | 988 | ((size_t)((uintptr_t)(a) & chunksize_mask)) |
951 | | | 989 | |
952 | /* Return the smallest chunk multiple that is >= s. */ | | 990 | /* Return the smallest chunk multiple that is >= s. */ |
953 | #define CHUNK_CEILING(s) \ | | 991 | #define CHUNK_CEILING(s) \ |
954 | (((s) + chunksize_mask) & ~chunksize_mask) | | 992 | (((s) + chunksize_mask) & ~chunksize_mask) |
955 | | | 993 | |
956 | /* Return the smallest cacheline multiple that is >= s. */ | | 994 | /* Return the smallest cacheline multiple that is >= s. */ |
957 | #define CACHELINE_CEILING(s) \ | | 995 | #define CACHELINE_CEILING(s) \ |
958 | (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1)) | | 996 | (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1)) |
959 | | | 997 | |
960 | /* Return the smallest quantum multiple that is >= a. */ | | 998 | /* Return the smallest quantum multiple that is >= a. */ |
961 | #define QUANTUM_CEILING(a) \ | | 999 | #define QUANTUM_CEILING(a) \ |
962 | (((a) + quantum_mask) & ~quantum_mask) | | 1000 | (((a) + quantum_mask) & ~quantum_mask) |
963 | | | 1001 | |
964 | /* Return the smallest pagesize multiple that is >= s. */ | | 1002 | /* Return the smallest pagesize multiple that is >= s. */ |
965 | #define PAGE_CEILING(s) \ | | 1003 | #define PAGE_CEILING(s) \ |
966 | (((s) + pagesize_mask) & ~pagesize_mask) | | 1004 | (((s) + pagesize_mask) & ~pagesize_mask) |
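Each *_CEILING macro relies on its size being a power of two, so the mask is size - 1 and rounding up is a single add-and-mask. For instance, with 4096-byte pages (pagesize_mask == 0xfff):

	PAGE_CEILING(1)    == 4096
	PAGE_CEILING(4096) == 4096	/* already aligned: unchanged */
	PAGE_CEILING(4097) == 8192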
967 | | | 1005 | |
968 | /* Compute the smallest power of 2 that is >= x. */ | | 1006 | /* Compute the smallest power of 2 that is >= x. */ |
969 | static inline size_t | | 1007 | static inline size_t |
970 | pow2_ceil(size_t x) | | 1008 | pow2_ceil(size_t x) |
971 | { | | 1009 | { |
972 | | | 1010 | |
973 | x--; | | 1011 | x--; |
974 | x |= x >> 1; | | 1012 | x |= x >> 1; |
975 | x |= x >> 2; | | 1013 | x |= x >> 2; |
976 | x |= x >> 4; | | 1014 | x |= x >> 4; |
977 | x |= x >> 8; | | 1015 | x |= x >> 8; |
978 | x |= x >> 16; | | 1016 | x |= x >> 16; |
979 | #if (SIZEOF_PTR == 8) | | 1017 | #if (SIZEOF_PTR == 8) |
980 | x |= x >> 32; | | 1018 | x |= x >> 32; |
981 | #endif | | 1019 | #endif |
982 | x++; | | 1020 | x++; |
983 | return (x); | | 1021 | return (x); |
984 | } | | 1022 | } |
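The shift cascade smears the highest set bit of x - 1 into every lower position, so the final increment lands exactly on the next power of two. For example, pow2_ceil(33): the decrement gives 32 (0b100000), the OR-folding turns it into 63 (0b111111), and the increment yields 64. Decrementing first makes exact powers of two map to themselves; pow2_ceil(0) would wrap around to 0, a case callers presumably never hit.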
985 | | | 1023 | |
986 | static void | | 1024 | static void |
987 | wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4) | | 1025 | wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4) |
988 | { | | 1026 | { |
989 | | | 1027 | |
990 | write(STDERR_FILENO, p1, strlen(p1)); | | 1028 | write(STDERR_FILENO, p1, strlen(p1)); |
991 | write(STDERR_FILENO, p2, strlen(p2)); | | 1029 | write(STDERR_FILENO, p2, strlen(p2)); |
992 | write(STDERR_FILENO, p3, strlen(p3)); | | 1030 | write(STDERR_FILENO, p3, strlen(p3)); |
993 | write(STDERR_FILENO, p4, strlen(p4)); | | 1031 | write(STDERR_FILENO, p4, strlen(p4)); |
994 | } | | 1032 | } |
995 | | | 1033 | |
996 | void (*_malloc_message)(const char *p1, const char *p2, const char *p3, | | 1034 | void (*_malloc_message)(const char *p1, const char *p2, const char *p3, |
997 | const char *p4) = wrtmessage; | | 1035 | const char *p4) = wrtmessage; |
998 | | | 1036 | |
999 | #ifdef MALLOC_STATS | | 1037 | #ifdef MALLOC_STATS |
1000 | /* | | 1038 | /* |
1001 | * Print to stderr in such a way as to (hopefully) avoid memory allocation. | | 1039 | * Print to stderr in such a way as to (hopefully) avoid memory allocation. |
1002 | */ | | 1040 | */ |
1003 | static void | | 1041 | static void |
1004 | malloc_printf(const char *format, ...) | | 1042 | malloc_printf(const char *format, ...) |
1005 | { | | 1043 | { |
1006 | char buf[4096]; | | 1044 | char buf[4096]; |
1007 | va_list ap; | | 1045 | va_list ap; |
1008 | | | 1046 | |
1009 | va_start(ap, format); | | 1047 | va_start(ap, format); |
1010 | vsnprintf(buf, sizeof(buf), format, ap); | | 1048 | vsnprintf(buf, sizeof(buf), format, ap); |
1011 | va_end(ap); | | 1049 | va_end(ap); |
1012 | _malloc_message(buf, "", "", ""); | | 1050 | _malloc_message(buf, "", "", ""); |
1013 | } | | 1051 | } |
1014 | #endif | | 1052 | #endif |
1015 | | | 1053 | |
1016 | /* | | 1054 | /* |
1017 | * We don't want to depend on vsnprintf() for production builds, since that can | | 1055 | * We don't want to depend on vsnprintf() for production builds, since that can |
1018 | * cause unnecessary bloat for static binaries. size_t2s() provides minimal | | 1056 | * cause unnecessary bloat for static binaries. size_t2s() provides minimal |
1019 | * integer printing functionality, so that malloc_printf() use can be limited to | | 1057 | * integer printing functionality, so that malloc_printf() use can be limited to |
1020 | * MALLOC_STATS code. | | 1058 | * MALLOC_STATS code. |
1021 | */ | | 1059 | */ |
1022 | #define UMAX2S_BUFSIZE 21 | | 1060 | #define UMAX2S_BUFSIZE 21 |
1023 | static char * | | 1061 | static char * |
1024 | size_t2s(size_t x, char *s) | | 1062 | size_t2s(size_t x, char *s) |
1025 | { | | 1063 | { |
1026 | unsigned i; | | 1064 | unsigned i; |
1027 | | | 1065 | |
1028 | /* Make sure UMAX2S_BUFSIZE is large enough. */ | | 1066 | /* Make sure UMAX2S_BUFSIZE is large enough. */ |
1029 | /* LINTED */ | | 1067 | /* LINTED */ |
1030 | assert(sizeof(size_t) <= 8); | | 1068 | assert(sizeof(size_t) <= 8); |
1031 | | | 1069 | |
1032 | i = UMAX2S_BUFSIZE - 1; | | 1070 | i = UMAX2S_BUFSIZE - 1; |
1033 | s[i] = '\0'; | | 1071 | s[i] = '\0'; |
1034 | do { | | 1072 | do { |
1035 | i--; | | 1073 | i--; |
1036 | s[i] = "0123456789"[(int)x % 10]; | | 1074 | s[i] = "0123456789"[(int)x % 10]; |
1037 | x /= (uintmax_t)10LL; | | 1075 | x /= (uintmax_t)10LL; |
1038 | } while (x > 0); | | 1076 | } while (x > 0); |
1039 | | | 1077 | |
1040 | return (&s[i]); | | 1078 | return (&s[i]); |
1041 | } | | 1079 | } |
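Digits are written backward from the end of the caller's buffer, and the returned pointer aims at the first digit, so the string aliases the tail of that buffer and lives only as long as it does. A typical use:

	char buf[UMAX2S_BUFSIZE];
	const char *s = size_t2s(1234, buf);	/* s == &buf[16], i.e. "1234" */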
1042 | | | 1080 | |
1043 | /******************************************************************************/ | | 1081 | /******************************************************************************/ |
1044 | | | 1082 | |
1045 | static bool | | 1083 | static bool |
1046 | base_pages_alloc(size_t minsize) | | 1084 | base_pages_alloc(size_t minsize) |
1047 | { | | 1085 | { |
1048 | size_t csize = 0; | | 1086 | size_t csize = 0; |
1049 | | | 1087 | |
1050 | #ifdef USE_BRK | | 1088 | #ifdef USE_BRK |
1051 | /* | | 1089 | /* |
1052 | * Do special brk allocation here, since base allocations don't need to | | 1090 | * Do special brk allocation here, since base allocations don't need to |
1053 | * be chunk-aligned. | | 1091 | * be chunk-aligned. |
1054 | */ | | 1092 | */ |
1055 | if (brk_prev != (void *)-1) { | | 1093 | if (brk_prev != (void *)-1) { |
1056 | void *brk_cur; | | 1094 | void *brk_cur; |
1057 | intptr_t incr; | | 1095 | intptr_t incr; |
1058 | | | 1096 | |
1059 | if (minsize != 0) | | 1097 | if (minsize != 0) |
1060 | csize = CHUNK_CEILING(minsize); | | 1098 | csize = CHUNK_CEILING(minsize); |
1061 | | | 1099 | |
1062 | malloc_mutex_lock(&brk_mtx); | | 1100 | malloc_mutex_lock(&brk_mtx); |
1063 | do { | | 1101 | do { |
1064 | /* Get the current end of brk. */ | | 1102 | /* Get the current end of brk. */ |
1065 | brk_cur = sbrk(0); | | 1103 | brk_cur = sbrk(0); |
1066 | | | 1104 | |
1067 | /* | | 1105 | /* |
1068 | * Calculate how much padding is necessary to | | 1106 | * Calculate how much padding is necessary to |
1069 | * chunk-align the end of brk. Don't worry about | | 1107 | * chunk-align the end of brk. Don't worry about |
1070 | * brk_cur not being chunk-aligned though. | | 1108 | * brk_cur not being chunk-aligned though. |
1071 | */ | | 1109 | */ |
1072 | incr = (intptr_t)chunksize | | 1110 | incr = (intptr_t)chunksize |
1073 | - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur); | | 1111 | - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur); |
1074 | assert(incr >= 0); | | 1112 | assert(incr >= 0); |
1075 | if ((size_t)incr < minsize) | | 1113 | if ((size_t)incr < minsize) |
1076 | incr += csize; | | 1114 | incr += csize; |
1077 | | | 1115 | |
1078 | brk_prev = sbrk(incr); | | 1116 | brk_prev = sbrk(incr); |
1079 | if (brk_prev == brk_cur) { | | 1117 | if (brk_prev == brk_cur) { |
1080 | /* Success. */ | | 1118 | /* Success. */ |
1081 | malloc_mutex_unlock(&brk_mtx); | | 1119 | malloc_mutex_unlock(&brk_mtx); |
1082 | base_pages = brk_cur; | | 1120 | base_pages = brk_cur; |
1083 | base_next_addr = base_pages; | | 1121 | base_next_addr = base_pages; |
1084 | base_past_addr = (void *)((uintptr_t)base_pages | | 1122 | base_past_addr = (void *)((uintptr_t)base_pages |
1085 | + incr); | | 1123 | + incr); |
1086 | #ifdef MALLOC_STATS | | 1124 | #ifdef MALLOC_STATS |
1087 | base_mapped += incr; | | 1125 | base_mapped += incr; |
1088 | #endif | | 1126 | #endif |
1089 | return (false); | | 1127 | return (false); |
1090 | } | | 1128 | } |
1091 | } while (brk_prev != (void *)-1); | | 1129 | } while (brk_prev != (void *)-1); |
1092 | malloc_mutex_unlock(&brk_mtx); | | 1130 | malloc_mutex_unlock(&brk_mtx); |
1093 | } | | 1131 | } |
1094 | if (minsize == 0) { | | 1132 | if (minsize == 0) { |
1095 | /* | | 1133 | /* |
1096 | * Failure during initialization doesn't matter, so avoid | | 1134 | * Failure during initialization doesn't matter, so avoid |
1097 | * falling through to the mmap-based page mapping code. | | 1135 | * falling through to the mmap-based page mapping code. |
1098 | */ | | 1136 | */ |
1099 | return (true); | | 1137 | return (true); |
1100 | } | | 1138 | } |
1101 | #endif | | 1139 | #endif |
1102 | assert(minsize != 0); | | 1140 | assert(minsize != 0); |
1103 | csize = PAGE_CEILING(minsize); | | 1141 | csize = PAGE_CEILING(minsize); |
1104 | base_pages = pages_map(NULL, csize); | | 1142 | base_pages = pages_map(NULL, csize); |
1105 | if (base_pages == NULL) | | 1143 | if (base_pages == NULL) |
1106 | return (true); | | 1144 | return (true); |
1107 | base_next_addr = base_pages; | | 1145 | base_next_addr = base_pages; |
1108 | base_past_addr = (void *)((uintptr_t)base_pages + csize); | | 1146 | base_past_addr = (void *)((uintptr_t)base_pages + csize); |
1109 | #ifdef MALLOC_STATS | | 1147 | #ifdef MALLOC_STATS |
1110 | base_mapped += csize; | | 1148 | base_mapped += csize; |
1111 | #endif | | 1149 | #endif |
1112 | return (false); | | 1150 | return (false); |
1113 | } | | 1151 | } |
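The brk arithmetic above first pads to the next chunk boundary (incr = chunksize - CHUNK_ADDR2OFFSET(brk_cur)) and only adds the chunk-rounded csize when that padding alone cannot cover minsize. A worked example, assuming 1 MB chunks (chunksize == 0x100000):

	brk_cur = 0x10c000, minsize = 0x2000
	incr    = 0x100000 - 0xc000 = 0xf4000	/* >= minsize: no csize added */
	sbrk(incr) then ends the segment exactly on the 0x200000 boundary.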
1114 | | | 1152 | |
1115 | static void * | | 1153 | static void * |
1116 | base_alloc(size_t size) | | 1154 | base_alloc(size_t size) |
1117 | { | | 1155 | { |
1118 | void *ret; | | 1156 | void *ret; |
1119 | size_t csize; | | 1157 | size_t csize; |
1120 | | | 1158 | |
1121 | /* Round size up to nearest multiple of the cacheline size. */ | | 1159 | /* Round size up to nearest multiple of the cacheline size. */ |
1122 | csize = CACHELINE_CEILING(size); | | 1160 | csize = CACHELINE_CEILING(size); |
1123 | | | 1161 | |
1124 | malloc_mutex_lock(&base_mtx); | | 1162 | malloc_mutex_lock(&base_mtx); |
1125 | | | 1163 | |
1126 | /* Make sure there's enough space for the allocation. */ | | 1164 | /* Make sure there's enough space for the allocation. */ |
1127 | if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { | | 1165 | if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { |
1128 | if (base_pages_alloc(csize)) { | | 1166 | if (base_pages_alloc(csize)) { |
1129 | ret = NULL; | | 1167 | ret = NULL; |
1130 | goto RETURN; | | 1168 | goto RETURN; |
1131 | } | | 1169 | } |
1132 | } | | 1170 | } |
1133 | | | 1171 | |
1134 | /* Allocate. */ | | 1172 | /* Allocate. */ |
1135 | ret = base_next_addr; | | 1173 | ret = base_next_addr; |
1136 | base_next_addr = (void *)((uintptr_t)base_next_addr + csize); | | 1174 | base_next_addr = (void *)((uintptr_t)base_next_addr + csize); |
1137 | | | 1175 | |
1138 | RETURN: | | 1176 | RETURN: |
1139 | malloc_mutex_unlock(&base_mtx); | | 1177 | malloc_mutex_unlock(&base_mtx); |
1140 | return (ret); | | 1178 | return (ret); |
1141 | } | | 1179 | } |
1142 | | | 1180 | |
1143 | static chunk_node_t * | | 1181 | static chunk_node_t * |
1144 | base_chunk_node_alloc(void) | | 1182 | base_chunk_node_alloc(void) |
1145 | { | | 1183 | { |
1146 | chunk_node_t *ret; | | 1184 | chunk_node_t *ret; |
1147 | | | 1185 | |
1148 | malloc_mutex_lock(&base_mtx); | | 1186 | malloc_mutex_lock(&base_mtx); |
1149 | if (base_chunk_nodes != NULL) { | | 1187 | if (base_chunk_nodes != NULL) { |
1150 | ret = base_chunk_nodes; | | 1188 | ret = base_chunk_nodes; |
1151 | /* LINTED */ | | 1189 | /* LINTED */ |
1152 | base_chunk_nodes = *(chunk_node_t **)ret; | | 1190 | base_chunk_nodes = *(chunk_node_t **)ret; |
1153 | malloc_mutex_unlock(&base_mtx); | | 1191 | malloc_mutex_unlock(&base_mtx); |
1154 | } else { | | 1192 | } else { |
1155 | malloc_mutex_unlock(&base_mtx); | | 1193 | malloc_mutex_unlock(&base_mtx); |
1156 | ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t)); | | 1194 | ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t)); |
1157 | } | | 1195 | } |
1158 | | | 1196 | |
1159 | return (ret); | | 1197 | return (ret); |
1160 | } | | 1198 | } |
1161 | | | 1199 | |
1162 | static void | | 1200 | static void |
1163 | base_chunk_node_dealloc(chunk_node_t *node) | | 1201 | base_chunk_node_dealloc(chunk_node_t *node) |
1164 | { | | 1202 | { |
1165 | | | 1203 | |
1166 | malloc_mutex_lock(&base_mtx); | | 1204 | malloc_mutex_lock(&base_mtx); |
1167 | /* LINTED */ | | 1205 | /* LINTED */ |
1168 | *(chunk_node_t **)node = base_chunk_nodes; | | 1206 | *(chunk_node_t **)node = base_chunk_nodes; |
1169 | base_chunk_nodes = node; | | 1207 | base_chunk_nodes = node; |
1170 | malloc_mutex_unlock(&base_mtx); | | 1208 | malloc_mutex_unlock(&base_mtx); |
1171 | } | | 1209 | } |
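base_chunk_node_alloc() and base_chunk_node_dealloc() maintain a minimal intrusive free list: a freed node's own first word is overwritten with the old list head (*(chunk_node_t **)node = base_chunk_nodes), so no separate link storage is needed, and nodes carved out of base pages are recycled indefinitely rather than ever being returned to the system.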
1172 | | | 1210 | |
1173 | /******************************************************************************/ | | 1211 | /******************************************************************************/ |
1174 | | | 1212 | |
1175 | #ifdef MALLOC_STATS | | 1213 | #ifdef MALLOC_STATS |
1176 | static void | | 1214 | static void |
1177 | stats_print(arena_t *arena) | | 1215 | stats_print(arena_t *arena) |
1178 | { | | 1216 | { |
1179 | unsigned i; | | 1217 | unsigned i; |
1180 | int gap_start; | | 1218 | int gap_start; |
1181 | | | 1219 | |
1182 | malloc_printf( | | 1220 | malloc_printf( |
1183 | " allocated/mapped nmalloc ndalloc\n"); | | 1221 | " allocated/mapped nmalloc ndalloc\n"); |
1184 | | | 1222 | |
1185 | malloc_printf("small: %12zu %-12s %12llu %12llu\n", | | 1223 | malloc_printf("small: %12zu %-12s %12llu %12llu\n", |
1186 | arena->stats.allocated_small, "", arena->stats.nmalloc_small, | | 1224 | arena->stats.allocated_small, "", arena->stats.nmalloc_small, |
1187 | arena->stats.ndalloc_small); | | 1225 | arena->stats.ndalloc_small); |
1188 | malloc_printf("large: %12zu %-12s %12llu %12llu\n", | | 1226 | malloc_printf("large: %12zu %-12s %12llu %12llu\n", |
1189 | arena->stats.allocated_large, "", arena->stats.nmalloc_large, | | 1227 | arena->stats.allocated_large, "", arena->stats.nmalloc_large, |
1190 | arena->stats.ndalloc_large); | | 1228 | arena->stats.ndalloc_large); |
1191 | malloc_printf("total: %12zu/%-12zu %12llu %12llu\n", | | 1229 | malloc_printf("total: %12zu/%-12zu %12llu %12llu\n", |
1192 | arena->stats.allocated_small + arena->stats.allocated_large, | | 1230 | arena->stats.allocated_small + arena->stats.allocated_large, |
1193 | arena->stats.mapped, | | 1231 | arena->stats.mapped, |
1194 | arena->stats.nmalloc_small + arena->stats.nmalloc_large, | | 1232 | arena->stats.nmalloc_small + arena->stats.nmalloc_large, |
1195 | arena->stats.ndalloc_small + arena->stats.ndalloc_large); | | 1233 | arena->stats.ndalloc_small + arena->stats.ndalloc_large); |
1196 | | | 1234 | |
1197 | malloc_printf("bins: bin size regs pgs requests newruns" | | 1235 | malloc_printf("bins: bin size regs pgs requests newruns" |
1198 | " reruns maxruns curruns\n"); | | 1236 | " reruns maxruns curruns\n"); |
1199 | for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) { | | 1237 | for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) { |
1200 | if (arena->bins[i].stats.nrequests == 0) { | | 1238 | if (arena->bins[i].stats.nrequests == 0) { |
1201 | if (gap_start == -1) | | 1239 | if (gap_start == -1) |
1202 | gap_start = i; | | 1240 | gap_start = i; |
1203 | } else { | | 1241 | } else { |
1204 | if (gap_start != -1) { | | 1242 | if (gap_start != -1) { |
1205 | if (i > gap_start + 1) { | | 1243 | if (i > gap_start + 1) { |
1206 | /* Gap of more than one size class. */ | | 1244 | /* Gap of more than one size class. */ |
1207 | malloc_printf("[%u..%u]\n", | | 1245 | malloc_printf("[%u..%u]\n", |
1208 | gap_start, i - 1); | | 1246 | gap_start, i - 1); |
1209 | } else { | | 1247 | } else { |
1210 | /* Gap of one size class. */ | | 1248 | /* Gap of one size class. */ |
1211 | malloc_printf("[%u]\n", gap_start); | | 1249 | malloc_printf("[%u]\n", gap_start); |
1212 | } | | 1250 | } |
1213 | gap_start = -1; | | 1251 | gap_start = -1; |
1214 | } | | 1252 | } |
1215 | malloc_printf( | | 1253 | malloc_printf( |
1216 | "%13u %1s %4u %4u %3u %9llu %9llu" | | 1254 | "%13u %1s %4u %4u %3u %9llu %9llu" |
1217 | " %9llu %7lu %7lu\n", | | 1255 | " %9llu %7lu %7lu\n", |
1218 | i, | | 1256 | i, |
1219 | i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S", | | 1257 | i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S", |
1220 | arena->bins[i].reg_size, | | 1258 | arena->bins[i].reg_size, |
1221 | arena->bins[i].nregs, | | 1259 | arena->bins[i].nregs, |
1222 | arena->bins[i].run_size >> pagesize_2pow, | | 1260 | arena->bins[i].run_size >> pagesize_2pow, |
1223 | arena->bins[i].stats.nrequests, | | 1261 | arena->bins[i].stats.nrequests, |
1224 | arena->bins[i].stats.nruns, | | 1262 | arena->bins[i].stats.nruns, |
1225 | arena->bins[i].stats.reruns, | | 1263 | arena->bins[i].stats.reruns, |
1226 | arena->bins[i].stats.highruns, | | 1264 | arena->bins[i].stats.highruns, |
1227 | arena->bins[i].stats.curruns); | | 1265 | arena->bins[i].stats.curruns); |
1228 | } | | 1266 | } |
1229 | } | | 1267 | } |
1230 | if (gap_start != -1) { | | 1268 | if (gap_start != -1) { |
1231 | if (i > gap_start + 1) { | | 1269 | if (i > gap_start + 1) { |
1232 | /* Gap of more than one size class. */ | | 1270 | /* Gap of more than one size class. */ |
1233 | malloc_printf("[%u..%u]\n", gap_start, i - 1); | | 1271 | malloc_printf("[%u..%u]\n", gap_start, i - 1); |
1234 | } else { | | 1272 | } else { |
1235 | /* Gap of one size class. */ | | 1273 | /* Gap of one size class. */ |
1236 | malloc_printf("[%u]\n", gap_start); | | 1274 | malloc_printf("[%u]\n", gap_start); |
1237 | } | | 1275 | } |
1238 | } | | 1276 | } |
1239 | } | | 1277 | } |
1240 | #endif | | 1278 | #endif |
1241 | | | 1279 | |
1242 | /* | | 1280 | /* |
1243 | * End Utility functions/macros. | | 1281 | * End Utility functions/macros. |
1244 | */ | | 1282 | */ |
1245 | /******************************************************************************/ | | 1283 | /******************************************************************************/ |
1246 | /* | | 1284 | /* |
1247 | * Begin chunk management functions. | | 1285 | * Begin chunk management functions. |
1248 | */ | | 1286 | */ |
1249 | | | 1287 | |
1250 | #ifndef lint | | 1288 | #ifndef lint |
1251 | static inline int | | 1289 | static inline int |
1252 | chunk_comp(chunk_node_t *a, chunk_node_t *b) | | 1290 | chunk_comp(chunk_node_t *a, chunk_node_t *b) |
1253 | { | | 1291 | { |
1254 | | | 1292 | |
1255 | assert(a != NULL); | | 1293 | assert(a != NULL); |
1256 | assert(b != NULL); | | 1294 | assert(b != NULL); |
1257 | | | 1295 | |
1258 | if ((uintptr_t)a->chunk < (uintptr_t)b->chunk) | | 1296 | if ((uintptr_t)a->chunk < (uintptr_t)b->chunk) |
1259 | return (-1); | | 1297 | return (-1); |
1260 | else if (a->chunk == b->chunk) | | 1298 | else if (a->chunk == b->chunk) |
1261 | return (0); | | 1299 | return (0); |
1262 | else | | 1300 | else |
1263 | return (1); | | 1301 | return (1); |
1264 | } | | 1302 | } |
1265 | | | 1303 | |
1266 | /* Generate red-black tree code for chunks. */ | | 1304 | /* Generate red-black tree code for chunks. */ |
1267 | RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp); | | 1305 | RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp); |
1268 | #endif | | 1306 | #endif |
1269 | | | 1307 | |
1270 | static void * | | 1308 | static void * |
1271 | pages_map_align(void *addr, size_t size, int align) | | 1309 | pages_map_align(void *addr, size_t size, int align) |
1272 | { | | 1310 | { |
1273 | void *ret; | | 1311 | void *ret; |
1274 | | | 1312 | |
1275 | /* | | 1313 | /* |
1276 | * We don't use MAP_FIXED here, because it can cause the *replacement* | | 1314 | * We don't use MAP_FIXED here, because it can cause the *replacement* |
1277 | * of existing mappings, and we only want to create new mappings. | | 1315 | * of existing mappings, and we only want to create new mappings. |
1278 | */ | | 1316 | */ |
1279 | ret = mmap(addr, size, PROT_READ | PROT_WRITE, | | 1317 | ret = mmap(addr, size, PROT_READ | PROT_WRITE, |
1280 | MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0); | | 1318 | MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0); |
1281 | assert(ret != NULL); | | 1319 | assert(ret != NULL); |
1282 | | | 1320 | |
1283 | if (ret == MAP_FAILED) | | 1321 | if (ret == MAP_FAILED) |
1284 | ret = NULL; | | 1322 | ret = NULL; |
1285 | else if (addr != NULL && ret != addr) { | | 1323 | else if (addr != NULL && ret != addr) { |
1286 | /* | | 1324 | /* |
1287 | * We succeeded in mapping memory, but not in the right place. | | 1325 | * We succeeded in mapping memory, but not in the right place. |
1288 | */ | | 1326 | */ |
1289 | if (munmap(ret, size) == -1) { | | 1327 | if (munmap(ret, size) == -1) { |
1290 | char buf[STRERROR_BUF]; | | 1328 | char buf[STRERROR_BUF]; |
1291 | | | 1329 | |
1292 | STRERROR_R(errno, buf, sizeof(buf)); | | 1330 | STRERROR_R(errno, buf, sizeof(buf)); |
1293 | _malloc_message(getprogname(), | | 1331 | _malloc_message(getprogname(), |
1294 | ": (malloc) Error in munmap(): ", buf, "\n"); | | 1332 | ": (malloc) Error in munmap(): ", buf, "\n"); |
1295 | if (opt_abort) | | 1333 | if (opt_abort) |
1296 | abort(); | | 1334 | abort(); |
1297 | } | | 1335 | } |
1298 | ret = NULL; | | 1336 | ret = NULL; |
1299 | } | | 1337 | } |
1300 | | | 1338 | |
1301 | assert(ret == NULL || (addr == NULL && ret != addr) | | 1339 | assert(ret == NULL || (addr == NULL && ret != addr) |
1302 | || (addr != NULL && ret == addr)); | | 1340 | || (addr != NULL && ret == addr)); |
1303 | return (ret); | | 1341 | return (ret); |
1304 | } | | 1342 | } |
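MAP_ALIGNED(align) is the NetBSD mmap(2) extension asking the kernel to place the mapping on a 2^align boundary; an align of 0 imposes no constraint, which is why pages_map() can simply delegate with 0. The chunk layer below uses it to obtain naturally aligned chunks in a single call:

	ret = pages_map_align(NULL, size, chunksize_2pow);	/* chunk-aligned */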
1305 | | | 1343 | |
1306 | static void * | | 1344 | static void * |
1307 | pages_map(void *addr, size_t size) | | 1345 | pages_map(void *addr, size_t size) |
1308 | { | | 1346 | { |
1309 | | | 1347 | |
1310 | return pages_map_align(addr, size, 0); | | 1348 | return pages_map_align(addr, size, 0); |
1311 | } | | 1349 | } |
1312 | | | 1350 | |
1313 | static void | | 1351 | static void |
1314 | pages_unmap(void *addr, size_t size) | | 1352 | pages_unmap(void *addr, size_t size) |
1315 | { | | 1353 | { |
1316 | | | 1354 | |
1317 | if (munmap(addr, size) == -1) { | | 1355 | if (munmap(addr, size) == -1) { |
1318 | char buf[STRERROR_BUF]; | | 1356 | char buf[STRERROR_BUF]; |
1319 | | | 1357 | |
1320 | STRERROR_R(errno, buf, sizeof(buf)); | | 1358 | STRERROR_R(errno, buf, sizeof(buf)); |
1321 | _malloc_message(getprogname(), | | 1359 | _malloc_message(getprogname(), |
1322 | ": (malloc) Error in munmap(): ", buf, "\n"); | | 1360 | ": (malloc) Error in munmap(): ", buf, "\n"); |
1323 | if (opt_abort) | | 1361 | if (opt_abort) |
1324 | abort(); | | 1362 | abort(); |
1325 | } | | 1363 | } |
1326 | } | | 1364 | } |
1327 | | | 1365 | |
1328 | static void * | | 1366 | static void * |
1329 | chunk_alloc(size_t size) | | 1367 | chunk_alloc(size_t size) |
1330 | { | | 1368 | { |
1331 | void *ret, *chunk; | | 1369 | void *ret, *chunk; |
1332 | chunk_node_t *tchunk, *delchunk; | | 1370 | chunk_node_t *tchunk, *delchunk; |
1333 | | | 1371 | |
1334 | assert(size != 0); | | 1372 | assert(size != 0); |
1335 | assert((size & chunksize_mask) == 0); | | 1373 | assert((size & chunksize_mask) == 0); |
1336 | | | 1374 | |
1337 | malloc_mutex_lock(&chunks_mtx); | | 1375 | malloc_mutex_lock(&chunks_mtx); |
1338 | | | 1376 | |
1339 | if (size == chunksize) { | | 1377 | if (size == chunksize) { |
1340 | /* | | 1378 | /* |
1341 | * Check for address ranges that were previously chunks and try | | 1379 | * Check for address ranges that were previously chunks and try |
1342 | * to use them. | | 1380 | * to use them. |
1343 | */ | | 1381 | */ |
1344 | | | 1382 | |
1345 | /* LINTED */ | | 1383 | /* LINTED */ |
1346 | tchunk = RB_MIN(chunk_tree_s, &old_chunks); | | 1384 | tchunk = RB_MIN(chunk_tree_s, &old_chunks); |
1347 | while (tchunk != NULL) { | | 1385 | while (tchunk != NULL) { |
1348 | /* Found an address range. Try to recycle it. */ | | 1386 | /* Found an address range. Try to recycle it. */ |
1349 | | | 1387 | |
1350 | chunk = tchunk->chunk; | | 1388 | chunk = tchunk->chunk; |
1351 | delchunk = tchunk; | | 1389 | delchunk = tchunk; |
1352 | /* LINTED */ | | 1390 | /* LINTED */ |
1353 | tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk); | | 1391 | tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk); |
1354 | | | 1392 | |
1355 | /* Remove delchunk from the tree. */ | | 1393 | /* Remove delchunk from the tree. */ |
1356 | /* LINTED */ | | 1394 | /* LINTED */ |
1357 | RB_REMOVE(chunk_tree_s, &old_chunks, delchunk); | | 1395 | RB_REMOVE(chunk_tree_s, &old_chunks, delchunk); |
1358 | base_chunk_node_dealloc(delchunk); | | 1396 | base_chunk_node_dealloc(delchunk); |
1359 | | | 1397 | |
1360 | #ifdef USE_BRK | | 1398 | #ifdef USE_BRK |
1361 | if ((uintptr_t)chunk >= (uintptr_t)brk_base | | 1399 | if ((uintptr_t)chunk >= (uintptr_t)brk_base |
1362 | && (uintptr_t)chunk < (uintptr_t)brk_max) { | | 1400 | && (uintptr_t)chunk < (uintptr_t)brk_max) { |
1363 | /* Re-use a previously freed brk chunk. */ | | 1401 | /* Re-use a previously freed brk chunk. */ |
1364 | ret = chunk; | | 1402 | ret = chunk; |
1365 | goto RETURN; | | 1403 | goto RETURN; |
1366 | } | | 1404 | } |
1367 | #endif | | 1405 | #endif |
1368 | if ((ret = pages_map(chunk, size)) != NULL) { | | 1406 | if ((ret = pages_map(chunk, size)) != NULL) { |
1369 | /* Success. */ | | 1407 | /* Success. */ |
1370 | goto RETURN; | | 1408 | goto RETURN; |
1371 | } | | 1409 | } |
1372 | } | | 1410 | } |
1373 | } | | 1411 | } |
1374 | | | 1412 | |
1375 | /* | | 1413 | /* |
1376 | * Try to over-allocate, but allow the OS to place the allocation | | 1414 | * Try to over-allocate, but allow the OS to place the allocation |
1377 | * anywhere. Beware of size_t wrap-around. | | 1415 | * anywhere. Beware of size_t wrap-around. |
1378 | */ | | 1416 | */ |
1379 | if (size + chunksize > size) { | | 1417 | if (size + chunksize > size) { |
1380 | if ((ret = pages_map_align(NULL, size, chunksize_2pow)) | | 1418 | if ((ret = pages_map_align(NULL, size, chunksize_2pow)) |
1381 | != NULL) { | | 1419 | != NULL) { |
1382 | goto RETURN; | | 1420 | goto RETURN; |
1383 | } | | 1421 | } |
1384 | } | | 1422 | } |
1385 | | | 1423 | |
1386 | #ifdef USE_BRK | | 1424 | #ifdef USE_BRK |
1387 | /* | | 1425 | /* |
1388 | * Try to create allocations in brk, in order to make full use of | | 1426 | * Try to create allocations in brk, in order to make full use of |
1389 | * limited address space. | | 1427 | * limited address space. |
1390 | */ | | 1428 | */ |
1391 | if (brk_prev != (void *)-1) { | | 1429 | if (brk_prev != (void *)-1) { |
1392 | void *brk_cur; | | 1430 | void *brk_cur; |
1393 | intptr_t incr; | | 1431 | intptr_t incr; |
1394 | | | 1432 | |
1395 | /* | | 1433 | /* |
1396 | * The loop is necessary to recover from races with other | | 1434 | * The loop is necessary to recover from races with other |
1397 | * threads that are using brk for something other than malloc. | | 1435 | * threads that are using brk for something other than malloc. |
1398 | */ | | 1436 | */ |
1399 | malloc_mutex_lock(&brk_mtx); | | 1437 | malloc_mutex_lock(&brk_mtx); |
1400 | do { | | 1438 | do { |
1401 | /* Get the current end of brk. */ | | 1439 | /* Get the current end of brk. */ |
1402 | brk_cur = sbrk(0); | | 1440 | brk_cur = sbrk(0); |
1403 | | | 1441 | |
1404 | /* | | 1442 | /* |
1405 | * Calculate how much padding is necessary to | | 1443 | * Calculate how much padding is necessary to |
1406 | * chunk-align the end of brk. | | 1444 | * chunk-align the end of brk. |
1407 | */ | | 1445 | */ |
1408 | incr = (intptr_t)size | | 1446 | incr = (intptr_t)size |
1409 | - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur); | | 1447 | - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur); |
1410 | if (incr == (intptr_t)size) { | | 1448 | if (incr == (intptr_t)size) { |
1411 | ret = brk_cur; | | 1449 | ret = brk_cur; |
1412 | } else { | | 1450 | } else { |
1413 | ret = (void *)((intptr_t)brk_cur + incr); | | 1451 | ret = (void *)((intptr_t)brk_cur + incr); |
1414 | incr += size; | | 1452 | incr += size; |
1415 | } | | 1453 | } |
1416 | | | 1454 | |
1417 | brk_prev = sbrk(incr); | | 1455 | brk_prev = sbrk(incr); |
1418 | if (brk_prev == brk_cur) { | | 1456 | if (brk_prev == brk_cur) { |
1419 | /* Success. */ | | 1457 | /* Success. */ |
1420 | malloc_mutex_unlock(&brk_mtx); | | 1458 | malloc_mutex_unlock(&brk_mtx); |
1421 | brk_max = (void *)((intptr_t)ret + size); | | 1459 | brk_max = (void *)((intptr_t)ret + size); |
1422 | goto RETURN; | | 1460 | goto RETURN; |
1423 | } | | 1461 | } |
1424 | } while (brk_prev != (void *)-1); | | 1462 | } while (brk_prev != (void *)-1); |
1425 | malloc_mutex_unlock(&brk_mtx); | | 1463 | malloc_mutex_unlock(&brk_mtx); |
1426 | } | | 1464 | } |
1427 | #endif | | 1465 | #endif |
1428 | | | 1466 | |
1429 | /* All strategies for allocation failed. */ | | 1467 | /* All strategies for allocation failed. */ |
1430 | ret = NULL; | | 1468 | ret = NULL; |
1431 | RETURN: | | 1469 | RETURN: |
1432 | if (ret != NULL) { | | 1470 | if (ret != NULL) { |
1433 | chunk_node_t key; | | 1471 | chunk_node_t key; |
1434 | /* | | 1472 | /* |
1435 | * Clean out any entries in old_chunks that overlap with the | | 1473 | * Clean out any entries in old_chunks that overlap with the |
1436 | * memory we just allocated. | | 1474 | * memory we just allocated. |
1437 | */ | | 1475 | */ |
1438 | key.chunk = ret; | | 1476 | key.chunk = ret; |
1439 | /* LINTED */ | | 1477 | /* LINTED */ |
1440 | tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key); | | 1478 | tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key); |
1441 | while (tchunk != NULL | | 1479 | while (tchunk != NULL |
1442 | && (uintptr_t)tchunk->chunk >= (uintptr_t)ret | | 1480 | && (uintptr_t)tchunk->chunk >= (uintptr_t)ret |
1443 | && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) { | | 1481 | && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) { |
1444 | delchunk = tchunk; | | 1482 | delchunk = tchunk; |
1445 | /* LINTED */ | | 1483 | /* LINTED */ |
1446 | tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk); | | 1484 | tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk); |
1447 | /* LINTED */ | | 1485 | /* LINTED */ |
1448 | RB_REMOVE(chunk_tree_s, &old_chunks, delchunk); | | 1486 | RB_REMOVE(chunk_tree_s, &old_chunks, delchunk); |
1449 | base_chunk_node_dealloc(delchunk); | | 1487 | base_chunk_node_dealloc(delchunk); |
1450 | } | | 1488 | } |
1451 | | | 1489 | |
1452 | } | | 1490 | } |
1453 | #ifdef MALLOC_STATS | | 1491 | #ifdef MALLOC_STATS |
1454 | if (ret != NULL) { | | 1492 | if (ret != NULL) { |
1455 | stats_chunks.nchunks += (size / chunksize); | | 1493 | stats_chunks.nchunks += (size / chunksize); |
1456 | stats_chunks.curchunks += (size / chunksize); | | 1494 | stats_chunks.curchunks += (size / chunksize); |
1457 | } | | 1495 | } |
1458 | if (stats_chunks.curchunks > stats_chunks.highchunks) | | 1496 | if (stats_chunks.curchunks > stats_chunks.highchunks) |
1459 | stats_chunks.highchunks = stats_chunks.curchunks; | | 1497 | stats_chunks.highchunks = stats_chunks.curchunks; |
1460 | #endif | | 1498 | #endif |
1461 | malloc_mutex_unlock(&chunks_mtx); | | 1499 | malloc_mutex_unlock(&chunks_mtx); |
1462 | | | 1500 | |
1463 | assert(CHUNK_ADDR2BASE(ret) == ret); | | 1501 | assert(CHUNK_ADDR2BASE(ret) == ret); |
1464 | return (ret); | | 1502 | return (ret); |
1465 | } | | 1503 | } |
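chunk_alloc() therefore tries three strategies in order: (1) re-mapping an address range recorded in the old_chunks tree, which is cheap and preserves address-space locality, (2) a fresh anonymous mapping aligned via MAP_ALIGNED, and (3) extending brk. Whichever succeeds, any old_chunks entries overlapping the returned range are purged so the tree never describes live memory.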
1466 | | | 1504 | |
1467 | static void | | 1505 | static void |
1468 | chunk_dealloc(void *chunk, size_t size) | | 1506 | chunk_dealloc(void *chunk, size_t size) |
1469 | { | | 1507 | { |
1470 | chunk_node_t *node; | | 1508 | chunk_node_t *node; |
1471 | | | 1509 | |
1472 | assert(chunk != NULL); | | 1510 | assert(chunk != NULL); |
1473 | assert(CHUNK_ADDR2BASE(chunk) == chunk); | | 1511 | assert(CHUNK_ADDR2BASE(chunk) == chunk); |
1474 | assert(size != 0); | | 1512 | assert(size != 0); |
1475 | assert((size & chunksize_mask) == 0); | | 1513 | assert((size & chunksize_mask) == 0); |
1476 | | | 1514 | |
1477 | malloc_mutex_lock(&chunks_mtx); | | 1515 | malloc_mutex_lock(&chunks_mtx); |
1478 | | | 1516 | |
1479 | #ifdef USE_BRK | | 1517 | #ifdef USE_BRK |
1480 | if ((uintptr_t)chunk >= (uintptr_t)brk_base | | 1518 | if ((uintptr_t)chunk >= (uintptr_t)brk_base |
1481 | && (uintptr_t)chunk < (uintptr_t)brk_max) { | | 1519 | && (uintptr_t)chunk < (uintptr_t)brk_max) { |
1482 | void *brk_cur; | | 1520 | void *brk_cur; |
1483 | | | 1521 | |
1484 | malloc_mutex_lock(&brk_mtx); | | 1522 | malloc_mutex_lock(&brk_mtx); |
1485 | /* Get the current end of brk. */ | | 1523 | /* Get the current end of brk. */ |
1486 | brk_cur = sbrk(0); | | 1524 | brk_cur = sbrk(0); |
1487 | | | 1525 | |
1488 | /* | | 1526 | /* |
1489 | * Try to shrink the data segment if this chunk is at the end | | 1527 | * Try to shrink the data segment if this chunk is at the end |
1490 | * of the data segment. The sbrk() call here is subject to a | | 1528 | * of the data segment. The sbrk() call here is subject to a |
1491 | * race condition with threads that use brk(2) or sbrk(2) | | 1529 | * race condition with threads that use brk(2) or sbrk(2) |
1492 | * directly, but the alternative would be to leak memory for | | 1530 | * directly, but the alternative would be to leak memory for |
1493 | * the sake of poorly designed multi-threaded programs. | | 1531 | * the sake of poorly designed multi-threaded programs. |
1494 | */ | | 1532 | */ |
1495 | if (brk_cur == brk_max | | 1533 | if (brk_cur == brk_max |
1496 | && (void *)((uintptr_t)chunk + size) == brk_max | | 1534 | && (void *)((uintptr_t)chunk + size) == brk_max |
1497 | && sbrk(-(intptr_t)size) == brk_max) { | | 1535 | && sbrk(-(intptr_t)size) == brk_max) { |
1498 | malloc_mutex_unlock(&brk_mtx); | | 1536 | malloc_mutex_unlock(&brk_mtx); |
1499 | if (brk_prev == brk_max) { | | 1537 | if (brk_prev == brk_max) { |
1500 | /* Success. */ | | 1538 | /* Success. */ |
1501 | brk_prev = (void *)((intptr_t)brk_max | | 1539 | brk_prev = (void *)((intptr_t)brk_max |
1502 | - (intptr_t)size); | | 1540 | - (intptr_t)size); |
1503 | brk_max = brk_prev; | | 1541 | brk_max = brk_prev; |
1504 | } | | 1542 | } |
1505 | } else { | | 1543 | } else { |
1506 | size_t offset; | | 1544 | size_t offset; |
1507 | | | 1545 | |
1508 | malloc_mutex_unlock(&brk_mtx); | | 1546 | malloc_mutex_unlock(&brk_mtx); |
1509 | madvise(chunk, size, MADV_FREE); | | 1547 | madvise(chunk, size, MADV_FREE); |
1510 | | | 1548 | |
1511 | /* | | 1549 | /* |
1512 | * Iteratively create records of each chunk-sized | | 1550 | * Iteratively create records of each chunk-sized |
1513 | 	 * memory region that 'chunk' is composed of, so that | | 1551 | 	 * memory region that 'chunk' is composed of, so that |
1514 | * the address range can be recycled if memory usage | | 1552 | * the address range can be recycled if memory usage |
1515 | * increases later on. | | 1553 | * increases later on. |
1516 | */ | | 1554 | */ |
1517 | for (offset = 0; offset < size; offset += chunksize) { | | 1555 | for (offset = 0; offset < size; offset += chunksize) { |
1518 | node = base_chunk_node_alloc(); | | 1556 | node = base_chunk_node_alloc(); |
1519 | if (node == NULL) | | 1557 | if (node == NULL) |
1520 | break; | | 1558 | break; |
1521 | | | 1559 | |
1522 | node->chunk = (void *)((uintptr_t)chunk | | 1560 | node->chunk = (void *)((uintptr_t)chunk |
1523 | + (uintptr_t)offset); | | 1561 | + (uintptr_t)offset); |
1524 | node->size = chunksize; | | 1562 | node->size = chunksize; |
1525 | /* LINTED */ | | 1563 | /* LINTED */ |
1526 | RB_INSERT(chunk_tree_s, &old_chunks, node); | | 1564 | RB_INSERT(chunk_tree_s, &old_chunks, node); |
1527 | } | | 1565 | } |
1528 | } | | 1566 | } |
1529 | } else { | | 1567 | } else { |
1530 | #endif | | 1568 | #endif |
1531 | pages_unmap(chunk, size); | | 1569 | pages_unmap(chunk, size); |
1532 | | | 1570 | |
1533 | /* | | 1571 | /* |
1534 | * Make a record of the chunk's address, so that the address | | 1572 | * Make a record of the chunk's address, so that the address |
1535 | * range can be recycled if memory usage increases later on. | | 1573 | * range can be recycled if memory usage increases later on. |
1536 | * Don't bother to create entries if (size > chunksize), since | | 1574 | * Don't bother to create entries if (size > chunksize), since |
1537 | * doing so could cause scalability issues for truly gargantuan | | 1575 | * doing so could cause scalability issues for truly gargantuan |
1538 | * objects (many gigabytes or larger). | | 1576 | * objects (many gigabytes or larger). |
1539 | */ | | 1577 | */ |
1540 | if (size == chunksize) { | | 1578 | if (size == chunksize) { |
1541 | node = base_chunk_node_alloc(); | | 1579 | node = base_chunk_node_alloc(); |
1542 | if (node != NULL) { | | 1580 | if (node != NULL) { |
1543 | node->chunk = (void *)(uintptr_t)chunk; | | 1581 | node->chunk = (void *)(uintptr_t)chunk; |
1544 | node->size = chunksize; | | 1582 | node->size = chunksize; |
1545 | /* LINTED */ | | 1583 | /* LINTED */ |
1546 | RB_INSERT(chunk_tree_s, &old_chunks, node); | | 1584 | RB_INSERT(chunk_tree_s, &old_chunks, node); |
1547 | } | | 1585 | } |
1548 | } | | 1586 | } |
1549 | #ifdef USE_BRK | | 1587 | #ifdef USE_BRK |
1550 | } | | 1588 | } |
1551 | #endif | | 1589 | #endif |
1552 | | | 1590 | |
1553 | #ifdef MALLOC_STATS | | 1591 | #ifdef MALLOC_STATS |
1554 | stats_chunks.curchunks -= (size / chunksize); | | 1592 | stats_chunks.curchunks -= (size / chunksize); |
1555 | #endif | | 1593 | #endif |
1556 | malloc_mutex_unlock(&chunks_mtx); | | 1594 | malloc_mutex_unlock(&chunks_mtx); |
1557 | } | | 1595 | } |
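For brk-backed chunks that are not at the top of the data segment, the code settles for madvise(MADV_FREE): the kernel may then reclaim the physical pages, but the virtual range stays mapped, so it is recorded chunk by chunk in old_chunks for later recycling. mmap-backed chunks are truly unmapped, and only single-chunk ranges are recorded, per the scalability note above.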
1558 | | | 1596 | |
1559 | /* | | 1597 | /* |
1560 | * End chunk management functions. | | 1598 | * End chunk management functions. |
1561 | */ | | 1599 | */ |
1562 | /******************************************************************************/ | | 1600 | /******************************************************************************/ |
1563 | /* | | 1601 | /* |
1564 | * Begin arena. | | 1602 | * Begin arena. |
1565 | */ | | 1603 | */ |
1566 | | | 1604 | |
1567 | /* | | 1605 | /* |
1568 | * Choose an arena based on a per-thread and (optimistically) per-CPU value. | | 1606 | * Choose an arena based on a per-thread and (optimistically) per-CPU value. |
1569 | * | | 1607 | * |
1570 | * We maintain at least one block of arenas. Usually there are more. | | 1608 | * We maintain at least one block of arenas. Usually there are more. |
1571 | * The blocks are $ncpu arenas in size. Whole blocks are 'hashed' | | 1609 | * The blocks are $ncpu arenas in size. Whole blocks are 'hashed' |
1572 |  * amongst threads. To accomplish this, next_arena only ever | | 1610 |  * amongst threads. To accomplish this, next_arena only ever |
1573 |  * advances in steps of ncpus. | | 1611 |  * advances in steps of ncpus. |
1574 | */ | | 1612 | */ |
1575 | static __noinline arena_t * | | 1613 | static __noinline arena_t * |
1576 | choose_arena_hard(void) | | 1614 | choose_arena_hard(void) |
1577 | { | | 1615 | { |
1578 | unsigned i, curcpu; | | 1616 | unsigned i, curcpu; |
1579 | arena_t **map; | | 1617 | arena_t **map; |
1580 | | | 1618 | |
1581 | /* Initialize the current block of arenas and advance to next. */ | | 1619 | /* Initialize the current block of arenas and advance to next. */ |
1582 | malloc_mutex_lock(&arenas_mtx); | | 1620 | malloc_mutex_lock(&arenas_mtx); |
1583 | assert(next_arena % ncpus == 0); | | 1621 | assert(next_arena % ncpus == 0); |
1584 | assert(narenas % ncpus == 0); | | 1622 | assert(narenas % ncpus == 0); |
1585 | map = &arenas[next_arena]; | | 1623 | map = &arenas[next_arena]; |
1586 | set_arenas_map(map); | | 1624 | set_arenas_map(map); |
1587 | for (i = 0; i < ncpus; i++) { | | 1625 | for (i = 0; i < ncpus; i++) { |
1588 | if (arenas[next_arena] == NULL) | | 1626 | if (arenas[next_arena] == NULL) |
1589 | arenas_extend(next_arena); | | 1627 | arenas_extend(next_arena); |
1590 | next_arena = (next_arena + 1) % narenas; | | 1628 | next_arena = (next_arena + 1) % narenas; |
1591 | } | | 1629 | } |
1592 | malloc_mutex_unlock(&arenas_mtx); | | 1630 | malloc_mutex_unlock(&arenas_mtx); |
1593 | | | 1631 | |
1594 | /* | | 1632 | /* |
1595 | * If we were unable to allocate an arena above, then default to | | 1633 | * If we were unable to allocate an arena above, then default to |
1596 | * the first arena, which is always present. | | 1634 | * the first arena, which is always present. |
1597 | */ | | 1635 | */ |
1598 | curcpu = thr_curcpu(); | | 1636 | curcpu = thr_curcpu(); |
1599 | if (map[curcpu] != NULL) | | 1637 | if (map[curcpu] != NULL) |
1600 | return map[curcpu]; | | 1638 | return map[curcpu]; |
1601 | return arenas[0]; | | 1639 | return arenas[0]; |
1602 | } | | 1640 | } |
1603 | | | 1641 | |
1604 | static inline arena_t * | | 1642 | static inline arena_t * |
1605 | choose_arena(void) | | 1643 | choose_arena(void) |
1606 | { | | 1644 | { |
1607 | unsigned curcpu; | | 1645 | unsigned curcpu; |
1608 | arena_t **map; | | 1646 | arena_t **map; |
1609 | | | 1647 | |
1610 | map = get_arenas_map(); | | 1648 | map = get_arenas_map(); |
1611 | curcpu = thr_curcpu(); | | 1649 | curcpu = thr_curcpu(); |
1612 | if (__predict_true(map != NULL && map[curcpu] != NULL)) | | 1650 | if (__predict_true(map != NULL && map[curcpu] != NULL)) |
1613 | return map[curcpu]; | | 1651 | return map[curcpu]; |
1614 | | | 1652 | |
1615 | return choose_arena_hard(); | | 1653 | return choose_arena_hard(); |
1616 | } | | 1654 | } |
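Putting the two functions together: each thread caches a pointer to a block of ncpus arena slots and indexes it by thr_curcpu(), so a thread that stays on one CPU keeps hitting the same arena, while distinct thread blocks spread lock contention across arenas. A sketch of the fast path, assuming the map has already been assigned:

	arena_t **map = get_arenas_map();	/* &arenas[k * ncpus] for some k */
	arena_t *a = map[thr_curcpu()];		/* per-thread, per-CPU choice */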
1617 | | | 1655 | |
1618 | #ifndef lint | | 1656 | #ifndef lint |
1619 | static inline int | | 1657 | static inline int |
1620 | arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b) | | 1658 | arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b) |
1621 | { | | 1659 | { |
1622 | | | 1660 | |
1623 | assert(a != NULL); | | 1661 | assert(a != NULL); |
1624 | assert(b != NULL); | | 1662 | assert(b != NULL); |
1625 | | | 1663 | |
1626 | if ((uintptr_t)a < (uintptr_t)b) | | 1664 | if ((uintptr_t)a < (uintptr_t)b) |
1627 | return (-1); | | 1665 | return (-1); |
1628 | else if (a == b) | | 1666 | else if (a == b) |
1629 | return (0); | | 1667 | return (0); |
1630 | else | | 1668 | else |
1631 | return (1); | | 1669 | return (1); |
1632 | } | | 1670 | } |
1633 | | | 1671 | |
1634 | /* Generate red-black tree code for arena chunks. */ | | 1672 | /* Generate red-black tree code for arena chunks. */ |
1635 | RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp); | | 1673 | RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp); |
1636 | #endif | | 1674 | #endif |
1637 | | | 1675 | |
1638 | #ifndef lint | | 1676 | #ifndef lint |
1639 | static inline int | | 1677 | static inline int |
1640 | arena_run_comp(arena_run_t *a, arena_run_t *b) | | 1678 | arena_run_comp(arena_run_t *a, arena_run_t *b) |
1641 | { | | 1679 | { |
1642 | | | 1680 | |
1643 | assert(a != NULL); | | 1681 | assert(a != NULL); |
1644 | assert(b != NULL); | | 1682 | assert(b != NULL); |
1645 | | | 1683 | |
1646 | if ((uintptr_t)a < (uintptr_t)b) | | 1684 | if ((uintptr_t)a < (uintptr_t)b) |
1647 | return (-1); | | 1685 | return (-1); |
1648 | else if (a == b) | | 1686 | else if (a == b) |
1649 | return (0); | | 1687 | return (0); |
1650 | else | | 1688 | else |
1651 | return (1); | | 1689 | return (1); |
1652 | } | | 1690 | } |
1653 | | | 1691 | |
1654 | /* Generate red-black tree code for arena runs. */ | | 1692 | /* Generate red-black tree code for arena runs. */ |
1655 | RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp); | | 1693 | RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp); |
1656 | #endif | | 1694 | #endif |
1657 | | | 1695 | |
1658 | static inline void * | | 1696 | static inline void * |
1659 | arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin) | | 1697 | arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin) |
1660 | { | | 1698 | { |
1661 | void *ret; | | 1699 | void *ret; |
1662 | unsigned i, mask, bit, regind; | | 1700 | unsigned i, mask, bit, regind; |
1663 | | | 1701 | |
1664 | assert(run->magic == ARENA_RUN_MAGIC); | | 1702 | assert(run->magic == ARENA_RUN_MAGIC); |
1665 | assert(run->regs_minelm < bin->regs_mask_nelms); | | 1703 | assert(run->regs_minelm < bin->regs_mask_nelms); |
1666 | | | 1704 | |
1667 | /* | | 1705 | /* |
1668 | * Move the first check outside the loop, so that run->regs_minelm can | | 1706 | * Move the first check outside the loop, so that run->regs_minelm can |
1669 | * be updated unconditionally, without the possibility of updating it | | 1707 | * be updated unconditionally, without the possibility of updating it |
1670 | * multiple times. | | 1708 | * multiple times. |
1671 | */ | | 1709 | */ |
1672 | i = run->regs_minelm; | | 1710 | i = run->regs_minelm; |
1673 | mask = run->regs_mask[i]; | | 1711 | mask = run->regs_mask[i]; |
1674 | if (mask != 0) { | | 1712 | if (mask != 0) { |
1675 | /* Usable allocation found. */ | | 1713 | /* Usable allocation found. */ |
1676 | bit = ffs((int)mask) - 1; | | 1714 | bit = ffs((int)mask) - 1; |
1677 | | | 1715 | |
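/*
 * Each regs_mask element holds 8 * sizeof(int) bits, i.e.
 * 1 << (SIZEOF_INT_2POW + 3) regions, so element i and bit b
 * together name region number (i << 5) + b on the usual ports
 * where ints are 32 bits wide.
 */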
1678 | regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); | | 1716 | regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); |
1679 | ret = (void *)(((uintptr_t)run) + bin->reg0_offset | | 1717 | ret = (void *)(((uintptr_t)run) + bin->reg0_offset |
1680 | + (bin->reg_size * regind)); | | 1718 | + (bin->reg_size * regind)); |
1681 | | | 1719 | |
1682 | /* Clear bit. */ | | 1720 | /* Clear bit. */ |
1683 | mask ^= (1 << bit); | | 1721 | mask ^= (1 << bit); |
1684 | run->regs_mask[i] = mask; | | 1722 | run->regs_mask[i] = mask; |
1685 | | | 1723 | |
1686 | return (ret); | | 1724 | return (ret); |
1687 | } | | 1725 | } |
1688 | | | 1726 | |
1689 | for (i++; i < bin->regs_mask_nelms; i++) { | | 1727 | for (i++; i < bin->regs_mask_nelms; i++) { |
1690 | mask = run->regs_mask[i]; | | 1728 | mask = run->regs_mask[i]; |
1691 | if (mask != 0) { | | 1729 | if (mask != 0) { |
1692 | /* Usable allocation found. */ | | 1730 | /* Usable allocation found. */ |
1693 | bit = ffs((int)mask) - 1; | | 1731 | bit = ffs((int)mask) - 1; |
1694 | | | 1732 | |
1695 | regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); | | 1733 | regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); |
1696 | ret = (void *)(((uintptr_t)run) + bin->reg0_offset | | 1734 | ret = (void *)(((uintptr_t)run) + bin->reg0_offset |
1697 | + (bin->reg_size * regind)); | | 1735 | + (bin->reg_size * regind)); |
1698 | | | 1736 | |
1699 | /* Clear bit. */ | | 1737 | /* Clear bit. */ |
1700 | mask ^= (1 << bit); | | 1738 | mask ^= (1 << bit); |
1701 | run->regs_mask[i] = mask; | | 1739 | run->regs_mask[i] = mask; |
1702 | | | 1740 | |
1703 | /* | | 1741 | /* |
1704 | * Make a note that nothing before this element | | 1742 | * Make a note that nothing before this element |
1705 | * contains a free region. | | 1743 | * contains a free region. |
1706 | */ | | 1744 | */ |
1707 | run->regs_minelm = i; /* i + (mask == 0) would also skip a now-exhausted element; low payoff. */ | | 1745 | run->regs_minelm = i; /* i + (mask == 0) would also skip a now-exhausted element; low payoff. */
1708 | | | 1746 | |
1709 | return (ret); | | 1747 | return (ret); |
1710 | } | | 1748 | } |
1711 | } | | 1749 | } |
1712 | /* Not reached. */ | | 1750 | /* Not reached. */ |
1713 | /* LINTED */ | | 1751 | /* LINTED */ |
1714 | assert(0); | | 1752 | assert(0); |
1715 | return (NULL); | | 1753 | return (NULL); |
1716 | } | | 1754 | } |
1717 | | | 1755 | |
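/*
 * A free-standing sketch of the ffs()-driven bitmap scan used by
 * arena_run_reg_alloc() above (minus the regs_minelm priming); the
 * function name is invented for illustration and is not part of this
 * file.  It returns the index of the first free region recorded
 * across an array of mask words, or -1 if every word is zero.
 */
static inline int
first_free_region(const unsigned *masks, unsigned nelms)
{
	unsigned i;

	for (i = 0; i < nelms; i++) {
		if (masks[i] != 0) {
			return (int)((i << (SIZEOF_INT_2POW + 3)) +
			    (unsigned)(ffs((int)masks[i]) - 1));
		}
	}
	return (-1);
}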
1718 | static inline void | | 1756 | static inline void |
1719 | arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size) | | 1757 | arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size) |
1720 | { | | 1758 | { |
1721 | /* | | 1759 | /* |
1722 | * To divide by a number D that is not a power of two we multiply | | 1760 | * To divide by a number D that is not a power of two we multiply |
1723 | * by ((2^21 / D) + 1) and then right shift by 21 positions. | | 1761 | * by ((2^21 / D) + 1) and then right shift by 21 positions.
1724 | * | | 1762 | * |
1725 | * X / D | | 1763 | * X / D |
1726 | * | | 1764 | * |
1727 | * becomes | | 1765 | * becomes |
1728 | * | | 1766 | * |
1729 | * (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT | | 1767 | * (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT |
1730 | */ | | 1768 | */ |
1731 | #define SIZE_INV_SHIFT 21 | | 1769 | #define SIZE_INV_SHIFT 21 |
1732 | #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1) | | 1770 | #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1) |
1733 | static const unsigned size_invs[] = { | | 1771 | static const unsigned size_invs[] = { |
1734 | SIZE_INV(3), | | 1772 | SIZE_INV(3), |
1735 | SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), | | 1773 | SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), |
1736 | SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), | | 1774 | SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), |
1737 | SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), | | 1775 | SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
1738 | SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), | | 1776 | SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
1739 | SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), | | 1777 | SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
1740 | SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), | | 1778 | SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
1741 | SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) | | 1779 | SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
1742 | #if (QUANTUM_2POW_MIN < 4) | | 1780 | #if (QUANTUM_2POW_MIN < 4) |
1743 | , | | 1781 | , |
1744 | SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35), | | 1782 | SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35), |
1745 | SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39), | | 1783 | SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39), |
1746 | SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43), | | 1784 | SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43), |
1747 | SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47), | | 1785 | SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47), |
1748 | SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51), | | 1786 | SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51), |
1749 | SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55), | | 1787 | SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55), |
1750 | SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59), | | 1788 | SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59), |
1751 | SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63) | | 1789 | SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63) |
1752 | #endif | | 1790 | #endif |
1753 | }; | | 1791 | }; |
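/*
 * Illustration of the table above (not in the original source),
 * taking QUANTUM_2POW_MIN == 4: for D == 48, SIZE_INV(3) ==
 * (2097152 / 48) + 1 == 43691, so diff == 480 yields
 * (480 * 43691) >> 21 == 20971680 >> 21 == 10 == 480 / 48.
 * The "+ 1" in SIZE_INV() rounds the reciprocal up so that exact
 * multiples of D never round down; the result stays exact for all
 * diff < 2^21, far beyond any run size.
 */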
1754 | unsigned diff, regind, elm, bit; | | 1792 | unsigned diff, regind, elm, bit; |
1755 | | | 1793 | |
1756 | /* LINTED */ | | 1794 | /* LINTED */ |
1757 | assert(run->magic == ARENA_RUN_MAGIC); | | 1795 | assert(run->magic == ARENA_RUN_MAGIC); |
1758 | assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 | | 1796 | assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 |
1759 | >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN)); | | 1797 | >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN)); |
1760 | | | 1798 | |
1761 | /* | | 1799 | /* |
1762 | * Avoid doing division with a variable divisor if possible. Using | | 1800 | * Avoid doing division with a variable divisor if possible. Using |
1763 | * actual division here can reduce allocator throughput by over 20%! | | 1801 | * actual division here can reduce allocator throughput by over 20%! |
1764 | */ | | 1802 | */ |
1765 | diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset); | | 1803 | diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset); |
1766 | if ((size & (size - 1)) == 0) { | | 1804 | if ((size & (size - 1)) == 0) { |
1767 | /* | | 1805 | /* |
1768 | * log2_table allows fast division of a power of two in the | | 1806 | * log2_table allows fast division of a power of two in the |
1769 | * [1..128] range. | | 1807 | * [1..128] range. |
1770 | * | | 1808 | * |
1771 | * (x / divisor) becomes (x >> log2_table[divisor - 1]). | | 1809 | * (x / divisor) becomes (x >> log2_table[divisor - 1]). |
1772 | */ | | 1810 | */ |
1773 | static const unsigned char log2_table[] = { | | 1811 | static const unsigned char log2_table[] = { |
1774 | 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, | | 1812 | 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, |
1775 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, | | 1813 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, |
1776 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | | 1814 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
1777 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, | | 1815 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, |
1778 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | | 1816 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
1779 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | | 1817 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
1780 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | | 1818 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
1781 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 | | 1819 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 |
1782 | }; | | 1820 | }; |
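/*
 * For the power-of-two sizes this branch handles, the table is a
 * plain log2 lookup.  For example: size == 32 gives
 * log2_table[31] == 5, so regind == diff >> 5; size == 4096 gives
 * (4096 >> 8) == 16, log2_table[15] == 4, so
 * regind == diff >> (8 + 4) == diff / 4096.
 */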
1783 | | | 1821 | |
1784 | if (size <= 128) | | 1822 | if (size <= 128) |
1785 | regind = (diff >> log2_table[size - 1]); | | 1823 | regind = (diff >> log2_table[size - 1]); |
1786 | else if (size <= 32768) | | 1824 | else if (size <= 32768) |
1787 | regind = diff >> (8 + log2_table[(size >> 8) - 1]); | | 1825 | regind = diff >> (8 + log2_table[(size >> 8) - 1]); |
1788 | else { | | 1826 | else { |
1789 | /* | | 1827 | /* |
1790 | * The page size is too large for us to use the lookup | | 1828 | * The page size is too large for us to use the lookup |
1791 | * table. Use real division. | | 1829 | * table. Use real division. |
1792 | */ | | 1830 | */ |
1793 | regind = (unsigned)(diff / size); | | 1831 | regind = (unsigned)(diff / size); |
1794 | } | | 1832 | } |
1795 | } else if (size <= ((sizeof(size_invs) / sizeof(unsigned)) | | 1833 | } else if (size <= ((sizeof(size_invs) / sizeof(unsigned)) |
1796 | << QUANTUM_2POW_MIN) + 2) { | | 1834 | << QUANTUM_2POW_MIN) + 2) { |
1797 | regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff; | | 1835 | regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff; |
1798 | regind >>= SIZE_INV_SHIFT; | | 1836 | regind >>= SIZE_INV_SHIFT; |
| @@ -2658,1326 +2696,1321 @@ arena_new(arena_t *arena) | | | @@ -2658,1326 +2696,1321 @@ arena_new(arena_t *arena) |
2658 | #endif | | 2696 | #endif |
2659 | } | | 2697 | } |
2660 | | | 2698 | |
2661 | /* Quantum-spaced bins. */ | | 2699 | /* Quantum-spaced bins. */ |
2662 | for (; i < ntbins + nqbins; i++) { | | 2700 | for (; i < ntbins + nqbins; i++) { |
2663 | bin = &arena->bins[i]; | | 2701 | bin = &arena->bins[i]; |
2664 | bin->runcur = NULL; | | 2702 | bin->runcur = NULL; |
2665 | RB_INIT(&bin->runs); | | 2703 | RB_INIT(&bin->runs); |
2666 | | | 2704 | |
2667 | bin->reg_size = quantum * (i - ntbins + 1); | | 2705 | bin->reg_size = quantum * (i - ntbins + 1); |
2668 | /* | | 2706 | /* |
2669 | pow2_size = pow2_ceil(quantum * (i - ntbins + 1)); | | 2707 | pow2_size = pow2_ceil(quantum * (i - ntbins + 1)); |
2670 | */ | | 2708 | */ |
2671 | prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); | | 2709 | prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); |
2672 | | | 2710 | |
2673 | #ifdef MALLOC_STATS | | 2711 | #ifdef MALLOC_STATS |
2674 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); | | 2712 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2675 | #endif | | 2713 | #endif |
2676 | } | | 2714 | } |
2677 | | | 2715 | |
2678 | /* (2^n)-spaced sub-page bins. */ | | 2716 | /* (2^n)-spaced sub-page bins. */ |
2679 | for (; i < ntbins + nqbins + nsbins; i++) { | | 2717 | for (; i < ntbins + nqbins + nsbins; i++) { |
2680 | bin = &arena->bins[i]; | | 2718 | bin = &arena->bins[i]; |
2681 | bin->runcur = NULL; | | 2719 | bin->runcur = NULL; |
2682 | RB_INIT(&bin->runs); | | 2720 | RB_INIT(&bin->runs); |
2683 | | | 2721 | |
2684 | bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1)); | | 2722 | bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1)); |
2685 | | | 2723 | |
2686 | prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); | | 2724 | prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); |
2687 | | | 2725 | |
2688 | #ifdef MALLOC_STATS | | 2726 | #ifdef MALLOC_STATS |
2689 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); | | 2727 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2690 | #endif | | 2728 | #endif |
2691 | } | | 2729 | } |
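/*
 * Taken together, and using illustrative values (quantum == 16,
 * small_max == 512, pagesize == 4096), the bin sizes set up above run
 * 16, 32, ..., 512 for the quantum-spaced classes, then 1024 and 2048
 * for the (2^n)-spaced sub-page classes.
 */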
2692 | | | 2730 | |
2693 | #ifdef MALLOC_DEBUG | | 2731 | #ifdef MALLOC_DEBUG |
2694 | arena->magic = ARENA_MAGIC; | | 2732 | arena->magic = ARENA_MAGIC; |
2695 | #endif | | 2733 | #endif |
2696 | | | 2734 | |
2697 | return (false); | | 2735 | return (false); |
2698 | } | | 2736 | } |
2699 | | | 2737 | |
2700 | /* Create a new arena and insert it into the arenas array at index ind. */ | | 2738 | /* Create a new arena and insert it into the arenas array at index ind. */ |
2701 | static arena_t * | | 2739 | static arena_t * |
2702 | arenas_extend(unsigned ind) | | 2740 | arenas_extend(unsigned ind) |
2703 | { | | 2741 | { |
2704 | arena_t *ret; | | 2742 | arena_t *ret; |
2705 | | | 2743 | |
2706 | /* Allocate enough space for trailing bins. */ | | 2744 | /* Allocate enough space for trailing bins. */ |
2707 | ret = (arena_t *)base_alloc(sizeof(arena_t) | | 2745 | ret = (arena_t *)base_alloc(sizeof(arena_t) |
2708 | + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1))); | | 2746 | + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1))); |
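	/*
	 * The "- 1" above assumes arena_t itself ends with a one-element
	 * bins[] array (the pre-C99 flexible-array idiom), so one bin's
	 * worth of space is already counted in sizeof(arena_t).
	 */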
2709 | if (ret != NULL && arena_new(ret) == false) { | | 2747 | if (ret != NULL && arena_new(ret) == false) { |
2710 | arenas[ind] = ret; | | 2748 | arenas[ind] = ret; |
2711 | return (ret); | | 2749 | return (ret); |
2712 | } | | 2750 | } |
2713 | /* Only reached if there is an OOM error. */ | | 2751 | /* Only reached if there is an OOM error. */ |
2714 | | | 2752 | |
2715 | /* | | 2753 | /* |
2716 | * OOM here is quite inconvenient to propagate, since dealing with it | | 2754 | * OOM here is quite inconvenient to propagate, since dealing with it |
2717 | * would require a check for failure in the fast path. Instead, punt | | 2755 | * would require a check for failure in the fast path. Instead, punt |
2718 | * by using arenas[0]. In practice, this is an extremely unlikely | | 2756 | * by using arenas[0]. In practice, this is an extremely unlikely |
2719 | * failure. | | 2757 | * failure. |
2720 | */ | | 2758 | */ |
2721 | _malloc_message(getprogname(), | | 2759 | _malloc_message(getprogname(), |
2722 | ": (malloc) Error initializing arena\n", "", ""); | | 2760 | ": (malloc) Error initializing arena\n", "", ""); |
2723 | if (opt_abort) | | 2761 | if (opt_abort) |
2724 | abort(); | | 2762 | abort(); |
2725 | | | 2763 | |
2726 | return (arenas[0]); | | 2764 | return (arenas[0]); |
2727 | } | | 2765 | } |
2728 | | | 2766 | |
2729 | /* | | 2767 | /* |
2730 | * End arena. | | 2768 | * End arena. |
2731 | */ | | 2769 | */ |
2732 | /******************************************************************************/ | | 2770 | /******************************************************************************/ |
2733 | /* | | 2771 | /* |
2734 | * Begin general internal functions. | | 2772 | * Begin general internal functions. |
2735 | */ | | 2773 | */ |
2736 | | | 2774 | |
2737 | static void * | | 2775 | static void * |
2738 | huge_malloc(size_t size) | | 2776 | huge_malloc(size_t size) |
2739 | { | | 2777 | { |
2740 | void *ret; | | 2778 | void *ret; |
2741 | size_t csize; | | 2779 | size_t csize; |
2742 | chunk_node_t *node; | | 2780 | chunk_node_t *node; |
2743 | | | 2781 | |
2744 | /* Allocate one or more contiguous chunks for this request. */ | | 2782 | /* Allocate one or more contiguous chunks for this request. */ |
2745 | | | 2783 | |
2746 | csize = CHUNK_CEILING(size); | | 2784 | csize = CHUNK_CEILING(size); |
2747 | if (csize == 0) { | | 2785 | if (csize == 0) { |
2748 | /* size is large enough to cause size_t wrap-around. */ | | 2786 | /* size is large enough to cause size_t wrap-around. */ |
2749 | return (NULL); | | 2787 | return (NULL); |
2750 | } | | 2788 | } |
2751 | | | 2789 | |
2752 | /* Allocate a chunk node with which to track the chunk. */ | | 2790 | /* Allocate a chunk node with which to track the chunk. */ |
2753 | node = base_chunk_node_alloc(); | | 2791 | node = base_chunk_node_alloc(); |
2754 | if (node == NULL) | | 2792 | if (node == NULL) |
2755 | return (NULL); | | 2793 | return (NULL); |
2756 | | | 2794 | |
2757 | ret = chunk_alloc(csize); | | 2795 | ret = chunk_alloc(csize); |
2758 | if (ret == NULL) { | | 2796 | if (ret == NULL) { |
2759 | base_chunk_node_dealloc(node); | | 2797 | base_chunk_node_dealloc(node); |
2760 | return (NULL); | | 2798 | return (NULL); |
2761 | } | | 2799 | } |
2762 | | | 2800 | |
2763 | /* Insert node into huge. */ | | 2801 | /* Insert node into huge. */ |
2764 | node->chunk = ret; | | 2802 | node->chunk = ret; |
2765 | node->size = csize; | | 2803 | node->size = csize; |
2766 | | | 2804 | |
2767 | malloc_mutex_lock(&chunks_mtx); | | 2805 | malloc_mutex_lock(&chunks_mtx); |
2768 | RB_INSERT(chunk_tree_s, &huge, node); | | 2806 | RB_INSERT(chunk_tree_s, &huge, node); |
2769 | #ifdef MALLOC_STATS | | 2807 | #ifdef MALLOC_STATS |
2770 | huge_nmalloc++; | | 2808 | huge_nmalloc++; |
2771 | huge_allocated += csize; | | 2809 | huge_allocated += csize; |
2772 | #endif | | 2810 | #endif |
2773 | malloc_mutex_unlock(&chunks_mtx); | | 2811 | malloc_mutex_unlock(&chunks_mtx); |
2774 | | | 2812 | |
2775 | if (opt_junk) | | 2813 | if (opt_junk) |
2776 | memset(ret, 0xa5, csize); | | 2814 | memset(ret, 0xa5, csize); |
2777 | else if (opt_zero) | | 2815 | else if (opt_zero) |
2778 | memset(ret, 0, csize); | | 2816 | memset(ret, 0, csize); |
2779 | | | 2817 | |
2780 | return (ret); | | 2818 | return (ret); |
2781 | } | | 2819 | } |
2782 | | | 2820 | |
2783 | /* Only handles large allocations that require more than chunk alignment. */ | | 2821 | /* Only handles large allocations that require more than chunk alignment. */ |
2784 | static void * | | 2822 | static void * |
2785 | huge_palloc(size_t alignment, size_t size) | | 2823 | huge_palloc(size_t alignment, size_t size) |
2786 | { | | 2824 | { |
2787 | void *ret; | | 2825 | void *ret; |
2788 | size_t alloc_size, chunk_size, offset; | | 2826 | size_t alloc_size, chunk_size, offset; |
2789 | chunk_node_t *node; | | 2827 | chunk_node_t *node; |
2790 | | | 2828 | |
2791 | /* | | 2829 | /* |
2792 | * This allocation requires alignment that is even larger than chunk | | 2830 | * This allocation requires alignment that is even larger than chunk |
2793 | * alignment. This means that huge_malloc() isn't good enough. | | 2831 | * alignment. This means that huge_malloc() isn't good enough. |
2794 | * | | 2832 | * |
2795 | * Allocate almost twice as many chunks as are demanded by the size or | | 2833 | * Allocate almost twice as many chunks as are demanded by the size or |
2796 | * alignment, in order to ensure the alignment can be achieved, then | | 2834 | * alignment, in order to ensure the alignment can be achieved, then
2797 | * unmap leading and trailing chunks. | | 2835 | * unmap leading and trailing chunks. |
2798 | */ | | 2836 | */ |
2799 | assert(alignment >= chunksize); | | 2837 | assert(alignment >= chunksize); |
2800 | | | 2838 | |
2801 | chunk_size = CHUNK_CEILING(size); | | 2839 | chunk_size = CHUNK_CEILING(size); |
2802 | | | 2840 | |
2803 | if (size >= alignment) | | 2841 | if (size >= alignment) |
2804 | alloc_size = chunk_size + alignment - chunksize; | | 2842 | alloc_size = chunk_size + alignment - chunksize; |
2805 | else | | 2843 | else |
2806 | alloc_size = (alignment << 1) - chunksize; | | 2844 | alloc_size = (alignment << 1) - chunksize; |
2807 | | | 2845 | |
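/*
 * Worked example with illustrative numbers (1MB chunks): size == 1MB
 * and alignment == 4MB take the second branch, so alloc_size ==
 * (4MB << 1) - 1MB == 7MB.  Any 7MB region is guaranteed to contain a
 * 4MB-aligned 1MB span, and the excess before and after that span is
 * unmapped below.
 */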
2808 | /* Allocate a chunk node with which to track the chunk. */ | | 2846 | /* Allocate a chunk node with which to track the chunk. */ |
2809 | node = base_chunk_node_alloc(); | | 2847 | node = base_chunk_node_alloc(); |
2810 | if (node == NULL) | | 2848 | if (node == NULL) |
2811 | return (NULL); | | 2849 | return (NULL); |
2812 | | | 2850 | |
2813 | ret = chunk_alloc(alloc_size); | | 2851 | ret = chunk_alloc(alloc_size); |
2814 | if (ret == NULL) { | | 2852 | if (ret == NULL) { |
2815 | base_chunk_node_dealloc(node); | | 2853 | base_chunk_node_dealloc(node); |
2816 | return (NULL); | | 2854 | return (NULL); |
2817 | } | | 2855 | } |
2818 | | | 2856 | |
2819 | offset = (uintptr_t)ret & (alignment - 1); | | 2857 | offset = (uintptr_t)ret & (alignment - 1); |
2820 | assert((offset & chunksize_mask) == 0); | | 2858 | assert((offset & chunksize_mask) == 0); |
2821 | assert(offset < alloc_size); | | 2859 | assert(offset < alloc_size); |
2822 | if (offset == 0) { | | 2860 | if (offset == 0) { |
2823 | /* Trim trailing space. */ | | 2861 | /* Trim trailing space. */ |
2824 | chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size | | 2862 | chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size |
2825 | - chunk_size); | | 2863 | - chunk_size); |
2826 | } else { | | 2864 | } else { |
2827 | size_t trailsize; | | 2865 | size_t trailsize; |
2828 | | | 2866 | |
2829 | /* Trim leading space. */ | | 2867 | /* Trim leading space. */ |
2830 | chunk_dealloc(ret, alignment - offset); | | 2868 | chunk_dealloc(ret, alignment - offset); |
2831 | | | 2869 | |
2832 | ret = (void *)((uintptr_t)ret + (alignment - offset)); | | 2870 | ret = (void *)((uintptr_t)ret + (alignment - offset)); |
2833 | | | 2871 | |
2834 | trailsize = alloc_size - (alignment - offset) - chunk_size; | | 2872 | trailsize = alloc_size - (alignment - offset) - chunk_size; |
2835 | if (trailsize != 0) { | | 2873 | if (trailsize != 0) { |
2836 | /* Trim trailing space. */ | | 2874 | /* Trim trailing space. */ |
2837 | assert(trailsize < alloc_size); | | 2875 | assert(trailsize < alloc_size); |
2838 | chunk_dealloc((void *)((uintptr_t)ret + chunk_size), | | 2876 | chunk_dealloc((void *)((uintptr_t)ret + chunk_size), |
2839 | trailsize); | | 2877 | trailsize); |
2840 | } | | 2878 | } |
2841 | } | | 2879 | } |
2842 | | | 2880 | |
2843 | /* Insert node into huge. */ | | 2881 | /* Insert node into huge. */ |
2844 | node->chunk = ret; | | 2882 | node->chunk = ret; |
2845 | node->size = chunk_size; | | 2883 | node->size = chunk_size; |
2846 | | | 2884 | |
2847 | malloc_mutex_lock(&chunks_mtx); | | 2885 | malloc_mutex_lock(&chunks_mtx); |
2848 | RB_INSERT(chunk_tree_s, &huge, node); | | 2886 | RB_INSERT(chunk_tree_s, &huge, node); |
2849 | #ifdef MALLOC_STATS | | 2887 | #ifdef MALLOC_STATS |
2850 | huge_nmalloc++; | | 2888 | huge_nmalloc++; |
2851 | huge_allocated += chunk_size; | | 2889 | huge_allocated += chunk_size; |
2852 | #endif | | 2890 | #endif |
2853 | malloc_mutex_unlock(&chunks_mtx); | | 2891 | malloc_mutex_unlock(&chunks_mtx); |
2854 | | | 2892 | |
2855 | if (opt_junk) | | 2893 | if (opt_junk) |
2856 | memset(ret, 0xa5, chunk_size); | | 2894 | memset(ret, 0xa5, chunk_size); |
2857 | else if (opt_zero) | | 2895 | else if (opt_zero) |
2858 | memset(ret, 0, chunk_size); | | 2896 | memset(ret, 0, chunk_size); |
2859 | | | 2897 | |
2860 | return (ret); | | 2898 | return (ret); |
2861 | } | | 2899 | } |
2862 | | | 2900 | |
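/*
 * A stand-alone sketch of the trimming arithmetic huge_palloc()
 * performs; the function name and signature are invented for
 * illustration and operate on plain integers rather than live
 * mappings.  lead + chunk_size + trail always sums to alloc_size.
 */
static inline uintptr_t
palloc_trim_sketch(uintptr_t addr, size_t alignment, size_t chunk_size,
    size_t alloc_size, size_t *leadp, size_t *trailp)
{
	size_t offset = addr & (alignment - 1);

	/* Leading slop, if any, is what huge_palloc() unmaps first. */
	*leadp = (offset == 0) ? 0 : alignment - offset;
	/* Whatever follows the aligned chunk_size span is trimmed too. */
	*trailp = alloc_size - *leadp - chunk_size;
	return (addr + *leadp);
}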
2863 | static void * | | 2901 | static void * |
2864 | huge_ralloc(void *ptr, size_t size, size_t oldsize) | | 2902 | huge_ralloc(void *ptr, size_t size, size_t oldsize) |
2865 | { | | 2903 | { |
2866 | void *ret; | | 2904 | void *ret; |
2867 | | | 2905 | |
2868 | /* Avoid moving the allocation if the size class would not change. */ | | 2906 | /* Avoid moving the allocation if the size class would not change. */ |
2869 | if (oldsize > arena_maxclass && | | 2907 | if (oldsize > arena_maxclass && |
2870 | CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) { | | 2908 | CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) { |
2871 | if (opt_junk && size < oldsize) { | | 2909 | if (opt_junk && size < oldsize) { |
2872 | memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize | | 2910 | memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize |
2873 | - size); | | 2911 | - size); |
2874 | } else if (opt_zero && size > oldsize) { | | 2912 | } else if (opt_zero && size > oldsize) { |
2875 | memset((void *)((uintptr_t)ptr + oldsize), 0, size | | 2913 | memset((void *)((uintptr_t)ptr + oldsize), 0, size |
2876 | - oldsize); | | 2914 | - oldsize); |
2877 | } | | 2915 | } |
2878 | return (ptr); | | 2916 | return (ptr); |
2879 | } | | 2917 | } |
2880 | | | 2918 | |
2881 | if (CHUNK_ADDR2BASE(ptr) == ptr | | 2919 | if (CHUNK_ADDR2BASE(ptr) == ptr |
2882 | #ifdef USE_BRK | | 2920 | #ifdef USE_BRK |
2883 | && ((uintptr_t)ptr < (uintptr_t)brk_base | | 2921 | && ((uintptr_t)ptr < (uintptr_t)brk_base |
2884 | || (uintptr_t)ptr >= (uintptr_t)brk_max) | | 2922 | || (uintptr_t)ptr >= (uintptr_t)brk_max) |
2885 | #endif | | 2923 | #endif |
2886 | ) { | | 2924 | ) { |
2887 | chunk_node_t *node, key; | | 2925 | chunk_node_t *node, key; |
2888 | void *newptr; | | 2926 | void *newptr; |
2889 | size_t oldcsize; | | 2927 | size_t oldcsize; |
2890 | size_t newcsize; | | 2928 | size_t newcsize; |
2891 | | | 2929 | |
2892 | newcsize = CHUNK_CEILING(size); | | 2930 | newcsize = CHUNK_CEILING(size); |
2893 | oldcsize = CHUNK_CEILING(oldsize); | | 2931 | oldcsize = CHUNK_CEILING(oldsize); |
2894 | assert(oldcsize != newcsize); | | 2932 | assert(oldcsize != newcsize); |
2895 | if (newcsize == 0) { | | 2933 | if (newcsize == 0) { |
2896 | /* size_t wrap-around */ | | 2934 | /* size_t wrap-around */ |
2897 | return (NULL); | | 2935 | return (NULL); |
2898 | } | | 2936 | } |
2899 | | | 2937 | |
2900 | /* | | 2938 | /* |
2901 | * Remove the old region from the tree now. If mremap() | | 2939 | * Remove the old region from the tree now. If mremap() |
2902 | * returns the region to the system, another thread may | | 2940 | * returns the region to the system, another thread may
2903 | * map it for its own huge allocation and insert it into | | 2941 | * map it for its own huge allocation and insert it into
2904 | * the tree before we acquire the mutex lock again. | | 2942 | * the tree before we acquire the mutex lock again.
2905 | */ | | 2943 | */ |
2906 | malloc_mutex_lock(&chunks_mtx); | | 2944 | malloc_mutex_lock(&chunks_mtx); |
2907 | key.chunk = __DECONST(void *, ptr); | | 2945 | key.chunk = __DECONST(void *, ptr); |
2908 | /* LINTED */ | | 2946 | /* LINTED */ |
2909 | node = RB_FIND(chunk_tree_s, &huge, &key); | | 2947 | node = RB_FIND(chunk_tree_s, &huge, &key); |
2910 | assert(node != NULL); | | 2948 | assert(node != NULL); |
2911 | assert(node->chunk == ptr); | | 2949 | assert(node->chunk == ptr); |
2912 | assert(node->size == oldcsize); | | 2950 | assert(node->size == oldcsize); |
2913 | RB_REMOVE(chunk_tree_s, &huge, node); | | 2951 | RB_REMOVE(chunk_tree_s, &huge, node); |
2914 | malloc_mutex_unlock(&chunks_mtx); | | 2952 | malloc_mutex_unlock(&chunks_mtx); |
2915 | | | 2953 | |
2916 | newptr = mremap(ptr, oldcsize, NULL, newcsize, | | 2954 | newptr = mremap(ptr, oldcsize, NULL, newcsize, |
2917 | MAP_ALIGNED(chunksize_2pow)); | | 2955 | MAP_ALIGNED(chunksize_2pow)); |
2918 | if (newptr == MAP_FAILED) { | | 2956 | if (newptr == MAP_FAILED) { |
2919 | /* We still own the old region. */ | | 2957 | /* We still own the old region. */ |
2920 | malloc_mutex_lock(&chunks_mtx); | | 2958 | malloc_mutex_lock(&chunks_mtx); |
2921 | RB_INSERT(chunk_tree_s, &huge, node); | | 2959 | RB_INSERT(chunk_tree_s, &huge, node); |
2922 | malloc_mutex_unlock(&chunks_mtx); | | 2960 | malloc_mutex_unlock(&chunks_mtx); |
2923 | } else { | | 2961 | } else { |
2924 | assert(CHUNK_ADDR2BASE(newptr) == newptr); | | 2962 | assert(CHUNK_ADDR2BASE(newptr) == newptr); |
2925 | | | 2963 | |
2926 | /* Insert new or resized old region. */ | | 2964 | /* Insert new or resized old region. */ |
2927 | malloc_mutex_lock(&chunks_mtx); | | 2965 | malloc_mutex_lock(&chunks_mtx); |
2928 | node->size = newcsize; | | 2966 | node->size = newcsize; |
2929 | node->chunk = newptr; | | 2967 | node->chunk = newptr; |
2930 | RB_INSERT(chunk_tree_s, &huge, node); | | 2968 | RB_INSERT(chunk_tree_s, &huge, node); |
2931 | #ifdef MALLOC_STATS | | 2969 | #ifdef MALLOC_STATS |
2932 | huge_nralloc++; | | 2970 | huge_nralloc++; |
2933 | huge_allocated += newcsize - oldcsize; | | 2971 | huge_allocated += newcsize - oldcsize; |
2934 | if (newcsize > oldcsize) { | | 2972 | if (newcsize > oldcsize) { |
2935 | stats_chunks.curchunks += | | 2973 | stats_chunks.curchunks += |
2936 | (newcsize - oldcsize) / chunksize; | | 2974 | (newcsize - oldcsize) / chunksize; |
2937 | if (stats_chunks.curchunks > | | 2975 | if (stats_chunks.curchunks > |
2938 | stats_chunks.highchunks) | | 2976 | stats_chunks.highchunks) |
2939 | stats_chunks.highchunks = | | 2977 | stats_chunks.highchunks = |
2940 | stats_chunks.curchunks; | | 2978 | stats_chunks.curchunks; |
2941 | } else { | | 2979 | } else { |
2942 | stats_chunks.curchunks -= | | 2980 | stats_chunks.curchunks -= |
2943 | (oldcsize - newcsize) / chunksize; | | 2981 | (oldcsize - newcsize) / chunksize; |
2944 | } | | 2982 | } |
2945 | #endif | | 2983 | #endif |
2946 | malloc_mutex_unlock(&chunks_mtx); | | 2984 | malloc_mutex_unlock(&chunks_mtx); |
2947 | | | 2985 | |
2948 | if (opt_junk && size < oldsize) { | | 2986 | if (opt_junk && size < oldsize) { |
2949 | memset((void *)((uintptr_t)newptr + size), 0x5a, | | 2987 | memset((void *)((uintptr_t)newptr + size), 0x5a, |
2950 | newcsize - size); | | 2988 | newcsize - size); |
2951 | } else if (opt_zero && size > oldsize) { | | 2989 | } else if (opt_zero && size > oldsize) { |
2952 | memset((void *)((uintptr_t)newptr + oldsize), 0, | | 2990 | memset((void *)((uintptr_t)newptr + oldsize), 0, |
2953 | size - oldsize); | | 2991 | size - oldsize); |
2954 | } | | 2992 | } |
2955 | return (newptr); | | 2993 | return (newptr); |
2956 | } | | 2994 | } |
2957 | } | | 2995 | } |
2958 | | | 2996 | |
2959 | /* | | 2997 | /* |
2960 | * If we get here, then size and oldsize are different enough that we | | 2998 | * If we get here, then size and oldsize are different enough that we |
2961 | * need to use a different size class. In that case, fall back to | | 2999 | * need to use a different size class. In that case, fall back to |
2962 | * allocating new space and copying. | | 3000 | * allocating new space and copying. |
2963 | */ | | 3001 | */ |
2964 | ret = huge_malloc(size); | | 3002 | ret = huge_malloc(size); |
2965 | if (ret == NULL) | | 3003 | if (ret == NULL) |
2966 | return (NULL); | | 3004 | return (NULL); |
2967 | | | 3005 | |
2968 | if (CHUNK_ADDR2BASE(ptr) == ptr) { | | 3006 | if (CHUNK_ADDR2BASE(ptr) == ptr) { |
2969 | /* The old allocation is a chunk. */ | | 3007 | /* The old allocation is a chunk. */ |
2970 | if (size < oldsize) | | 3008 | if (size < oldsize) |
2971 | memcpy(ret, ptr, size); | | 3009 | memcpy(ret, ptr, size); |
2972 | else | | 3010 | else |
2973 | memcpy(ret, ptr, oldsize); | | 3011 | memcpy(ret, ptr, oldsize); |
2974 | } else { | | 3012 | } else { |
2975 | /* The old allocation is a region. */ | | 3013 | /* The old allocation is a region. */ |
2976 | assert(oldsize < size); | | 3014 | assert(oldsize < size); |
2977 | memcpy(ret, ptr, oldsize); | | 3015 | memcpy(ret, ptr, oldsize); |
2978 | } | | 3016 | } |
2979 | idalloc(ptr); | | 3017 | idalloc(ptr); |
2980 | return (ret); | | 3018 | return (ret); |
2981 | } | | 3019 | } |
2982 | | | 3020 | |
2983 | static void | | 3021 | static void |
2984 | huge_dalloc(void *ptr) | | 3022 | huge_dalloc(void *ptr) |
2985 | { | | 3023 | { |
2986 | chunk_node_t key; | | 3024 | chunk_node_t key; |
2987 | chunk_node_t *node; | | 3025 | chunk_node_t *node; |
2988 | | | 3026 | |
2989 | malloc_mutex_lock(&chunks_mtx); | | 3027 | malloc_mutex_lock(&chunks_mtx); |
2990 | | | 3028 | |
2991 | /* Extract from tree of huge allocations. */ | | 3029 | /* Extract from tree of huge allocations. */ |
2992 | key.chunk = ptr; | | 3030 | key.chunk = ptr; |
2993 | /* LINTED */ | | 3031 | /* LINTED */ |
2994 | node = RB_FIND(chunk_tree_s, &huge, &key); | | 3032 | node = RB_FIND(chunk_tree_s, &huge, &key); |
2995 | assert(node != NULL); | | 3033 | assert(node != NULL); |
2996 | assert(node->chunk == ptr); | | 3034 | assert(node->chunk == ptr); |
2997 | /* LINTED */ | | 3035 | /* LINTED */ |
2998 | RB_REMOVE(chunk_tree_s, &huge, node); | | 3036 | RB_REMOVE(chunk_tree_s, &huge, node); |
2999 | | | 3037 | |
3000 | #ifdef MALLOC_STATS | | 3038 | #ifdef MALLOC_STATS |
3001 | huge_ndalloc++; | | 3039 | huge_ndalloc++; |
3002 | huge_allocated -= node->size; | | 3040 | huge_allocated -= node->size; |
3003 | #endif | | 3041 | #endif |
3004 | | | 3042 | |
3005 | malloc_mutex_unlock(&chunks_mtx); | | 3043 | malloc_mutex_unlock(&chunks_mtx); |
3006 | | | 3044 | |
3007 | /* Unmap chunk. */ | | 3045 | /* Unmap chunk. */ |
3008 | #ifdef USE_BRK | | 3046 | #ifdef USE_BRK |
3009 | if (opt_junk) | | 3047 | if (opt_junk) |
3010 | memset(node->chunk, 0x5a, node->size); | | 3048 | memset(node->chunk, 0x5a, node->size); |
3011 | #endif | | 3049 | #endif |
3012 | chunk_dealloc(node->chunk, node->size); | | 3050 | chunk_dealloc(node->chunk, node->size); |
3013 | | | 3051 | |
3014 | base_chunk_node_dealloc(node); | | 3052 | base_chunk_node_dealloc(node); |
3015 | } | | 3053 | } |
3016 | | | 3054 | |
3017 | static void * | | 3055 | static void * |
3018 | imalloc(size_t size) | | 3056 | imalloc(size_t size) |
3019 | { | | 3057 | { |
3020 | void *ret; | | 3058 | void *ret; |
3021 | | | 3059 | |
3022 | assert(size != 0); | | 3060 | assert(size != 0); |
3023 | | | 3061 | |
3024 | if (size <= arena_maxclass) | | 3062 | if (size <= arena_maxclass) |
3025 | ret = arena_malloc(choose_arena(), size); | | 3063 | ret = arena_malloc(choose_arena(), size); |
3026 | else | | 3064 | else |
3027 | ret = huge_malloc(size); | | 3065 | ret = huge_malloc(size); |
3028 | | | 3066 | |
3029 | return (ret); | | 3067 | return (ret); |
3030 | } | | 3068 | } |
3031 | | | 3069 | |
3032 | static void * | | 3070 | static void * |
3033 | ipalloc(size_t alignment, size_t size) | | 3071 | ipalloc(size_t alignment, size_t size) |
3034 | { | | 3072 | { |
3035 | void *ret; | | 3073 | void *ret; |
3036 | size_t ceil_size; | | 3074 | size_t ceil_size; |
3037 | | | 3075 | |
3038 | /* | | 3076 | /* |
3039 | * Round size up to the nearest multiple of alignment. | | 3077 | * Round size up to the nearest multiple of alignment. |
3040 | * | | 3078 | * |
3041 | * This done, we can take advantage of the fact that for each small | | 3079 | * This done, we can take advantage of the fact that for each small |
3042 | * size class, every object is aligned at the smallest power of two | | 3080 | * size class, every object is aligned at the smallest power of two |
3043 | * that is non-zero in the base two representation of the size. For | | 3081 | * that is non-zero in the base two representation of the size. For |
3044 | * example: | | 3082 | * example: |
3045 | * | | 3083 | * |
3046 | * Size | Base 2 | Minimum alignment | | 3084 | * Size | Base 2 | Minimum alignment |
3047 | * -----+----------+------------------ | | 3085 | * -----+----------+------------------ |
3048 | * 96 | 1100000 | 32 | | 3086 | * 96 | 1100000 | 32 |
3049 | * 144 | 10100000 | 32 | | 3087 | * 144 | 10100000 | 32 |
3050 | * 192 | 11000000 | 64 | | 3088 | * 192 | 11000000 | 64 |
3051 | * | | 3089 | * |
3052 | * Depending on runtime settings, it is possible that arena_malloc() | | 3090 | * Depending on runtime settings, it is possible that arena_malloc() |
3053 | * will further round up to a power of two, but that never causes | | 3091 | * will further round up to a power of two, but that never causes |
3054 | * correctness issues. | | 3092 | * correctness issues. |
3055 | */ | | 3093 | */ |
3056 | ceil_size = (size + (alignment - 1)) & (-alignment); | | 3094 | ceil_size = (size + (alignment - 1)) & (-alignment); |
3057 | /* | | 3095 | /* |
3058 | * (ceil_size < size) protects against the combination of maximal | | 3096 | * (ceil_size < size) protects against the combination of maximal |
3059 | * alignment and size greater than maximal alignment. | | 3097 | * alignment and size greater than maximal alignment. |
3060 | */ | | 3098 | */ |
3061 | if (ceil_size < size) { | | 3099 | if (ceil_size < size) { |
3062 | /* size_t overflow. */ | | 3100 | /* size_t overflow. */ |
3063 | return (NULL); | | 3101 | return (NULL); |
3064 | } | | 3102 | } |
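/*
 * For example: size == 100 with alignment == 32 rounds to
 * ceil_size == (100 + 31) & ~31 == 128.  The lowest set bit of 128
 * is 128 itself, so by the table above the requested 32-byte
 * alignment comes for free from arena_malloc().
 */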
3065 | | | 3103 | |
3066 | if (ceil_size <= pagesize || (alignment <= pagesize | | 3104 | if (ceil_size <= pagesize || (alignment <= pagesize |
3067 | && ceil_size <= arena_maxclass)) | | 3105 | && ceil_size <= arena_maxclass)) |
3068 | ret = arena_malloc(choose_arena(), ceil_size); | | 3106 | ret = arena_malloc(choose_arena(), ceil_size); |
3069 | else { | | 3107 | else { |
3070 | size_t run_size; | | 3108 | size_t run_size; |
3071 | | | 3109 | |
3072 | /* | | 3110 | /* |
3073 | * We can't achieve sub-page alignment, so round up alignment | | 3111 | * We can't achieve sub-page alignment, so round up alignment |
3074 | * permanently; it makes later calculations simpler. | | 3112 | * permanently; it makes later calculations simpler. |
3075 | */ | | 3113 | */ |
3076 | alignment = PAGE_CEILING(alignment); | | 3114 | alignment = PAGE_CEILING(alignment); |
3077 | ceil_size = PAGE_CEILING(size); | | 3115 | ceil_size = PAGE_CEILING(size); |
3078 | /* | | 3116 | /* |
3079 | * (ceil_size < size) protects against very large sizes within | | 3117 | * (ceil_size < size) protects against very large sizes within |
3080 | * pagesize of SIZE_T_MAX. | | 3118 | * pagesize of SIZE_T_MAX. |
3081 | * | | 3119 | * |
3082 | * (ceil_size + alignment < ceil_size) protects against the | | 3120 | * (ceil_size + alignment < ceil_size) protects against the |
3083 | * combination of maximal alignment and ceil_size large enough | | 3121 | * combination of maximal alignment and ceil_size large enough |
3084 | * to cause overflow. This is similar to the first overflow | | 3122 | * to cause overflow. This is similar to the first overflow |
3085 | * check above, but it needs to be repeated due to the new | | 3123 | * check above, but it needs to be repeated due to the new |
3086 | * ceil_size value, which may now be *equal* to maximal | | 3124 | * ceil_size value, which may now be *equal* to maximal |
3087 | * alignment, whereas before we only detected overflow if the | | 3125 | * alignment, whereas before we only detected overflow if the |
3088 | * original size was *greater* than maximal alignment. | | 3126 | * original size was *greater* than maximal alignment. |
3089 | */ | | 3127 | */ |
3090 | if (ceil_size < size || ceil_size + alignment < ceil_size) { | | 3128 | if (ceil_size < size || ceil_size + alignment < ceil_size) { |
3091 | /* size_t overflow. */ | | 3129 | /* size_t overflow. */ |
3092 | return (NULL); | | 3130 | return (NULL); |
3093 | } | | 3131 | } |
3094 | | | 3132 | |
3095 | /* | | 3133 | /* |
3096 | * Calculate the size of the over-size run that arena_palloc() | | 3134 | * Calculate the size of the over-size run that arena_palloc() |
3097 | * would need to allocate in order to guarantee the alignment. | | 3135 | * would need to allocate in order to guarantee the alignment. |
3098 | */ | | 3136 | */ |
3099 | if (ceil_size >= alignment) | | 3137 | if (ceil_size >= alignment) |
3100 | run_size = ceil_size + alignment - pagesize; | | 3138 | run_size = ceil_size + alignment - pagesize; |
3101 | else { | | 3139 | else { |
3102 | /* | | 3140 | /* |
3103 | * It is possible that (alignment << 1) will cause | | 3141 | * It is possible that (alignment << 1) will cause |
3104 | * overflow, but it doesn't matter because we also | | 3142 | * overflow, but it doesn't matter because we also |
3105 | * subtract pagesize, which in the case of overflow | | 3143 | * subtract pagesize, which in the case of overflow |
3106 | * leaves us with a very large run_size. That causes | | 3144 | * leaves us with a very large run_size. That causes |
3107 | * the first conditional below to fail, which means | | 3145 | * the first conditional below to fail, which means |
3108 | * that the bogus run_size value never gets used for | | 3146 | * that the bogus run_size value never gets used for |
3109 | * anything important. | | 3147 | * anything important. |
3110 | */ | | 3148 | */ |
3111 | run_size = (alignment << 1) - pagesize; | | 3149 | run_size = (alignment << 1) - pagesize; |
3112 | } | | 3150 | } |
3113 | | | 3151 | |
3114 | if (run_size <= arena_maxclass) { | | 3152 | if (run_size <= arena_maxclass) { |
3115 | ret = arena_palloc(choose_arena(), alignment, ceil_size, | | 3153 | ret = arena_palloc(choose_arena(), alignment, ceil_size, |
3116 | run_size); | | 3154 | run_size); |
3117 | } else if (alignment <= chunksize) | | 3155 | } else if (alignment <= chunksize) |
3118 | ret = huge_malloc(ceil_size); | | 3156 | ret = huge_malloc(ceil_size); |
3119 | else | | 3157 | else |
3120 | ret = huge_palloc(alignment, ceil_size); | | 3158 | ret = huge_palloc(alignment, ceil_size); |
3121 | } | | 3159 | } |
3122 | | | 3160 | |
3123 | assert(((uintptr_t)ret & (alignment - 1)) == 0); | | 3161 | assert(((uintptr_t)ret & (alignment - 1)) == 0); |
3124 | return (ret); | | 3162 | return (ret); |
3125 | } | | 3163 | } |
3126 | | | 3164 | |
3127 | static void * | | 3165 | static void * |
3128 | icalloc(size_t size) | | 3166 | icalloc(size_t size) |
3129 | { | | 3167 | { |
3130 | void *ret; | | 3168 | void *ret; |
3131 | | | 3169 | |
3132 | if (size <= arena_maxclass) { | | 3170 | if (size <= arena_maxclass) { |
3133 | ret = arena_malloc(choose_arena(), size); | | 3171 | ret = arena_malloc(choose_arena(), size); |
3134 | if (ret == NULL) | | 3172 | if (ret == NULL) |
3135 | return (NULL); | | 3173 | return (NULL); |
3136 | memset(ret, 0, size); | | 3174 | memset(ret, 0, size); |
3137 | } else { | | 3175 | } else { |
3138 | /* | | 3176 | /* |
3139 | * The virtual memory system provides zero-filled pages, so | | 3177 | * The virtual memory system provides zero-filled pages, so |
3140 | * there is no need to do so manually, unless opt_junk is | | 3178 | * there is no need to do so manually, unless opt_junk is |
3141 | * enabled, in which case huge_malloc() fills huge allocations | | 3179 | * enabled, in which case huge_malloc() fills huge allocations |
3142 | * with junk. | | 3180 | * with junk. |
3143 | */ | | 3181 | */ |
3144 | ret = huge_malloc(size); | | 3182 | ret = huge_malloc(size); |
3145 | if (ret == NULL) | | 3183 | if (ret == NULL) |
3146 | return (NULL); | | 3184 | return (NULL); |
3147 | | | 3185 | |
3148 | if (opt_junk) | | 3186 | if (opt_junk) |
3149 | memset(ret, 0, size); | | 3187 | memset(ret, 0, size); |
3150 | #ifdef USE_BRK | | 3188 | #ifdef USE_BRK |
3151 | else if ((uintptr_t)ret >= (uintptr_t)brk_base | | 3189 | else if ((uintptr_t)ret >= (uintptr_t)brk_base |
3152 | && (uintptr_t)ret < (uintptr_t)brk_max) { | | 3190 | && (uintptr_t)ret < (uintptr_t)brk_max) { |
3153 | /* | | 3191 | /* |
3154 | * This may be a re-used brk chunk. Therefore, zero | | 3192 | * This may be a re-used brk chunk. Therefore, zero |
3155 | * the memory. | | 3193 | * the memory. |
3156 | */ | | 3194 | */ |
3157 | memset(ret, 0, size); | | 3195 | memset(ret, 0, size); |
3158 | } | | 3196 | } |
3159 | #endif | | 3197 | #endif |
3160 | } | | 3198 | } |
3161 | | | 3199 | |
3162 | return (ret); | | 3200 | return (ret); |
3163 | } | | 3201 | } |
3164 | | | 3202 | |
3165 | static size_t | | 3203 | static size_t |
3166 | isalloc(const void *ptr) | | 3204 | isalloc(const void *ptr) |
3167 | { | | 3205 | { |
3168 | size_t ret; | | 3206 | size_t ret; |
3169 | arena_chunk_t *chunk; | | 3207 | arena_chunk_t *chunk; |
3170 | | | 3208 | |
3171 | assert(ptr != NULL); | | 3209 | assert(ptr != NULL); |
3172 | | | 3210 | |
3173 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); | | 3211 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
3174 | if (chunk != ptr) { | | 3212 | if (chunk != ptr) { |
3175 | /* Region. */ | | 3213 | /* Region. */ |
3176 | assert(chunk->arena->magic == ARENA_MAGIC); | | 3214 | assert(chunk->arena->magic == ARENA_MAGIC); |
3177 | | | 3215 | |
3178 | ret = arena_salloc(ptr); | | 3216 | ret = arena_salloc(ptr); |
3179 | } else { | | 3217 | } else { |
3180 | chunk_node_t *node, key; | | 3218 | chunk_node_t *node, key; |
3181 | | | 3219 | |
3182 | /* Chunk (huge allocation). */ | | 3220 | /* Chunk (huge allocation). */ |
3183 | | | 3221 | |
3184 | malloc_mutex_lock(&chunks_mtx); | | 3222 | malloc_mutex_lock(&chunks_mtx); |
3185 | | | 3223 | |
3186 | /* Extract from tree of huge allocations. */ | | 3224 | /* Extract from tree of huge allocations. */ |
3187 | key.chunk = __DECONST(void *, ptr); | | 3225 | key.chunk = __DECONST(void *, ptr); |
3188 | /* LINTED */ | | 3226 | /* LINTED */ |
3189 | node = RB_FIND(chunk_tree_s, &huge, &key); | | 3227 | node = RB_FIND(chunk_tree_s, &huge, &key); |
3190 | assert(node != NULL); | | 3228 | assert(node != NULL); |
3191 | | | 3229 | |
3192 | ret = node->size; | | 3230 | ret = node->size; |
3193 | | | 3231 | |
3194 | malloc_mutex_unlock(&chunks_mtx); | | 3232 | malloc_mutex_unlock(&chunks_mtx); |
3195 | } | | 3233 | } |
3196 | | | 3234 | |
3197 | return (ret); | | 3235 | return (ret); |
3198 | } | | 3236 | } |
3199 | | | 3237 | |
3200 | static void * | | 3238 | static void * |
3201 | iralloc(void *ptr, size_t size) | | 3239 | iralloc(void *ptr, size_t size) |
3202 | { | | 3240 | { |
3203 | void *ret; | | 3241 | void *ret; |
3204 | size_t oldsize; | | 3242 | size_t oldsize; |
3205 | | | 3243 | |
3206 | assert(ptr != NULL); | | 3244 | assert(ptr != NULL); |
3207 | assert(size != 0); | | 3245 | assert(size != 0); |
3208 | | | 3246 | |
3209 | oldsize = isalloc(ptr); | | 3247 | oldsize = isalloc(ptr); |
3210 | | | 3248 | |
3211 | if (size <= arena_maxclass) | | 3249 | if (size <= arena_maxclass) |
3212 | ret = arena_ralloc(ptr, size, oldsize); | | 3250 | ret = arena_ralloc(ptr, size, oldsize); |
3213 | else | | 3251 | else |
3214 | ret = huge_ralloc(ptr, size, oldsize); | | 3252 | ret = huge_ralloc(ptr, size, oldsize); |
3215 | | | 3253 | |
3216 | return (ret); | | 3254 | return (ret); |
3217 | } | | 3255 | } |
3218 | | | 3256 | |
3219 | static void | | 3257 | static void |
3220 | idalloc(void *ptr) | | 3258 | idalloc(void *ptr) |
3221 | { | | 3259 | { |
3222 | arena_chunk_t *chunk; | | 3260 | arena_chunk_t *chunk; |
3223 | | | 3261 | |
3224 | assert(ptr != NULL); | | 3262 | assert(ptr != NULL); |
3225 | | | 3263 | |
3226 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); | | 3264 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
3227 | if (chunk != ptr) { | | 3265 | if (chunk != ptr) { |
3228 | /* Region. */ | | 3266 | /* Region. */ |
3229 | arena_dalloc(chunk->arena, chunk, ptr); | | 3267 | arena_dalloc(chunk->arena, chunk, ptr); |
3230 | } else | | 3268 | } else |
3231 | huge_dalloc(ptr); | | 3269 | huge_dalloc(ptr); |
3232 | } | | 3270 | } |
3233 | | | 3271 | |
3234 | static void | | 3272 | static void |
3235 | malloc_print_stats(void) | | 3273 | malloc_print_stats(void) |
3236 | { | | 3274 | { |
3237 | | | 3275 | |
3238 | if (opt_print_stats) { | | 3276 | if (opt_print_stats) { |
3239 | char s[UMAX2S_BUFSIZE]; | | 3277 | char s[UMAX2S_BUFSIZE]; |
3240 | _malloc_message("___ Begin malloc statistics ___\n", "", "", | | 3278 | _malloc_message("___ Begin malloc statistics ___\n", "", "", |
3241 | ""); | | 3279 | ""); |
3242 | _malloc_message("Assertions ", | | 3280 | _malloc_message("Assertions ", |
3243 | #ifdef NDEBUG | | 3281 | #ifdef NDEBUG |
3244 | "disabled", | | 3282 | "disabled", |
3245 | #else | | 3283 | #else |
3246 | "enabled", | | 3284 | "enabled", |
3247 | #endif | | 3285 | #endif |
3248 | "\n", ""); | | 3286 | "\n", ""); |
3249 | _malloc_message("Boolean MALLOC_OPTIONS: ", | | 3287 | _malloc_message("Boolean MALLOC_OPTIONS: ", |
3250 | opt_abort ? "A" : "a", | | 3288 | opt_abort ? "A" : "a", |
3251 | opt_junk ? "J" : "j", | | 3289 | opt_junk ? "J" : "j", |
3252 | opt_hint ? "H" : "h"); | | 3290 | opt_hint ? "H" : "h"); |
3253 | _malloc_message(opt_utrace ? "PU" : "Pu", | | 3291 | _malloc_message(opt_utrace ? "PU" : "Pu", |
3254 | opt_sysv ? "V" : "v", | | 3292 | opt_sysv ? "V" : "v", |
3255 | opt_xmalloc ? "X" : "x", | | 3293 | opt_xmalloc ? "X" : "x", |
3256 | opt_zero ? "Z\n" : "z\n"); | | 3294 | opt_zero ? "Z\n" : "z\n"); |
3257 | | | 3295 | |
3258 | _malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", ""); | | 3296 | _malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", ""); |
3259 | _malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", ""); | | 3297 | _malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", ""); |
3260 | _malloc_message("Pointer size: ", size_t2s(sizeof(void *), s), | | 3298 | _malloc_message("Pointer size: ", size_t2s(sizeof(void *), s), |
3261 | "\n", ""); | | 3299 | "\n", ""); |
3262 | _malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", ""); | | 3300 | _malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", ""); |
3263 | _malloc_message("Max small size: ", size_t2s(small_max, s), "\n", | | 3301 | _malloc_message("Max small size: ", size_t2s(small_max, s), "\n", |
3264 | ""); | | 3302 | ""); |
3265 | | | 3303 | |
3266 | _malloc_message("Chunk size: ", size_t2s(chunksize, s), "", ""); | | 3304 | _malloc_message("Chunk size: ", size_t2s(chunksize, s), "", ""); |
3267 | _malloc_message(" (2^", size_t2s((size_t)opt_chunk_2pow, s), | | 3305 | _malloc_message(" (2^", size_t2s((size_t)opt_chunk_2pow, s), |
3268 | ")\n", ""); | | 3306 | ")\n", ""); |
3269 | | | 3307 | |
3270 | #ifdef MALLOC_STATS | | 3308 | #ifdef MALLOC_STATS |
3271 | { | | 3309 | { |
3272 | size_t allocated, mapped; | | 3310 | size_t allocated, mapped; |
3273 | unsigned i; | | 3311 | unsigned i; |
3274 | arena_t *arena; | | 3312 | arena_t *arena; |
3275 | | | 3313 | |
3276 | /* Calculate and print allocated/mapped stats. */ | | 3314 | /* Calculate and print allocated/mapped stats. */ |
3277 | | | 3315 | |
3278 | /* arenas. */ | | 3316 | /* arenas. */ |
3279 | for (i = 0, allocated = 0; i < narenas; i++) { | | 3317 | for (i = 0, allocated = 0; i < narenas; i++) { |
3280 | if (arenas[i] != NULL) { | | 3318 | if (arenas[i] != NULL) { |
3281 | malloc_mutex_lock(&arenas[i]->mtx); | | 3319 | malloc_mutex_lock(&arenas[i]->mtx); |
3282 | allocated += | | 3320 | allocated += |
3283 | arenas[i]->stats.allocated_small; | | 3321 | arenas[i]->stats.allocated_small; |
3284 | allocated += | | 3322 | allocated += |
3285 | arenas[i]->stats.allocated_large; | | 3323 | arenas[i]->stats.allocated_large; |
3286 | malloc_mutex_unlock(&arenas[i]->mtx); | | 3324 | malloc_mutex_unlock(&arenas[i]->mtx); |
3287 | } | | 3325 | } |
3288 | } | | 3326 | } |
3289 | | | 3327 | |
3290 | /* huge/base. */ | | 3328 | /* huge/base. */ |
3291 | malloc_mutex_lock(&chunks_mtx); | | 3329 | malloc_mutex_lock(&chunks_mtx); |
3292 | allocated += huge_allocated; | | 3330 | allocated += huge_allocated; |
3293 | mapped = stats_chunks.curchunks * chunksize; | | 3331 | mapped = stats_chunks.curchunks * chunksize; |
3294 | malloc_mutex_unlock(&chunks_mtx); | | 3332 | malloc_mutex_unlock(&chunks_mtx); |
3295 | | | 3333 | |
3296 | malloc_mutex_lock(&base_mtx); | | 3334 | malloc_mutex_lock(&base_mtx); |
3297 | mapped += base_mapped; | | 3335 | mapped += base_mapped; |
3298 | malloc_mutex_unlock(&base_mtx); | | 3336 | malloc_mutex_unlock(&base_mtx); |
3299 | | | 3337 | |
3300 | malloc_printf("Allocated: %zu, mapped: %zu\n", | | 3338 | malloc_printf("Allocated: %zu, mapped: %zu\n", |
3301 | allocated, mapped); | | 3339 | allocated, mapped); |
3302 | | | 3340 | |
3303 | /* Print chunk stats. */ | | 3341 | /* Print chunk stats. */ |
3304 | { | | 3342 | { |
3305 | chunk_stats_t chunks_stats; | | 3343 | chunk_stats_t chunks_stats; |
3306 | | | 3344 | |
3307 | malloc_mutex_lock(&chunks_mtx); | | 3345 | malloc_mutex_lock(&chunks_mtx); |
3308 | chunks_stats = stats_chunks; | | 3346 | chunks_stats = stats_chunks; |
3309 | malloc_mutex_unlock(&chunks_mtx); | | 3347 | malloc_mutex_unlock(&chunks_mtx); |
3310 | | | 3348 | |
3311 | malloc_printf("chunks: nchunks " | | 3349 | malloc_printf("chunks: nchunks " |
3312 | "highchunks curchunks\n"); | | 3350 | "highchunks curchunks\n"); |
3313 | malloc_printf(" %13llu%13lu%13lu\n", | | 3351 | malloc_printf(" %13llu%13lu%13lu\n", |
3314 | chunks_stats.nchunks, | | 3352 | chunks_stats.nchunks, |
3315 | chunks_stats.highchunks, | | 3353 | chunks_stats.highchunks, |
3316 | chunks_stats.curchunks); | | 3354 | chunks_stats.curchunks); |
3317 | } | | 3355 | } |
3318 | | | 3356 | |
3319 | /* Print huge allocation stats. */ | | 3357 | /* Print huge allocation stats. */
3320 | malloc_printf( | | 3358 | malloc_printf( |
3321 | "huge: nmalloc ndalloc " | | 3359 | "huge: nmalloc ndalloc " |
3322 | "nralloc allocated\n"); | | 3360 | "nralloc allocated\n"); |
3323 | malloc_printf(" %12llu %12llu %12llu %12zu\n", | | 3361 | malloc_printf(" %12llu %12llu %12llu %12zu\n", |
3324 | huge_nmalloc, huge_ndalloc, huge_nralloc, | | 3362 | huge_nmalloc, huge_ndalloc, huge_nralloc, |
3325 | huge_allocated); | | 3363 | huge_allocated); |
3326 | | | 3364 | |
3327 | /* Print stats for each arena. */ | | 3365 | /* Print stats for each arena. */ |
3328 | for (i = 0; i < narenas; i++) { | | 3366 | for (i = 0; i < narenas; i++) { |
3329 | arena = arenas[i]; | | 3367 | arena = arenas[i]; |
3330 | if (arena != NULL) { | | 3368 | if (arena != NULL) { |
3331 | malloc_printf( | | 3369 | malloc_printf( |
3332 | "\narenas[%u] @ %p\n", i, arena); | | 3370 | "\narenas[%u] @ %p\n", i, arena); |
3333 | malloc_mutex_lock(&arena->mtx); | | 3371 | malloc_mutex_lock(&arena->mtx); |
3334 | stats_print(arena); | | 3372 | stats_print(arena); |
3335 | malloc_mutex_unlock(&arena->mtx); | | 3373 | malloc_mutex_unlock(&arena->mtx); |
3336 | } | | 3374 | } |
3337 | } | | 3375 | } |
3338 | } | | 3376 | } |
3339 | #endif /* #ifdef MALLOC_STATS */ | | 3377 | #endif /* #ifdef MALLOC_STATS */ |
3340 | _malloc_message("--- End malloc statistics ---\n", "", "", ""); | | 3378 | _malloc_message("--- End malloc statistics ---\n", "", "", ""); |
3341 | } | | 3379 | } |
3342 | } | | 3380 | } |
3343 | | | 3381 | |
3344 | /* | | 3382 | /* |
3345 | * FreeBSD's pthreads implementation calls malloc(3), so the malloc | | 3383 | * FreeBSD's pthreads implementation calls malloc(3), so the malloc |
3346 | * implementation has to take pains to avoid infinite recursion during | | 3384 | * implementation has to take pains to avoid infinite recursion during |
3347 | * initialization. | | 3385 | * initialization. |
3348 | */ | | 3386 | */ |
3349 | static inline bool | | 3387 | static inline bool |
3350 | malloc_init(void) | | 3388 | malloc_init(void) |
3351 | { | | 3389 | { |
3352 | | | 3390 | |
3353 | if (malloc_initialized == false) | | 3391 | if (malloc_initialized == false) |
3354 | return (malloc_init_hard()); | | 3392 | return (malloc_init_hard()); |
3355 | | | 3393 | |
3356 | return (false); | | 3394 | return (false); |
3357 | } | | 3395 | } |
3358 | | | 3396 | |
3359 | static bool | | 3397 | static bool |
3360 | malloc_init_hard(void) | | 3398 | malloc_init_hard(void) |
3361 | { | | 3399 | { |
3362 | unsigned i, j; | | 3400 | unsigned i, j; |
3363 | ssize_t linklen; | | 3401 | ssize_t linklen; |
3364 | char buf[PATH_MAX + 1]; | | 3402 | char buf[PATH_MAX + 1]; |
3365 | const char *opts = ""; | | 3403 | const char *opts = ""; |
3366 | int serrno; | | 3404 | int serrno; |
3367 | | | 3405 | |
3368 | malloc_mutex_lock(&init_lock); | | 3406 | malloc_mutex_lock(&init_lock); |
3369 | if (malloc_initialized) { | | 3407 | if (malloc_initialized) { |
3370 | /* | | 3408 | /* |
3371 | * Another thread initialized the allocator before this one | | 3409 | * Another thread initialized the allocator before this one |
3372 | * acquired init_lock. | | 3410 | * acquired init_lock. |
3373 | */ | | 3411 | */ |
3374 | malloc_mutex_unlock(&init_lock); | | 3412 | malloc_mutex_unlock(&init_lock); |
3375 | return (false); | | 3413 | return (false); |
3376 | } | | 3414 | } |
3377 | | | 3415 | |
3378 | serrno = errno; | | 3416 | serrno = errno; |
3379 | /* Get number of CPUs. */ | | 3417 | /* Get number of CPUs. */ |
3380 | { | | 3418 | { |
3381 | int mib[2]; | | 3419 | int mib[2]; |
3382 | size_t len; | | 3420 | size_t len; |
3383 | | | 3421 | |
3384 | mib[0] = CTL_HW; | | 3422 | mib[0] = CTL_HW; |
3385 | mib[1] = HW_NCPU; | | 3423 | mib[1] = HW_NCPU; |
3386 | len = sizeof(ncpus); | | 3424 | len = sizeof(ncpus); |
3387 | if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) { | | 3425 | if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) { |
3388 | /* Error. */ | | 3426 | /* Error. */ |
3389 | ncpus = 1; | | 3427 | ncpus = 1; |
3390 | } | | 3428 | } |
3391 | } | | 3429 | } |
3392 | | | 3430 | |
3393 | /* Get page size. */ | | 3431 | /* Get page size. */ |
3394 | { | | 3432 | { |
3395 | long result; | | 3433 | long result; |
3396 | | | 3434 | |
3397 | result = sysconf(_SC_PAGESIZE); | | 3435 | result = sysconf(_SC_PAGESIZE); |
3398 | assert(result != -1); | | 3436 | assert(result != -1); |
3399 | pagesize = (unsigned) result; | | 3437 | pagesize = (unsigned) result; |
3400 | | | 3438 | |
3401 | /* | | 3439 | /* |
3402 | * We assume that pagesize is a power of 2 when calculating | | 3440 | * We assume that pagesize is a power of 2 when calculating |
3403 | * pagesize_mask and pagesize_2pow. | | 3441 | * pagesize_mask and pagesize_2pow. |
3404 | */ | | 3442 | */ |
3405 | assert(((result - 1) & result) == 0); | | 3443 | assert(((result - 1) & result) == 0); |
3406 | pagesize_mask = result - 1; | | 3444 | pagesize_mask = result - 1; |
3407 | pagesize_2pow = ffs((int)result) - 1; | | 3445 | pagesize_2pow = ffs((int)result) - 1; |
3408 | } | | 3446 | } |
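/*
 * Worked example of the block above (assuming a 4 KiB page, the common
 * case): result = 4096 = 2^12, so the power-of-2 assertion holds
 * ((4095 & 4096) == 0), pagesize_mask = 0xfff, and pagesize_2pow =
 * ffs(4096) - 1 = 13 - 1 = 12.  Masking with pagesize_mask extracts the
 * offset within a page; shifting by pagesize_2pow converts between
 * bytes and pages.
 */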
3409 | | | 3447 | |
3410 | for (i = 0; i < 3; i++) { | | 3448 | for (i = 0; i < 3; i++) { |
3411 | /* Get runtime configuration. */ | | 3449 | /* Get runtime configuration. */ |
3412 | switch (i) { | | 3450 | switch (i) { |
3413 | case 0: | | 3451 | case 0: |
3414 | if ((linklen = readlink("/etc/malloc.conf", buf, | | 3452 | if ((linklen = readlink("/etc/malloc.conf", buf, |
3415 | sizeof(buf) - 1)) != -1) { | | 3453 | sizeof(buf) - 1)) != -1) { |
3416 | /* | | 3454 | /* |
3417 | * Use the name to which the "/etc/malloc.conf" | | 3455 | * Use the name to which the "/etc/malloc.conf" |
3418 | * symbolic link points as the options string. | | 3456 | * symbolic link points as the options string. |
3419 | */ | | 3457 | */ |
3420 | buf[linklen] = '\0'; | | 3458 | buf[linklen] = '\0'; |
3421 | opts = buf; | | 3459 | opts = buf; |
3422 | } else { | | 3460 | } else { |
3423 | /* No configuration specified. */ | | 3461 | /* No configuration specified. */ |
3424 | buf[0] = '\0'; | | 3462 | buf[0] = '\0'; |
3425 | opts = buf; | | 3463 | opts = buf; |
3426 | } | | 3464 | } |
3427 | break; | | 3465 | break; |
3428 | case 1: | | 3466 | case 1: |
3429 | if ((opts = getenv("MALLOC_OPTIONS")) != NULL && | | 3467 | if ((opts = getenv("MALLOC_OPTIONS")) != NULL && |
3430 | issetugid() == 0) { | | 3468 | issetugid() == 0) { |
3431 | /* | | 3469 | /* |
3432 | * Do nothing; opts is already initialized to | | 3470 | * Do nothing; opts is already initialized to |
3433 | * the value of the MALLOC_OPTIONS environment | | 3471 | * the value of the MALLOC_OPTIONS environment |
3434 | * variable. | | 3472 | * variable. |
3435 | */ | | 3473 | */ |
3436 | } else { | | 3474 | } else { |
3437 | /* No configuration specified. */ | | 3475 | /* No configuration specified. */ |
3438 | buf[0] = '\0'; | | 3476 | buf[0] = '\0'; |
3439 | opts = buf; | | 3477 | opts = buf; |
3440 | } | | 3478 | } |
3441 | break; | | 3479 | break; |
3442 | case 2: | | 3480 | case 2: |
3443 | if (_malloc_options != NULL) { | | 3481 | if (_malloc_options != NULL) { |
3444 | /* | | 3482 | /* |
3445 | * Use options that were compiled into the program. | | 3483 | * Use options that were compiled into the program. |
3446 | */ | | 3484 | */ |
3447 | opts = _malloc_options; | | 3485 | opts = _malloc_options; |
3448 | } else { | | 3486 | } else { |
3449 | /* No configuration specified. */ | | 3487 | /* No configuration specified. */ |
3450 | buf[0] = '\0'; | | 3488 | buf[0] = '\0'; |
3451 | opts = buf; | | 3489 | opts = buf; |
3452 | } | | 3490 | } |
3453 | break; | | 3491 | break; |
3454 | default: | | 3492 | default: |
3455 | /* NOTREACHED */ | | 3493 | /* NOTREACHED */ |
3456 | /* LINTED */ | | 3494 | /* LINTED */ |
3457 | assert(false); | | 3495 | assert(false); |
3458 | } | | 3496 | } |
3459 | | | 3497 | |
3460 | for (j = 0; opts[j] != '\0'; j++) { | | 3498 | for (j = 0; opts[j] != '\0'; j++) { |
3461 | switch (opts[j]) { | | 3499 | switch (opts[j]) { |
3462 | case 'a': | | 3500 | case 'a': |
3463 | opt_abort = false; | | 3501 | opt_abort = false; |
3464 | break; | | 3502 | break; |
3465 | case 'A': | | 3503 | case 'A': |
3466 | opt_abort = true; | | 3504 | opt_abort = true; |
3467 | break; | | 3505 | break; |
3468 | case 'h': | | 3506 | case 'h': |
3469 | opt_hint = false; | | 3507 | opt_hint = false; |
3470 | break; | | 3508 | break; |
3471 | case 'H': | | 3509 | case 'H': |
3472 | opt_hint = true; | | 3510 | opt_hint = true; |
3473 | break; | | 3511 | break; |
3474 | case 'j': | | 3512 | case 'j': |
3475 | opt_junk = false; | | 3513 | opt_junk = false; |
3476 | break; | | 3514 | break; |
3477 | case 'J': | | 3515 | case 'J': |
3478 | opt_junk = true; | | 3516 | opt_junk = true; |
3479 | break; | | 3517 | break; |
3480 | case 'k': | | 3518 | case 'k': |
3481 | /* | | 3519 | /* |
3482 | * Chunks always require at least one header | | 3520 | * Chunks always require at least one header |
3483 | * page, so chunks can never be smaller than | | 3521 | * page, so chunks can never be smaller than |
3484 | * two pages. | | 3522 | * two pages. |
3485 | */ | | 3523 | */ |
3486 | if (opt_chunk_2pow > pagesize_2pow + 1) | | 3524 | if (opt_chunk_2pow > pagesize_2pow + 1) |
3487 | opt_chunk_2pow--; | | 3525 | opt_chunk_2pow--; |
3488 | break; | | 3526 | break; |
3489 | case 'K': | | 3527 | case 'K': |
3490 | if (opt_chunk_2pow + 1 < | | 3528 | if (opt_chunk_2pow + 1 < |
3491 | (int)(sizeof(size_t) << 3)) | | 3529 | (int)(sizeof(size_t) << 3)) |
3492 | opt_chunk_2pow++; | | 3530 | opt_chunk_2pow++; |
3493 | break; | | 3531 | break; |
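/*
 * Illustration of the 'k'/'K' bounds above (page size assumed, for
 * concreteness): each 'k' halves the chunk size and each 'K' doubles
 * it.  With 4 KiB pages (pagesize_2pow == 12), 'k' stops once
 * opt_chunk_2pow reaches pagesize_2pow + 1 == 13, i.e. an 8 KiB chunk:
 * one header page plus one usable page, the two-page minimum described
 * above.  'K' keeps opt_chunk_2pow below the bit width of size_t so
 * that 1 << opt_chunk_2pow remains representable.
 */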
3494 | case 'n': | | 3532 | case 'n': |
3495 | opt_narenas_lshift--; | | 3533 | opt_narenas_lshift--; |
3496 | break; | | 3534 | break; |
3497 | case 'N': | | 3535 | case 'N': |
3498 | opt_narenas_lshift++; | | 3536 | opt_narenas_lshift++; |
3499 | break; | | 3537 | break; |
3500 | case 'p': | | 3538 | case 'p': |
3501 | opt_print_stats = false; | | 3539 | opt_print_stats = false; |
3502 | break; | | 3540 | break; |
3503 | case 'P': | | 3541 | case 'P': |
3504 | opt_print_stats = true; | | 3542 | opt_print_stats = true; |
3505 | break; | | 3543 | break; |
3506 | case 'q': | | 3544 | case 'q': |
3507 | if (opt_quantum_2pow > QUANTUM_2POW_MIN) | | 3545 | if (opt_quantum_2pow > QUANTUM_2POW_MIN) |
3508 | opt_quantum_2pow--; | | 3546 | opt_quantum_2pow--; |
3509 | break; | | 3547 | break; |
3510 | case 'Q': | | 3548 | case 'Q': |
3511 | if (opt_quantum_2pow < pagesize_2pow - 1) | | 3549 | if (opt_quantum_2pow < pagesize_2pow - 1) |
3512 | opt_quantum_2pow++; | | 3550 | opt_quantum_2pow++; |
3513 | break; | | 3551 | break; |
3514 | case 's': | | 3552 | case 's': |
3515 | if (opt_small_max_2pow > QUANTUM_2POW_MIN) | | 3553 | if (opt_small_max_2pow > QUANTUM_2POW_MIN) |
3516 | opt_small_max_2pow--; | | 3554 | opt_small_max_2pow--; |
3517 | break; | | 3555 | break; |
3518 | case 'S': | | 3556 | case 'S': |
3519 | if (opt_small_max_2pow < pagesize_2pow - 1) | | 3557 | if (opt_small_max_2pow < pagesize_2pow - 1) |
3520 | opt_small_max_2pow++; | | 3558 | opt_small_max_2pow++; |
3521 | break; | | 3559 | break; |
3522 | case 'u': | | 3560 | case 'u': |
3523 | opt_utrace = false; | | 3561 | opt_utrace = false; |
3524 | break; | | 3562 | break; |
3525 | case 'U': | | 3563 | case 'U': |
3526 | opt_utrace = true; | | 3564 | opt_utrace = true; |
3527 | break; | | 3565 | break; |
3528 | case 'v': | | 3566 | case 'v': |
3529 | opt_sysv = false; | | 3567 | opt_sysv = false; |
3530 | break; | | 3568 | break; |
3531 | case 'V': | | 3569 | case 'V': |
3532 | opt_sysv = true; | | 3570 | opt_sysv = true; |
3533 | break; | | 3571 | break; |
3534 | case 'x': | | 3572 | case 'x': |
3535 | opt_xmalloc = false; | | 3573 | opt_xmalloc = false; |
3536 | break; | | 3574 | break; |
3537 | case 'X': | | 3575 | case 'X': |
3538 | opt_xmalloc = true; | | 3576 | opt_xmalloc = true; |
3539 | break; | | 3577 | break; |
3540 | case 'z': | | 3578 | case 'z': |
3541 | opt_zero = false; | | 3579 | opt_zero = false; |
3542 | break; | | 3580 | break; |
3543 | case 'Z': | | 3581 | case 'Z': |
3544 | opt_zero = true; | | 3582 | opt_zero = true; |
3545 | break; | | 3583 | break; |
3546 | default: { | | 3584 | default: { |
3547 | char cbuf[2]; | | 3585 | char cbuf[2]; |
3548 | | | 3586 | |
3549 | cbuf[0] = opts[j]; | | 3587 | cbuf[0] = opts[j]; |
3550 | cbuf[1] = '\0'; | | 3588 | cbuf[1] = '\0'; |
3551 | _malloc_message(getprogname(), | | 3589 | _malloc_message(getprogname(), |
3552 | ": (malloc) Unsupported character in " | | 3590 | ": (malloc) Unsupported character in " |
3553 | "malloc options: '", cbuf, "'\n"); | | 3591 | "malloc options: '", cbuf, "'\n"); |
3554 | } | | 3592 | } |
3555 | } | | 3593 | } |
3556 | } | | 3594 | } |
3557 | } | | 3595 | } |
3558 | errno = serrno; | | 3596 | errno = serrno; |
3559 | | | 3597 | |
3560 | /* Take care to call atexit() only once. */ | | 3598 | /* Take care to call atexit() only once. */ |
3561 | if (opt_print_stats) { | | 3599 | if (opt_print_stats) { |
3562 | /* Print statistics at exit. */ | | 3600 | /* Print statistics at exit. */ |
3563 | atexit(malloc_print_stats); | | 3601 | atexit(malloc_print_stats); |
3564 | } | | 3602 | } |
3565 | | | 3603 | |
3566 | /* Set variables according to the value of opt_small_max_2pow. */ | | 3604 | /* Set variables according to the value of opt_small_max_2pow. */ |
3567 | if (opt_small_max_2pow < opt_quantum_2pow) | | 3605 | if (opt_small_max_2pow < opt_quantum_2pow) |
3568 | opt_small_max_2pow = opt_quantum_2pow; | | 3606 | opt_small_max_2pow = opt_quantum_2pow; |
3569 | small_max = (1 << opt_small_max_2pow); | | 3607 | small_max = (1 << opt_small_max_2pow); |
3570 | | | 3608 | |
3571 | /* Set bin-related variables. */ | | 3609 | /* Set bin-related variables. */ |
3572 | bin_maxclass = (pagesize >> 1); | | 3610 | bin_maxclass = (pagesize >> 1); |
3573 | assert(opt_quantum_2pow >= TINY_MIN_2POW); | | 3611 | assert(opt_quantum_2pow >= TINY_MIN_2POW); |
3574 | ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW); | | 3612 | ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW); |
3575 | assert(ntbins <= opt_quantum_2pow); | | 3613 | assert(ntbins <= opt_quantum_2pow); |
3576 | nqbins = (unsigned)(small_max >> opt_quantum_2pow); | | 3614 | nqbins = (unsigned)(small_max >> opt_quantum_2pow); |
3577 | nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1); | | 3615 | nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1); |
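/*
 * Worked example with plausible defaults (assumed here, not taken from
 * this excerpt: quantum = 2^4 = 16 bytes, small_max = 2^9 = 512 bytes,
 * 4 KiB pages): bin_maxclass = 4096 / 2 = 2048; nqbins = 512 >> 4 = 32
 * quantum-spaced bins (16, 32, ..., 512); nsbins = 12 - 9 - 1 = 2
 * power-of-two sub-page bins (1024, 2048); ntbins covers the tiny size
 * classes below the smallest quantum-spaced bin.
 */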
3578 | | | 3616 | |
3579 | /* Set variables according to the value of opt_quantum_2pow. */ | | 3617 | /* Set variables according to the value of opt_quantum_2pow. */ |
3580 | quantum = (1 << opt_quantum_2pow); | | 3618 | quantum = (1 << opt_quantum_2pow); |
3581 | quantum_mask = quantum - 1; | | 3619 | quantum_mask = quantum - 1; |
3582 | if (ntbins > 0) | | 3620 | if (ntbins > 0) |
3583 | small_min = (quantum >> 1) + 1; | | 3621 | small_min = (quantum >> 1) + 1; |
3584 | else | | 3622 | else |
3585 | small_min = 1; | | 3623 | small_min = 1; |
3586 | assert(small_min <= quantum); | | 3624 | assert(small_min <= quantum); |
3587 | | | 3625 | |
3588 | /* Set variables according to the value of opt_chunk_2pow. */ | | 3626 | /* Set variables according to the value of opt_chunk_2pow. */ |
3589 | chunksize = (1LU << opt_chunk_2pow); | | 3627 | chunksize = (1LU << opt_chunk_2pow); |
3590 | chunksize_mask = chunksize - 1; | | 3628 | chunksize_mask = chunksize - 1; |
3591 | chunksize_2pow = (unsigned)opt_chunk_2pow; | | 3629 | chunksize_2pow = (unsigned)opt_chunk_2pow; |
3592 | chunk_npages = (unsigned)(chunksize >> pagesize_2pow); | | 3630 | chunk_npages = (unsigned)(chunksize >> pagesize_2pow); |
3593 | { | | 3631 | { |
3594 | unsigned header_size; | | 3632 | unsigned header_size; |
3595 | | | 3633 | |
3596 | header_size = (unsigned)(sizeof(arena_chunk_t) + | | 3634 | header_size = (unsigned)(sizeof(arena_chunk_t) + |
3597 | (sizeof(arena_chunk_map_t) * (chunk_npages - 1))); | | 3635 | (sizeof(arena_chunk_map_t) * (chunk_npages - 1))); |
3598 | arena_chunk_header_npages = (header_size >> pagesize_2pow); | | 3636 | arena_chunk_header_npages = (header_size >> pagesize_2pow); |
3599 | if ((header_size & pagesize_mask) != 0) | | 3637 | if ((header_size & pagesize_mask) != 0) |
3600 | arena_chunk_header_npages++; | | 3638 | arena_chunk_header_npages++; |
3601 | } | | 3639 | } |
3602 | arena_maxclass = chunksize - (arena_chunk_header_npages << | | 3640 | arena_maxclass = chunksize - (arena_chunk_header_npages << |
3603 | pagesize_2pow); | | 3641 | pagesize_2pow); |
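/*
 * Worked example of the header computation above (sizes assumed, for
 * illustration only): with a 1 MiB chunk and 4 KiB pages, chunk_npages
 * = 256.  If sizeof(arena_chunk_t) plus 255 map entries comes to, say,
 * 3 KiB, header_size does not fill a page, so arena_chunk_header_npages
 * rounds up to 1 and arena_maxclass = 1 MiB - 4 KiB: the largest
 * request an arena serves before the allocation is treated as huge and
 * backed by its own chunk.
 */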
3604 | | | 3642 | |
3605 | UTRACE(0, 0, 0); | | 3643 | UTRACE(0, 0, 0); |
3606 | | | 3644 | |
3607 | #ifdef MALLOC_STATS | | 3645 | #ifdef MALLOC_STATS |
3608 | memset(&stats_chunks, 0, sizeof(chunk_stats_t)); | | 3646 | memset(&stats_chunks, 0, sizeof(chunk_stats_t)); |
3609 | #endif | | 3647 | #endif |
3610 | | | 3648 | |
3611 | /* Various sanity checks on the configuration. */ | | 3649 | /* Various sanity checks on the configuration. */ |
3612 | assert(quantum >= sizeof(void *)); | | 3650 | assert(quantum >= sizeof(void *)); |
3613 | assert(quantum <= pagesize); | | 3651 | assert(quantum <= pagesize); |
3614 | assert(chunksize >= pagesize); | | 3652 | assert(chunksize >= pagesize); |
3615 | assert(quantum * 4 <= chunksize); | | 3653 | assert(quantum * 4 <= chunksize); |
3616 | | | 3654 | |
3617 | /* Initialize chunks data. */ | | 3655 | /* Initialize chunks data. */ |
3618 | malloc_mutex_init(&chunks_mtx); | | 3656 | malloc_mutex_init(&chunks_mtx); |
3619 | RB_INIT(&huge); | | 3657 | RB_INIT(&huge); |
3620 | #ifdef USE_BRK | | 3658 | #ifdef USE_BRK |
3621 | malloc_mutex_init(&brk_mtx); | | 3659 | malloc_mutex_init(&brk_mtx); |
3622 | brk_base = sbrk(0); | | 3660 | brk_base = sbrk(0); |
3623 | brk_prev = brk_base; | | 3661 | brk_prev = brk_base; |
3624 | brk_max = brk_base; | | 3662 | brk_max = brk_base; |
3625 | #endif | | 3663 | #endif |
3626 | #ifdef MALLOC_STATS | | 3664 | #ifdef MALLOC_STATS |
3627 | huge_nmalloc = 0; | | 3665 | huge_nmalloc = 0; |
3628 | huge_ndalloc = 0; | | 3666 | huge_ndalloc = 0; |
3629 | huge_nralloc = 0; | | 3667 | huge_nralloc = 0; |
3630 | huge_allocated = 0; | | 3668 | huge_allocated = 0; |
3631 | #endif | | 3669 | #endif |
3632 | RB_INIT(&old_chunks); | | 3670 | RB_INIT(&old_chunks); |
3633 | | | 3671 | |
3634 | /* Initialize base allocation data structures. */ | | 3672 | /* Initialize base allocation data structures. */ |
3635 | #ifdef MALLOC_STATS | | 3673 | #ifdef MALLOC_STATS |
3636 | base_mapped = 0; | | 3674 | base_mapped = 0; |
3637 | #endif | | 3675 | #endif |
3638 | #ifdef USE_BRK | | 3676 | #ifdef USE_BRK |
3639 | /* | | 3677 | /* |
3640 | * Allocate a base chunk here, since it doesn't actually have to be | | 3678 | * Allocate a base chunk here, since it doesn't actually have to be |
3641 | * chunk-aligned. Doing this before allocating any other chunks allows | | 3679 | * chunk-aligned. Doing this before allocating any other chunks allows |
3642 | * the use of space that would otherwise be wasted. | | 3680 | * the use of space that would otherwise be wasted. |
3643 | */ | | 3681 | */ |
3644 | base_pages_alloc(0); | | 3682 | base_pages_alloc(0); |
3645 | #endif | | 3683 | #endif |
3646 | base_chunk_nodes = NULL; | | 3684 | base_chunk_nodes = NULL; |
3647 | malloc_mutex_init(&base_mtx); | | 3685 | malloc_mutex_init(&base_mtx); |
3648 | | | 3686 | |
3649 | if (ncpus > 1) { | | 3687 | if (ncpus > 1) { |
3650 | /* | | 3688 | /* |
3651 | * For SMP systems, create four times as many arenas as there | | 3689 | * For SMP systems, create four times as many arenas as there |
3652 | * are CPUs by default. | | 3690 | * are CPUs by default. |
3653 | */ | | 3691 | */ |
3654 | opt_narenas_lshift += 2; | | 3692 | opt_narenas_lshift += 2; |
3655 | } | | 3693 | } |
3656 | | | 3694 | |
3657 | #ifdef NO_TLS | | | |
3658 | /* Initialize arena key. */ | | | |
3659 | (void)thr_keycreate(&arenas_map_key, NULL); | | | |
3660 | #endif | | | |
3661 | | | | |
3662 | /* Determine how many arenas to use. */ | | 3695 | /* Determine how many arenas to use. */ |
3663 | narenas = ncpus; | | 3696 | narenas = ncpus; |
3664 | if (opt_narenas_lshift > 0) { | | 3697 | if (opt_narenas_lshift > 0) { |
3665 | if ((narenas << opt_narenas_lshift) > narenas) | | 3698 | if ((narenas << opt_narenas_lshift) > narenas) |
3666 | narenas <<= opt_narenas_lshift; | | 3699 | narenas <<= opt_narenas_lshift; |
3667 | /* | | 3700 | /* |
3668 | * Make sure not to exceed the limits of what base_alloc() | | 3701 | * Make sure not to exceed the limits of what base_alloc() |
3669 | * can handle. | | 3702 | * can handle. |
3670 | */ | | 3703 | */ |
3671 | if (narenas * sizeof(arena_t *) > chunksize) | | 3704 | if (narenas * sizeof(arena_t *) > chunksize) |
3672 | narenas = (unsigned)(chunksize / sizeof(arena_t *)); | | 3705 | narenas = (unsigned)(chunksize / sizeof(arena_t *)); |
3673 | } else if (opt_narenas_lshift < 0) { | | 3706 | } else if (opt_narenas_lshift < 0) { |
3674 | if ((narenas << opt_narenas_lshift) < narenas) | | 3707 | if ((narenas << opt_narenas_lshift) < narenas) |
3675 | narenas <<= opt_narenas_lshift; | | 3708 | narenas <<= opt_narenas_lshift; |
3676 | /* Make sure there is at least one arena. */ | | 3709 | /* Make sure there is at least one arena. */ |
3677 | if (narenas == 0) | | 3710 | if (narenas == 0) |
3678 | narenas = 1; | | 3711 | narenas = 1; |
3679 | } | | 3712 | } |
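/*
 * Example of the scaling above: on a 4-CPU system with no 'n'/'N'
 * options, the SMP adjustment gives opt_narenas_lshift == 2 (assuming
 * it starts at 0), so narenas = 4 << 2 = 16.  The
 * "(narenas << opt_narenas_lshift) > narenas" test skips the shift if
 * it would overflow, and the negative branch clamps narenas to at
 * least 1.
 */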
3680 | | | 3713 | |
3681 | next_arena = 0; | | 3714 | next_arena = 0; |
3682 | | | 3715 | |
3683 | /* Allocate and initialize arenas. */ | | 3716 | /* Allocate and initialize arenas. */ |
3684 | arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); | | 3717 | arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); |
3685 | if (arenas == NULL) { | | 3718 | if (arenas == NULL) { |
3686 | malloc_mutex_unlock(&init_lock); | | 3719 | malloc_mutex_unlock(&init_lock); |
3687 | return (true); | | 3720 | return (true); |
3688 | } | | 3721 | } |
3689 | /* | | 3722 | /* |
3690 | * Zero the array. In practice, this should always be pre-zeroed, | | 3723 | * Zero the array. In practice, this should always be pre-zeroed, |
3691 | * since it was just mmap()ed, but let's be sure. | | 3724 | * since it was just mmap()ed, but let's be sure. |
3692 | */ | | 3725 | */ |
3693 | memset(arenas, 0, sizeof(arena_t *) * narenas); | | 3726 | memset(arenas, 0, sizeof(arena_t *) * narenas); |
3694 | | | 3727 | |
3695 | /* | | 3728 | /* |
3696 | * Initialize one arena here. The rest are lazily created in | | 3729 | * Initialize one arena here. The rest are lazily created in |
3697 | * arena_choose_hard(). | | 3730 | * arena_choose_hard(). |
3698 | */ | | 3731 | */ |
3699 | arenas_extend(0); | | 3732 | arenas_extend(0); |
3700 | if (arenas[0] == NULL) { | | 3733 | if (arenas[0] == NULL) { |
3701 | malloc_mutex_unlock(&init_lock); | | 3734 | malloc_mutex_unlock(&init_lock); |
3702 | return (true); | | 3735 | return (true); |
3703 | } | | 3736 | } |
3704 | | | 3737 | |
3705 | malloc_mutex_init(&arenas_mtx); | | 3738 | malloc_mutex_init(&arenas_mtx); |
3706 | | | 3739 | |
3707 | malloc_initialized = true; | | 3740 | malloc_initialized = true; |
3708 | malloc_mutex_unlock(&init_lock); | | 3741 | malloc_mutex_unlock(&init_lock); |
3709 | return (false); | | 3742 | return (false); |
3710 | } | | 3743 | } |
3711 | | | 3744 | |
3712 | /* | | 3745 | /* |
3713 | * End general internal functions. | | 3746 | * End general internal functions. |
3714 | */ | | 3747 | */ |
3715 | /******************************************************************************/ | | 3748 | /******************************************************************************/ |
3716 | /* | | 3749 | /* |
3717 | * Begin malloc(3)-compatible functions. | | 3750 | * Begin malloc(3)-compatible functions. |
3718 | */ | | 3751 | */ |
3719 | | | 3752 | |
3720 | void * | | 3753 | void * |
3721 | malloc(size_t size) | | 3754 | malloc(size_t size) |
3722 | { | | 3755 | { |
3723 | void *ret; | | 3756 | void *ret; |
3724 | | | 3757 | |
3725 | if (malloc_init()) { | | 3758 | if (malloc_init()) { |
3726 | ret = NULL; | | 3759 | ret = NULL; |
3727 | goto RETURN; | | 3760 | goto RETURN; |
3728 | } | | 3761 | } |
3729 | | | 3762 | |
3730 | if (size == 0) { | | 3763 | if (size == 0) { |
3731 | if (opt_sysv == false) | | 3764 | if (opt_sysv == false) |
3732 | size = 1; | | 3765 | size = 1; |
3733 | else { | | 3766 | else { |
3734 | ret = NULL; | | 3767 | ret = NULL; |
3735 | goto RETURN; | | 3768 | goto RETURN; |
3736 | } | | 3769 | } |
3737 | } | | 3770 | } |
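	/*
	 * The branch above implements the 'v'/'V' option: by default,
	 * malloc(0) is promoted to a 1-byte allocation and returns a
	 * unique pointer; with opt_sysv set, it returns NULL instead,
	 * matching System V semantics.
	 */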
3738 | | | 3771 | |
3739 | ret = imalloc(size); | | 3772 | ret = imalloc(size); |
3740 | | | 3773 | |
3741 | RETURN: | | 3774 | RETURN: |
3742 | if (ret == NULL) { | | 3775 | if (ret == NULL) { |
3743 | if (opt_xmalloc) { | | 3776 | if (opt_xmalloc) { |
3744 | _malloc_message(getprogname(), | | 3777 | _malloc_message(getprogname(), |
3745 | ": (malloc) Error in malloc(): out of memory\n", "", | | 3778 | ": (malloc) Error in malloc(): out of memory\n", "", |
3746 | ""); | | 3779 | ""); |
3747 | abort(); | | 3780 | abort(); |
3748 | } | | 3781 | } |
3749 | errno = ENOMEM; | | 3782 | errno = ENOMEM; |
3750 | } | | 3783 | } |
3751 | | | 3784 | |
3752 | UTRACE(0, size, ret); | | 3785 | UTRACE(0, size, ret); |
3753 | return (ret); | | 3786 | return (ret); |
3754 | } | | 3787 | } |
3755 | | | 3788 | |
3756 | int | | 3789 | int |
3757 | posix_memalign(void **memptr, size_t alignment, size_t size) | | 3790 | posix_memalign(void **memptr, size_t alignment, size_t size) |
3758 | { | | 3791 | { |
3759 | int ret; | | 3792 | int ret; |
3760 | void *result; | | 3793 | void *result; |
3761 | | | 3794 | |
3762 | if (malloc_init()) | | 3795 | if (malloc_init()) |
3763 | result = NULL; | | 3796 | result = NULL; |
3764 | else { | | 3797 | else { |
3765 | /* Make sure that alignment is a large enough power of 2. */ | | 3798 | /* Make sure that alignment is a large enough power of 2. */ |
3766 | if (((alignment - 1) & alignment) != 0 | | 3799 | if (((alignment - 1) & alignment) != 0 |
3767 | || alignment < sizeof(void *)) { | | 3800 | || alignment < sizeof(void *)) { |
3768 | if (opt_xmalloc) { | | 3801 | if (opt_xmalloc) { |
3769 | _malloc_message(getprogname(), | | 3802 | _malloc_message(getprogname(), |
3770 | ": (malloc) Error in posix_memalign(): " | | 3803 | ": (malloc) Error in posix_memalign(): " |
3771 | "invalid alignment\n", "", ""); | | 3804 | "invalid alignment\n", "", ""); |
3772 | abort(); | | 3805 | abort(); |
3773 | } | | 3806 | } |
3774 | result = NULL; | | 3807 | result = NULL; |
3775 | ret = EINVAL; | | 3808 | ret = EINVAL; |
3776 | goto RETURN; | | 3809 | goto RETURN; |
3777 | } | | 3810 | } |
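		/*
		 * The power-of-two check above, by example: a valid
		 * alignment such as 16 gives (16 - 1) & 16 == 0, while
		 * 24 gives 23 & 24 == 16 != 0 and is rejected, as is 4
		 * on LP64 (4 < sizeof(void *) == 8).
		 */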
3778 | | | 3811 | |
3779 | result = ipalloc(alignment, size); | | 3812 | result = ipalloc(alignment, size); |
3780 | } | | 3813 | } |
3781 | | | 3814 | |
3782 | if (result == NULL) { | | 3815 | if (result == NULL) { |
3783 | if (opt_xmalloc) { | | 3816 | if (opt_xmalloc) { |
3784 | _malloc_message(getprogname(), | | 3817 | _malloc_message(getprogname(), |
3785 | ": (malloc) Error in posix_memalign(): out of memory\n", | | 3818 | ": (malloc) Error in posix_memalign(): out of memory\n", |
3786 | "", ""); | | 3819 | "", ""); |
3787 | abort(); | | 3820 | abort(); |
3788 | } | | 3821 | } |
3789 | ret = ENOMEM; | | 3822 | ret = ENOMEM; |
3790 | goto RETURN; | | 3823 | goto RETURN; |
3791 | } | | 3824 | } |
3792 | | | 3825 | |
3793 | *memptr = result; | | 3826 | *memptr = result; |
3794 | ret = 0; | | 3827 | ret = 0; |
3795 | | | 3828 | |
3796 | RETURN: | | 3829 | RETURN: |
3797 | UTRACE(0, size, result); | | 3830 | UTRACE(0, size, result); |
3798 | return (ret); | | 3831 | return (ret); |
3799 | } | | 3832 | } |
3800 | | | 3833 | |
3801 | void * | | 3834 | void * |
3802 | calloc(size_t num, size_t size) | | 3835 | calloc(size_t num, size_t size) |
3803 | { | | 3836 | { |
3804 | void *ret; | | 3837 | void *ret; |
3805 | size_t num_size; | | 3838 | size_t num_size; |
3806 | | | 3839 | |
3807 | if (malloc_init()) { | | 3840 | if (malloc_init()) { |
3808 | num_size = 0; | | 3841 | num_size = 0; |
3809 | ret = NULL; | | 3842 | ret = NULL; |
3810 | goto RETURN; | | 3843 | goto RETURN; |
3811 | } | | 3844 | } |
3812 | | | 3845 | |
3813 | num_size = num * size; | | 3846 | num_size = num * size; |
3814 | if (num_size == 0) { | | 3847 | if (num_size == 0) { |
3815 | if ((opt_sysv == false) && ((num == 0) || (size == 0))) | | 3848 | if ((opt_sysv == false) && ((num == 0) || (size == 0))) |
3816 | num_size = 1; | | 3849 | num_size = 1; |
3817 | else { | | 3850 | else { |
3818 | ret = NULL; | | 3851 | ret = NULL; |
3819 | goto RETURN; | | 3852 | goto RETURN; |
3820 | } | | 3853 | } |
3821 | /* | | 3854 | /* |
3822 | * Try to avoid division here. We know that it isn't possible to | | 3855 | * Try to avoid division here. We know that it isn't possible to |
3823 | * overflow during multiplication if neither operand uses any of the | | 3856 | * overflow during multiplication if neither operand uses any of the |
3824 | * most significant half of the bits in a size_t. | | 3857 | * most significant half of the bits in a size_t. |
3825 | */ | | 3858 | */ |
3826 | } else if ((unsigned long long)((num | size) & | | 3859 | } else if ((unsigned long long)((num | size) & |
3827 | ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) && | | 3860 | ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) && |
3828 | (num_size / size != num)) { | | 3861 | (num_size / size != num)) { |
3829 | /* size_t overflow. */ | | 3862 | /* size_t overflow. */ |
3830 | ret = NULL; | | 3863 | ret = NULL; |
3831 | goto RETURN; | | 3864 | goto RETURN; |
3832 | } | | 3865 | } |
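	/*
	 * Example of the overflow heuristic above (illustrative numbers):
	 * with a 32-bit size_t, SIZE_T_MAX << 16 masks the upper half, so
	 * num = 300 and size = 200 set no high bits and the division is
	 * skipped entirely.  num = 0x10000 and size = 0x10000 do set high
	 * bits, so "num_size / size != num" is evaluated and catches the
	 * wrap-around (0x10000 * 0x10000 == 0 mod 2^32).
	 */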
3833 | | | 3866 | |
3834 | ret = icalloc(num_size); | | 3867 | ret = icalloc(num_size); |
3835 | | | 3868 | |
3836 | RETURN: | | 3869 | RETURN: |
3837 | if (ret == NULL) { | | 3870 | if (ret == NULL) { |
3838 | if (opt_xmalloc) { | | 3871 | if (opt_xmalloc) { |
3839 | _malloc_message(getprogname(), | | 3872 | _malloc_message(getprogname(), |
3840 | ": (malloc) Error in calloc(): out of memory\n", "", | | 3873 | ": (malloc) Error in calloc(): out of memory\n", "", |
3841 | ""); | | 3874 | ""); |
3842 | abort(); | | 3875 | abort(); |
3843 | } | | 3876 | } |
3844 | errno = ENOMEM; | | 3877 | errno = ENOMEM; |
3845 | } | | 3878 | } |
3846 | | | 3879 | |
3847 | UTRACE(0, num_size, ret); | | 3880 | UTRACE(0, num_size, ret); |
3848 | return (ret); | | 3881 | return (ret); |
3849 | } | | 3882 | } |
3850 | | | 3883 | |
3851 | void * | | 3884 | void * |
3852 | realloc(void *ptr, size_t size) | | 3885 | realloc(void *ptr, size_t size) |
3853 | { | | 3886 | { |
3854 | void *ret; | | 3887 | void *ret; |
3855 | | | 3888 | |
3856 | if (size == 0) { | | 3889 | if (size == 0) { |
3857 | if (opt_sysv == false) | | 3890 | if (opt_sysv == false) |
3858 | size = 1; | | 3891 | size = 1; |
3859 | else { | | 3892 | else { |
3860 | if (ptr != NULL) | | 3893 | if (ptr != NULL) |
3861 | idalloc(ptr); | | 3894 | idalloc(ptr); |
3862 | ret = NULL; | | 3895 | ret = NULL; |
3863 | goto RETURN; | | 3896 | goto RETURN; |
3864 | } | | 3897 | } |
3865 | } | | 3898 | } |
3866 | | | 3899 | |
3867 | if (ptr != NULL) { | | 3900 | if (ptr != NULL) { |
3868 | assert(malloc_initialized); | | 3901 | assert(malloc_initialized); |
3869 | | | 3902 | |
3870 | ret = iralloc(ptr, size); | | 3903 | ret = iralloc(ptr, size); |
3871 | | | 3904 | |
3872 | if (ret == NULL) { | | 3905 | if (ret == NULL) { |
3873 | if (opt_xmalloc) { | | 3906 | if (opt_xmalloc) { |
3874 | _malloc_message(getprogname(), | | 3907 | _malloc_message(getprogname(), |
3875 | ": (malloc) Error in realloc(): out of " | | 3908 | ": (malloc) Error in realloc(): out of " |
3876 | "memory\n", "", ""); | | 3909 | "memory\n", "", ""); |
3877 | abort(); | | 3910 | abort(); |
3878 | } | | 3911 | } |
3879 | errno = ENOMEM; | | 3912 | errno = ENOMEM; |
3880 | } | | 3913 | } |
3881 | } else { | | 3914 | } else { |
3882 | if (malloc_init()) | | 3915 | if (malloc_init()) |
3883 | ret = NULL; | | 3916 | ret = NULL; |
3884 | else | | 3917 | else |
3885 | ret = imalloc(size); | | 3918 | ret = imalloc(size); |
3886 | | | 3919 | |
3887 | if (ret == NULL) { | | 3920 | if (ret == NULL) { |
3888 | if (opt_xmalloc) { | | 3921 | if (opt_xmalloc) { |
3889 | _malloc_message(getprogname(), | | 3922 | _malloc_message(getprogname(), |
3890 | ": (malloc) Error in realloc(): out of " | | 3923 | ": (malloc) Error in realloc(): out of " |
3891 | "memory\n", "", ""); | | 3924 | "memory\n", "", ""); |
3892 | abort(); | | 3925 | abort(); |
3893 | } | | 3926 | } |
3894 | errno = ENOMEM; | | 3927 | errno = ENOMEM; |
3895 | } | | 3928 | } |
3896 | } | | 3929 | } |
3897 | | | 3930 | |
3898 | RETURN: | | 3931 | RETURN: |
3899 | UTRACE(ptr, size, ret); | | 3932 | UTRACE(ptr, size, ret); |
3900 | return (ret); | | 3933 | return (ret); |
3901 | } | | 3934 | } |
3902 | | | 3935 | |
3903 | void | | 3936 | void |
3904 | free(void *ptr) | | 3937 | free(void *ptr) |
3905 | { | | 3938 | { |
3906 | | | 3939 | |
3907 | UTRACE(ptr, 0, 0); | | 3940 | UTRACE(ptr, 0, 0); |
3908 | if (ptr != NULL) { | | 3941 | if (ptr != NULL) { |
3909 | assert(malloc_initialized); | | 3942 | assert(malloc_initialized); |
3910 | | | 3943 | |
3911 | idalloc(ptr); | | 3944 | idalloc(ptr); |
3912 | } | | 3945 | } |
3913 | } | | 3946 | } |
3914 | | | 3947 | |
3915 | /* | | 3948 | /* |
3916 | * End malloc(3)-compatible functions. | | 3949 | * End malloc(3)-compatible functions. |
3917 | */ | | 3950 | */ |
3918 | /******************************************************************************/ | | 3951 | /******************************************************************************/ |
3919 | /* | | 3952 | /* |
3920 | * Begin non-standard functions. | | 3953 | * Begin non-standard functions. |
3921 | */ | | 3954 | */ |
3922 | #ifndef __NetBSD__ | | 3955 | #ifndef __NetBSD__ |
3923 | size_t | | 3956 | size_t |
3924 | malloc_usable_size(const void *ptr) | | 3957 | malloc_usable_size(const void *ptr) |
3925 | { | | 3958 | { |
3926 | | | 3959 | |
3927 | assert(ptr != NULL); | | 3960 | assert(ptr != NULL); |
3928 | | | 3961 | |
3929 | return (isalloc(ptr)); | | 3962 | return (isalloc(ptr)); |
3930 | } | | 3963 | } |
3931 | #endif | | 3964 | #endif |
3932 | | | 3965 | |
3933 | /* | | 3966 | /* |
3934 | * End non-standard functions. | | 3967 | * End non-standard functions. |
3935 | */ | | 3968 | */ |
3936 | /******************************************************************************/ | | 3969 | /******************************************************************************/ |
3937 | /* | | 3970 | /* |
3938 | * Begin library-private functions, used by threading libraries for protection | | 3971 | * Begin library-private functions, used by threading libraries for protection |
3939 | * of malloc during fork(). These functions are only called if the program is | | 3972 | * of malloc during fork(). These functions are only called if the program is |
3940 | * running in threaded mode, so there is no need to check whether the program | | 3973 | * running in threaded mode, so there is no need to check whether the program |
3941 | * is threaded here. | | 3974 | * is threaded here. |
3942 | */ | | 3975 | */ |
3943 | | | 3976 | |
3944 | void | | 3977 | void |
3945 | _malloc_prefork(void) | | 3978 | _malloc_prefork(void) |
3946 | { | | 3979 | { |
3947 | unsigned i; | | 3980 | unsigned i; |
3948 | | | 3981 | |
3949 | /* Acquire all mutexes in a safe order. */ | | 3982 | /* Acquire all mutexes in a safe order. */ |
3950 | | | 3983 | |
3951 | malloc_mutex_lock(&arenas_mtx); | | 3984 | malloc_mutex_lock(&arenas_mtx); |
3952 | for (i = 0; i < narenas; i++) { | | 3985 | for (i = 0; i < narenas; i++) { |
3953 | if (arenas[i] != NULL) | | 3986 | if (arenas[i] != NULL) |
3954 | malloc_mutex_lock(&arenas[i]->mtx); | | 3987 | malloc_mutex_lock(&arenas[i]->mtx); |
3955 | } | | 3988 | } |
3956 | | | 3989 | |
3957 | malloc_mutex_lock(&base_mtx); | | 3990 | malloc_mutex_lock(&base_mtx); |
3958 | | | 3991 | |
3959 | malloc_mutex_lock(&chunks_mtx); | | 3992 | malloc_mutex_lock(&chunks_mtx); |
3960 | } | | 3993 | } |
3961 | | | 3994 | |
3962 | void | | 3995 | void |
3963 | _malloc_postfork(void) | | 3996 | _malloc_postfork(void) |
3964 | { | | 3997 | { |
3965 | unsigned i; | | 3998 | unsigned i; |
3966 | | | 3999 | |
3967 | /* Release all mutexes, now that fork() has completed. */ | | 4000 | /* Release all mutexes, now that fork() has completed. */ |
3968 | | | 4001 | |
3969 | malloc_mutex_unlock(&chunks_mtx); | | 4002 | malloc_mutex_unlock(&chunks_mtx); |
3970 | | | 4003 | |
3971 | malloc_mutex_unlock(&base_mtx); | | 4004 | malloc_mutex_unlock(&base_mtx); |
3972 | | | 4005 | |
3973 | for (i = 0; i < narenas; i++) { | | 4006 | for (i = 0; i < narenas; i++) { |
3974 | if (arenas[i] != NULL) | | 4007 | if (arenas[i] != NULL) |
3975 | malloc_mutex_unlock(&arenas[i]->mtx); | | 4008 | malloc_mutex_unlock(&arenas[i]->mtx); |
3976 | } | | 4009 | } |
3977 | malloc_mutex_unlock(&arenas_mtx); | | 4010 | malloc_mutex_unlock(&arenas_mtx); |
3978 | } | | 4011 | } |
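/*
 * Usage sketch (an assumption, not shown in this file): a threading
 * library would typically register these handlers once via
 * pthread_atfork(3), e.g.
 *
 *	pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
 *
 * so that all allocator locks are held across fork() and re-released in
 * both the parent and the child, keeping the child's heap consistent.
 */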
3979 | | | 4012 | |
3980 | /* | | 4013 | /* |
3981 | * End library-private functions. | | 4014 | * End library-private functions. |
3982 | */ | | 4015 | */ |
3983 | /******************************************************************************/ | | 4016 | /******************************************************************************/ |