Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--	src/lib/libc/stdlib/malloc.c | 2081
 1 file changed, 1740 insertions(+), 341 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3c57fad024..446a1ca254 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,421 +1,1820 @@
+/*	$OpenBSD: malloc.c,v 1.152 2014/04/03 16:18:11 schwarze Exp $	*/
 /*
- * Copyright (c) 1983 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
+ * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
+ * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *	This product includes software developed by the University of
- *	California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#if defined(LIBC_SCCS) && !defined(lint)
-/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
-static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $";
-#endif /* LIBC_SCCS and not lint */
-
 /*
- * malloc.c (Caltech) 2/21/82
- * Chris Kingsley, kingsley@cit-20.
- *
- * This is a very fast storage allocator.  It allocates blocks of a small
- * number of different sizes, and keeps free lists of each size.  Blocks that
- * don't exactly fit are passed up to the next larger size.  In this
- * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
- * This is designed for use in a virtual memory environment.
+ * If we meet some day, and you think this stuff is worth it, you
+ * can buy me a beer in return. Poul-Henning Kamp
  */
 
+/* #define MALLOC_STATS */
+
 #include <sys/types.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <errno.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stdio.h>
 #include <unistd.h>
 
-#define	NULL 0
+#ifdef MALLOC_STATS
+#include <sys/tree.h>
+#include <fcntl.h>
+#endif
+
+#include "thread_private.h"
+
+#if defined(__sparc__) && !defined(__sparcv9__)
+#define MALLOC_PAGESHIFT	(13U)
+#elif defined(__mips64__)
+#define MALLOC_PAGESHIFT	(14U)
+#else
+#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
+#endif
+
+#define MALLOC_MINSHIFT		4
+#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
+#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
+#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
+#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
+#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
+
+#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
+#define MALLOC_MAXCACHE		256
+#define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
+#define MALLOC_INITIAL_REGIONS	512
+#define MALLOC_DEFAULT_CACHE	64
+
+/*
+ * When the P option is active, we move allocations between half a page
+ * and a whole page towards the end, subject to alignment constraints.
+ * This is the extra headroom we allow. Set to zero to be the most
+ * strict.
+ */
+#define MALLOC_LEEWAY		0
 
-static void morecore();
-static int findbucket();
+#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
 
 /*
- * The overhead on a block is at least 4 bytes.  When free, this space
- * contains a pointer to the next free block, and the bottom two bits must
- * be zero.  When in use, the first byte is set to MAGIC, and the second
- * byte is the size index.  The remaining bytes are for alignment.
- * If range checking is enabled then a second word holds the size of the
- * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
- * The order of elements is critical: ov_magic must overlay the low order
- * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ * What to use for Junk.  This is the byte value we use to fill with
+ * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
+ * and SOME_FREEJUNK right before free.
  */
-union	overhead {
-	union	overhead *ov_next;	/* when free */
-	struct {
-		u_char	ovu_magic;	/* magic number */
-		u_char	ovu_index;	/* bucket # */
-#ifdef RCHECK
-		u_short	ovu_rmagic;	/* range magic number */
-		u_long	ovu_size;	/* actual block size */
+#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
+#define SOME_FREEJUNK		0xdf
+
+#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+#define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)
+
+struct region_info {
+	void *p;		/* page; low bits used to mark chunks */
+	uintptr_t size;		/* size for pages, or chunk_info pointer */
+#ifdef MALLOC_STATS
+	void *f;		/* where allocated from */
 #endif
-	} ovu;
-#define	ov_magic	ovu.ovu_magic
-#define	ov_index	ovu.ovu_index
-#define	ov_rmagic	ovu.ovu_rmagic
-#define	ov_size		ovu.ovu_size
 };
 
-#define	MAGIC		0xef		/* magic # on accounting info */
-#define RMAGIC		0x5555		/* magic # on range info */
+LIST_HEAD(chunk_head, chunk_info);
 
-#ifdef RCHECK
-#define	RSLOP		sizeof (u_short)
+struct dir_info {
+	u_int32_t canary1;
+	struct region_info *r;		/* region slots */
+	size_t regions_total;		/* number of region slots */
+	size_t regions_free;		/* number of free slots */
+					/* lists of free chunk info structs */
+	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
+					/* lists of chunks with free slots */
+	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1];
+	size_t free_regions_size;	/* free pages cached */
+					/* free pages cache */
+	struct region_info free_regions[MALLOC_MAXCACHE];
+					/* delayed free chunk slots */
+	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
+	u_short chunk_start;
+#ifdef MALLOC_STATS
+	size_t inserts;
+	size_t insert_collisions;
+	size_t finds;
+	size_t find_collisions;
+	size_t deletes;
+	size_t delete_moves;
+	size_t cheap_realloc_tries;
+	size_t cheap_reallocs;
+#define STATS_INC(x) ((x)++)
+#define STATS_ZERO(x) ((x) = 0)
+#define STATS_SETF(x,y) ((x)->f = (y))
 #else
-#define	RSLOP		0
-#endif
+#define STATS_INC(x)	/* nothing */
+#define STATS_ZERO(x)	/* nothing */
+#define STATS_SETF(x,y)	/* nothing */
+#endif /* MALLOC_STATS */
+	u_int32_t canary2;
+};
+#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
+			~MALLOC_PAGEMASK)
 
 /*
- * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
- * smallest allocatable block is 8 bytes.  The overhead information
- * precedes the data area returned to the user.
+ * This structure describes a page worth of chunks.
+ *
+ * How many bits per u_short in the bitmap
  */
-#define	NBUCKETS 30
-static	union overhead *nextf[NBUCKETS];
-extern	char *sbrk();
+#define MALLOC_BITS		(NBBY * sizeof(u_short))
+struct chunk_info {
+	LIST_ENTRY(chunk_info) entries;
+	void *page;			/* pointer to the page */
+	u_int32_t canary;
+	u_short size;			/* size of this page's chunks */
+	u_short shift;			/* how far to shift for this size */
+	u_short free;			/* how many free chunks */
+	u_short total;			/* how many chunk */
+					/* which chunks are free */
+	u_short bits[1];
+};
+
+struct malloc_readonly {
+	struct dir_info *g_pool;	/* Main bookkeeping information */
+	int	malloc_abort;		/* abort() on error */
+	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
+	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
+	int	malloc_hint;		/* call madvice on free pages? */
+	int	malloc_junk;		/* junk fill? */
+	int	malloc_move;		/* move allocations to end of page? */
+	int	malloc_realloc;		/* always realloc? */
+	int	malloc_xmalloc;		/* xmalloc behaviour? */
+	int	malloc_zero;		/* zero fill? */
+	size_t	malloc_guard;		/* use guard pages after allocations? */
+	u_int	malloc_cache;		/* free pages we cache */
+#ifdef MALLOC_STATS
+	int	malloc_stats;		/* dump statistics at end */
+#endif
+	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
+};
 
-static	int pagesz;			/* page size */
-static	int pagebucket;			/* page size bucket */
+/* This object is mapped PROT_READ after initialisation to prevent tampering */
+static union {
+	struct malloc_readonly mopts;
+	u_char _pad[MALLOC_PAGESIZE];
+} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
+#define mopts	malloc_readonly.mopts
+#define g_pool	mopts.g_pool
 
-#ifdef MSTATS
-/*
- * nmalloc[i] is the difference between the number of mallocs and frees
- * for a given block size.
+char	*malloc_options;	/* compile-time options */
+
+static char	*malloc_func;		/* current function */
+static int	malloc_active;		/* status of malloc */
+
+static size_t	malloc_guarded;		/* bytes used for guards */
+static size_t	malloc_used;		/* bytes allocated */
+
+static size_t	rnibblesused;		/* random nibbles used */
+static u_char	rbytes[512];		/* random bytes */
+static u_char	getrnibble(void);
+
+extern char	*__progname;
+
+#ifdef MALLOC_STATS
+void	malloc_dump(int);
+static void	malloc_exit(void);
+#define CALLER	__builtin_return_address(0)
+#else
+#define CALLER	NULL
+#endif
+
+/* low bits of r->p determine size: 0 means >= page size and p->size holding
+ * real size, otherwise r->size is a shift count, or 1 for malloc(0)
  */
-static	u_int nmalloc[NBUCKETS];
-#include <stdio.h>
+#define REALSIZE(sz, r)						\
+	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
+	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
+
+static inline size_t
+hash(void *p)
+{
+	size_t sum;
+	union {
+		uintptr_t p;
+		unsigned short a[sizeof(void *) / sizeof(short)];
+	} u;
+	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
+	sum = u.a[0];
+	sum = (sum << 7) - sum + u.a[1];
+#ifdef __LP64__
+	sum = (sum << 7) - sum + u.a[2];
+	sum = (sum << 7) - sum + u.a[3];
 #endif
+	return sum;
+}
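An aside on the REALSIZE() macro introduced above: the allocator tags the low bits of the stored page pointer with the allocation kind, and REALSIZE() decodes that tag. The following is an illustrative stand-alone C sketch (hypothetical values, not part of the diff): low bits 0 mean a page-sized-or-larger region whose byte count sits in r->size, 1 marks a malloc(0) chunk page, and n > 1 marks a chunk page with chunks of 1 << (n - 1) bytes.

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uintptr_t tagged = 0x7f000 | 5;	/* hypothetical r->p value */
		size_t sz = tagged & 0xfff;	/* MALLOC_PAGEMASK for 4k pages */

		/* same decode as REALSIZE() */
		sz = (sz == 0) ? 12345 : (sz == 1 ? 0 : (1UL << (sz - 1)));
		printf("%zu\n", sz);		/* prints 16: 16-byte chunks */
		return 0;
	}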
 
-#if defined(DEBUG) || defined(RCHECK)
-#define	ASSERT(p)   if (!(p)) botch("p")
-#include <stdio.h>
-static
-botch(s)
-	char *s;
+static void
+wrterror(char *msg, void *p)
 {
-	fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
-	(void) fflush(stderr);		/* just in case user buffered it */
-	abort();
+	char		*q = " error: ";
+	struct iovec	iov[6];
+	char		buf[20];
+	int		saved_errno = errno;
+
+	iov[0].iov_base = __progname;
+	iov[0].iov_len = strlen(__progname);
+	iov[1].iov_base = malloc_func;
+	iov[1].iov_len = strlen(malloc_func);
+	iov[2].iov_base = q;
+	iov[2].iov_len = strlen(q);
+	iov[3].iov_base = msg;
+	iov[3].iov_len = strlen(msg);
+	iov[4].iov_base = buf;
+	if (p == NULL)
+		iov[4].iov_len = 0;
+	else {
+		snprintf(buf, sizeof(buf), " %p", p);
+		iov[4].iov_len = strlen(buf);
+	}
+	iov[5].iov_base = "\n";
+	iov[5].iov_len = 1;
+	writev(STDERR_FILENO, iov, 6);
+
+#ifdef MALLOC_STATS
+	if (mopts.malloc_stats)
+		malloc_dump(STDERR_FILENO);
+#endif /* MALLOC_STATS */
+
+	errno = saved_errno;
+	if (mopts.malloc_abort)
+		abort();
 }
-#else
-#define	ASSERT(p)
-#endif
 
-void *
-malloc(nbytes)
-	size_t nbytes;
+static void
+rbytes_init(void)
+{
+	arc4random_buf(rbytes, sizeof(rbytes));
+	rnibblesused = 0;
+}
+
+static inline u_char
+getrnibble(void)
+{
+	u_char x;
+
+	if (rnibblesused >= 2 * sizeof(rbytes))
+		rbytes_init();
+	x = rbytes[rnibblesused++ / 2];
+	return (rnibblesused & 1 ? x & 0xf : x >> 4);
+}
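getrnibble() above drains a 512-byte arc4random pool four bits at a time, which is also why MALLOC_DELAYED_CHUNKS is 15, the largest value a nibble can hold. A minimal stand-alone sketch of the same idea (hypothetical names; arc4random_buf() is assumed available, as on the BSDs):

	#include <stdlib.h>

	static unsigned char pool[512];
	static size_t nibbles_used = 2 * sizeof(pool);	/* force initial fill */

	static unsigned char
	nibble(void)
	{
		unsigned char x;

		if (nibbles_used >= 2 * sizeof(pool)) {	/* refill per 1024 draws */
			arc4random_buf(pool, sizeof(pool));
			nibbles_used = 0;
		}
		x = pool[nibbles_used++ / 2];
		/* alternate between the low and high half of each byte */
		return nibbles_used & 1 ? x & 0xf : x >> 4;
	}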
+
+/*
+ * Cache maintenance. We keep at most malloc_cache pages cached.
+ * If the cache is becoming full, unmap pages in the cache for real,
+ * and then add the region to the cache
+ * Opposed to the regular region data structure, the sizes in the
+ * cache are in MALLOC_PAGESIZE units.
+ */
+static void
+unmap(struct dir_info *d, void *p, size_t sz)
+{
+	size_t psz = sz >> MALLOC_PAGESHIFT;
+	size_t rsz, tounmap;
+	struct region_info *r;
+	u_int i, offset;
+
+	if (sz != PAGEROUND(sz)) {
+		wrterror("munmap round", NULL);
+		return;
+	}
+
+	if (psz > mopts.malloc_cache) {
+		if (munmap(p, sz))
+			wrterror("munmap", p);
+		malloc_used -= sz;
+		return;
+	}
+	tounmap = 0;
+	rsz = mopts.malloc_cache - d->free_regions_size;
+	if (psz > rsz)
+		tounmap = psz - rsz;
+	offset = getrnibble() + (getrnibble() << 4);
+	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+		if (r->p != NULL) {
+			rsz = r->size << MALLOC_PAGESHIFT;
+			if (munmap(r->p, rsz))
+				wrterror("munmap", r->p);
+			r->p = NULL;
+			if (tounmap > r->size)
+				tounmap -= r->size;
+			else
+				tounmap = 0;
+			d->free_regions_size -= r->size;
+			r->size = 0;
+			malloc_used -= rsz;
+		}
+	}
+	if (tounmap > 0)
+		wrterror("malloc cache underflow", NULL);
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+		if (r->p == NULL) {
+			if (mopts.malloc_hint)
+				madvise(p, sz, MADV_FREE);
+			if (mopts.malloc_freeunmap)
+				mprotect(p, sz, PROT_NONE);
+			r->p = p;
+			r->size = psz;
+			d->free_regions_size += psz;
+			break;
+		}
+	}
+	if (i == mopts.malloc_cache)
+		wrterror("malloc free slot lost", NULL);
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror("malloc cache overflow", NULL);
+}
+
+static void
+zapcacheregion(struct dir_info *d, void *p, size_t len)
+{
+	u_int i;
+	struct region_info *r;
+	size_t rsz;
+
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[i];
+		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
+			rsz = r->size << MALLOC_PAGESHIFT;
+			if (munmap(r->p, rsz))
+				wrterror("munmap", r->p);
+			r->p = NULL;
+			d->free_regions_size -= r->size;
+			r->size = 0;
+			malloc_used -= rsz;
+		}
+	}
+}
+
+static void *
+map(struct dir_info *d, size_t sz, int zero_fill)
 {
-	register union overhead *op;
-	register long bucket, n;
-	register unsigned amt;
+	size_t psz = sz >> MALLOC_PAGESHIFT;
+	struct region_info *r, *big = NULL;
+	u_int i, offset;
+	void *p;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror("internal struct corrupt", NULL);
+	if (sz != PAGEROUND(sz)) {
+		wrterror("map round", NULL);
+		return MAP_FAILED;
+	}
+	if (psz > d->free_regions_size) {
+		p = MMAP(sz);
+		if (p != MAP_FAILED)
+			malloc_used += sz;
+		/* zero fill not needed */
+		return p;
+	}
+	offset = getrnibble() + (getrnibble() << 4);
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+		if (r->p != NULL) {
+			if (r->size == psz) {
+				p = r->p;
+				if (mopts.malloc_freeunmap)
+					mprotect(p, sz, PROT_READ | PROT_WRITE);
+				if (mopts.malloc_hint)
+					madvise(p, sz, MADV_NORMAL);
+				r->p = NULL;
+				r->size = 0;
+				d->free_regions_size -= psz;
+				if (zero_fill)
+					memset(p, 0, sz);
+				else if (mopts.malloc_junk &&
+				    mopts.malloc_freeunmap)
+					memset(p, SOME_FREEJUNK, sz);
+				return p;
+			} else if (r->size > psz)
+				big = r;
+		}
+	}
+	if (big != NULL) {
+		r = big;
+		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
+		if (mopts.malloc_freeunmap)
+			mprotect(p, sz, PROT_READ | PROT_WRITE);
+		if (mopts.malloc_hint)
+			madvise(p, sz, MADV_NORMAL);
+		r->size -= psz;
+		d->free_regions_size -= psz;
+		if (zero_fill)
+			memset(p, 0, sz);
+		else if (mopts.malloc_junk && mopts.malloc_freeunmap)
+			memset(p, SOME_FREEJUNK, sz);
+		return p;
+	}
+	p = MMAP(sz);
+	if (p != MAP_FAILED)
+		malloc_used += sz;
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror("malloc cache", NULL);
+	/* zero fill not needed */
+	return p;
+}
+
+/*
+ * Initialize a dir_info, which should have been cleared by caller
+ */
+static int
+omalloc_init(struct dir_info **dp)
+{
+	char *p, b[64];
+	int i, j;
+	size_t d_avail, regioninfo_size;
+	struct dir_info *d;
+
+	rbytes_init();
 
 	/*
-	 * First time malloc is called, setup page size and
-	 * align break pointer so all data will be page aligned.
+	 * Default options
 	 */
-	if (pagesz == 0) {
-		pagesz = n = getpagesize();
-		op = (union overhead *)sbrk(0);
-		n = n - sizeof (*op) - ((long)op & (n - 1));
-		if (n < 0)
-			n += pagesz;
-		if (n) {
-			if (sbrk(n) == (char *)-1)
-				return (NULL);
+	mopts.malloc_abort = 1;
+	mopts.malloc_move = 1;
+	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
+
+	for (i = 0; i < 3; i++) {
+		switch (i) {
+		case 0:
+			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
+			if (j <= 0)
+				continue;
+			b[j] = '\0';
+			p = b;
+			break;
+		case 1:
+			if (issetugid() == 0)
+				p = getenv("MALLOC_OPTIONS");
+			else
+				continue;
+			break;
+		case 2:
+			p = malloc_options;
+			break;
+		default:
+			p = NULL;
 		}
-		bucket = 0;
-		amt = 8;
-		while (pagesz > amt) {
-			amt <<= 1;
-			bucket++;
+
+		for (; p != NULL && *p != '\0'; p++) {
+			switch (*p) {
+			case '>':
+				mopts.malloc_cache <<= 1;
+				if (mopts.malloc_cache > MALLOC_MAXCACHE)
+					mopts.malloc_cache = MALLOC_MAXCACHE;
+				break;
+			case '<':
+				mopts.malloc_cache >>= 1;
+				break;
+			case 'a':
+				mopts.malloc_abort = 0;
+				break;
+			case 'A':
+				mopts.malloc_abort = 1;
+				break;
+#ifdef MALLOC_STATS
+			case 'd':
+				mopts.malloc_stats = 0;
+				break;
+			case 'D':
+				mopts.malloc_stats = 1;
+				break;
+#endif /* MALLOC_STATS */
+			case 'f':
+				mopts.malloc_freenow = 0;
+				mopts.malloc_freeunmap = 0;
+				break;
+			case 'F':
+				mopts.malloc_freenow = 1;
+				mopts.malloc_freeunmap = 1;
+				break;
+			case 'g':
+				mopts.malloc_guard = 0;
+				break;
+			case 'G':
+				mopts.malloc_guard = MALLOC_PAGESIZE;
+				break;
+			case 'h':
+				mopts.malloc_hint = 0;
+				break;
+			case 'H':
+				mopts.malloc_hint = 1;
+				break;
+			case 'j':
+				mopts.malloc_junk = 0;
+				break;
+			case 'J':
+				mopts.malloc_junk = 1;
+				break;
+			case 'n':
+			case 'N':
+				break;
+			case 'p':
+				mopts.malloc_move = 0;
+				break;
+			case 'P':
+				mopts.malloc_move = 1;
+				break;
+			case 'r':
+				mopts.malloc_realloc = 0;
+				break;
+			case 'R':
+				mopts.malloc_realloc = 1;
+				break;
+			case 's':
+				mopts.malloc_freeunmap = mopts.malloc_junk = 0;
+				mopts.malloc_guard = 0;
+				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
+				break;
+			case 'S':
+				mopts.malloc_freeunmap = mopts.malloc_junk = 1;
+				mopts.malloc_guard = MALLOC_PAGESIZE;
+				mopts.malloc_cache = 0;
+				break;
+			case 'u':
+				mopts.malloc_freeunmap = 0;
+				break;
+			case 'U':
+				mopts.malloc_freeunmap = 1;
+				break;
+			case 'x':
+				mopts.malloc_xmalloc = 0;
+				break;
+			case 'X':
+				mopts.malloc_xmalloc = 1;
+				break;
+			case 'z':
+				mopts.malloc_zero = 0;
+				break;
+			case 'Z':
+				mopts.malloc_zero = 1;
+				break;
+			default: {
+				static const char q[] = "malloc() warning: "
+				    "unknown char in MALLOC_OPTIONS\n";
+				write(STDERR_FILENO, q, sizeof(q) - 1);
+				break;
+			}
+			}
 		}
-		pagebucket = bucket;
 	}
+
 	/*
-	 * Convert amount of memory requested into closest block size
-	 * stored in hash buckets which satisfies request.
-	 * Account for space used per block for accounting.
+	 * We want junk in the entire allocation, and zero only in the part
+	 * the user asked for.
 	 */
-	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
-#ifndef RCHECK
-		amt = 8;	/* size of first bucket */
-		bucket = 0;
-#else
-		amt = 16;	/* size of first bucket */
-		bucket = 1;
-#endif
-		n = -((long)sizeof (*op) + RSLOP);
-	} else {
-		amt = pagesz;
-		bucket = pagebucket;
-	}
-	while (nbytes > amt + n) {
-		amt <<= 1;
-		if (amt == 0)
-			return (NULL);
-		bucket++;
+	if (mopts.malloc_zero)
+		mopts.malloc_junk = 1;
+
+#ifdef MALLOC_STATS
+	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
+		static const char q[] = "malloc() warning: atexit(2) failed."
+		    " Will not be able to dump stats on exit\n";
+		write(STDERR_FILENO, q, sizeof(q) - 1);
 	}
+#endif /* MALLOC_STATS */
+
+	while ((mopts.malloc_canary = arc4random()) == 0)
+		;
+
 	/*
-	 * If nothing in hash bucket right now,
-	 * request more memory from the system.
+	 * Allocate dir_info with a guard page on either side. Also
+	 * randomise offset inside the page at which the dir_info
+	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
 	 */
-	if ((op = nextf[bucket]) == NULL) {
-		morecore(bucket);
-		if ((op = nextf[bucket]) == NULL)
-			return (NULL);
-	}
-	/* remove from linked list */
-	nextf[bucket] = op->ov_next;
-	op->ov_magic = MAGIC;
-	op->ov_index = bucket;
-#ifdef MSTATS
-	nmalloc[bucket]++;
-#endif
-#ifdef RCHECK
+	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
+		return -1;
+	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
+	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
+	    MALLOC_PAGESIZE, PROT_NONE);
+	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
+	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
+	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
+
+	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
+	regioninfo_size = d->regions_total * sizeof(struct region_info);
+	d->r = MMAP(regioninfo_size);
+	if (d->r == MAP_FAILED) {
+		wrterror("malloc init mmap failed", NULL);
+		d->regions_total = 0;
+		return 1;
+	}
+	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
+		LIST_INIT(&d->chunk_info_list[i]);
+		LIST_INIT(&d->chunk_dir[i]);
+	}
+	malloc_used += regioninfo_size;
+	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
+	d->canary2 = ~d->canary1;
+
+	*dp = d;
+
 	/*
-	 * Record allocated size of block and
-	 * bound space with magic numbers.
+	 * Options have been set and will never be reset.
+	 * Prevent further tampering with them.
 	 */
-	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
-	op->ov_rmagic = RMAGIC;
-	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
+		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
+
+	return 0;
+}
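omalloc_init() above reads its option string from three sources in order — the target of the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable (skipped for set-id programs), and the malloc_options global — so later sources override earlier ones. A hedged usage sketch: since the diff declares the malloc_options global, a program could plausibly hard-wire flags for its own run by assigning it before the first allocation (letters per the switch above; illustrative, not part of the diff):

	extern char *malloc_options;

	int
	main(void)
	{
		malloc_options = "GJ";	/* guard pages plus junk filling */
		/* ... the first malloc() picks this up via omalloc_init() */
		return 0;
	}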
+
+static int
+omalloc_grow(struct dir_info *d)
+{
+	size_t newtotal;
+	size_t newsize;
+	size_t mask;
+	size_t i;
+	struct region_info *p;
+
+	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
+		return 1;
+
+	newtotal = d->regions_total * 2;
+	newsize = newtotal * sizeof(struct region_info);
+	mask = newtotal - 1;
+
+	p = MMAP(newsize);
+	if (p == MAP_FAILED)
+		return 1;
+
+	malloc_used += newsize;
+	memset(p, 0, newsize);
+	STATS_ZERO(d->inserts);
+	STATS_ZERO(d->insert_collisions);
+	for (i = 0; i < d->regions_total; i++) {
+		void *q = d->r[i].p;
+		if (q != NULL) {
+			size_t index = hash(q) & mask;
+			STATS_INC(d->inserts);
+			while (p[index].p != NULL) {
+				index = (index - 1) & mask;
+				STATS_INC(d->insert_collisions);
+			}
+			p[index] = d->r[i];
+		}
+	}
+	/* avoid pages containing meta info to end up in cache */
+	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
+		wrterror("munmap", d->r);
+	else
+		malloc_used -= d->regions_total * sizeof(struct region_info);
+	d->regions_free = d->regions_free + d->regions_total;
+	d->regions_total = newtotal;
+	d->r = p;
+	return 0;
+}
+
+static struct chunk_info *
+alloc_chunk_info(struct dir_info *d, int bits)
+{
+	struct chunk_info *p;
+	size_t size, count;
+
+	if (bits == 0)
+		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
+	else
+		count = MALLOC_PAGESIZE >> bits;
+
+	size = howmany(count, MALLOC_BITS);
+	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
+	size = ALIGN(size);
+
+	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
+		char *q;
+		int i;
+
+		q = MMAP(MALLOC_PAGESIZE);
+		if (q == MAP_FAILED)
+			return NULL;
+		malloc_used += MALLOC_PAGESIZE;
+		count = MALLOC_PAGESIZE / size;
+		for (i = 0; i < count; i++, q += size)
+			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
+			    (struct chunk_info *)q, entries);
+	}
+	p = LIST_FIRST(&d->chunk_info_list[bits]);
+	LIST_REMOVE(p, entries);
+	memset(p, 0, size);
+	p->canary = d->canary1;
+	return p;
+}
+
+
+/*
+ * The hashtable uses the assumption that p is never NULL. This holds since
+ * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
+ */
+static int
+insert(struct dir_info *d, void *p, size_t sz, void *f)
+{
+	size_t index;
+	size_t mask;
+	void *q;
+
+	if (d->regions_free * 4 < d->regions_total) {
+		if (omalloc_grow(d))
+			return 1;
+	}
+	mask = d->regions_total - 1;
+	index = hash(p) & mask;
+	q = d->r[index].p;
+	STATS_INC(d->inserts);
+	while (q != NULL) {
+		index = (index - 1) & mask;
+		q = d->r[index].p;
+		STATS_INC(d->insert_collisions);
+	}
+	d->r[index].p = p;
+	d->r[index].size = sz;
+#ifdef MALLOC_STATS
+	d->r[index].f = f;
 #endif
-	return ((char *)(op + 1));
+	d->regions_free--;
+	return 0;
 }
 
+static struct region_info *
+find(struct dir_info *d, void *p)
+{
+	size_t index;
+	size_t mask = d->regions_total - 1;
+	void *q, *r;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror("internal struct corrupt", NULL);
+	p = MASK_POINTER(p);
+	index = hash(p) & mask;
+	r = d->r[index].p;
+	q = MASK_POINTER(r);
+	STATS_INC(d->finds);
+	while (q != p && r != NULL) {
+		index = (index - 1) & mask;
+		r = d->r[index].p;
+		q = MASK_POINTER(r);
+		STATS_INC(d->find_collisions);
+	}
+	return (q == p && r != NULL) ? &d->r[index] : NULL;
+}
+
+static void
+delete(struct dir_info *d, struct region_info *ri)
+{
+	/* algorithm R, Knuth Vol III section 6.4 */
+	size_t mask = d->regions_total - 1;
+	size_t i, j, r;
+
+	if (d->regions_total & (d->regions_total - 1))
+		wrterror("regions_total not 2^x", NULL);
+	d->regions_free++;
+	STATS_INC(g_pool->deletes);
+
+	i = ri - d->r;
+	for (;;) {
+		d->r[i].p = NULL;
+		d->r[i].size = 0;
+		j = i;
+		for (;;) {
+			i = (i - 1) & mask;
+			if (d->r[i].p == NULL)
+				return;
+			r = hash(d->r[i].p) & mask;
+			if ((i <= r && r < j) || (r < j && j < i) ||
+			    (j < i && i <= r))
+				continue;
+			d->r[j] = d->r[i];
+			STATS_INC(g_pool->delete_moves);
+			break;
+		}
+
+	}
+}
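delete() above is the classic backward-shift deletion for open addressing with downward linear probing (Knuth's algorithm R): after emptying a slot it walks down the probe chain and pulls up any entry whose home slot can no longer reach it. A toy stand-alone sketch of the same invariant, with a hypothetical fixed-size table and stand-in hash:

	#include <stddef.h>
	#include <stdint.h>

	#define TSIZE 8				/* must stay a power of two */

	static void *slot[TSIZE];

	static size_t
	home(void *p)				/* stand-in for hash() */
	{
		return ((uintptr_t)p >> 4) & (TSIZE - 1);
	}

	static void
	toy_delete(size_t hole)
	{
		size_t i = hole, j, r;

		for (;;) {
			slot[i] = NULL;
			j = i;
			for (;;) {
				i = (i - 1) & (TSIZE - 1);
				if (slot[i] == NULL)
					return;
				r = home(slot[i]);
				/* still reachable from its home slot? leave it */
				if ((i <= r && r < j) || (r < j && j < i) ||
				    (j < i && i <= r))
					continue;
				slot[j] = slot[i];	/* pull it into the hole */
				break;
			}
		}
	}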
+
 /*
- * Allocate more memory to the indicated bucket.
+ * Allocate a page of chunks
  */
-static void
-morecore(bucket)
-	int bucket;
+static struct chunk_info *
+omalloc_make_chunks(struct dir_info *d, int bits)
 {
-	register union overhead *op;
-	register long sz;		/* size of desired block */
-	long amt;			/* amount to allocate */
-	int nblks;			/* how many blocks we get */
+	struct chunk_info *bp;
+	void *pp;
+	int i, k;
 
-	/*
-	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
-	 * 2^30 bytes on a VAX, I think) or for a negative arg.
-	 */
-	sz = 1 << (bucket + 3);
-#ifdef DEBUG
-	ASSERT(sz > 0);
-#else
-	if (sz <= 0)
-		return;
-#endif
-	if (sz < pagesz) {
-		amt = pagesz;
-		nblks = amt / sz;
+	/* Allocate a new bucket */
+	pp = map(d, MALLOC_PAGESIZE, 0);
+	if (pp == MAP_FAILED)
+		return NULL;
+
+	bp = alloc_chunk_info(d, bits);
+	if (bp == NULL) {
+		unmap(d, pp, MALLOC_PAGESIZE);
+		return NULL;
+	}
+
+	/* memory protect the page allocated in the malloc(0) case */
+	if (bits == 0) {
+		bp->size = 0;
+		bp->shift = 1;
+		i = MALLOC_MINSIZE - 1;
+		while (i >>= 1)
+			bp->shift++;
+		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
+		bp->page = pp;
+
+		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
+		if (k < 0) {
+			unmap(d, pp, MALLOC_PAGESIZE);
+			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
+			return NULL;
+		}
 	} else {
-		amt = sz + pagesz;
-		nblks = 1;
+		bp->size = 1U << bits;
+		bp->shift = bits;
+		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
+		bp->page = pp;
 	}
-	op = (union overhead *)sbrk(amt);
-	/* no more room! */
-	if ((long)op == -1)
-		return;
-	/*
-	 * Add new memory allocated to that on
-	 * free list for this hash bucket.
-	 */
-	nextf[bucket] = op;
-	while (--nblks > 0) {
-		op->ov_next = (union overhead *)((caddr_t)op + sz);
-		op = (union overhead *)((caddr_t)op + sz);
-	}
+
+	/* set all valid bits in the bitmap */
+	k = bp->total;
+	i = 0;
+
+	/* Do a bunch at a time */
+	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
+		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
+
+	for (; i < k; i++)
+		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
+
+	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
+
+	bits++;
+	if ((uintptr_t)pp & bits)
+		wrterror("pp & bits", pp);
+
+	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
+	return bp;
 }
 
-void
-free(cp)
-	void *cp;
-{
-	register long size;
-	register union overhead *op;
-
+
+/*
+ * Allocate a chunk
+ */
+static void *
+malloc_bytes(struct dir_info *d, size_t size, void *f)
+{
-	if (cp == NULL)
-		return;
-	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
-#ifdef DEBUG
-	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
-#else
-	if (op->ov_magic != MAGIC)
-		return;				/* sanity */
-#endif
-#ifdef RCHECK
-	ASSERT(op->ov_rmagic == RMAGIC);
-	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
-#endif
-	size = op->ov_index;
-	ASSERT(size < NBUCKETS);
-	op->ov_next = nextf[size];	/* also clobbers ov_magic */
-	nextf[size] = op;
-#ifdef MSTATS
-	nmalloc[size]--;
+	int		i, j;
+	size_t		k;
+	u_short		u, *lp;
+	struct chunk_info *bp;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror("internal struct corrupt", NULL);
+	/* Don't bother with anything less than this */
+	/* unless we have a malloc(0) requests */
+	if (size != 0 && size < MALLOC_MINSIZE)
+		size = MALLOC_MINSIZE;
+
+	/* Find the right bucket */
+	if (size == 0)
+		j = 0;
+	else {
+		j = MALLOC_MINSHIFT;
+		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
+		while (i >>= 1)
+			j++;
+	}
+
+	/* If it's empty, make a page more of that size chunks */
+	if (LIST_EMPTY(&d->chunk_dir[j])) {
+		bp = omalloc_make_chunks(d, j);
+		if (bp == NULL)
+			return NULL;
+	} else
+		bp = LIST_FIRST(&d->chunk_dir[j]);
+
+	if (bp->canary != d->canary1)
+		wrterror("chunk info corrupted", NULL);
+
+	i = d->chunk_start;
+	if (bp->free > 1)
+		i += getrnibble();
+	if (i >= bp->total)
+		i &= bp->total - 1;
+	for (;;) {
+		for (;;) {
+			lp = &bp->bits[i / MALLOC_BITS];
+			if (!*lp) {
+				i += MALLOC_BITS;
+				i &= ~(MALLOC_BITS - 1);
+				if (i >= bp->total)
+					i = 0;
+			} else
+				break;
+		}
+		k = i % MALLOC_BITS;
+		u = 1 << k;
+		if (*lp & u)
+			break;
+		if (++i >= bp->total)
+			i = 0;
+	}
+	d->chunk_start += i + 1;
+#ifdef MALLOC_STATS
+	if (i == 0) {
+		struct region_info *r = find(d, bp->page);
+		r->f = f;
+	}
 #endif
+
+	*lp ^= u;
+
+	/* If there are no more free, remove from free-list */
+	if (!--bp->free)
+		LIST_REMOVE(bp, entries);
+
+	/* Adjust to the real offset of that chunk */
+	k += (lp - bp->bits) * MALLOC_BITS;
+	k <<= bp->shift;
+
+	if (mopts.malloc_junk && bp->size > 0)
+		memset((char *)bp->page + k, SOME_JUNK, bp->size);
+	return ((char *)bp->page + k);
+}
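The bucket search at the top of malloc_bytes() maps a request to the smallest power-of-two chunk size that fits, expressed as a shift count j. A stand-alone version of the same arithmetic (size_to_bucket is a hypothetical helper name):

	/* j ends up as ceil(log2(size)), clamped to MALLOC_MINSHIFT (4). */
	static int
	size_to_bucket(size_t size)	/* assumes 0 < size <= MALLOC_MAXCHUNK */
	{
		int j = 4;			/* MALLOC_MINSHIFT */
		size_t i = (size - 1) >> (4 - 1);

		while (i >>= 1)
			j++;
		return j;
	}

	/* size_to_bucket(16) == 4 (16-byte chunks); size_to_bucket(100) == 7,
	 * i.e. the request is served from a page of 128-byte chunks. */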
976 | |||
977 | |||
978 | /* | ||
979 | * Free a chunk, and possibly the page it's on, if the page becomes empty. | ||
980 | */ | ||
981 | static void | ||
982 | free_bytes(struct dir_info *d, struct region_info *r, void *ptr) | ||
983 | { | ||
984 | struct chunk_head *mp; | ||
985 | struct chunk_info *info; | ||
986 | int i; | ||
987 | |||
988 | info = (struct chunk_info *)r->size; | ||
989 | if (info->canary != d->canary1) | ||
990 | wrterror("chunk info corrupted", NULL); | ||
991 | |||
992 | /* Find the chunk number on the page */ | ||
993 | i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; | ||
994 | |||
995 | if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) { | ||
996 | wrterror("modified chunk-pointer", ptr); | ||
997 | return; | ||
998 | } | ||
999 | if (info->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) { | ||
1000 | wrterror("chunk is already free", ptr); | ||
1001 | return; | ||
1002 | } | ||
1003 | |||
1004 | info->bits[i / MALLOC_BITS] |= 1U << (i % MALLOC_BITS); | ||
1005 | info->free++; | ||
1006 | |||
1007 | if (info->size != 0) | ||
1008 | mp = d->chunk_dir + info->shift; | ||
1009 | else | ||
1010 | mp = d->chunk_dir; | ||
1011 | |||
1012 | if (info->free == 1) { | ||
1013 | /* Page became non-full */ | ||
1014 | LIST_INSERT_HEAD(mp, info, entries); | ||
1015 | return; | ||
1016 | } | ||
1017 | if (info->free != info->total) | ||
1018 | return; | ||
1019 | |||
1020 | LIST_REMOVE(info, entries); | ||
1021 | |||
1022 | if (info->size == 0 && !mopts.malloc_freeunmap) | ||
1023 | mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); | ||
1024 | unmap(d, info->page, MALLOC_PAGESIZE); | ||
1025 | |||
1026 | delete(d, r); | ||
1027 | if (info->size != 0) | ||
1028 | mp = &d->chunk_info_list[info->shift]; | ||
1029 | else | ||
1030 | mp = &d->chunk_info_list[0]; | ||
1031 | LIST_INSERT_HEAD(mp, info, entries); | ||
1032 | } | ||
1033 | |||
1034 | |||
1035 | |||
1036 | static void * | ||
1037 | omalloc(size_t sz, int zero_fill, void *f) | ||
1038 | { | ||
1039 | void *p; | ||
1040 | size_t psz; | ||
1041 | |||
1042 | if (sz > MALLOC_MAXCHUNK) { | ||
1043 | if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { | ||
1044 | errno = ENOMEM; | ||
1045 | return NULL; | ||
1046 | } | ||
1047 | sz += mopts.malloc_guard; | ||
1048 | psz = PAGEROUND(sz); | ||
1049 | p = map(g_pool, psz, zero_fill); | ||
1050 | if (p == MAP_FAILED) { | ||
1051 | errno = ENOMEM; | ||
1052 | return NULL; | ||
1053 | } | ||
1054 | if (insert(g_pool, p, sz, f)) { | ||
1055 | unmap(g_pool, p, psz); | ||
1056 | errno = ENOMEM; | ||
1057 | return NULL; | ||
1058 | } | ||
1059 | if (mopts.malloc_guard) { | ||
1060 | if (mprotect((char *)p + psz - mopts.malloc_guard, | ||
1061 | mopts.malloc_guard, PROT_NONE)) | ||
1062 | wrterror("mprotect", NULL); | ||
1063 | malloc_guarded += mopts.malloc_guard; | ||
1064 | } | ||
1065 | |||
1066 | if (mopts.malloc_move && | ||
1067 | sz - mopts.malloc_guard < MALLOC_PAGESIZE - | ||
1068 | MALLOC_LEEWAY) { | ||
1069 | /* fill whole allocation */ | ||
1070 | if (mopts.malloc_junk) | ||
1071 | memset(p, SOME_JUNK, psz - mopts.malloc_guard); | ||
1072 | /* shift towards the end */ | ||
1073 | p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY - | ||
1074 | (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1)); | ||
1075 | /* fill zeros if needed and overwritten above */ | ||
1076 | if (zero_fill && mopts.malloc_junk) | ||
1077 | memset(p, 0, sz - mopts.malloc_guard); | ||
1078 | } else { | ||
1079 | if (mopts.malloc_junk) { | ||
1080 | if (zero_fill) | ||
1081 | memset((char *)p + sz - mopts.malloc_guard, | ||
1082 | SOME_JUNK, psz - sz); | ||
1083 | else | ||
1084 | memset(p, SOME_JUNK, | ||
1085 | psz - mopts.malloc_guard); | ||
1086 | } | ||
1087 | } | ||
1088 | |||
1089 | } else { | ||
1090 | /* takes care of SOME_JUNK */ | ||
1091 | p = malloc_bytes(g_pool, sz, f); | ||
1092 | if (zero_fill && p != NULL && sz > 0) | ||
1093 | memset(p, 0, sz); | ||
1094 | } | ||
1095 | |||
1096 | return p; | ||
285 | } | 1097 | } |
286 | 1098 | ||
287 | /* | 1099 | /* |
288 | * When a program attempts "storage compaction" as mentioned in the | 1100 | * Common function for handling recursion. Only |
289 | * old malloc man page, it realloc's an already freed block. Usually | 1101 | * print the error message once, to avoid making the problem |
290 | * this is the last block it freed; occasionally it might be farther | 1102 | * potentially worse. |
291 | * back. We have to search all the free lists for the block in order | ||
292 | * to determine its bucket: 1st we make one pass thru the lists | ||
293 | * checking only the first block in each; if that fails we search | ||
294 | * ``realloc_srchlen'' blocks in each list for a match (the variable | ||
295 | * is extern so the caller can modify it). If that fails we just copy | ||
296 | * however many bytes was given to realloc() and hope it's not huge. | ||
297 | */ | 1103 | */ |
298 | int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ | 1104 | static void |
1105 | malloc_recurse(void) | ||
1106 | { | ||
1107 | static int noprint; | ||
1108 | |||
1109 | if (noprint == 0) { | ||
1110 | noprint = 1; | ||
1111 | wrterror("recursive call", NULL); | ||
1112 | } | ||
1113 | malloc_active--; | ||
1114 | _MALLOC_UNLOCK(); | ||
1115 | errno = EDEADLK; | ||
1116 | } | ||
1117 | |||
1118 | static int | ||
1119 | malloc_init(void) | ||
1120 | { | ||
1121 | if (omalloc_init(&g_pool)) { | ||
1122 | _MALLOC_UNLOCK(); | ||
1123 | if (mopts.malloc_xmalloc) | ||
1124 | wrterror("out of memory", NULL); | ||
1125 | errno = ENOMEM; | ||
1126 | return -1; | ||
1127 | } | ||
1128 | return 0; | ||
1129 | } | ||
299 | 1130 | ||
300 | void * | 1131 | void * |
301 | realloc(cp, nbytes) | 1132 | malloc(size_t size) |
302 | void *cp; | 1133 | { |
303 | size_t nbytes; | 1134 | void *r; |
304 | { | 1135 | int saved_errno = errno; |
305 | register u_long onb; | 1136 | |
306 | register long i; | 1137 | _MALLOC_LOCK(); |
307 | union overhead *op; | 1138 | malloc_func = " in malloc():"; |
308 | char *res; | 1139 | if (g_pool == NULL) { |
309 | int was_alloced = 0; | 1140 | if (malloc_init() != 0) |
310 | 1141 | return NULL; | |
311 | if (cp == NULL) | 1142 | } |
312 | return (malloc(nbytes)); | 1143 | if (malloc_active++) { |
313 | op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); | 1144 | malloc_recurse(); |
314 | if (op->ov_magic == MAGIC) { | 1145 | return NULL; |
315 | was_alloced++; | 1146 | } |
316 | i = op->ov_index; | 1147 | r = omalloc(size, mopts.malloc_zero, CALLER); |
1148 | malloc_active--; | ||
1149 | _MALLOC_UNLOCK(); | ||
1150 | if (r == NULL && mopts.malloc_xmalloc) { | ||
1151 | wrterror("out of memory", NULL); | ||
1152 | errno = ENOMEM; | ||
1153 | } | ||
1154 | if (r != NULL) | ||
1155 | errno = saved_errno; | ||
1156 | return r; | ||
1157 | } | ||
1158 | |||
1159 | static void | ||
1160 | ofree(void *p) | ||
1161 | { | ||
1162 | struct region_info *r; | ||
1163 | size_t sz; | ||
1164 | |||
1165 | r = find(g_pool, p); | ||
1166 | if (r == NULL) { | ||
1167 | wrterror("bogus pointer (double free?)", p); | ||
1168 | return; | ||
1169 | } | ||
1170 | REALSIZE(sz, r); | ||
1171 | if (sz > MALLOC_MAXCHUNK) { | ||
1172 | if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE - | ||
1173 | MALLOC_LEEWAY) { | ||
1174 | if (r->p != p) { | ||
1175 | wrterror("bogus pointer", p); | ||
1176 | return; | ||
1177 | } | ||
1178 | } else { | ||
1179 | #if notyetbecause_of_realloc | ||
1180 | /* shifted towards the end */ | ||
1181 | if (p != ((char *)r->p) + ((MALLOC_PAGESIZE - | ||
1182 | MALLOC_MINSIZE - sz - mopts.malloc_guard) & | ||
1183 | ~(MALLOC_MINSIZE-1))) { | ||
1184 | } | ||
1185 | #endif | ||
1186 | p = r->p; | ||
1187 | } | ||
1188 | if (mopts.malloc_guard) { | ||
1189 | if (sz < mopts.malloc_guard) | ||
1190 | wrterror("guard size", NULL); | ||
1191 | if (!mopts.malloc_freeunmap) { | ||
1192 | if (mprotect((char *)p + PAGEROUND(sz) - | ||
1193 | mopts.malloc_guard, mopts.malloc_guard, | ||
1194 | PROT_READ | PROT_WRITE)) | ||
1195 | wrterror("mprotect", NULL); | ||
1196 | } | ||
1197 | malloc_guarded -= mopts.malloc_guard; | ||
1198 | } | ||
1199 | if (mopts.malloc_junk && !mopts.malloc_freeunmap) | ||
1200 | memset(p, SOME_FREEJUNK, | ||
1201 | PAGEROUND(sz) - mopts.malloc_guard); | ||
1202 | unmap(g_pool, p, PAGEROUND(sz)); | ||
1203 | delete(g_pool, r); | ||
1204 | } else { | ||
1205 | void *tmp; | ||
1206 | int i; | ||
1207 | |||
1208 | if (mopts.malloc_junk && sz > 0) | ||
1209 | memset(p, SOME_FREEJUNK, sz); | ||
1210 | if (!mopts.malloc_freenow) { | ||
1211 | i = getrnibble(); | ||
1212 | tmp = p; | ||
1213 | p = g_pool->delayed_chunks[i]; | ||
1214 | g_pool->delayed_chunks[i] = tmp; | ||
1215 | } | ||
1216 | if (p != NULL) { | ||
1217 | r = find(g_pool, p); | ||
1218 | if (r == NULL) { | ||
1219 | wrterror("bogus pointer (double free?)", p); | ||
1220 | return; | ||
1221 | } | ||
1222 | free_bytes(g_pool, r, p); | ||
1223 | } | ||
1224 | } | ||
1225 | } | ||
1226 | |||
1227 | void | ||
1228 | free(void *ptr) | ||
1229 | { | ||
1230 | int saved_errno = errno; | ||
1231 | |||
1232 | /* This is legal. */ | ||
1233 | if (ptr == NULL) | ||
1234 | return; | ||
1235 | |||
1236 | _MALLOC_LOCK(); | ||
1237 | malloc_func = " in free():"; | ||
1238 | if (g_pool == NULL) { | ||
1239 | _MALLOC_UNLOCK(); | ||
1240 | wrterror("free() called before allocation", NULL); | ||
1241 | return; | ||
1242 | } | ||
1243 | if (malloc_active++) { | ||
1244 | malloc_recurse(); | ||
1245 | return; | ||
1246 | } | ||
1247 | ofree(ptr); | ||
1248 | malloc_active--; | ||
1249 | _MALLOC_UNLOCK(); | ||
1250 | errno = saved_errno; | ||
1251 | } | ||
1252 | |||
1253 | |||
1254 | static void * | ||
1255 | orealloc(void *p, size_t newsz, void *f) | ||
1256 | { | ||
1257 | struct region_info *r; | ||
1258 | size_t oldsz, goldsz, gnewsz; | ||
1259 | void *q; | ||
1260 | |||
1261 | if (p == NULL) | ||
1262 | return omalloc(newsz, 0, f); | ||
1263 | |||
1264 | r = find(g_pool, p); | ||
1265 | if (r == NULL) { | ||
1266 | wrterror("bogus pointer (double free?)", p); | ||
1267 | return NULL; | ||
1268 | } | ||
1269 | if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { | ||
1270 | errno = ENOMEM; | ||
1271 | return NULL; | ||
1272 | } | ||
1273 | |||
1274 | REALSIZE(oldsz, r); | ||
1275 | goldsz = oldsz; | ||
1276 | if (oldsz > MALLOC_MAXCHUNK) { | ||
1277 | if (oldsz < mopts.malloc_guard) | ||
1278 | wrterror("guard size", NULL); | ||
1279 | oldsz -= mopts.malloc_guard; | ||
1280 | } | ||
1281 | |||
1282 | gnewsz = newsz; | ||
1283 | if (gnewsz > MALLOC_MAXCHUNK) | ||
1284 | gnewsz += mopts.malloc_guard; | ||
1285 | |||
1286 | if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p && | ||
1287 | !mopts.malloc_realloc) { | ||
1288 | size_t roldsz = PAGEROUND(goldsz); | ||
1289 | size_t rnewsz = PAGEROUND(gnewsz); | ||
1290 | |||
1291 | if (rnewsz > roldsz) { | ||
1292 | if (!mopts.malloc_guard) { | ||
1293 | void *hint = (char *)p + roldsz; | ||
1294 | size_t needed = rnewsz - roldsz; | ||
1295 | |||
1296 | STATS_INC(g_pool->cheap_realloc_tries); | ||
1297 | zapcacheregion(g_pool, hint, needed); | ||
1298 | q = MQUERY(hint, needed); | ||
1299 | if (q == hint) | ||
1300 | q = MMAPA(hint, needed); | ||
1301 | else | ||
1302 | q = MAP_FAILED; | ||
1303 | if (q == hint) { | ||
1304 | malloc_used += needed; | ||
1305 | if (mopts.malloc_junk) | ||
1306 | memset(q, SOME_JUNK, needed); | ||
1307 | r->size = newsz; | ||
1308 | STATS_SETF(r, f); | ||
1309 | STATS_INC(g_pool->cheap_reallocs); | ||
1310 | return p; | ||
1311 | } else if (q != MAP_FAILED) { | ||
1312 | if (munmap(q, needed)) | ||
1313 | wrterror("munmap", q); | ||
1314 | } | ||
1315 | } | ||
1316 | } else if (rnewsz < roldsz) { | ||
1317 | if (mopts.malloc_guard) { | ||
1318 | if (mprotect((char *)p + roldsz - | ||
1319 | mopts.malloc_guard, mopts.malloc_guard, | ||
1320 | PROT_READ | PROT_WRITE)) | ||
1321 | wrterror("mprotect", NULL); | ||
1322 | if (mprotect((char *)p + rnewsz - | ||
1323 | mopts.malloc_guard, mopts.malloc_guard, | ||
1324 | PROT_NONE)) | ||
1325 | wrterror("mprotect", NULL); | ||
1326 | } | ||
1327 | unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz); | ||
1328 | r->size = gnewsz; | ||
1329 | STATS_SETF(r, f); | ||
1330 | return p; | ||
1331 | } else { | ||
1332 | if (newsz > oldsz && mopts.malloc_junk) | ||
1333 | memset((char *)p + newsz, SOME_JUNK, | ||
1334 | rnewsz - mopts.malloc_guard - newsz); | ||
1335 | r->size = gnewsz; | ||
1336 | STATS_SETF(r, f); | ||
1337 | return p; | ||
1338 | } | ||
1339 | } | ||
1340 | if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) { | ||
1341 | if (mopts.malloc_junk && newsz > 0) | ||
1342 | memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); | ||
1343 | STATS_SETF(r, f); | ||
1344 | return p; | ||
1345 | } else if (newsz != oldsz || mopts.malloc_realloc) { | ||
1346 | q = omalloc(newsz, 0, f); | ||
1347 | if (q == NULL) | ||
1348 | return NULL; | ||
1349 | if (newsz != 0 && oldsz != 0) | ||
1350 | memcpy(q, p, oldsz < newsz ? oldsz : newsz); | ||
1351 | ofree(p); | ||
1352 | return q; | ||
317 | } else { | 1353 | } else { |
1354 | STATS_SETF(r, f); | ||
1355 | return p; | ||
1356 | } | ||
1357 | } | ||
1358 | |||
1359 | void * | ||
1360 | realloc(void *ptr, size_t size) | ||
1361 | { | ||
1362 | void *r; | ||
1363 | int saved_errno = errno; | ||
1364 | |||
1365 | _MALLOC_LOCK(); | ||
1366 | malloc_func = " in realloc():"; | ||
1367 | if (g_pool == NULL) { | ||
1368 | if (malloc_init() != 0) | ||
1369 | return NULL; | ||
1370 | } | ||
1371 | if (malloc_active++) { | ||
1372 | malloc_recurse(); | ||
1373 | return NULL; | ||
1374 | } | ||
1375 | r = orealloc(ptr, size, CALLER); | ||
1376 | |||
1377 | malloc_active--; | ||
1378 | _MALLOC_UNLOCK(); | ||
1379 | if (r == NULL && mopts.malloc_xmalloc) { | ||
1380 | wrterror("out of memory", NULL); | ||
1381 | errno = ENOMEM; | ||
1382 | } | ||
1383 | if (r != NULL) | ||
1384 | errno = saved_errno; | ||
1385 | return r; | ||
1386 | } | ||
1387 | |||
1388 | |||
1389 | #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) | ||
1390 | |||
1391 | void * | ||
1392 | calloc(size_t nmemb, size_t size) | ||
1393 | { | ||
1394 | void *r; | ||
1395 | int saved_errno = errno; | ||
1396 | |||
1397 | _MALLOC_LOCK(); | ||
1398 | malloc_func = " in calloc():"; | ||
1399 | if (g_pool == NULL) { | ||
1400 | if (malloc_init() != 0) | ||
1401 | return NULL; | ||
1402 | } | ||
1403 | if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && | ||
1404 | nmemb > 0 && SIZE_MAX / nmemb < size) { | ||
1405 | _MALLOC_UNLOCK(); | ||
1406 | if (mopts.malloc_xmalloc) | ||
1407 | wrterror("out of memory", NULL); | ||
1408 | errno = ENOMEM; | ||
1409 | return NULL; | ||
1410 | } | ||
1411 | |||
1412 | if (malloc_active++) { | ||
1413 | malloc_recurse(); | ||
1414 | return NULL; | ||
1415 | } | ||
1416 | |||
1417 | size *= nmemb; | ||
1418 | r = omalloc(size, 1, CALLER); | ||
1419 | |||
1420 | malloc_active--; | ||
1421 | _MALLOC_UNLOCK(); | ||
1422 | if (r == NULL && mopts.malloc_xmalloc) { | ||
1423 | wrterror("out of memory", NULL); | ||
1424 | errno = ENOMEM; | ||
1425 | } | ||
1426 | if (r != NULL) | ||
1427 | errno = saved_errno; | ||
1428 | return r; | ||
1429 | } | ||
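
The overflow guard in calloc() is the standard two-step trick: MUL_NO_OVERFLOW is 2 raised to half the bit width of size_t (2^32 on LP64), and when both factors are below that bound their product cannot overflow, so the comparatively expensive division only runs for large operands. Restated as a self-contained predicate (mul_overflows is a hypothetical name):

	#include <stdint.h>
	#include <stddef.h>

	#define MUL_NO_OVERFLOW	((size_t)1 << (sizeof(size_t) * 4))

	/* Nonzero iff nmemb * size would overflow a size_t. */
	static int
	mul_overflows(size_t nmemb, size_t size)
	{
		return (nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		    nmemb > 0 && SIZE_MAX / nmemb < size;
	}
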
1430 | |||
1431 | static void * | ||
1432 | mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) | ||
1433 | { | ||
1434 | char *p, *q; | ||
1435 | |||
1436 | if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) { | ||
1437 | wrterror("mapalign bad alignment", NULL); | ||
1438 | return MAP_FAILED; | ||
1439 | } | ||
1440 | if (sz != PAGEROUND(sz)) { | ||
1441 | wrterror("mapalign round", NULL); | ||
1442 | return MAP_FAILED; | ||
1443 | } | ||
1444 | |||
1445 | /* Allocate sz + alignment bytes of memory, which must include a | ||
1446 | * subrange of sz bytes that is properly aligned. Unmap the | ||
1447 | * other bytes, and then return that subrange. | ||
1448 | */ | ||
1449 | |||
1450 | /* We need sz + alignment to fit into a size_t. */ | ||
1451 | if (alignment > SIZE_MAX - sz) | ||
1452 | return MAP_FAILED; | ||
1453 | |||
1454 | p = map(d, sz + alignment, zero_fill); | ||
1455 | if (p == MAP_FAILED) | ||
1456 | return MAP_FAILED; | ||
1457 | q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); | ||
1458 | if (q != p) { | ||
1459 | if (munmap(p, q - p)) | ||
1460 | wrterror("munmap", p); | ||
1461 | } | ||
1462 | if (munmap(q + sz, alignment - (q - p))) | ||
1463 | wrterror("munmap", q + sz); | ||
1464 | malloc_used -= alignment; | ||
1465 | |||
1466 | return q; | ||
1467 | } | ||
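
To make the trim arithmetic concrete, assume 4 KB pages, sz = 0x2000 and alignment = 0x4000: map() returns sz + alignment = 0x6000 bytes, say at p = 0x7000; rounding up gives q = 0x8000, so the 0x1000-byte head and the 0x3000-byte tail at q + sz = 0xA000 are unmapped, leaving exactly sz aligned bytes. The same numbers checked in code (all addresses hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uintptr_t p = 0x7000, sz = 0x2000, alignment = 0x4000;
		uintptr_t q = (p + alignment - 1) & ~(alignment - 1);

		printf("q    = %#jx\n", (uintmax_t)q);		/* 0x8000 */
		printf("head = %#jx\n", (uintmax_t)(q - p));	/* 0x1000 */
		printf("tail = %#jx at %#jx\n",
		    (uintmax_t)(alignment - (q - p)),		/* 0x3000 */
		    (uintmax_t)(q + sz));			/* 0xa000 */
		return 0;
	}
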
1468 | |||
1469 | static void * | ||
1470 | omemalign(size_t alignment, size_t sz, int zero_fill, void *f) | ||
1471 | { | ||
1472 | size_t psz; | ||
1473 | void *p; | ||
1474 | |||
1475 | if (alignment <= MALLOC_PAGESIZE) { | ||
318 | /* | 1476 | /* |
319 | * Already free, doing "compaction". | 1477 | * max(size, alignment) is enough to ensure the requested alignment,
320 | * | 1478 | * since the allocator always allocates power-of-two blocks. |
321 | * Search for the old block of memory on the | ||
322 | * free list. First, check the most common | ||
323 | * case (last element free'd), then (this failing) | ||
324 | * the last ``realloc_srchlen'' items free'd. | ||
325 | * If all lookups fail, then assume the size of | ||
326 | * the memory block being realloc'd is the | ||
327 | * largest possible (so that all "nbytes" of new | ||
328 | * memory are copied into). Note that this could cause | ||
329 | * a memory fault if the old area was tiny, and the moon | ||
330 | * is gibbous. However, that is very unlikely. | ||
331 | */ | 1479 | */ |
332 | if ((i = findbucket(op, 1)) < 0 && | 1480 | if (sz < alignment) |
333 | (i = findbucket(op, realloc_srchlen)) < 0) | 1481 | sz = alignment; |
334 | i = NBUCKETS; | 1482 | return omalloc(sz, zero_fill, f); |
335 | } | 1483 | } |
336 | onb = 1 << (i + 3); | 1484 | |
337 | if (onb < pagesz) | 1485 | if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { |
338 | onb -= sizeof (*op) + RSLOP; | 1486 | errno = ENOMEM; |
339 | else | 1487 | return NULL; |
340 | onb += pagesz - sizeof (*op) - RSLOP; | 1488 | } |
341 | /* avoid the copy if same size block */ | 1489 | |
342 | if (was_alloced) { | 1490 | sz += mopts.malloc_guard; |
343 | if (i) { | 1491 | psz = PAGEROUND(sz); |
344 | i = 1 << (i + 2); | 1492 | |
345 | if (i < pagesz) | 1493 | p = mapalign(g_pool, alignment, psz, zero_fill); |
346 | i -= sizeof (*op) + RSLOP; | 1494 | if (p == NULL) { |
347 | else | 1495 | errno = ENOMEM; |
348 | i += pagesz - sizeof (*op) - RSLOP; | 1496 | return NULL; |
1497 | } | ||
1498 | |||
1499 | if (insert(g_pool, p, sz, f)) { | ||
1500 | unmap(g_pool, p, psz); | ||
1501 | errno = ENOMEM; | ||
1502 | return NULL; | ||
1503 | } | ||
1504 | |||
1505 | if (mopts.malloc_guard) { | ||
1506 | if (mprotect((char *)p + psz - mopts.malloc_guard, | ||
1507 | mopts.malloc_guard, PROT_NONE)) | ||
1508 | wrterror("mprotect", NULL); | ||
1509 | malloc_guarded += mopts.malloc_guard; | ||
1510 | } | ||
1511 | |||
1512 | if (mopts.malloc_junk) { | ||
1513 | if (zero_fill) | ||
1514 | memset((char *)p + sz - mopts.malloc_guard, | ||
1515 | SOME_JUNK, psz - sz); | ||
1516 | else | ||
1517 | memset(p, SOME_JUNK, psz - mopts.malloc_guard); | ||
1518 | } | ||
1519 | |||
1520 | return p; | ||
1521 | } | ||
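
The mprotect(PROT_NONE) call above is the whole of the guard mechanism: the final malloc_guard bytes of the rounded region become an inaccessible red zone, so a sequential overrun faults at once instead of silently corrupting a neighbouring allocation. The same technique in standalone form, assuming a one-page guard (alloc_with_guard is a hypothetical name):

	#include <sys/mman.h>
	#include <stddef.h>
	#include <unistd.h>

	/* npages usable pages followed by one trapping guard page. */
	static void *
	alloc_with_guard(size_t npages)
	{
		size_t pg = (size_t)sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, (npages + 1) * pg,
		    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

		if (p == MAP_FAILED)
			return NULL;
		if (mprotect(p + npages * pg, pg, PROT_NONE) == -1) {
			munmap(p, (npages + 1) * pg);
			return NULL;
		}
		return p;	/* touching p[npages * pg] raises SIGSEGV */
	}
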
1522 | |||
1523 | int | ||
1524 | posix_memalign(void **memptr, size_t alignment, size_t size) | ||
1525 | { | ||
1526 | int res, saved_errno = errno; | ||
1527 | void *r; | ||
1528 | |||
1529 | /* Make sure that alignment is a large enough power of 2. */ | ||
1530 | if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) | ||
1531 | return EINVAL; | ||
1532 | |||
1533 | _MALLOC_LOCK(); | ||
1534 | malloc_func = " in posix_memalign():"; | ||
1535 | if (g_pool == NULL) { | ||
1536 | if (malloc_init() != 0) | ||
1537 | goto err; | ||
1538 | } | ||
1539 | if (malloc_active++) { | ||
1540 | malloc_recurse(); | ||
1541 | goto err; | ||
1542 | } | ||
1543 | r = omemalign(alignment, size, mopts.malloc_zero, CALLER); | ||
1544 | malloc_active--; | ||
1545 | _MALLOC_UNLOCK(); | ||
1546 | if (r == NULL) { | ||
1547 | if (mopts.malloc_xmalloc) { | ||
1548 | wrterror("out of memory", NULL); | ||
1549 | errno = ENOMEM; | ||
349 | } | 1550 | } |
350 | if (nbytes <= onb && nbytes > i) { | 1551 | goto err; |
351 | #ifdef RCHECK | ||
352 | op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); | ||
353 | *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; | ||
354 | #endif | ||
355 | return(cp); | ||
356 | } else | ||
357 | free(cp); | ||
358 | } | 1552 | } |
359 | if ((res = malloc(nbytes)) == NULL) | 1553 | errno = saved_errno; |
360 | return (NULL); | 1554 | *memptr = r; |
361 | if (cp != res) /* common optimization if "compacting" */ | 1555 | return 0; |
362 | bcopy(cp, res, (nbytes < onb) ? nbytes : onb); | 1556 | |
363 | return (res); | 1557 | err: |
1558 | res = errno; | ||
1559 | errno = saved_errno; | ||
1560 | return res; | ||
364 | } | 1561 | } |
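
From the caller's side, posix_memalign() is unusual among allocation routines in reporting failure through its return value while leaving errno untouched, which is exactly why the implementation above restores saved_errno on every exit path. Typical use (use_aligned_buffer is a hypothetical helper):

	#include <stdlib.h>
	#include <string.h>

	int
	use_aligned_buffer(void)
	{
		void *buf;
		int rc = posix_memalign(&buf, 64, 4096);  /* 64-byte aligned */

		if (rc != 0)	/* EINVAL or ENOMEM; errno unchanged */
			return rc;
		memset(buf, 0, 4096);
		/* ... use buf ... */
		free(buf);
		return 0;
	}
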
365 | 1562 | ||
366 | /* | 1563 | #ifdef MALLOC_STATS |
367 | * Search ``srchlen'' elements of each free list for a block whose | 1564 | |
368 | * header starts at ``freep''. If srchlen is -1 search the whole list. | 1565 | struct malloc_leak { |
369 | * Return bucket number, or -1 if not found. | 1566 | void (*f)(); |
370 | */ | 1567 | size_t total_size; |
371 | static | 1568 | int count; |
372 | findbucket(freep, srchlen) | 1569 | }; |
373 | union overhead *freep; | 1570 | |
374 | int srchlen; | 1571 | struct leaknode { |
1572 | RB_ENTRY(leaknode) entry; | ||
1573 | struct malloc_leak d; | ||
1574 | }; | ||
1575 | |||
1576 | static int | ||
1577 | leakcmp(struct leaknode *e1, struct leaknode *e2) | ||
375 | { | 1578 | { |
376 | register union overhead *p; | 1579 | return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f; |
377 | register int i, j; | 1580 | } |
378 | 1581 | ||
379 | for (i = 0; i < NBUCKETS; i++) { | 1582 | static RB_HEAD(leaktree, leaknode) leakhead; |
380 | j = 0; | 1583 | RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp) |
381 | for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { | 1584 | |
382 | if (p == freep) | 1585 | static void |
383 | return (i); | 1586 | putleakinfo(void *f, size_t sz, int cnt) |
384 | j++; | 1587 | { |
1588 | struct leaknode key, *p; | ||
1589 | static struct leaknode *page; | ||
1590 | static int used; | ||
1591 | |||
1592 | if (cnt == 0) | ||
1593 | return; | ||
1594 | |||
1595 | key.d.f = f; | ||
1596 | p = RB_FIND(leaktree, &leakhead, &key); | ||
1597 | if (p == NULL) { | ||
1598 | if (page == NULL || | ||
1599 | used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { | ||
1600 | page = MMAP(MALLOC_PAGESIZE); | ||
1601 | if (page == MAP_FAILED) | ||
1602 | return; | ||
1603 | used = 0; | ||
385 | } | 1604 | } |
1605 | p = &page[used++]; | ||
1606 | p->d.f = f; | ||
1607 | p->d.total_size = sz * cnt; | ||
1608 | p->d.count = cnt; | ||
1609 | RB_INSERT(leaktree, &leakhead, p); | ||
1610 | } else { | ||
1611 | p->d.total_size += sz * cnt; | ||
1612 | p->d.count += cnt; | ||
386 | } | 1613 | } |
387 | return (-1); | ||
388 | } | 1614 | } |
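
Note how putleakinfo() obtains its tree nodes: never from malloc() itself, but carved page-at-a-time straight out of MMAP, so building the leak report cannot recurse into the allocator being inspected. The carving restated as a standalone sketch (the names and the 4096-byte page size are stand-ins):

	#include <sys/mman.h>
	#include <stddef.h>

	#define PAGE_SZ 4096		/* stands in for MALLOC_PAGESIZE */

	struct node { void *key; };	/* stands in for struct leaknode */

	static struct node *node_page;	/* current page of nodes */
	static size_t node_used;	/* nodes handed out from it */

	static struct node *
	new_node(void)
	{
		if (node_page == NULL ||
		    node_used >= PAGE_SZ / sizeof(struct node)) {
			node_page = mmap(NULL, PAGE_SZ,
			    PROT_READ | PROT_WRITE,
			    MAP_ANON | MAP_PRIVATE, -1, 0);
			if (node_page == MAP_FAILED)
				return NULL;
			node_used = 0;
		}
		return &node_page[node_used++];
	}
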
389 | 1615 | ||
390 | #ifdef MSTATS | 1616 | static struct malloc_leak *malloc_leaks; |
391 | /* | 1617 | |
392 | * mstats - print out statistics about malloc | 1618 | static void |
393 | * | 1619 | dump_leaks(int fd) |
394 | * Prints two lines of numbers, one showing the length of the free list | 1620 | { |
395 | * for each size category, the second showing the number of mallocs - | 1621 | struct leaknode *p; |
396 | * frees for each size category. | 1622 | char buf[64]; |
397 | */ | 1623 | int i = 0; |
398 | mstats(s) | 1624 | |
399 | char *s; | 1625 | snprintf(buf, sizeof(buf), "Leak report\n"); |
400 | { | 1626 | write(fd, buf, strlen(buf)); |
401 | register int i, j; | 1627 | snprintf(buf, sizeof(buf), " f sum # avg\n"); |
402 | register union overhead *p; | 1628 | write(fd, buf, strlen(buf)); |
403 | int totfree = 0, | 1629 | /* XXX only one page of summary */ |
404 | totused = 0; | 1630 | if (malloc_leaks == NULL) |
405 | 1631 | malloc_leaks = MMAP(MALLOC_PAGESIZE); | |
406 | fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); | 1632 | if (malloc_leaks != MAP_FAILED) |
407 | for (i = 0; i < NBUCKETS; i++) { | 1633 | memset(malloc_leaks, 0, MALLOC_PAGESIZE); |
408 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) | 1634 | RB_FOREACH(p, leaktree, &leakhead) { |
409 | ; | 1635 | snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f, |
410 | fprintf(stderr, " %d", j); | 1636 | p->d.total_size, p->d.count, p->d.total_size / p->d.count); |
411 | totfree += j * (1 << (i + 3)); | 1637 | write(fd, buf, strlen(buf)); |
412 | } | 1638 | if (malloc_leaks == MAP_FAILED || |
413 | fprintf(stderr, "\nused:\t"); | 1639 | i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak)) |
414 | for (i = 0; i < NBUCKETS; i++) { | 1640 | continue; |
415 | fprintf(stderr, " %d", nmalloc[i]); | 1641 | malloc_leaks[i].f = p->d.f; |
416 | totused += nmalloc[i] * (1 << (i + 3)); | 1642 | malloc_leaks[i].total_size = p->d.total_size; |
417 | } | 1643 | malloc_leaks[i].count = p->d.count; |
418 | fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", | 1644 | i++; |
419 | totused, totfree); | 1645 | } |
420 | } | 1646 | } |
421 | #endif | 1647 | |
1648 | static void | ||
1649 | dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist) | ||
1650 | { | ||
1651 | char buf[64]; | ||
1652 | |||
1653 | while (p != NULL) { | ||
1654 | snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n", | ||
1655 | p->page, ((p->bits[0] & 1) ? NULL : f), | ||
1656 | p->size, p->free, p->total); | ||
1657 | write(fd, buf, strlen(buf)); | ||
1658 | if (!fromfreelist) { | ||
1659 | if (p->bits[0] & 1) | ||
1660 | putleakinfo(NULL, p->size, p->total - p->free); | ||
1661 | else { | ||
1662 | putleakinfo(f, p->size, 1); | ||
1663 | putleakinfo(NULL, p->size, | ||
1664 | p->total - p->free - 1); | ||
1665 | } | ||
1666 | break; | ||
1667 | } | ||
1668 | p = LIST_NEXT(p, entries); | ||
1669 | if (p != NULL) { | ||
1670 | snprintf(buf, sizeof(buf), " "); | ||
1671 | write(fd, buf, strlen(buf)); | ||
1672 | } | ||
1673 | } | ||
1674 | } | ||
1675 | |||
1676 | static void | ||
1677 | dump_free_chunk_info(int fd, struct dir_info *d) | ||
1678 | { | ||
1679 | char buf[64]; | ||
1680 | int i, count; | ||
1681 | |||
1682 | snprintf(buf, sizeof(buf), "Free chunk structs:\n"); | ||
1683 | write(fd, buf, strlen(buf)); | ||
1684 | for (i = 0; i <= MALLOC_MAXSHIFT; i++) { | ||
1685 | struct chunk_info *p; | ||
1686 | |||
1687 | count = 0; | ||
1688 | LIST_FOREACH(p, &d->chunk_info_list[i], entries) | ||
1689 | count++; | ||
1690 | p = LIST_FIRST(&d->chunk_dir[i]); | ||
1691 | if (p == NULL && count == 0) | ||
1692 | continue; | ||
1693 | snprintf(buf, sizeof(buf), "%2d) %3d ", i, count); | ||
1694 | write(fd, buf, strlen(buf)); | ||
1695 | if (p != NULL) | ||
1696 | dump_chunk(fd, p, NULL, 1); | ||
1697 | else | ||
1698 | write(fd, "\n", 1); | ||
1699 | } | ||
1700 | |||
1701 | } | ||
1702 | |||
1703 | static void | ||
1704 | dump_free_page_info(int fd, struct dir_info *d) | ||
1705 | { | ||
1706 | char buf[64]; | ||
1707 | int i; | ||
1708 | |||
1709 | snprintf(buf, sizeof(buf), "Free pages cached: %zu\n", | ||
1710 | d->free_regions_size); | ||
1711 | write(fd, buf, strlen(buf)); | ||
1712 | for (i = 0; i < mopts.malloc_cache; i++) { | ||
1713 | if (d->free_regions[i].p != NULL) { | ||
1714 | snprintf(buf, sizeof(buf), "%2d) ", i); | ||
1715 | write(fd, buf, strlen(buf)); | ||
1716 | snprintf(buf, sizeof(buf), "free at %p: %zu\n", | ||
1717 | d->free_regions[i].p, d->free_regions[i].size); | ||
1718 | write(fd, buf, strlen(buf)); | ||
1719 | } | ||
1720 | } | ||
1721 | } | ||
1722 | |||
1723 | static void | ||
1724 | malloc_dump1(int fd, struct dir_info *d) | ||
1725 | { | ||
1726 | char buf[64]; | ||
1727 | size_t i, realsize; | ||
1728 | |||
1729 | snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d); | ||
1730 | write(fd, buf, strlen(buf)); | ||
1731 | if (d == NULL) | ||
1732 | return; | ||
1733 | snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n", | ||
1734 | d->regions_free, d->regions_total); | ||
1735 | write(fd, buf, strlen(buf)); | ||
1736 | snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds, | ||
1737 | d->find_collisions); | ||
1738 | write(fd, buf, strlen(buf)); | ||
1739 | snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts, | ||
1740 | d->insert_collisions); | ||
1741 | write(fd, buf, strlen(buf)); | ||
1742 | snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes, | ||
1743 | d->delete_moves); | ||
1744 | write(fd, buf, strlen(buf)); | ||
1745 | snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n", | ||
1746 | d->cheap_reallocs, d->cheap_realloc_tries); | ||
1747 | write(fd, buf, strlen(buf)); | ||
1748 | dump_free_chunk_info(fd, d); | ||
1749 | dump_free_page_info(fd, d); | ||
1750 | snprintf(buf, sizeof(buf), | ||
1751 | "slot) hash d type page f size [free/n]\n"); | ||
1752 | write(fd, buf, strlen(buf)); | ||
1753 | for (i = 0; i < d->regions_total; i++) { | ||
1754 | if (d->r[i].p != NULL) { | ||
1755 | size_t h = hash(d->r[i].p) & | ||
1756 | (d->regions_total - 1); | ||
1757 | snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ", | ||
1758 | i, h, h - i); | ||
1759 | write(fd, buf, strlen(buf)); | ||
1760 | REALSIZE(realsize, &d->r[i]); | ||
1761 | if (realsize > MALLOC_MAXCHUNK) { | ||
1762 | putleakinfo(d->r[i].f, realsize, 1); | ||
1763 | snprintf(buf, sizeof(buf), | ||
1764 | "pages %12p %12p %zu\n", d->r[i].p, | ||
1765 | d->r[i].f, realsize); | ||
1766 | write(fd, buf, strlen(buf)); | ||
1767 | } else | ||
1768 | dump_chunk(fd, | ||
1769 | (struct chunk_info *)d->r[i].size, | ||
1770 | d->r[i].f, 0); | ||
1771 | } | ||
1772 | } | ||
1773 | snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used); | ||
1774 | write(fd, buf, strlen(buf)); | ||
1775 | snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded); | ||
1776 | write(fd, buf, strlen(buf)); | ||
1777 | dump_leaks(fd); | ||
1778 | write(fd, "\n", 1); | ||
1779 | } | ||
1780 | |||
1781 | void | ||
1782 | malloc_dump(int fd) | ||
1783 | { | ||
1784 | int i; | ||
1785 | void *p; | ||
1786 | struct region_info *r; | ||
1787 | int saved_errno = errno; | ||
1788 | |||
1789 | for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) { | ||
1790 | p = g_pool->delayed_chunks[i]; | ||
1791 | if (p == NULL) | ||
1792 | continue; | ||
1793 | r = find(g_pool, p); | ||
1794 | if (r == NULL) | ||
1795 | wrterror("bogus pointer in malloc_dump", p); | ||
1796 | free_bytes(g_pool, r, p); | ||
1797 | g_pool->delayed_chunks[i] = NULL; | ||
1798 | } | ||
1799 | /* XXX leak when run multiple times */ | ||
1800 | RB_INIT(&leakhead); | ||
1801 | malloc_dump1(fd, g_pool); | ||
1802 | errno = saved_errno; | ||
1803 | } | ||
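
malloc_dump() is compiled only under MALLOC_STATS and first flushes the delayed-free queue so the dump reflects every live region. A test harness might drive it like this (dump_allocator_state is a hypothetical helper):

	#include <fcntl.h>
	#include <unistd.h>

	void malloc_dump(int);	/* exported above, MALLOC_STATS builds only */

	void
	dump_allocator_state(const char *path)
	{
		int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (fd != -1) {
			malloc_dump(fd);
			close(fd);
		}
	}
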
1804 | |||
1805 | static void | ||
1806 | malloc_exit(void) | ||
1807 | { | ||
1808 | static const char q[] = "malloc() warning: Couldn't dump stats\n"; | ||
1809 | int save_errno = errno, fd; | ||
1810 | |||
1811 | fd = open("malloc.out", O_RDWR|O_APPEND); | ||
1812 | if (fd != -1) { | ||
1813 | malloc_dump(fd); | ||
1814 | close(fd); | ||
1815 | } else | ||
1816 | write(STDERR_FILENO, q, sizeof(q) - 1); | ||
1817 | errno = save_errno; | ||
1818 | } | ||
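
One operational detail of malloc_exit(): the open() uses O_RDWR|O_APPEND without O_CREAT, so the report is written only if a malloc.out file already exists in the current directory (created beforehand with, say, touch malloc.out); otherwise the warning goes to stderr. The registration of this handler, presumably via atexit() when statistics are requested, happens in option-handling code outside this hunk.
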
1819 | |||
1820 | #endif /* MALLOC_STATS */ | ||