/*	$OpenBSD: malloc.c,v 1.140 2011/10/06 14:37:04 otto Exp $	*/
/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Parts of this code, mainly the sub-page-sized chunk management code,
 * are derived from the malloc implementation with the following license:
 */
/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PGSHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow.  Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

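/*
 * Illustrative sketch (not part of the original file): PAGEROUND rounds a
 * byte count up to a whole number of pages by adding the page mask and
 * clearing the low bits.  The MALLOC_EXAMPLES guard is hypothetical, added
 * only so these examples never reach real builds.
 */
#ifdef MALLOC_EXAMPLES
static void
pageround_example(void)
{
	size_t a = PAGEROUND(1);			/* 4096 on 4K pages */
	size_t b = PAGEROUND(MALLOC_PAGESIZE);		/* already aligned: 4096 */
	size_t c = PAGEROUND(MALLOC_PAGESIZE + 1);	/* rounds up: 8192 */
	(void)a; (void)b; (void)c;
}
#endif
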
/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.  Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
	/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1];
	size_t free_regions_size;	/* free pages cached */
	/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
	/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
    ~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;		/* pointer to the page */
	u_int32_t canary;
	u_short size;		/* size of this page's chunks */
	u_short shift;		/* how far to shift for this size */
	u_short free;		/* how many free chunks */
	u_short total;		/* how many chunks */
	/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *g_pool;	/* Main bookkeeping information */
	int	malloc_abort;		/* abort() on error */
	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	int	malloc_zero;		/* zero fill? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define g_pool	mopts.g_pool

char		*malloc_options;	/* compile-time options */

static char	*malloc_func;		/* current function */
static int	malloc_active;		/* status of malloc */

static size_t	malloc_guarded;		/* bytes used for guards */
static size_t	malloc_used;		/* bytes allocated */

static size_t	rnibblesused;		/* random nibbles used */
static u_char	rbytes[512];		/* random bytes */
static u_char	getrnibble(void);

extern char	*__progname;

#ifdef MALLOC_STATS
void malloc_dump(int);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* low bits of r->p determine size: 0 means >= page size and r->size holding
 * the real size, otherwise the low bits are a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))

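/*
 * Illustrative sketch (not part of the original file): how REALSIZE
 * decodes the tag kept in the page-offset bits of r->p.  Tag 0 means a
 * page-sized or larger region whose byte count is in r->size; tag 1 is
 * a malloc(0) chunk (real size 0); tag n > 1 means a chunk of
 * 1 << (n - 1) bytes.  MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static size_t
realsize_example(struct region_info *r)
{
	size_t sz;

	REALSIZE(sz, r);	/* e.g. tag 7 decodes to 1 << 6 == 64 bytes */
	return sz;
}
#endif
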
static inline size_t
hash(void *p)
{
	size_t sum;
	union {
		uintptr_t p;
		unsigned short a[sizeof(void *) / sizeof(short)];
	} u;
	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u.a[0];
	sum = (sum << 7) - sum + u.a[1];
#ifdef __LP64__
	sum = (sum << 7) - sum + u.a[2];
	sum = (sum << 7) - sum + u.a[3];
#endif
	return sum;
}

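/*
 * Illustrative sketch (not part of the original file): hash() folds the
 * 16-bit halves of the page number using the multiplier 127, since
 * (sum << 7) - sum == sum * 127.  This model matches hash() only on a
 * 32-bit little-endian target (where a[0] is the low half); it is here
 * to make the folding explicit.  MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static size_t
hash_model(void *p)
{
	uintptr_t page = (uintptr_t)p >> MALLOC_PAGESHIFT;
	size_t sum = page & 0xffff;

	sum = sum * 127 + (page >> 16);	/* same as (sum << 7) - sum + ... */
	return sum;
}
#endif
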
static void
wrterror(char *msg, void *p)
{
	char		*q = " error: ";
	struct iovec	iov[6];
	char		buf[20];
	int		saved_errno = errno;

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = malloc_func;
	iov[1].iov_len = strlen(malloc_func);
	iov[2].iov_base = q;
	iov[2].iov_len = strlen(q);
	iov[3].iov_base = msg;
	iov[3].iov_len = strlen(msg);
	iov[4].iov_base = buf;
	if (p == NULL)
		iov[4].iov_len = 0;
	else {
		snprintf(buf, sizeof(buf), " %p", p);
		iov[4].iov_len = strlen(buf);
	}
	iov[5].iov_base = "\n";
	iov[5].iov_len = 1;
	writev(STDERR_FILENO, iov, 6);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;
	if (mopts.malloc_abort)
		abort();
}

static void
rbytes_init(void)
{
	arc4random_buf(rbytes, sizeof(rbytes));
	rnibblesused = 0;
}

static inline u_char
getrnibble(void)
{
	u_char x;

	if (rnibblesused >= 2 * sizeof(rbytes))
		rbytes_init();
	x = rbytes[rnibblesused++ / 2];
	return (rnibblesused & 1 ? x & 0xf : x >> 4);
}

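/*
 * Illustrative sketch (not part of the original file): each of the 512
 * random bytes supplies two 4-bit nibbles, low half first, so the buffer
 * lasts for 1024 getrnibble() calls before being refilled by
 * rbytes_init().  MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static void
nibble_example(void)
{
	u_char byte = 0xd4;
	u_char first = byte & 0xf;	/* first call on this byte: 0x4 */
	u_char second = byte >> 4;	/* second call on this byte: 0xd */
	(void)first; (void)second;
}
#endif
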
/*
 * Cache maintenance.  We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror("munmap round", NULL);
		return;
	}

	if (psz > mopts.malloc_cache) {
		if (munmap(p, sz))
			wrterror("munmap", p);
		malloc_used -= sz;
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrnibble();
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow", NULL);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeprot)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost", NULL);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow", NULL);
}

static void
zapcacheregion(struct dir_info *d, void *p)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p == p) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			malloc_used -= rsz;
		}
	}
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	if (sz != PAGEROUND(sz)) {
		wrterror("map round", NULL);
		return NULL;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		if (p != MAP_FAILED)
			malloc_used += sz;
		/* zero fill not needed */
		return p;
	}
	offset = getrnibble();
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (mopts.malloc_freeprot)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk &&
				    mopts.malloc_freeprot)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeprot)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk && mopts.malloc_freeprot)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	p = MMAP(sz);
	if (p != MAP_FAILED)
		malloc_used += sz;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache", NULL);
	/* zero fill not needed */
	return p;
}

/*
 * Initialize a dir_info, which should have been cleared by the caller.
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	rbytes_init();

	/*
	 * Default options
	 */
	mopts.malloc_abort = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>':
				mopts.malloc_cache <<= 1;
				if (mopts.malloc_cache > MALLOC_MAXCACHE)
					mopts.malloc_cache = MALLOC_MAXCACHE;
				break;
			case '<':
				mopts.malloc_cache >>= 1;
				break;
			case 'a':
				mopts.malloc_abort = 0;
				break;
			case 'A':
				mopts.malloc_abort = 1;
				break;
#ifdef MALLOC_STATS
			case 'd':
				mopts.malloc_stats = 0;
				break;
			case 'D':
				mopts.malloc_stats = 1;
				break;
#endif /* MALLOC_STATS */
			case 'f':
				mopts.malloc_freeprot = 0;
				break;
			case 'F':
				mopts.malloc_freeprot = 1;
				break;
			case 'g':
				mopts.malloc_guard = 0;
				break;
			case 'G':
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'h':
				mopts.malloc_hint = 0;
				break;
			case 'H':
				mopts.malloc_hint = 1;
				break;
			case 'j':
				mopts.malloc_junk = 0;
				break;
			case 'J':
				mopts.malloc_junk = 1;
				break;
			case 'n':
			case 'N':
				break;
			case 'p':
				mopts.malloc_move = 0;
				break;
			case 'P':
				mopts.malloc_move = 1;
				break;
			case 'r':
				mopts.malloc_realloc = 0;
				break;
			case 'R':
				mopts.malloc_realloc = 1;
				break;
			case 's':
				mopts.malloc_freeprot = mopts.malloc_junk = 0;
				mopts.malloc_guard = 0;
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			case 'S':
				mopts.malloc_freeprot = mopts.malloc_junk = 1;
				mopts.malloc_guard = MALLOC_PAGESIZE;
				mopts.malloc_cache = 0;
				break;
			case 'x':
				mopts.malloc_xmalloc = 0;
				break;
			case 'X':
				mopts.malloc_xmalloc = 1;
				break;
			case 'z':
				mopts.malloc_zero = 0;
				break;
			case 'Z':
				mopts.malloc_zero = 1;
				break;
			default: {
				static const char q[] = "malloc() warning: "
				    "unknown char in MALLOC_OPTIONS\n";
				write(STDERR_FILENO, q, sizeof(q) - 1);
				break;
			}
			}
		}
	}

	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (mopts.malloc_zero)
		mopts.malloc_junk = 1;

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	/*
	 * Allocate dir_info with a guard page on either side.  Also
	 * randomise the offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT).
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		return -1;
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		wrterror("malloc init mmap failed", NULL);
		d->regions_total = 0;
		return 1;
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		LIST_INIT(&d->chunk_dir[i]);
	}
	malloc_used += regioninfo_size;
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);

	return 0;
}

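/*
 * Illustrative sketch (not part of the original file): options are read,
 * in order, from the /etc/malloc.conf symlink target, the MALLOC_OPTIONS
 * environment variable, and the malloc_options global, so a later source
 * can override an earlier one.  MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static void
options_example(void)
{
	/* e.g. guard pages plus junk filling; takes effect only if set
	   before the first allocation triggers omalloc_init() */
	malloc_options = "GJ";
}
#endif
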
static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	malloc_used += newsize;
	memset(p, 0, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid pages containing meta info ending up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap", d->r);
	else
		malloc_used -= d->regions_total * sizeof(struct region_info);
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		void *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		malloc_used += MALLOC_PAGESIZE;
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}

static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return q == p ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x", NULL);
	d->regions_free++;
	STATS_INC(g_pool->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(g_pool->delete_moves);
			break;
		}

	}
}

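/*
 * Illustrative sketch (not part of the original file): probing in this
 * table walks downwards, so when delete() opens a hole at slot j, an
 * entry found at slot i whose home (hash) slot is r may be moved into j
 * only if r does not lie on the probe path strictly between i and j;
 * the three-way test below, copied from delete(), handles wrap-around.
 * MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static int
must_stay_example(size_t i, size_t r, size_t j)
{
	/* nonzero means the entry at i stays put and probing continues */
	return (i <= r && r < j) || (r < j && j < i) || (j < i && i <= r);
}
#endif
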
/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits)
{
	struct chunk_info *bp;
	void		*pp;
	int		i, k;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}


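/*
 * Illustrative sketch (not part of the original file): for bits == 5 on
 * a 4K page, omalloc_make_chunks() yields 4096 >> 5 == 128 chunks of 32
 * bytes, so 128 / MALLOC_BITS == 8 u_shorts of the bp->bits bitmap are
 * set to all ones (one bit per free chunk).  MALLOC_EXAMPLES is a
 * hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static void
bitmap_example(void)
{
	int total = MALLOC_PAGESIZE >> 5;	/* 128 chunks of 32 bytes */
	int words = total / MALLOC_BITS;	/* 8 fully set bitmap words */
	(void)total; (void)words;
}
#endif
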
/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int		i, j;
	size_t		k;
	u_short		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	/* If the bucket is empty, make a page more of chunks of that size */
	if (LIST_EMPTY(&d->chunk_dir[j])) {
		bp = omalloc_make_chunks(d, j);
		if (bp == NULL)
			return NULL;
	} else
		bp = LIST_FIRST(&d->chunk_dir[j]);

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrnibble();
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}


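/*
 * Illustrative sketch (not part of the original file): malloc_bytes()
 * maps a nonzero request to the smallest power-of-two bucket of at least
 * MALLOC_MINSIZE bytes; e.g. a 100-byte request lands in bucket j == 7
 * (128-byte chunks).  This repeats the same computation in isolation.
 * MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static int
bucket_example(size_t size)
{
	int i, j = MALLOC_MINSHIFT;

	i = (size - 1) >> (MALLOC_MINSHIFT - 1);
	while (i >>= 1)
		j++;
	return j;	/* chunks in this bucket are 1 << j bytes */
}
#endif
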
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	int i;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	/* Find the chunk number on the page */
	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer", ptr);
		return;
	}
	if (info->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) {
		wrterror("chunk is already free", ptr);
		return;
	}

	info->bits[i / MALLOC_BITS] |= 1U << (i % MALLOC_BITS);
	info->free++;

	if (info->size != 0)
		mp = d->chunk_dir + info->shift;
	else
		mp = d->chunk_dir;

	if (info->free == 1) {
		/* Page became non-full */
		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}
	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeprot)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}



static void *
omalloc(size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(g_pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(g_pool, p, sz, f)) {
			unmap(g_pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect", NULL);
			malloc_guarded += mopts.malloc_guard;
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* zero fill again if needed; the junk fill above
			   overwrote the area */
			if (zero_fill && mopts.malloc_junk)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(g_pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}


/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call", NULL);
	}
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&g_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return -1;
	}
	return 0;
}

void *
malloc(size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in malloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = omalloc(size, mopts.malloc_zero, CALLER);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}


static void
ofree(void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror("bogus pointer", p);
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size", NULL);
			if (!mopts.malloc_freeprot) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
			}
			malloc_guarded -= mopts.malloc_guard;
		}
		if (mopts.malloc_junk && !mopts.malloc_freeprot)
			memset(p, SOME_FREEJUNK,
			    PAGEROUND(sz) - mopts.malloc_guard);
		unmap(g_pool, p, PAGEROUND(sz));
		delete(g_pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freeprot) {
			i = getrnibble();
			tmp = p;
			p = g_pool->delayed_chunks[i];
			g_pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(g_pool, p);
			if (r == NULL) {
				wrterror("bogus pointer (double free?)", p);
				return;
			}
			free_bytes(g_pool, r, p);
		}
	}
}
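
/*
 * Illustrative sketch (not part of the original file): small chunks are
 * not freed immediately.  ofree() swaps the pointer into a random slot
 * of delayed_chunks and actually frees whatever previously occupied the
 * slot, delaying reuse by a random number of frees.  This repeats that
 * swap in isolation; MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static void *
delayed_swap_example(void *p)
{
	int i = getrnibble();	/* random slot, 0..MALLOC_DELAYED_CHUNKS */
	void *old = g_pool->delayed_chunks[i];

	g_pool->delayed_chunks[i] = p;
	return old;		/* this pointer is the one freed now */
}
#endif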

void
free(void *ptr)
{
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	malloc_func = " in free():";
	if (g_pool == NULL) {
		_MALLOC_UNLOCK();
		wrterror("free() called before allocation", NULL);
		return;
	}
	if (malloc_active++) {
		malloc_recurse();
		return;
	}
	ofree(ptr);
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}


static void *
orealloc(void *p, size_t newsz, void *f)
{
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(newsz, 0, f);

	r = find(g_pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size", NULL);
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				STATS_INC(g_pool->cheap_realloc_tries);
				zapcacheregion(g_pool, (char *)p + roldsz);
				q = MMAPA((char *)p + roldsz, rnewsz - roldsz);
				if (q == (char *)p + roldsz) {
					malloc_used += rnewsz - roldsz;
					if (mopts.malloc_junk)
						memset(q, SOME_JUNK,
						    rnewsz - roldsz);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(g_pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED)
					munmap(q, rnewsz - roldsz);
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror("mprotect", NULL);
			}
			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		STATS_SETF(r, f);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(newsz, 0, f);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(p);
		return q;
	} else {
		STATS_SETF(r, f);
		return p;
	}
}

void *
realloc(void *ptr, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in realloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = orealloc(ptr, size, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}


#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
calloc(size_t nmemb, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = " in calloc():";
	if (g_pool == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}

	size *= nmemb;
	r = omalloc(size, 1, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
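
/*
 * Illustrative sketch (not part of the original file): if both operands
 * are below MUL_NO_OVERFLOW, i.e. 2^(sizeof(size_t) * 4) (2^32 on a
 * 64-bit target), their product cannot exceed SIZE_MAX, so calloc()
 * only pays for a division on rare, large inputs.  MALLOC_EXAMPLES is
 * a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static int
mul_overflows_example(size_t nmemb, size_t size)
{
	/* the same test calloc() performs before multiplying */
	return (nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size;
}
#endif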

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *result;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	    alignment > MALLOC_PAGESIZE)
		return EINVAL;

	/*
	 * max(size, alignment) is enough to assure the requested alignment,
	 * since the allocator always allocates power-of-two blocks.
	 */
	if (size < alignment)
		size = alignment;
	result = malloc(size);

	if (result == NULL)
		return ENOMEM;

	*memptr = result;
	return 0;
}
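
/*
 * Illustrative sketch (not part of the original file): typical use of
 * posix_memalign().  Alignment must be a power of two, at least
 * sizeof(void *) and, in this implementation, at most MALLOC_PAGESIZE.
 * MALLOC_EXAMPLES is a hypothetical guard.
 */
#ifdef MALLOC_EXAMPLES
static void *
memalign_example(void)
{
	void *p;

	if (posix_memalign(&p, 64, 100) != 0)	/* 64-byte aligned block */
		return NULL;
	return p;	/* released with free() as usual */
}
#endif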

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	snprintf(buf, sizeof(buf), "Leak report\n");
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "           f     sum      #    avg\n");
	write(fd, buf, strlen(buf));
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "        ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, count;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		struct chunk_info *p;

		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		p = LIST_FIRST(&d->chunk_dir[i]);
		if (p == NULL && count == 0)
			continue;
		snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
		write(fd, buf, strlen(buf));
		if (p != NULL)
			dump_chunk(fd, p, NULL, 1);
		else
			write(fd, "\n", 1);
	}

}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[64];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf),
	    "slot)  hash d  type         page            f size [free/n]\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %12p %12p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) {
		p = g_pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(g_pool, p);
		if (r == NULL)
			wrterror("bogus pointer in malloc_dump", p);
		free_bytes(g_pool, r, p);
		g_pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, g_pool);
	errno = saved_errno;
}

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */