author		otto <>	2019-05-10 15:03:24 +0000
committer	otto <>	2019-05-10 15:03:24 +0000
commit		db9d6bd84c976c2cbc8987b4f833c82c5657008e (patch)
tree		351f341ee663baddfeb4c10cfa7273013fe52ddd
parent		dbcfb6b9d803c6373711f51c965f6a8f989c5157 (diff)
Introduce malloc_conceal() and calloc_conceal(). Similar to their
counterparts, but they return memory in pages marked MAP_CONCEAL, and on
free() freezero() is actually called.
-rw-r--r--	src/lib/libc/stdlib/malloc.3	|  29
-rw-r--r--	src/lib/libc/stdlib/malloc.c	| 389
2 files changed, 219 insertions(+), 199 deletions(-)
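For context, a minimal usage sketch of the new interfaces as the commit describes them (not part of the diff; the 32-byte key size and error handling are illustrative). The backing pages carry MAP_CONCEAL, so they are excluded from core dumps, and free() wipes the contents as freezero() would:

#include <err.h>
#include <stdlib.h>

int
main(void)
{
	/* Allocated from the concealed pool: pages are marked MAP_CONCEAL. */
	unsigned char *key = malloc_conceal(32);

	if (key == NULL)
		err(1, "malloc_conceal");
	arc4random_buf(key, 32);	/* fill with secret material */
	/* ... use the key ... */
	free(key);			/* contents are explicitly discarded */
	return 0;
}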
diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3
index a6edb2be00..ccb5d257a2 100644
--- a/src/lib/libc/stdlib/malloc.3
+++ b/src/lib/libc/stdlib/malloc.3
@@ -30,9 +30,9 @@
 .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 .\" SUCH DAMAGE.
 .\"
-.\" $OpenBSD: malloc.3,v 1.122 2018/12/05 17:11:59 schwarze Exp $
+.\" $OpenBSD: malloc.3,v 1.123 2019/05/10 15:03:24 otto Exp $
 .\"
-.Dd $Mdocdate: December 5 2018 $
+.Dd $Mdocdate: May 10 2019 $
 .Dt MALLOC 3
 .Os
 .Sh NAME
@@ -43,7 +43,9 @@
 .Nm reallocarray ,
 .Nm recallocarray ,
 .Nm freezero ,
-.Nm aligned_alloc
+.Nm aligned_alloc ,
+.Nm malloc_conceal ,
+.Nm calloc_conceal
 .Nd memory allocation and deallocation
 .Sh SYNOPSIS
 .In stdlib.h
@@ -63,6 +65,10 @@
 .Fn freezero "void *ptr" "size_t size"
 .Ft void *
 .Fn aligned_alloc "size_t alignment" "size_t size"
+.Ft void *
+.Fn malloc_conceal "size_t size"
+.Ft void *
+.Fn calloc_conceal "size_t nmemb" "size_t size"
 .Vt char *malloc_options ;
 .Sh DESCRIPTION
 The standard functions
@@ -233,6 +239,23 @@ If
 is not a multiple of
 .Fa alignment ,
 behavior is undefined.
+.Pp
+The
+.Fn malloc_conceal
+and
+.Fn calloc_conceal
+functions behave the same as
+.Fn malloc
+and
+.Fn calloc
+respectively,
+with the exception that the allocation returned is marked with the
+.Dv MAP_CONCEAL
+.Xr mmap 2
+flag and calling
+.Fn free
+on the allocation will discard the contents explicitly.
+A reallocation of a concealed allocation will leave these properties intact.
 .Sh MALLOC OPTIONS
 Upon the first call to the
 .Fn malloc
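Worth noting from the added manual text: a reallocation keeps the concealed properties. A brief sketch under the documented semantics (buffer sizes illustrative, not from the commit):

#include <stdlib.h>

void
grow_secret(void)
{
	char *secret, *tmp;

	if ((secret = calloc_conceal(16, 1)) == NULL)
		return;
	/* Per the text above, the reallocated memory stays MAP_CONCEAL-marked
	 * and is still wiped when freed. */
	if ((tmp = realloc(secret, 32)) != NULL)
		secret = tmp;
	free(secret);
}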
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index e41178d5c5..ff41f61385 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.259 2019/01/10 18:47:05 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.260 2019/05/10 15:03:24 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -90,17 +90,17 @@
 #define SOME_JUNK		0xdb	/* deadbeef */
 #define SOME_FREEJUNK		0xdf	/* dead, free */
 
-#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
-    MAP_ANON | MAP_PRIVATE, -1, 0)
+#define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE | (f), -1, 0)
 
-#define MMAPNONE(sz)	mmap(NULL, (sz), PROT_NONE, \
-    MAP_ANON | MAP_PRIVATE, -1, 0)
+#define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
+    MAP_ANON | MAP_PRIVATE | (f), -1, 0)
 
-#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
-    MAP_ANON | MAP_PRIVATE, -1, 0)
+#define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE | (f), -1, 0)
 
-#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
-    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
+#define MQUERY(a,sz,f)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE | MAP_FIXED | (f), -1, 0)
 
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
@@ -118,19 +118,22 @@ struct dir_info {
 	struct region_info *r;		/* region slots */
 	size_t regions_total;		/* number of region slots */
 	size_t regions_free;		/* number of free slots */
+	size_t free_regions_size;	/* free pages cached */
+	size_t rbytesused;		/* random bytes used */
+	char *func;			/* current function */
+	u_int malloc_cache;		/* # of free pages we cache */
+	int malloc_junk;		/* junk fill? */
+	int mmap_flag;			/* extra flag for mmap */
+	u_int rotor;
+	int mutex;
 	/* lists of free chunk info structs */
 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
 	/* lists of chunks with free slots */
 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
-	size_t free_regions_size;	/* free pages cached */
 	/* free pages cache */
 	struct region_info free_regions[MALLOC_MAXCACHE];
 	/* delayed free chunk slots */
-	u_int rotor;
 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
-	size_t rbytesused;		/* random bytes used */
-	char *func;			/* current function */
-	int mutex;
 	u_char rbytes[32];		/* random bytes */
 #ifdef MALLOC_STATS
 	size_t inserts;
@@ -187,12 +190,12 @@ struct malloc_readonly {
 	int	malloc_mt;		/* multi-threaded mode? */
 	int	malloc_freecheck;	/* Extensive double free check */
 	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
-	int	malloc_junk;		/* junk fill? */
+	int	def_malloc_junk;	/* junk fill? */
 	int	malloc_realloc;		/* always realloc? */
 	int	malloc_xmalloc;		/* xmalloc behaviour? */
 	int	chunk_canaries;		/* use canaries after chunks? */
 	int	internal_funcs;		/* use better recallocarray/freezero? */
-	u_int	malloc_cache;		/* free pages we cache */
+	u_int	def_malloc_cache;	/* free pages we cache */
 	size_t	malloc_guard;		/* use guard pages after allocations? */
 #ifdef MALLOC_STATS
 	int	malloc_stats;		/* dump statistics at end */
@@ -268,9 +271,9 @@ static inline struct dir_info *
 getpool(void)
 {
 	if (!mopts.malloc_mt)
-		return mopts.malloc_pool[0];
-	else
-		return mopts.malloc_pool[TIB_GET()->tib_tid &
+		return mopts.malloc_pool[1];
+	else	/* first one reserved for special pool */
+		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
 		    (mopts.malloc_mutexes - 1)];
 }
 
@@ -327,16 +330,16 @@ omalloc_parseopt(char opt)
 		break;
 	case '-':
 		mopts.malloc_mutexes >>= 1;
-		if (mopts.malloc_mutexes < 1)
-			mopts.malloc_mutexes = 1;
+		if (mopts.malloc_mutexes < 2)
+			mopts.malloc_mutexes = 2;
 		break;
 	case '>':
-		mopts.malloc_cache <<= 1;
-		if (mopts.malloc_cache > MALLOC_MAXCACHE)
-			mopts.malloc_cache = MALLOC_MAXCACHE;
+		mopts.def_malloc_cache <<= 1;
+		if (mopts.def_malloc_cache > MALLOC_MAXCACHE)
+			mopts.def_malloc_cache = MALLOC_MAXCACHE;
 		break;
 	case '<':
-		mopts.malloc_cache >>= 1;
+		mopts.def_malloc_cache >>= 1;
 		break;
 	case 'c':
 		mopts.chunk_canaries = 0;
@@ -367,12 +370,12 @@ omalloc_parseopt(char opt)
 		mopts.malloc_guard = MALLOC_PAGESIZE;
 		break;
 	case 'j':
-		if (mopts.malloc_junk > 0)
-			mopts.malloc_junk--;
+		if (mopts.def_malloc_junk > 0)
+			mopts.def_malloc_junk--;
 		break;
 	case 'J':
-		if (mopts.malloc_junk < 2)
-			mopts.malloc_junk++;
+		if (mopts.def_malloc_junk < 2)
+			mopts.def_malloc_junk++;
 		break;
 	case 'r':
 		mopts.malloc_realloc = 0;
@@ -410,8 +413,8 @@ omalloc_init(void)
 	 * Default options
 	 */
 	mopts.malloc_mutexes = 8;
-	mopts.malloc_junk = 1;
-	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
+	mopts.def_malloc_junk = 1;
+	mopts.def_malloc_cache = MALLOC_DEFAULT_CACHE;
 
 	for (i = 0; i < 3; i++) {
 		switch (i) {
@@ -442,12 +445,12 @@ omalloc_init(void)
 			case 'S':
 				for (q = "CFGJ"; *q != '\0'; q++)
 					omalloc_parseopt(*q);
-				mopts.malloc_cache = 0;
+				mopts.def_malloc_cache = 0;
 				break;
 			case 's':
 				for (q = "cfgj"; *q != '\0'; q++)
 					omalloc_parseopt(*q);
-				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
+				mopts.def_malloc_cache = MALLOC_DEFAULT_CACHE;
 				break;
 			default:
 				omalloc_parseopt(*p);
@@ -468,7 +471,7 @@ omalloc_init(void)
 }
 
 static void
-omalloc_poolinit(struct dir_info **dp)
+omalloc_poolinit(struct dir_info **dp, int mmap_flag)
 {
 	char *p;
 	size_t d_avail, regioninfo_size;
@@ -480,7 +483,8 @@ omalloc_poolinit(struct dir_info **dp)
 	 * randomise offset inside the page at which the dir_info
 	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
 	 */
-	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
+	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
+	    MAP_FAILED)
 		wrterror(NULL, "malloc init mmap failed");
 	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
 	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
@@ -490,7 +494,7 @@ omalloc_poolinit(struct dir_info **dp)
 	rbytes_init(d);
 	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
 	regioninfo_size = d->regions_total * sizeof(struct region_info);
-	d->r = MMAP(regioninfo_size);
+	d->r = MMAP(regioninfo_size, mmap_flag);
 	if (d->r == MAP_FAILED) {
 		d->regions_total = 0;
 		wrterror(NULL, "malloc init mmap failed");
@@ -501,6 +505,9 @@ omalloc_poolinit(struct dir_info **dp)
 			LIST_INIT(&d->chunk_dir[i][j]);
 	}
 	STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
+	d->mmap_flag = mmap_flag;
+	d->malloc_junk = mopts.def_malloc_junk;
+	d->malloc_cache = mopts.def_malloc_cache;
 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
 	d->canary2 = ~d->canary1;
 
@@ -523,7 +530,7 @@ omalloc_grow(struct dir_info *d)
 	newsize = newtotal * sizeof(struct region_info);
 	mask = newtotal - 1;
 
-	p = MMAP(newsize);
+	p = MMAP(newsize, d->mmap_flag);
 	if (p == MAP_FAILED)
 		return 1;
 
@@ -662,14 +669,14 @@ unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
 	if (sz != PAGEROUND(sz))
 		wrterror(d, "munmap round");
 
-	rsz = mopts.malloc_cache - d->free_regions_size;
+	rsz = d->malloc_cache - d->free_regions_size;
 
 	/*
 	 * normally the cache holds recently freed regions, but if the region
 	 * to unmap is larger than the cache size or we're clearing and the
 	 * cache is full, just munmap
 	 */
-	if (psz > mopts.malloc_cache || (clear > 0 && rsz == 0)) {
+	if (psz > d->malloc_cache || (clear > 0 && rsz == 0)) {
 		i = munmap(p, sz);
 		if (i)
 			wrterror(d, "munmap %p", p);
@@ -677,7 +684,7 @@ unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
 		return;
 	}
 	offset = getrbyte(d);
-	mask = mopts.malloc_cache - 1;
+	mask = d->malloc_cache - 1;
 	if (psz > rsz) {
 		size_t tounmap = psz - rsz;
 		for (i = 0; ; i++) {
@@ -717,7 +724,7 @@ unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk)
 			break;
 		}
 	}
-	if (d->free_regions_size > mopts.malloc_cache)
+	if (d->free_regions_size > d->malloc_cache)
 		wrterror(d, "malloc cache overflow");
 }
 
@@ -728,7 +735,7 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
 	struct region_info *r;
 	size_t rsz;
 
-	for (i = 0; i < mopts.malloc_cache; i++) {
+	for (i = 0; i < d->malloc_cache; i++) {
 		r = &d->free_regions[i];
 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
 			rsz = r->size << MALLOC_PAGESHIFT;
@@ -757,15 +764,15 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
 
 	if (hint == NULL && psz > d->free_regions_size) {
 		_MALLOC_LEAVE(d);
-		p = MMAP(sz);
+		p = MMAP(sz, d->mmap_flag);
 		_MALLOC_ENTER(d);
 		if (p != MAP_FAILED)
 			STATS_ADD(d->malloc_used, sz);
 		/* zero fill not needed */
 		return p;
 	}
-	for (i = 0; i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
+	for (i = 0; i < d->malloc_cache; i++) {
+		r = &d->free_regions[(i + d->rotor) & (d->malloc_cache - 1)];
 		if (r->p != NULL) {
 			if (hint != NULL && r->p != hint)
 				continue;
@@ -777,7 +784,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
 					mprotect(p, sz, PROT_READ | PROT_WRITE);
 				if (zero_fill)
 					memset(p, 0, sz);
-				else if (mopts.malloc_junk == 2 &&
+				else if (d->malloc_junk == 2 &&
 				    mopts.malloc_freeunmap)
 					memset(p, SOME_FREEJUNK, sz);
 				d->rotor += i + 1;
@@ -796,16 +803,16 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
 		d->free_regions_size -= psz;
 		if (zero_fill)
 			memset(p, 0, sz);
-		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
+		else if (d->malloc_junk == 2 && mopts.malloc_freeunmap)
 			memset(p, SOME_FREEJUNK, sz);
 		return p;
 	}
 	if (hint != NULL)
 		return MAP_FAILED;
-	if (d->free_regions_size > mopts.malloc_cache)
+	if (d->free_regions_size > d->malloc_cache)
 		wrterror(d, "malloc cache");
 	_MALLOC_LEAVE(d);
-	p = MMAP(sz);
+	p = MMAP(sz, d->mmap_flag);
 	_MALLOC_ENTER(d);
 	if (p != MAP_FAILED)
 		STATS_ADD(d->malloc_used, sz);
@@ -857,7 +864,7 @@ alloc_chunk_info(struct dir_info *d, int bits)
 		size += count * sizeof(u_short);
 	size = _ALIGN(size);
 
-	q = MMAP(MALLOC_PAGESIZE);
+	q = MMAP(MALLOC_PAGESIZE, d->mmap_flag);
 	if (q == MAP_FAILED)
 		return NULL;
 	STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
@@ -905,7 +912,7 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
 	return bp;
 
 err:
-	unmap(d, pp, MALLOC_PAGESIZE, 0, mopts.malloc_junk);
+	unmap(d, pp, MALLOC_PAGESIZE, 0, d->malloc_junk);
 	return NULL;
 }
 
@@ -1016,7 +1023,7 @@ found:
 
 	p = (char *)bp->page + k;
 	if (bp->size > 0) {
-		if (mopts.malloc_junk == 2)
+		if (d->malloc_junk == 2)
 			memset(p, SOME_JUNK, bp->size);
 		else if (mopts.chunk_canaries)
 			fill_canary(p, size, bp->size);
@@ -1148,15 +1155,15 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 
 		if (MALLOC_MOVE_COND(sz)) {
 			/* fill whole allocation */
-			if (mopts.malloc_junk == 2)
+			if (pool->malloc_junk == 2)
 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
 			/* shift towards the end */
 			p = MALLOC_MOVE(p, sz);
 			/* fill zeros if needed and overwritten above */
-			if (zero_fill && mopts.malloc_junk == 2)
+			if (zero_fill && pool->malloc_junk == 2)
 				memset(p, 0, sz - mopts.malloc_guard);
 		} else {
-			if (mopts.malloc_junk == 2) {
+			if (pool->malloc_junk == 2) {
 				if (zero_fill)
 					memset((char *)p + sz - mopts.malloc_guard,
 					    SOME_JUNK, psz - sz);
@@ -1200,25 +1207,33 @@ malloc_recurse(struct dir_info *d)
 void
 _malloc_init(int from_rthreads)
 {
-	int i, max;
+	u_int i, nmutexes;
 	struct dir_info *d;
 
-	_MALLOC_LOCK(0);
-	if (!from_rthreads && mopts.malloc_pool[0]) {
-		_MALLOC_UNLOCK(0);
+	_MALLOC_LOCK(1);
+	if (!from_rthreads && mopts.malloc_pool[1]) {
+		_MALLOC_UNLOCK(1);
 		return;
 	}
 	if (!mopts.malloc_canary)
 		omalloc_init();
 
-	max = from_rthreads ? mopts.malloc_mutexes : 1;
+	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
 		mprotect(&malloc_readonly, sizeof(malloc_readonly),
 		    PROT_READ | PROT_WRITE);
-	for (i = 0; i < max; i++) {
+	for (i = 0; i < nmutexes; i++) {
 		if (mopts.malloc_pool[i])
 			continue;
-		omalloc_poolinit(&d);
+		if (i == 0) {
+			omalloc_poolinit(&d, MAP_CONCEAL);
+			d->malloc_junk = 2;
+			d->malloc_cache = 0;
+		} else {
+			omalloc_poolinit(&d, 0);
+			d->malloc_junk = mopts.def_malloc_junk;
+			d->malloc_cache = mopts.def_malloc_cache;
+		}
 		d->mutex = i;
 		mopts.malloc_pool[i] = d;
 	}
@@ -1234,10 +1249,31 @@ _malloc_init(int from_rthreads)
 	 */
 	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
 		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
-	_MALLOC_UNLOCK(0);
+	_MALLOC_UNLOCK(1);
 }
 DEF_STRONG(_malloc_init);
 
+#define PROLOGUE(p, fn)			\
+	d = (p);			\
+	if (d == NULL) {		\
+		_malloc_init(0);	\
+		d = (p);		\
+	}				\
+	_MALLOC_LOCK(d->mutex);		\
+	d->func = fn;			\
+	if (d->active++) {		\
+		malloc_recurse(d);	\
+		return NULL;		\
+	}				\
+
+#define EPILOGUE()				\
+	d->active--;				\
+	_MALLOC_UNLOCK(d->mutex);		\
+	if (r == NULL && mopts.malloc_xmalloc)	\
+		wrterror(d, "out of memory");	\
+	if (r != NULL)				\
+		errno = saved_errno;		\
+
 void *
 malloc(size_t size)
 {
@@ -1245,29 +1281,27 @@ malloc(size_t size)
 	struct dir_info *d;
 	int saved_errno = errno;
 
-	d = getpool();
-	if (d == NULL) {
-		_malloc_init(0);
-		d = getpool();
-	}
-	_MALLOC_LOCK(d->mutex);
-	d->func = "malloc";
-
-	if (d->active++) {
-		malloc_recurse(d);
-		return NULL;
-	}
+	PROLOGUE(getpool(), "malloc")
 	r = omalloc(d, size, 0, CALLER);
-	d->active--;
-	_MALLOC_UNLOCK(d->mutex);
-	if (r == NULL && mopts.malloc_xmalloc)
-		wrterror(d, "out of memory");
-	if (r != NULL)
-		errno = saved_errno;
+	EPILOGUE()
 	return r;
 }
 /*DEF_STRONG(malloc);*/
 
+void *
+malloc_conceal(size_t size)
+{
+	void *r;
+	struct dir_info *d;
+	int saved_errno = errno;
+
+	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
+	r = omalloc(d, size, 0, CALLER);
+	EPILOGUE()
+	return r;
+}
+DEF_WEAK(malloc_conceal);
+
 static void
 validate_junk(struct dir_info *pool, void *p)
 {
@@ -1298,25 +1332,23 @@ findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
 
 	STATS_INC(pool->pool_searches);
 	if (r == NULL) {
-		if (mopts.malloc_mt) {
-			int i;
-
-			STATS_INC(pool->other_pool);
-			for (i = 1; i < mopts.malloc_mutexes; i++) {
-				int j = (argpool->mutex + i) &
-				    (mopts.malloc_mutexes - 1);
-
-				pool->active--;
-				_MALLOC_UNLOCK(pool->mutex);
-				pool = mopts.malloc_pool[j];
-				_MALLOC_LOCK(pool->mutex);
-				pool->active++;
-				r = find(pool, p);
-				if (r != NULL) {
-					*saved_function = pool->func;
-					pool->func = argpool->func;
-					break;
-				}
+		u_int i, nmutexes;
+
+		nmutexes = mopts.malloc_mt ? mopts.malloc_mutexes : 2;
+		STATS_INC(pool->other_pool);
+		for (i = 1; i < nmutexes; i++) {
+			u_int j = (argpool->mutex + i) & (nmutexes - 1);
+
+			pool->active--;
+			_MALLOC_UNLOCK(pool->mutex);
+			pool = mopts.malloc_pool[j];
+			_MALLOC_LOCK(pool->mutex);
+			pool->active++;
+			r = find(pool, p);
+			if (r != NULL) {
+				*saved_function = pool->func;
+				pool->func = argpool->func;
+				break;
 			}
 		}
 	if (r == NULL)
@@ -1337,6 +1369,10 @@ ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
 	r = findpool(p, *argpool, &pool, &saved_function);
 
 	REALSIZE(sz, r);
+	if (pool->mmap_flag) {
+		clear = 1;
+		argsz = sz;
+	}
 	if (check) {
 		if (sz <= MALLOC_MAXCHUNK) {
 			if (mopts.chunk_canaries && sz > 0) {
@@ -1386,7 +1422,7 @@ ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
 			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
 		}
 		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0,
-		    mopts.malloc_junk);
+		    pool->malloc_junk);
 		delete(pool, r);
 	} else {
 		/* Validate and optionally canary check */
@@ -1402,7 +1438,7 @@ ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
 						wrterror(pool,
 						    "double free %p", p);
 			}
-			if (mopts.malloc_junk && sz > 0)
+			if (pool->malloc_junk && sz > 0)
 				memset(p, SOME_FREEJUNK, sz);
 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
 			tmp = p;
@@ -1410,7 +1446,7 @@ ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
 			if (tmp == p)
 				wrterror(pool, "double free %p", tmp);
 			pool->delayed_chunks[i] = tmp;
-			if (mopts.malloc_junk)
+			if (pool->malloc_junk)
 				validate_junk(pool, p);
 		} else if (argsz > 0)
 			memset(p, 0, argsz);
@@ -1503,6 +1539,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 	size_t oldsz, goldsz, gnewsz;
 	void *q, *ret;
 	uint32_t chunknum;
+	int forced;
 
 	if (p == NULL)
 		return omalloc(*argpool, newsz, 0, f);
@@ -1531,8 +1568,8 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 	if (gnewsz > MALLOC_MAXCHUNK)
 		gnewsz += mopts.malloc_guard;
 
-	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
-	    !mopts.malloc_realloc) {
+	forced = mopts.malloc_realloc || pool->mmap_flag;
+	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
 		/* First case: from n pages sized allocation to m pages sized
 		   allocation, m > n */
 		size_t roldsz = PAGEROUND(goldsz);
@@ -1549,15 +1586,15 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 			if (q == hint)
 				goto gotit;
 			zapcacheregion(pool, hint, needed);
-			q = MQUERY(hint, needed);
+			q = MQUERY(hint, needed, pool->mmap_flag);
 			if (q == hint)
-				q = MMAPA(hint, needed);
+				q = MMAPA(hint, needed, pool->mmap_flag);
 			else
 				q = MAP_FAILED;
 			if (q == hint) {
 gotit:
 				STATS_ADD(pool->malloc_used, needed);
-				if (mopts.malloc_junk == 2)
+				if (pool->malloc_junk == 2)
 					memset(q, SOME_JUNK, needed);
 				r->size = gnewsz;
 				if (r->p != p) {
@@ -1590,7 +1627,7 @@ gotit:
 					wrterror(pool, "mprotect");
 			}
 			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0,
-			    mopts.malloc_junk);
+			    pool->malloc_junk);
 			r->size = gnewsz;
 			if (MALLOC_MOVE_COND(gnewsz)) {
 				void *pp = MALLOC_MOVE(r->p, gnewsz);
@@ -1613,7 +1650,7 @@ gotit:
 				p = pp;
 			}
 			if (p == r->p) {
-				if (newsz > oldsz && mopts.malloc_junk == 2)
+				if (newsz > oldsz && pool->malloc_junk == 2)
 					memset((char *)p + newsz, SOME_JUNK,
 					    rnewsz - mopts.malloc_guard -
 					    newsz);
@@ -1627,9 +1664,9 @@ gotit:
 	}
 	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
 	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
-	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
+	    1 << find_chunksize(newsz) == oldsz && !forced) {
 		/* do not reallocate if new size fits good in existing chunk */
-		if (mopts.malloc_junk == 2)
+		if (pool->malloc_junk == 2)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
 		if (mopts.chunk_canaries) {
 			info->bits[info->offset + chunknum] = newsz;
@@ -1637,7 +1674,7 @@ gotit:
 		}
 		STATS_SETF(r, f);
 		ret = p;
-	} else if (newsz != oldsz || mopts.malloc_realloc) {
+	} else if (newsz != oldsz || forced) {
 		/* create new allocation */
 		q = omalloc(pool, newsz, 0, f);
 		if (q == NULL) {
@@ -1670,30 +1707,13 @@ realloc(void *ptr, size_t size)
 	void *r;
 	int saved_errno = errno;
 
-	d = getpool();
-	if (d == NULL) {
-		_malloc_init(0);
-		d = getpool();
-	}
-	_MALLOC_LOCK(d->mutex);
-	d->func = "realloc";
-	if (d->active++) {
-		malloc_recurse(d);
-		return NULL;
-	}
+	PROLOGUE(getpool(), "realloc")
 	r = orealloc(&d, ptr, size, CALLER);
-
-	d->active--;
-	_MALLOC_UNLOCK(d->mutex);
-	if (r == NULL && mopts.malloc_xmalloc)
-		wrterror(d, "out of memory");
-	if (r != NULL)
-		errno = saved_errno;
+	EPILOGUE()
 	return r;
 }
 /*DEF_STRONG(realloc);*/
 
-
 /*
  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
@@ -1707,15 +1727,10 @@ calloc(size_t nmemb, size_t size)
 	void *r;
 	int saved_errno = errno;
 
-	d = getpool();
-	if (d == NULL) {
-		_malloc_init(0);
-		d = getpool();
-	}
-	_MALLOC_LOCK(d->mutex);
-	d->func = "calloc";
+	PROLOGUE(getpool(), "calloc")
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
+		d->active--;
 		_MALLOC_UNLOCK(d->mutex);
 		if (mopts.malloc_xmalloc)
 			wrterror(d, "out of memory");
@@ -1723,23 +1738,37 @@ calloc(size_t nmemb, size_t size)
 		return NULL;
 	}
 
-	if (d->active++) {
-		malloc_recurse(d);
+	size *= nmemb;
+	r = omalloc(d, size, 1, CALLER);
+	EPILOGUE()
+	return r;
+}
+/*DEF_STRONG(calloc);*/
+
+void *
+calloc_conceal(size_t nmemb, size_t size)
+{
+	struct dir_info *d;
+	void *r;
+	int saved_errno = errno;
+
+	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
+	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
+	    nmemb > 0 && SIZE_MAX / nmemb < size) {
+		d->active--;
+		_MALLOC_UNLOCK(d->mutex);
+		if (mopts.malloc_xmalloc)
+			wrterror(d, "out of memory");
+		errno = ENOMEM;
 		return NULL;
 	}
 
 	size *= nmemb;
 	r = omalloc(d, size, 1, CALLER);
-
-	d->active--;
-	_MALLOC_UNLOCK(d->mutex);
-	if (r == NULL && mopts.malloc_xmalloc)
-		wrterror(d, "out of memory");
-	if (r != NULL)
-		errno = saved_errno;
+	EPILOGUE()
 	return r;
 }
-/*DEF_STRONG(calloc);*/
+DEF_WEAK(calloc_conceal);
 
 static void *
 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
@@ -1858,17 +1887,11 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 	if (!mopts.internal_funcs)
 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
 
-	d = getpool();
-	if (d == NULL) {
-		_malloc_init(0);
-		d = getpool();
-	}
-
-	_MALLOC_LOCK(d->mutex);
-	d->func = "recallocarray";
+	PROLOGUE(getpool(), "recallocarray")
 
 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
+		d->active--;
 		_MALLOC_UNLOCK(d->mutex);
 		if (mopts.malloc_xmalloc)
 			wrterror(d, "out of memory");
@@ -1880,6 +1903,7 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 	if (ptr != NULL) {
 		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
+			d->active--;
 			_MALLOC_UNLOCK(d->mutex);
 			errno = EINVAL;
 			return NULL;
@@ -1887,24 +1911,12 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 		oldsize = oldnmemb * size;
 	}
 
-	if (d->active++) {
-		malloc_recurse(d);
-		return NULL;
-	}
-
 	r = orecallocarray(&d, ptr, oldsize, newsize, CALLER);
-
-	d->active--;
-	_MALLOC_UNLOCK(d->mutex);
-	if (r == NULL && mopts.malloc_xmalloc)
-		wrterror(d, "out of memory");
-	if (r != NULL)
-		errno = saved_errno;
+	EPILOGUE()
 	return r;
 }
 DEF_WEAK(recallocarray);
 
-
 static void *
 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
 {
@@ -1987,7 +1999,7 @@ omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
 		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
 	}
 
-	if (mopts.malloc_junk == 2) {
+	if (pool->malloc_junk == 2) {
 		if (zero_fill)
 			memset((char *)p + sz - mopts.malloc_guard,
 			    SOME_JUNK, psz - sz);
@@ -2059,26 +2071,9 @@ aligned_alloc(size_t alignment, size_t size)
 		return NULL;
 	}
 
-	d = getpool();
-	if (d == NULL) {
-		_malloc_init(0);
-		d = getpool();
-	}
-	_MALLOC_LOCK(d->mutex);
-	d->func = "aligned_alloc";
-	if (d->active++) {
-		malloc_recurse(d);
-		return NULL;
-	}
+	PROLOGUE(getpool(), "aligned_alloc")
 	r = omemalign(d, alignment, size, 0, CALLER);
-	d->active--;
-	_MALLOC_UNLOCK(d->mutex);
-	if (r == NULL) {
-		if (mopts.malloc_xmalloc)
-			wrterror(d, "out of memory");
-		return NULL;
-	}
-	errno = saved_errno;
+	EPILOGUE()
 	return r;
 }
 /*DEF_STRONG(aligned_alloc);*/
@@ -2121,7 +2116,7 @@ putleakinfo(void *f, size_t sz, int cnt)
 	if (p == NULL) {
 		if (page == NULL ||
 		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
-			page = MMAP(MALLOC_PAGESIZE);
+			page = MMAP(MALLOC_PAGESIZE, 0);
 			if (page == MAP_FAILED)
 				return;
 			used = 0;
@@ -2149,7 +2144,7 @@ dump_leaks(int fd)
 	dprintf(fd, " f sum # avg\n");
 	/* XXX only one page of summary */
 	if (malloc_leaks == NULL)
-		malloc_leaks = MMAP(MALLOC_PAGESIZE);
+		malloc_leaks = MMAP(MALLOC_PAGESIZE, 0);
 	if (malloc_leaks != MAP_FAILED)
 		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
 	RBT_FOREACH(p, leaktree, &leakhead) {
@@ -2219,7 +2214,7 @@ dump_free_page_info(int fd, struct dir_info *d)
 	int i;
 
 	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
-	for (i = 0; i < mopts.malloc_cache; i++) {
+	for (i = 0; i < d->malloc_cache; i++) {
 		if (d->free_regions[i].p != NULL) {
 			dprintf(fd, "%2d) ", i);
 			dprintf(fd, "free at %p: %zu\n",
@@ -2236,6 +2231,8 @@ malloc_dump1(int fd, int poolno, struct dir_info *d)
 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
 	if (d == NULL)
 		return;
+	dprintf(fd, "J=%d cache=%u Fl=%x\n",
+	    d->malloc_junk, d->malloc_cache, d->mmap_flag);
 	dprintf(fd, "Region slots free %zu/%zu\n",
 	    d->regions_free, d->regions_total);
 	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
@@ -2323,11 +2320,11 @@ malloc_exit(void)
 	dprintf(fd, "******** Start dump %s *******\n", __progname);
 	dprintf(fd,
 	    "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
-	    mopts.malloc_mt, mopts.mallloc_mutexes,
+	    mopts.malloc_mt, mopts.malloc_mutexes,
 	    mopts.internal_funcs, mopts.malloc_freecheck,
-	    mopts.malloc_freeunmap, mopts.malloc_junk,
+	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
 	    mopts.malloc_realloc, mopts.malloc_xmalloc,
-	    mopts.chunk_canaries, mopts.malloc_cache,
+	    mopts.chunk_canaries, mopts.def_malloc_cache,
 	    mopts.malloc_guard);
 
 	for (i = 0; i < mopts.malloc_mutexes; i++)
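An aside on the getpool() change above: pool index 0 is now reserved for the concealed allocator (MAP_CONCEAL pages, junking forced to 2, no free-page cache), and threads hash onto pools 1..nmutexes-1. A standalone sketch of that index arithmetic, re-implemented here for illustration (this is not the libc code):

#include <stdio.h>

int
main(void)
{
	unsigned int nmutexes = 8, tid;

	/* 1 + tid % (nmutexes - 1) always yields a value in
	 * [1, nmutexes - 1], so index 0 (the concealed pool) is
	 * never returned for ordinary allocations. */
	for (tid = 0; tid < 16; tid++)
		printf("tid %2u -> pool %u\n", tid, 1 + tid % (nmutexes - 1));
	return 0;
}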