Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/libc/stdlib/malloc.3   38
-rw-r--r--  src/lib/libc/stdlib/malloc.c  226
2 files changed, 178 insertions, 86 deletions
diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3
index b700add823..667baa9b7c 100644
--- a/src/lib/libc/stdlib/malloc.3
+++ b/src/lib/libc/stdlib/malloc.3
@@ -30,9 +30,9 @@
 .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 .\" SUCH DAMAGE.
 .\"
-.\" $OpenBSD: malloc.3,v 1.137 2023/07/01 18:35:14 otto Exp $
+.\" $OpenBSD: malloc.3,v 1.138 2023/10/22 12:19:26 otto Exp $
 .\"
-.Dd $Mdocdate: July 1 2023 $
+.Dd $Mdocdate: October 22 2023 $
 .Dt MALLOC 3
 .Os
 .Sh NAME
@@ -307,7 +307,7 @@ These malloc options imply
 .Cm D .
 .It Cm F
 .Dq Freecheck .
-Enable more extensive double free and use after free detection.
+Enable more extensive double free and write after free detection.
 All chunks in the delayed free list will be checked for double frees and
 write after frees.
 Unused pages on the freelist are read and write protected to
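
Editorial note on the F option change above: the hypothetical program below (not part of this commit) frees a small chunk and then writes to it. Run with junking and freecheck enabled, for example via env MALLOC_OPTIONS=FJ ./a.out, a later free or allocation of the same chunk may abort the program with the "write to free mem" error documented further down; whether and when the corruption is reported depends on the junking level and on timing, so treat this as a sketch, not a guaranteed reproducer.

#include <stdlib.h>

int
main(void)
{
	char *p = malloc(32);	/* small allocation served from a chunk page */

	if (p == NULL)
		return 1;
	free(p);		/* chunk is junk-filled and parked on the delayed free list */
	p[5] = 'X';		/* write after free: corrupts the junk pattern */
	/* further malloc/free activity gives malloc a chance to re-validate
	 * the junked chunk and abort with "write to free mem ..." */
	free(malloc(32));
	return 0;
}
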
@@ -641,18 +641,34 @@ or
 reallocate an unallocated pointer was made.
 .It Dq double free
 There was an attempt to free an allocation that had already been freed.
-.It Dq write after free
-An allocation has been modified after it was freed.
+.It Dq write to free mem Va address Ns [ Va start Ns .. Ns Va end Ns ]@ Ns Va size
+An allocation has been modified after it was freed,
+or a chunk that was never allocated was written to.
+The
+.Va range
+at which corruption was detected is printed between [ and ].
+.Pp
+Enabling option
+.Cm D
+allows malloc to print information about where the allocation
+was done.
 .It Dq modified chunk-pointer
 The pointer passed to
 .Fn free
 or a reallocation function has been modified.
-.It Dq canary corrupted address offset@length
-A byte after the requested size has been overwritten,
+.It Dq canary corrupted Va address Ns [ Va offset Ns ]@ Ns Va length Ns / Ns Va size
+A byte after the requested
+.Va length has been overwritten,
 indicating a heap overflow.
-The offset at which corruption was detected is printed before the @,
-and the requested length of the allocation after the @.
-.It Dq recorded size oldsize inconsistent with size
+The
+.Va offset
+at which corruption was detected is printed between [ and ],
+the requested
+.Va length
+of the allocation is printed before the / and the
+.Va size
+of the allocation after the /.
+.It Dq recorded size Va oldsize No inconsistent with Va size
 .Fn recallocarray
 or
 .Fn freezero
@@ -676,7 +692,7 @@ functions nor utilize any other functions which may call
 (e.g.,
 .Xr stdio 3
 routines).
-.It Dq unknown char in MALLOC_OPTIONS
+.It Dq unknown char in Ev MALLOC_OPTIONS
 We found something we didn't understand.
 .It any other error
 .Fn malloc
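
Editorial note, companion to the reworked "canary corrupted" entry above: the hypothetical program below overflows a chunk by a single byte. With canaries enabled (the C option), the corruption is typically reported when the chunk is freed, in the address[offset]@length/size form documented above. The exact numbers depend on the bucket size chosen internally, so the values in the comment are illustrative only.

#include <stdlib.h>

int
main(void)
{
	char *p = malloc(20);	/* requested length 20, rounded up internally */

	if (p == NULL)
		return 1;
	p[20] = 1;	/* one byte past the requested size: clobbers the canary */
	free(p);	/* with MALLOC_OPTIONS=C this may abort with e.g.
			 * "canary corrupted 0x...[20]@20/32" (sizes illustrative) */
	return 0;
}
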
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 814a417145..c3b2332251 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.290 2023/09/09 06:52:40 asou Exp $ */
+/* $OpenBSD: malloc.c,v 1.291 2023/10/22 12:19:26 otto Exp $ */
 /*
  * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -112,7 +112,7 @@ struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
 #ifdef MALLOC_STATS
-	void *f;		/* where allocated from */
+	void **f;		/* where allocated from */
 #endif
 };
 
@@ -146,7 +146,7 @@ struct dir_info {
 	size_t regions_total;		/* number of region slots */
 	size_t regions_free;		/* number of free slots */
 	size_t rbytesused;		/* random bytes used */
-	char *func;			/* current function */
+	const char *func;		/* current function */
 	int malloc_junk;		/* junk fill? */
 	int mmap_flag;			/* extra flag for mmap */
 	int mutex;
@@ -166,6 +166,7 @@ struct dir_info {
 	void *chunk_pages;
 	size_t chunk_pages_used;
 #ifdef MALLOC_STATS
+	void *caller;
 	size_t inserts;
 	size_t insert_collisions;
 	size_t finds;
@@ -183,12 +184,16 @@ struct dir_info {
 #define STATS_INC(x)	((x)++)
 #define STATS_ZERO(x)	((x) = 0)
 #define STATS_SETF(x,y)	((x)->f = (y))
+#define STATS_SETFN(x,k,y)	((x)->f[k] = (y))
+#define SET_CALLER(x,y)	if (DO_STATS) ((x)->caller = (y))
 #else
 #define STATS_ADD(x,y)	/* nothing */
 #define STATS_SUB(x,y)	/* nothing */
 #define STATS_INC(x)	/* nothing */
 #define STATS_ZERO(x)	/* nothing */
 #define STATS_SETF(x,y)	/* nothing */
+#define STATS_SETFN(x,k,y)	/* nothing */
+#define SET_CALLER(x,y)	/* nothing */
 #endif /* MALLOC_STATS */
 	u_int32_t canary2;
 };
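
Editorial note: STATS_SETF keeps its old meaning (one "allocated from" pointer for a whole region, used for page-sized allocations), while the new STATS_SETFN indexes into an array so that every chunk k on a shared page can carry its own caller. A toy sketch of the difference, with hypothetical types rather than the real region_info:

#include <stdio.h>

#define CHUNKS_PER_PAGE 4

/* old-style record: one attribution per region */
struct region_single { void *f; };

/* new-style record: one attribution slot per chunk on the page */
struct region_percnk { void *f[CHUNKS_PER_PAGE]; };

#define STATS_SETF(x, y)	((x)->f = (y))
#define STATS_SETFN(x, k, y)	((x)->f[k] = (y))

int
main(void)
{
	struct region_single a = {0};
	struct region_percnk b = {{0}};
	int site1, site2;	/* stand-ins for two caller addresses */

	STATS_SETF(&a, &site1);		/* whole region attributed to one site */
	STATS_SETFN(&b, 0, &site1);	/* chunk 0 and chunk 3 can now differ */
	STATS_SETFN(&b, 3, &site2);
	printf("chunk 3 allocated from %p\n", b.f[3]);
	return 0;
}
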
@@ -212,6 +217,8 @@ struct chunk_info {
 	u_short bits[1];	/* which chunks are free */
 };
 
+#define CHUNK_FREE(i, n)	((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS)))
+
 struct malloc_readonly {
 	/* Main bookkeeping information */
 	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
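
Editorial note: CHUNK_FREE centralizes a bit test that was previously open-coded in find_chunknum and dump_chunk (see the hunks below). chunk_info.bits is an array of u_short words, MALLOC_BITS bits each, with bit n set while chunk n is free. A self-contained sketch of the same arithmetic, assuming MALLOC_BITS is 16 (the number of bits in an unsigned short, as u_short is used in malloc.c):

#include <stdio.h>

#define MALLOC_BITS	16	/* assumption: bits in an unsigned short */
#define CHUNK_FREE(bits, n) \
	((bits)[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS)))

int
main(void)
{
	unsigned short bits[4] = {0};	/* bitmap with room for 64 chunks */

	/* mark chunk 21 free, the way free_bytes() sets the bit */
	bits[21 / MALLOC_BITS] |= 1U << (21 % MALLOC_BITS);

	printf("chunk 20 free? %d\n", CHUNK_FREE(bits, 20) != 0);	/* prints 0 */
	printf("chunk 21 free? %d\n", CHUNK_FREE(bits, 21) != 0);	/* prints 1 */
	return 0;
}
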
@@ -227,7 +234,7 @@ struct malloc_readonly {
 	u_int junk_loc;			/* variation in location of junk */
 	size_t malloc_guard;		/* use guard pages after allocations? */
 #ifdef MALLOC_STATS
-	int malloc_stats;		/* dump leak report at end */
+	int malloc_stats;		/* save callers, dump leak report at end */
 	int malloc_verbose;		/* dump verbose statistics at end */
 #define DO_STATS mopts.malloc_stats
 #else
@@ -254,6 +261,7 @@ static __dead void wrterror(struct dir_info *d, char *msg, ...)
 void malloc_dump(void);
 PROTO_NORMAL(malloc_dump);
 static void malloc_exit(void);
+static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
 #endif
 
 #if defined(__aarch64__) || \
@@ -530,7 +538,7 @@ omalloc_init(void)
 	do {
 		mopts.chunk_canaries = arc4random();
 	} while ((u_char)mopts.chunk_canaries == 0 ||
-	    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
+	    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
 }
 
 static void
@@ -714,14 +722,14 @@ junk_free(int junk, void *p, size_t sz)
 }
 
 static inline void
-validate_junk(struct dir_info *pool, void *p, size_t sz)
+validate_junk(struct dir_info *pool, void *p, size_t argsz)
 {
-	size_t i, step = 1;
+	size_t i, sz, step = 1;
 	uint64_t *lp = p;
 
-	if (pool->malloc_junk == 0 || sz == 0)
+	if (pool->malloc_junk == 0 || argsz == 0)
 		return;
-	sz /= sizeof(uint64_t);
+	sz = argsz / sizeof(uint64_t);
 	if (pool->malloc_junk == 1) {
 		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
 			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
@@ -731,14 +739,23 @@ validate_junk(struct dir_info *pool, void *p, size_t sz)
 	}
 	/* see junk_free */
 	for (i = mopts.junk_loc % step; i < sz; i += step) {
-		if (lp[i] != SOME_FREEJUNK_ULL)
-			wrterror(pool, "write after free %p", p);
+		if (lp[i] != SOME_FREEJUNK_ULL) {
+#ifdef MALLOC_STATS
+			if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
+				print_chunk_details(pool, lp, argsz, i);
+			else
+#endif
+			wrterror(pool,
+			    "write to free mem %p[%zu..%zu]@%zu",
+			    lp, i * sizeof(uint64_t),
+			    (i + 1) * sizeof(uint64_t) - 1, argsz);
+		}
 	}
 }
 
 
 /*
  * Cache maintenance.
  * Opposed to the regular region data structure, the sizes in the
  * cache are in MALLOC_PAGESIZE units.
  */
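
Editorial note: validate_junk now reports the exact 8-byte word that stopped matching the free-junk fill, as a byte range relative to the start of the freed area plus the area's size. A standalone sketch of the underlying idea (poison freed memory, later rescan it word by word); the names and the POISON constant are illustrative stand-ins for the malloc.c ones such as SOME_FREEJUNK_ULL:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POISON 0xdfdfdfdfdfdfdfdfULL	/* stand-in for SOME_FREEJUNK_ULL */

/* Fill a freed area with poison bytes. */
static void
junk_fill(void *p, size_t sz)
{
	memset(p, 0xdf, sz);
}

/* Rescan the area; report the first 8-byte window that was modified. */
static int
junk_check(void *p, size_t sz)
{
	uint64_t *lp = p;
	size_t i, words = sz / sizeof(uint64_t);

	for (i = 0; i < words; i++) {
		if (lp[i] != POISON) {
			printf("write to free mem %p[%zu..%zu]@%zu\n",
			    p, i * sizeof(uint64_t),
			    (i + 1) * sizeof(uint64_t) - 1, sz);
			return 1;
		}
	}
	return 0;
}

int
main(void)
{
	uint64_t buf[8];

	junk_fill(buf, sizeof(buf));
	((char *)buf)[19] = 'X';		/* simulated write after free */
	return junk_check(buf, sizeof(buf));	/* reports ...[16..23]@64 */
}
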
@@ -809,7 +826,7 @@ unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
 		i = getrbyte(d) & (cache->max - 1);
 		r = cache->pages[i];
 		fresh = (uintptr_t)r & 1;
-		*(uintptr_t*)&r &= ~1ULL;
+		*(uintptr_t*)&r &= ~1UL;
 		if (!fresh && !mopts.malloc_freeunmap)
 			validate_junk(d, r, sz);
 		if (munmap(r, sz))
@@ -995,11 +1012,18 @@ omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
 {
 	struct chunk_info *bp;
 	void *pp;
+	void *ff = NULL;
 
 	/* Allocate a new bucket */
 	pp = map(d, MALLOC_PAGESIZE, 0);
 	if (pp == MAP_FAILED)
 		return NULL;
+	if (DO_STATS) {
+		ff = map(d, MALLOC_PAGESIZE, 0);
+		if (ff == MAP_FAILED)
+			goto err;
+		memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / B2ALLOC(bucket));
+	}
 
 	/* memory protect the page allocated in the malloc(0) case */
 	if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
@@ -1011,7 +1035,7 @@ omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
 	bp->page = pp;
 
 	if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
-	    NULL))
+	    ff))
 		goto err;
 	LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
 
@@ -1022,6 +1046,8 @@ omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
 
 err:
 	unmap(d, pp, MALLOC_PAGESIZE, 0);
+	if (ff != NULL && ff != MAP_FAILED)
+		unmap(d, ff, MALLOC_PAGESIZE, 0);
 	return NULL;
 }
 
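
Editorial note: when statistics are enabled, each new chunk page now gets a companion page ff holding one void * caller slot per chunk; it is passed to insert() as the region's f pointer and released again in free_bytes when the chunk page goes away (see the later hunk). The slot count is MALLOC_PAGESIZE / B2ALLOC(bucket), i.e. the number of chunks that fit on the page. A small arithmetic sketch with made-up numbers (4 KB page, 64-byte chunks), not the real B2ALLOC macro:

#include <stdio.h>

int
main(void)
{
	size_t pagesz = 4096;			/* assumed MALLOC_PAGESIZE */
	size_t chunksz = 64;			/* assumed B2ALLOC(bucket) for some bucket */
	size_t nchunks = pagesz / chunksz;	/* 64 chunks on the page */
	size_t bytes = sizeof(void *) * nchunks;/* bytes of caller slots to clear */

	/* 64 chunks * 8-byte pointers = 512 bytes: the caller array easily
	 * fits in the single shadow page mapped alongside the chunk page. */
	printf("%zu chunks -> %zu bytes of caller slots\n", nchunks, bytes);
	return 0;
}
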
@@ -1101,7 +1127,7 @@ fill_canary(char *ptr, size_t sz, size_t allocated)
  * Allocate a chunk
  */
 static void *
-malloc_bytes(struct dir_info *d, size_t size, void *f)
+malloc_bytes(struct dir_info *d, size_t size)
 {
 	u_int i, r, bucket, listnum;
 	size_t k;
@@ -1153,11 +1179,6 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
 		}
 	}
 found:
-	if (i == 0 && k == 0 && DO_STATS) {
-		struct region_info *r = find(d, bp->page);
-		STATS_SETF(r, f);
-	}
-
 	*lp ^= 1 << k;
 
 	/* If there are no more free, remove from free-list */
@@ -1170,6 +1191,11 @@ found:
 	if (mopts.chunk_canaries && size > 0)
 		bp->bits[bp->offset + k] = size;
 
+	if (DO_STATS) {
+		struct region_info *r = find(d, bp->page);
+		STATS_SETFN(r, k, d->caller);
+	}
+
 	k *= B2ALLOC(bp->bucket);
 
 	p = (char *)bp->page + k;
@@ -1194,8 +1220,8 @@ validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
 
 	while (p < q) {
 		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
-			wrterror(d, "canary corrupted %p %#tx@%#zx%s",
-			    ptr, p - ptr, sz,
+			wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
+			    ptr, p - ptr, sz, allocated,
 			    *p == SOME_FREEJUNK ? " (double free?)" : "");
 		}
 		p++;
@@ -1215,8 +1241,7 @@ find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
 
 	if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
 		wrterror(d, "modified chunk-pointer %p", ptr);
-	if (info->bits[chunknum / MALLOC_BITS] &
-	    (1U << (chunknum % MALLOC_BITS)))
+	if (CHUNK_FREE(info, chunknum))
 		wrterror(d, "double free %p", ptr);
 	if (check && info->bucket > 0) {
 		validate_canary(d, ptr, info->bits[info->offset + chunknum],
@@ -1239,9 +1264,6 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 	info = (struct chunk_info *)r->size;
 	chunknum = find_chunknum(d, info, ptr, 0);
 
-	if (chunknum == 0)
-		STATS_SETF(r, NULL);
-
 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
 	info->free++;
 
@@ -1261,18 +1283,22 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 	if (info->bucket == 0 && !mopts.malloc_freeunmap)
 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
 	unmap(d, info->page, MALLOC_PAGESIZE, 0);
+#ifdef MALLOC_STATS
+	if (r->f != NULL) {
+		unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
+		r->f = NULL;
+	}
+#endif
 
 	delete(d, r);
 	mp = &d->chunk_info_list[info->bucket];
 	LIST_INSERT_HEAD(mp, info, entries);
 }
 
-
-
 static void *
-omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
+omalloc(struct dir_info *pool, size_t sz, int zero_fill)
 {
-	void *p;
+	void *p, *caller = NULL;
 	size_t psz;
 
 	if (sz > MALLOC_MAXCHUNK) {
@@ -1287,7 +1313,11 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+		if (DO_STATS)
+			caller = pool->caller;
+#endif
+		if (insert(pool, p, sz, caller)) {
 			unmap(pool, p, psz, 0);
 			errno = ENOMEM;
 			return NULL;
@@ -1324,7 +1354,7 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 
 	} else {
 		/* takes care of SOME_JUNK */
-		p = malloc_bytes(pool, sz, f);
+		p = malloc_bytes(pool, sz);
 		if (zero_fill && p != NULL && sz > 0)
 			memset(p, 0, sz);
 	}
@@ -1473,7 +1503,8 @@ malloc(size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "malloc")
-	r = omalloc(d, size, 0, caller());
+	SET_CALLER(d, caller());
+	r = omalloc(d, size, 0);
 	EPILOGUE()
 	return r;
 }
@@ -1487,7 +1518,8 @@ malloc_conceal(size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
-	r = omalloc(d, size, 0, caller());
+	SET_CALLER(d, caller());
+	r = omalloc(d, size, 0);
 	EPILOGUE()
 	return r;
 }
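
Editorial note: all public entry points (malloc, malloc_conceal, realloc, calloc, the aligned allocators, recallocarray) now record the caller's return address into the selected pool with SET_CALLER(d, caller()) right after PROLOGUE, and the internal omalloc/orealloc/omemalign helpers read pool->caller themselves, so the void *f parameter threading disappears. A minimal standalone sketch of that boundary pattern with hypothetical names (the real caller(), PROLOGUE and SET_CALLER are malloc-internal macros):

#include <stdio.h>
#include <stddef.h>

struct pool {
	void *caller;	/* last API caller, only meaningful when stats are on */
};

static int stats_enabled = 1;

/* analogue of SET_CALLER(): record context once at the public boundary */
#define SET_CALLER(p, addr) do { \
	if (stats_enabled) (p)->caller = (addr); \
} while (0)

/* internal helper: no extra parameter, reads the pool field instead */
static void *
internal_alloc(struct pool *p, size_t sz)
{
	if (stats_enabled)
		printf("alloc of %zu attributed to %p\n", sz, p->caller);
	return NULL;	/* the allocation itself is elided in this sketch */
}

void *
api_malloc(struct pool *p, size_t sz)
{
	SET_CALLER(p, __builtin_return_address(0));	/* GCC/Clang builtin */
	return internal_alloc(p, sz);
}

int
main(void)
{
	struct pool p = {0};

	(void)api_malloc(&p, 32);
	return 0;
}
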
@@ -1495,7 +1527,7 @@ DEF_WEAK(malloc_conceal);
 
 static struct region_info *
 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
-    char **saved_function)
+    const char ** saved_function)
 {
 	struct dir_info *pool = argpool;
 	struct region_info *r = find(pool, p);
@@ -1533,7 +1565,7 @@ ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
 {
 	struct region_info *r;
 	struct dir_info *pool;
-	char *saved_function;
+	const char *saved_function;
 	size_t sz;
 
 	r = findpool(p, *argpool, &pool, &saved_function);
@@ -1721,11 +1753,11 @@ freezero(void *ptr, size_t sz)
 DEF_WEAK(freezero);
 
 static void *
-orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
+orealloc(struct dir_info **argpool, void *p, size_t newsz)
 {
 	struct region_info *r;
 	struct dir_info *pool;
-	char *saved_function;
+	const char *saved_function;
 	struct chunk_info *info;
 	size_t oldsz, goldsz, gnewsz;
 	void *q, *ret;
@@ -1733,7 +1765,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 	int forced;
 
 	if (p == NULL)
-		return omalloc(*argpool, newsz, 0, f);
+		return omalloc(*argpool, newsz, 0);
 
 	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
 		errno = ENOMEM;
@@ -1797,7 +1829,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 				if (mopts.chunk_canaries)
 					fill_canary(p, newsz,
 					    PAGEROUND(newsz));
-				STATS_SETF(r, f);
+				STATS_SETF(r, (*argpool)->caller);
 				STATS_INC(pool->cheap_reallocs);
 				ret = p;
 				goto done;
@@ -1822,7 +1854,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 			p = pp;
 		} else if (mopts.chunk_canaries)
 			fill_canary(p, newsz, PAGEROUND(newsz));
-		STATS_SETF(r, f);
+		STATS_SETF(r, (*argpool)->caller);
 		ret = p;
 		goto done;
 	} else {
@@ -1844,7 +1876,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 			if (mopts.chunk_canaries)
 				fill_canary(p, newsz, PAGEROUND(newsz));
 		}
-		STATS_SETF(r, f);
+		STATS_SETF(r, (*argpool)->caller);
 		ret = p;
 		goto done;
 	}
@@ -1859,12 +1891,12 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 			info->bits[info->offset + chunknum] = newsz;
 			fill_canary(p, newsz, B2SIZE(info->bucket));
 		}
-		if (DO_STATS && chunknum == 0)
-			STATS_SETF(r, f);
+		if (DO_STATS)
+			STATS_SETFN(r, chunknum, (*argpool)->caller);
 		ret = p;
 	} else if (newsz != oldsz || forced) {
 		/* create new allocation */
-		q = omalloc(pool, newsz, 0, f);
+		q = omalloc(pool, newsz, 0);
 		if (q == NULL) {
 			ret = NULL;
 			goto done;
@@ -1877,8 +1909,8 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 		/* oldsz == newsz */
 		if (newsz != 0)
 			wrterror(pool, "realloc internal inconsistency");
-		if (DO_STATS && chunknum == 0)
-			STATS_SETF(r, f);
+		if (DO_STATS)
+			STATS_SETFN(r, chunknum, (*argpool)->caller);
 		ret = p;
 	}
 done:
@@ -1897,7 +1929,8 @@ realloc(void *ptr, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "realloc")
-	r = orealloc(&d, ptr, size, caller());
+	SET_CALLER(d, caller());
+	r = orealloc(&d, ptr, size);
 	EPILOGUE()
 	return r;
 }
@@ -1917,6 +1950,7 @@ calloc(size_t nmemb, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "calloc")
+	SET_CALLER(d, caller());
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
 		d->active--;
@@ -1928,7 +1962,7 @@ calloc(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
-	r = omalloc(d, size, 1, caller());
+	r = omalloc(d, size, 1);
 	EPILOGUE()
 	return r;
 }
@@ -1942,6 +1976,7 @@ calloc_conceal(size_t nmemb, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
+	SET_CALLER(d, caller());
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
 		d->active--;
@@ -1953,7 +1988,7 @@ calloc_conceal(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
-	r = omalloc(d, size, 1, caller());
+	r = omalloc(d, size, 1);
 	EPILOGUE()
 	return r;
 }
@@ -1961,16 +1996,16 @@ DEF_WEAK(calloc_conceal);
 
 static void *
 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
-    size_t newsize, void *f)
+    size_t newsize)
 {
 	struct region_info *r;
 	struct dir_info *pool;
-	char *saved_function;
+	const char *saved_function;
 	void *newptr;
 	size_t sz;
 
 	if (p == NULL)
-		return omalloc(*argpool, newsize, 1, f);
+		return omalloc(*argpool, newsize, 1);
 
 	if (oldsize == newsize)
 		return p;
@@ -2001,7 +2036,7 @@ orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
 		    sz - mopts.malloc_guard, oldsize);
 	}
 
-	newptr = omalloc(pool, newsize, 0, f);
+	newptr = omalloc(pool, newsize, 0);
 	if (newptr == NULL)
 		goto done;
 
@@ -2086,6 +2121,7 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
 
 	PROLOGUE(getpool(), "recallocarray")
+	SET_CALLER(d, caller());
 
 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
@@ -2109,7 +2145,7 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 		oldsize = oldnmemb * size;
 	}
 
-	r = orecallocarray(&d, ptr, oldsize, newsize, caller());
+	r = orecallocarray(&d, ptr, oldsize, newsize);
 	EPILOGUE()
 	return r;
 }
@@ -2150,11 +2186,10 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
 }
 
 static void *
-omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
-    void *f)
+omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
 {
 	size_t psz;
-	void *p;
+	void *p, *caller = NULL;
 
 	/* If between half a page and a page, avoid MALLOC_MOVE. */
 	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
@@ -2174,7 +2209,7 @@ omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
 			pof2 <<= 1;
 		} else
 			pof2 = sz;
-		return omalloc(pool, pof2, zero_fill, f);
+		return omalloc(pool, pof2, zero_fill);
 	}
 
 	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
@@ -2193,7 +2228,11 @@ omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
 		return NULL;
 	}
 
-	if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+	if (DO_STATS)
+		caller = pool->caller;
+#endif
+	if (insert(pool, p, sz, caller)) {
 		unmap(pool, p, psz, 0);
 		errno = ENOMEM;
 		return NULL;
@@ -2241,7 +2280,8 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 		malloc_recurse(d);
 		goto err;
 	}
-	r = omemalign(d, alignment, size, 0, caller());
+	SET_CALLER(d, caller());
+	r = omemalign(d, alignment, size, 0);
 	d->active--;
 	_MALLOC_UNLOCK(d->mutex);
 	if (r == NULL) {
@@ -2279,7 +2319,8 @@ aligned_alloc(size_t alignment, size_t size)
 	}
 
 	PROLOGUE(getpool(), "aligned_alloc")
-	r = omemalign(d, alignment, size, 0, caller());
+	SET_CALLER(d, caller());
+	r = omemalign(d, alignment, size, 0);
 	EPILOGUE()
 	return r;
 }
@@ -2288,6 +2329,45 @@ DEF_STRONG(aligned_alloc);
 #ifdef MALLOC_STATS
 
 static void
+print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
+{
+	struct region_info *r;
+	struct chunk_info *chunkinfo;
+	uint32_t chunknum;
+	Dl_info info;
+	const char *caller, *pcaller = NULL;
+	const char *object = ".";
+	const char *pobject = ".";
+	const char *msg = "";
+
+	r = find(pool, p);
+	chunkinfo = (struct chunk_info *)r->size;
+	chunknum = find_chunknum(pool, chunkinfo, p, 0);
+	caller = r->f[chunknum];
+	if (dladdr(caller, &info) != 0) {
+		caller -= (uintptr_t)info.dli_fbase;
+		object = info.dli_fname;
+	}
+	if (chunknum > 0) {
+		chunknum--;
+		pcaller = r->f[chunknum];
+		if (dladdr(pcaller, &info) != 0) {
+			pcaller -= (uintptr_t)info.dli_fbase;
+			pobject = info.dli_fname;
+		}
+		if (CHUNK_FREE(chunkinfo, chunknum))
+			msg = " (now free)";
+	}
+
+	wrterror(pool,
+	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
+	    "(preceding chunk %p allocated at %s %p%s)",
+	    p, i * sizeof(uint64_t),
+	    (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
+	    pobject, pcaller, msg);
+}
+
+static void
 ulog(const char *format, ...)
 {
 	va_list ap;
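
Editorial note: print_chunk_details turns the stored caller address into something readable by asking dladdr for the containing object and subtracting its load base, so the report shows object-file/offset pairs that remain meaningful with addr2line or objdump despite ASLR. A standalone sketch of that dladdr pattern (hypothetical program, not malloc internals); dladdr and Dl_info come from <dlfcn.h>, and some systems need -ldl at link time:

#include <dlfcn.h>
#include <stdio.h>
#include <stdint.h>

static void
where_am_i(void)
{
	void *addr = __builtin_return_address(0);	/* caller's address */
	Dl_info info;

	if (dladdr(addr, &info) != 0) {
		/* file-relative offset: feed it to addr2line/objdump later */
		printf("called from %s at offset %#lx\n", info.dli_fname,
		    (unsigned long)((uintptr_t)addr - (uintptr_t)info.dli_fbase));
	} else
		printf("called from %p (no object info)\n", addr);
}

int
main(void)
{
	where_am_i();
	return 0;
}
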
@@ -2413,23 +2493,19 @@ dump_leaks(struct leaktree *leaks)
 }
 
 static void
-dump_chunk(struct leaktree* leaks, struct chunk_info *p, void *f,
+dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
     int fromfreelist)
 {
 	while (p != NULL) {
 		if (mopts.malloc_verbose)
 			ulog("chunk %18p %18p %4zu %d/%d\n",
-			    p->page, ((p->bits[0] & 1) ? NULL : f),
+			    p->page, NULL,
 			    B2SIZE(p->bucket), p->free, p->total);
 		if (!fromfreelist) {
-			size_t sz = B2SIZE(p->bucket);
-			if (p->bits[0] & 1)
-				putleakinfo(leaks, NULL, sz, p->total -
-				    p->free);
-			else {
-				putleakinfo(leaks, f, sz, 1);
-				putleakinfo(leaks, NULL, sz,
-				    p->total - p->free - 1);
+			size_t i, sz = B2SIZE(p->bucket);
+			for (i = 0; i < p->total; i++) {
+				if (!CHUNK_FREE(p, i))
+					putleakinfo(leaks, f[i], sz, 1);
 			}
 			break;
 		}
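
Editorial note: dump_chunk now walks every chunk on the page and, for each one still in use, credits its own caller slot f[i] to the leak tree, instead of attributing the whole page to a single caller (or to NULL) as before. A toy sketch of that aggregation step, with a hypothetical flat leak table keyed by caller address rather than the real leaktree:

#include <stdio.h>
#include <string.h>

#define TOTAL 8

/* toy leak table: count of live chunks per caller address */
struct leak { void *caller; int count; };

static void
putleakinfo(struct leak *tab, size_t n, void *caller)
{
	size_t j;

	for (j = 0; j < n; j++) {
		if (tab[j].caller == caller || tab[j].caller == NULL) {
			tab[j].caller = caller;
			tab[j].count++;
			return;
		}
	}
}

int
main(void)
{
	struct leak tab[TOTAL];
	void *f[TOTAL];			/* per-chunk caller slots, as in region->f */
	unsigned short freemap = 0x0f;	/* chunks 0-3 free, 4-7 in use */
	int site1, site2;
	size_t i;

	memset(tab, 0, sizeof(tab));
	for (i = 0; i < TOTAL; i++)
		f[i] = (i % 2) ? (void *)&site1 : (void *)&site2;

	for (i = 0; i < TOTAL; i++)
		if (!(freemap & (1U << i)))	/* analogue of !CHUNK_FREE(p, i) */
			putleakinfo(tab, TOTAL, f[i]);

	for (i = 0; i < TOTAL && tab[i].caller != NULL; i++)
		printf("caller %p: %d live chunk(s)\n", tab[i].caller, tab[i].count);
	return 0;
}
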
@@ -2501,7 +2577,7 @@ malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
 
 	if (mopts.malloc_verbose) {
 		ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
-		ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk,
+		ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
 		    d->mmap_flag);
 		ulog("Region slots free %zu/%zu\n",
 		    d->regions_free, d->regions_total);
@@ -2589,7 +2665,7 @@ malloc_exit(void)
 	int save_errno = errno;
 
 	ulog("******** Start dump %s *******\n", __progname);
-	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
+	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
 	    "G=%zu\n",
 	    mopts.malloc_mutexes,
 	    mopts.internal_funcs, mopts.malloc_freecheck,