summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    otto <>    2016-10-20 05:38:41 +0000
committer otto <>    2016-10-20 05:38:41 +0000
commit    be4e234f58a9cfcada007f689edaac74cdb2672a (patch)
tree      0f42bf9db084388eb05b75499e06a217d6f33b01
parent    32993d77e030496e89e508d70d0fb7a85cd0787f (diff)
download  openbsd-be4e234f58a9cfcada007f689edaac74cdb2672a.tar.gz
          openbsd-be4e234f58a9cfcada007f689edaac74cdb2672a.tar.bz2
          openbsd-be4e234f58a9cfcada007f689edaac74cdb2672a.zip
Also place canaries in > page sized objects (if C is in effect); ok tb@
-rw-r--r--  src/lib/libc/stdlib/malloc.c | 213
1 file changed, 110 insertions, 103 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 7e7364f484..2a05d806d2 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: malloc.c,v 1.202 2016/10/15 18:24:40 guenther Exp $ */ 1/* $OpenBSD: malloc.c,v 1.203 2016/10/20 05:38:41 otto Exp $ */
2/* 2/*
3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -31,6 +31,7 @@
31#include <sys/mman.h> 31#include <sys/mman.h>
32#include <sys/uio.h> 32#include <sys/uio.h>
33#include <errno.h> 33#include <errno.h>
34#include <stdarg.h>
34#include <stdint.h> 35#include <stdint.h>
35#include <stdlib.h> 36#include <stdlib.h>
36#include <string.h> 37#include <string.h>
@@ -199,6 +200,8 @@ static union {
199char *malloc_options; /* compile-time options */ 200char *malloc_options; /* compile-time options */
200 201
201static u_char getrbyte(struct dir_info *d); 202static u_char getrbyte(struct dir_info *d);
203static __dead void wrterror(struct dir_info *d, char *msg, ...)
204 __attribute__((__format__ (printf, 2, 3)));
202 205
203#ifdef MALLOC_STATS 206#ifdef MALLOC_STATS
204void malloc_dump(int, struct dir_info *); 207void malloc_dump(int, struct dir_info *);
@@ -261,40 +264,26 @@ struct dir_info *getpool(void)
261} 264}
262 265
263static __dead void 266static __dead void
264wrterror(struct dir_info *d, char *msg, void *p) 267wrterror(struct dir_info *d, char *msg, ...)
265{ 268{
266 char *q = " error: "; 269 struct iovec iov[3];
267 struct iovec iov[7]; 270 char pidbuf[80];
268 char pidbuf[20]; 271 char buf[80];
269 char buf[20]; 272 int saved_errno = errno, ret;
270 int saved_errno = errno, i; 273 va_list ap;
271 274
272 iov[0].iov_base = __progname; 275 iov[0].iov_base = pidbuf;
273 iov[0].iov_len = strlen(__progname); 276 ret = snprintf(pidbuf, sizeof(pidbuf), "%.50s(%d) in %s(): ",
274 iov[1].iov_base = pidbuf; 277 __progname, getpid(), d->func ? d->func : "unknown");
275 snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid()); 278 iov[0].iov_len = ret > 0 ? strlen(pidbuf) : 0;
276 iov[1].iov_len = strlen(pidbuf); 279 iov[1].iov_base = buf;
277 if (d != NULL) { 280 va_start(ap, msg);
278 iov[2].iov_base = d->func; 281 ret = vsnprintf(buf, sizeof(buf), msg, ap);
279 iov[2].iov_len = strlen(d->func); 282 va_end(ap);
280 } else { 283 iov[1].iov_len = ret > 0 ? strlen(buf) : 0;
281 iov[2].iov_base = "unknown"; 284 iov[2].iov_base = "\n";
282 iov[2].iov_len = 7; 285 iov[2].iov_len = 1;
283 } 286 writev(STDERR_FILENO, iov, 3);
284 iov[3].iov_base = q;
285 iov[3].iov_len = strlen(q);
286 iov[4].iov_base = msg;
287 iov[4].iov_len = strlen(msg);
288 iov[5].iov_base = buf;
289 if (p == NULL)
290 iov[5].iov_len = 0;
291 else {
292 snprintf(buf, sizeof(buf), " %010p", p);
293 iov[5].iov_len = strlen(buf);
294 }
295 iov[6].iov_base = "\n";
296 iov[6].iov_len = 1;
297 writev(STDERR_FILENO, iov, 7);
298 287
299#ifdef MALLOC_STATS 288#ifdef MALLOC_STATS
300 if (mopts.malloc_stats) 289 if (mopts.malloc_stats)
@@ -342,12 +331,12 @@ unmap(struct dir_info *d, void *p, size_t sz)
342 u_int i, offset; 331 u_int i, offset;
343 332
344 if (sz != PAGEROUND(sz)) 333 if (sz != PAGEROUND(sz))
345 wrterror(d, "munmap round", NULL); 334 wrterror(d, "munmap round");
346 335
347 if (psz > mopts.malloc_cache) { 336 if (psz > mopts.malloc_cache) {
348 i = munmap(p, sz); 337 i = munmap(p, sz);
349 if (i) 338 if (i)
350 wrterror(d, "munmap", p); 339 wrterror(d, "munmap %p", p);
351 STATS_SUB(d->malloc_used, sz); 340 STATS_SUB(d->malloc_used, sz);
352 return; 341 return;
353 } 342 }
@@ -361,7 +350,7 @@ unmap(struct dir_info *d, void *p, size_t sz)
361 if (r->p != NULL) { 350 if (r->p != NULL) {
362 rsz = r->size << MALLOC_PAGESHIFT; 351 rsz = r->size << MALLOC_PAGESHIFT;
363 if (munmap(r->p, rsz)) 352 if (munmap(r->p, rsz))
364 wrterror(d, "munmap", r->p); 353 wrterror(d, "munmap %p", r->p);
365 r->p = NULL; 354 r->p = NULL;
366 if (tounmap > r->size) 355 if (tounmap > r->size)
367 tounmap -= r->size; 356 tounmap -= r->size;
@@ -373,7 +362,7 @@ unmap(struct dir_info *d, void *p, size_t sz)
373 } 362 }
374 } 363 }
375 if (tounmap > 0) 364 if (tounmap > 0)
376 wrterror(d, "malloc cache underflow", NULL); 365 wrterror(d, "malloc cache underflow");
377 for (i = 0; i < mopts.malloc_cache; i++) { 366 for (i = 0; i < mopts.malloc_cache; i++) {
378 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 367 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
379 if (r->p == NULL) { 368 if (r->p == NULL) {
@@ -393,9 +382,9 @@ unmap(struct dir_info *d, void *p, size_t sz)
393 } 382 }
394 } 383 }
395 if (i == mopts.malloc_cache) 384 if (i == mopts.malloc_cache)
396 wrterror(d, "malloc free slot lost", NULL); 385 wrterror(d, "malloc free slot lost");
397 if (d->free_regions_size > mopts.malloc_cache) 386 if (d->free_regions_size > mopts.malloc_cache)
398 wrterror(d, "malloc cache overflow", NULL); 387 wrterror(d, "malloc cache overflow");
399} 388}
400 389
401static void 390static void
@@ -410,7 +399,7 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
410 if (r->p >= p && r->p <= (void *)((char *)p + len)) { 399 if (r->p >= p && r->p <= (void *)((char *)p + len)) {
411 rsz = r->size << MALLOC_PAGESHIFT; 400 rsz = r->size << MALLOC_PAGESHIFT;
412 if (munmap(r->p, rsz)) 401 if (munmap(r->p, rsz))
413 wrterror(d, "munmap", r->p); 402 wrterror(d, "munmap %p", r->p);
414 r->p = NULL; 403 r->p = NULL;
415 d->free_regions_size -= r->size; 404 d->free_regions_size -= r->size;
416 r->size = 0; 405 r->size = 0;
@@ -429,9 +418,9 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
429 418
430 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 419 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
431 d->canary1 != ~d->canary2) 420 d->canary1 != ~d->canary2)
432 wrterror(d, "internal struct corrupt", NULL); 421 wrterror(d, "internal struct corrupt");
433 if (sz != PAGEROUND(sz)) 422 if (sz != PAGEROUND(sz))
434 wrterror(d, "map round", NULL); 423 wrterror(d, "map round");
435 424
436 if (!hint && psz > d->free_regions_size) { 425 if (!hint && psz > d->free_regions_size) {
437 _MALLOC_LEAVE(d); 426 _MALLOC_LEAVE(d);
@@ -486,7 +475,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
486 if (hint) 475 if (hint)
487 return MAP_FAILED; 476 return MAP_FAILED;
488 if (d->free_regions_size > mopts.malloc_cache) 477 if (d->free_regions_size > mopts.malloc_cache)
489 wrterror(d, "malloc cache", NULL); 478 wrterror(d, "malloc cache");
490 _MALLOC_LEAVE(d); 479 _MALLOC_LEAVE(d);
491 p = MMAP(sz); 480 p = MMAP(sz);
492 _MALLOC_ENTER(d); 481 _MALLOC_ENTER(d);
@@ -673,7 +662,7 @@ omalloc_poolinit(struct dir_info **dp)
673 * lies (subject to alignment by 1 << MALLOC_MINSHIFT) 662 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
674 */ 663 */
675 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED) 664 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
676 wrterror(NULL, "malloc init mmap failed", NULL); 665 wrterror(NULL, "malloc init mmap failed");
677 mprotect(p, MALLOC_PAGESIZE, PROT_NONE); 666 mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
678 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ, 667 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
679 MALLOC_PAGESIZE, PROT_NONE); 668 MALLOC_PAGESIZE, PROT_NONE);
@@ -687,7 +676,7 @@ omalloc_poolinit(struct dir_info **dp)
687 d->r = MMAP(regioninfo_size); 676 d->r = MMAP(regioninfo_size);
688 if (d->r == MAP_FAILED) { 677 if (d->r == MAP_FAILED) {
689 d->regions_total = 0; 678 d->regions_total = 0;
690 wrterror(NULL, "malloc init mmap failed", NULL); 679 wrterror(NULL, "malloc init mmap failed");
691 } 680 }
692 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 681 for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
693 LIST_INIT(&d->chunk_info_list[i]); 682 LIST_INIT(&d->chunk_info_list[i]);
@@ -738,7 +727,7 @@ omalloc_grow(struct dir_info *d)
738 } 727 }
739 /* avoid pages containing meta info to end up in cache */ 728 /* avoid pages containing meta info to end up in cache */
740 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 729 if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
741 wrterror(d, "munmap", d->r); 730 wrterror(d, "munmap %p", d->r);
742 else 731 else
743 STATS_SUB(d->malloc_used, 732 STATS_SUB(d->malloc_used,
744 d->regions_total * sizeof(struct region_info)); 733 d->regions_total * sizeof(struct region_info));
@@ -828,7 +817,7 @@ find(struct dir_info *d, void *p)
828 817
829 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 818 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
830 d->canary1 != ~d->canary2) 819 d->canary1 != ~d->canary2)
831 wrterror(d, "internal struct corrupt", NULL); 820 wrterror(d, "internal struct corrupt");
832 p = MASK_POINTER(p); 821 p = MASK_POINTER(p);
833 index = hash(p) & mask; 822 index = hash(p) & mask;
834 r = d->r[index].p; 823 r = d->r[index].p;
@@ -851,7 +840,7 @@ delete(struct dir_info *d, struct region_info *ri)
851 size_t i, j, r; 840 size_t i, j, r;
852 841
853 if (d->regions_total & (d->regions_total - 1)) 842 if (d->regions_total & (d->regions_total - 1))
854 wrterror(d, "regions_total not 2^x", NULL); 843 wrterror(d, "regions_total not 2^x");
855 d->regions_free++; 844 d->regions_free++;
856 STATS_INC(d->deletes); 845 STATS_INC(d->deletes);
857 846
@@ -936,7 +925,7 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
936 925
937 bits++; 926 bits++;
938 if ((uintptr_t)pp & bits) 927 if ((uintptr_t)pp & bits)
939 wrterror(d, "pp & bits", pp); 928 wrterror(d, "pp & bits %p", pp);
940 929
941 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL); 930 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
942 return bp; 931 return bp;
@@ -956,7 +945,7 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
956 945
957 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 946 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
958 d->canary1 != ~d->canary2) 947 d->canary1 != ~d->canary2)
959 wrterror(d, "internal struct corrupt", NULL); 948 wrterror(d, "internal struct corrupt");
960 949
961 size = argsize; 950 size = argsize;
962 951
@@ -984,7 +973,7 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
984 } 973 }
985 974
986 if (bp->canary != d->canary1) 975 if (bp->canary != d->canary1)
987 wrterror(d, "chunk info corrupted", NULL); 976 wrterror(d, "chunk info corrupted");
988 977
989 i = d->chunk_start; 978 i = d->chunk_start;
990 if (bp->free > 1) 979 if (bp->free > 1)
@@ -1045,6 +1034,25 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
1045 return ((char *)bp->page + k); 1034 return ((char *)bp->page + k);
1046} 1035}
1047 1036
1037static void
1038validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1039{
1040 size_t check_sz = allocated - sz;
1041 u_char *p, *q;
1042
1043 if (check_sz > CHUNK_CHECK_LENGTH)
1044 check_sz = CHUNK_CHECK_LENGTH;
1045 p = (u_char *)ptr + sz;
1046 q = p + check_sz;
1047
1048 while (p < q) {
1049 if (*p++ != SOME_JUNK) {
1050 wrterror(d, "chunk canary corrupted %p %#tx@%#zx",
1051 ptr, p - ptr - 1, sz);
1052 }
1053 }
1054}
1055
1048static uint32_t 1056static uint32_t
1049find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check) 1057find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
1050{ 1058{
@@ -1053,32 +1061,20 @@ find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
1053 1061
1054 info = (struct chunk_info *)r->size; 1062 info = (struct chunk_info *)r->size;
1055 if (info->canary != d->canary1) 1063 if (info->canary != d->canary1)
1056 wrterror(d, "chunk info corrupted", NULL); 1064 wrterror(d, "chunk info corrupted");
1057 1065
1058 /* Find the chunk number on the page */ 1066 /* Find the chunk number on the page */
1059 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; 1067 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1060 if (check && mopts.chunk_canaries && info->size > 0) { 1068 if (check && mopts.chunk_canaries && info->size > 0) {
1061 size_t sz = info->bits[info->offset + chunknum]; 1069 validate_canary(d, ptr, info->bits[info->offset + chunknum],
1062 size_t check_sz = info->size - sz; 1070 info->size);
1063 u_char *p, *q;
1064
1065 if (check_sz > CHUNK_CHECK_LENGTH)
1066 check_sz = CHUNK_CHECK_LENGTH;
1067 p = (u_char *)ptr + sz;
1068 q = p + check_sz;
1069
1070 while (p < q)
1071 if (*p++ != SOME_JUNK) {
1072 q = (void *)(sz << 16 | p - (u_char *)ptr - 1);
1073 wrterror(d, "chunk canary corrupted: ", q);
1074 }
1075 } 1071 }
1076 1072
1077 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) 1073 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1078 wrterror(d, "modified chunk-pointer", ptr); 1074 wrterror(d, "modified chunk-pointer %p", ptr);
1079 if (info->bits[chunknum / MALLOC_BITS] & 1075 if (info->bits[chunknum / MALLOC_BITS] &
1080 (1U << (chunknum % MALLOC_BITS))) 1076 (1U << (chunknum % MALLOC_BITS)))
1081 wrterror(d, "chunk is already free", ptr); 1077 wrterror(d, "chunk is already free %p", ptr);
1082 return chunknum; 1078 return chunknum;
1083} 1079}
1084 1080
@@ -1156,7 +1152,7 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1156 if (mopts.malloc_guard) { 1152 if (mopts.malloc_guard) {
1157 if (mprotect((char *)p + psz - mopts.malloc_guard, 1153 if (mprotect((char *)p + psz - mopts.malloc_guard,
1158 mopts.malloc_guard, PROT_NONE)) 1154 mopts.malloc_guard, PROT_NONE))
1159 wrterror(pool, "mprotect", NULL); 1155 wrterror(pool, "mprotect");
1160 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1156 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1161 } 1157 }
1162 1158
@@ -1180,6 +1176,12 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1180 else 1176 else
1181 memset(p, SOME_JUNK, 1177 memset(p, SOME_JUNK,
1182 psz - mopts.malloc_guard); 1178 psz - mopts.malloc_guard);
1179 } else if (mopts.chunk_canaries) {
1180 size_t csz = psz - mopts.malloc_guard - sz;
1181
1182 if (csz > CHUNK_CHECK_LENGTH)
1183 csz = CHUNK_CHECK_LENGTH;
1184 memset(p + sz, SOME_JUNK, csz);
1183 } 1185 }
1184 } 1186 }
1185 1187
@@ -1205,7 +1207,7 @@ malloc_recurse(struct dir_info *d)
1205 1207
1206 if (noprint == 0) { 1208 if (noprint == 0) {
1207 noprint = 1; 1209 noprint = 1;
1208 wrterror(d, "recursive call", NULL); 1210 wrterror(d, "recursive call");
1209 } 1211 }
1210 d->active--; 1212 d->active--;
1211 _MALLOC_UNLOCK(d->mutex); 1213 _MALLOC_UNLOCK(d->mutex);
@@ -1264,7 +1266,7 @@ malloc(size_t size)
1264 d = getpool(); 1266 d = getpool();
1265 } 1267 }
1266 _MALLOC_LOCK(d->mutex); 1268 _MALLOC_LOCK(d->mutex);
1267 d->func = "malloc():"; 1269 d->func = "malloc";
1268 1270
1269 if (d->active++) { 1271 if (d->active++) {
1270 malloc_recurse(d); 1272 malloc_recurse(d);
@@ -1274,7 +1276,7 @@ malloc(size_t size)
1274 d->active--; 1276 d->active--;
1275 _MALLOC_UNLOCK(d->mutex); 1277 _MALLOC_UNLOCK(d->mutex);
1276 if (r == NULL && mopts.malloc_xmalloc) 1278 if (r == NULL && mopts.malloc_xmalloc)
1277 wrterror(d, "out of memory", NULL); 1279 wrterror(d, "out of memory");
1278 if (r != NULL) 1280 if (r != NULL)
1279 errno = saved_errno; 1281 errno = saved_errno;
1280 return r; 1282 return r;
@@ -1291,13 +1293,13 @@ validate_junk(struct dir_info *pool, void *p)
1291 return; 1293 return;
1292 r = find(pool, p); 1294 r = find(pool, p);
1293 if (r == NULL) 1295 if (r == NULL)
1294 wrterror(pool, "bogus pointer in validate_junk", p); 1296 wrterror(pool, "bogus pointer in validate_junk %p", p);
1295 REALSIZE(sz, r); 1297 REALSIZE(sz, r);
1296 if (sz > CHUNK_CHECK_LENGTH) 1298 if (sz > CHUNK_CHECK_LENGTH)
1297 sz = CHUNK_CHECK_LENGTH; 1299 sz = CHUNK_CHECK_LENGTH;
1298 for (byte = 0; byte < sz; byte++) { 1300 for (byte = 0; byte < sz; byte++) {
1299 if (((unsigned char *)p)[byte] != SOME_FREEJUNK) 1301 if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1300 wrterror(pool, "use after free", p); 1302 wrterror(pool, "use after free %p", p);
1301 } 1303 }
1302} 1304}
1303 1305
@@ -1327,7 +1329,7 @@ ofree(struct dir_info *argpool, void *p)
1327 } 1329 }
1328 } 1330 }
1329 if (r == NULL) 1331 if (r == NULL)
1330 wrterror(pool, "bogus pointer (double free?)", p); 1332 wrterror(pool, "bogus pointer (double free?) %p", p);
1331 } 1333 }
1332 1334
1333 REALSIZE(sz, r); 1335 REALSIZE(sz, r);
@@ -1335,7 +1337,11 @@ ofree(struct dir_info *argpool, void *p)
1335 if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE - 1337 if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1336 MALLOC_LEEWAY) { 1338 MALLOC_LEEWAY) {
1337 if (r->p != p) 1339 if (r->p != p)
1338 wrterror(pool, "bogus pointer", p); 1340 wrterror(pool, "bogus pointer %p", p);
1341 if (mopts.chunk_canaries)
1342 validate_canary(pool, p,
1343 sz - mopts.malloc_guard,
1344 PAGEROUND(sz - mopts.malloc_guard));
1339 } else { 1345 } else {
1340#if notyetbecause_of_realloc 1346#if notyetbecause_of_realloc
1341 /* shifted towards the end */ 1347 /* shifted towards the end */
@@ -1348,12 +1354,12 @@ ofree(struct dir_info *argpool, void *p)
1348 } 1354 }
1349 if (mopts.malloc_guard) { 1355 if (mopts.malloc_guard) {
1350 if (sz < mopts.malloc_guard) 1356 if (sz < mopts.malloc_guard)
1351 wrterror(pool, "guard size", NULL); 1357 wrterror(pool, "guard size");
1352 if (!mopts.malloc_freeunmap) { 1358 if (!mopts.malloc_freeunmap) {
1353 if (mprotect((char *)p + PAGEROUND(sz) - 1359 if (mprotect((char *)p + PAGEROUND(sz) -
1354 mopts.malloc_guard, mopts.malloc_guard, 1360 mopts.malloc_guard, mopts.malloc_guard,
1355 PROT_READ | PROT_WRITE)) 1361 PROT_READ | PROT_WRITE))
1356 wrterror(pool, "mprotect", NULL); 1362 wrterror(pool, "mprotect");
1357 } 1363 }
1358 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard); 1364 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1359 } 1365 }
@@ -1371,7 +1377,7 @@ ofree(struct dir_info *argpool, void *p)
1371 tmp = p; 1377 tmp = p;
1372 p = pool->delayed_chunks[i]; 1378 p = pool->delayed_chunks[i];
1373 if (tmp == p) 1379 if (tmp == p)
1374 wrterror(pool, "double free", p); 1380 wrterror(pool, "double free %p", tmp);
1375 if (mopts.malloc_junk) 1381 if (mopts.malloc_junk)
1376 validate_junk(pool, p); 1382 validate_junk(pool, p);
1377 pool->delayed_chunks[i] = tmp; 1383 pool->delayed_chunks[i] = tmp;
@@ -1382,11 +1388,12 @@ ofree(struct dir_info *argpool, void *p)
1382 if (p != NULL) { 1388 if (p != NULL) {
1383 r = find(pool, p); 1389 r = find(pool, p);
1384 if (r == NULL) 1390 if (r == NULL)
1385 wrterror(pool, "bogus pointer (double free?)", p); 1391 wrterror(pool,
1392 "bogus pointer (double free?) %p", p);
1386 free_bytes(pool, r, p); 1393 free_bytes(pool, r, p);
1387 } 1394 }
1388 } 1395 }
1389done: 1396
1390 if (argpool != pool) { 1397 if (argpool != pool) {
1391 pool->active--; 1398 pool->active--;
1392 _MALLOC_UNLOCK(pool->mutex); 1399 _MALLOC_UNLOCK(pool->mutex);
@@ -1407,9 +1414,9 @@ free(void *ptr)
1407 1414
1408 d = getpool(); 1415 d = getpool();
1409 if (d == NULL) 1416 if (d == NULL)
1410 wrterror(d, "free() called before allocation", NULL); 1417 wrterror(d, "free() called before allocation");
1411 _MALLOC_LOCK(d->mutex); 1418 _MALLOC_LOCK(d->mutex);
1412 d->func = "free():"; 1419 d->func = "free";
1413 if (d->active++) { 1420 if (d->active++) {
1414 malloc_recurse(d); 1421 malloc_recurse(d);
1415 return; 1422 return;
@@ -1453,7 +1460,7 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1453 } 1460 }
1454 } 1461 }
1455 if (r == NULL) 1462 if (r == NULL)
1456 wrterror(pool, "bogus pointer (double free?)", p); 1463 wrterror(pool, "bogus pointer (double free?) %p", p);
1457 } 1464 }
1458 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1465 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1459 errno = ENOMEM; 1466 errno = ENOMEM;
@@ -1465,7 +1472,7 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1465 goldsz = oldsz; 1472 goldsz = oldsz;
1466 if (oldsz > MALLOC_MAXCHUNK) { 1473 if (oldsz > MALLOC_MAXCHUNK) {
1467 if (oldsz < mopts.malloc_guard) 1474 if (oldsz < mopts.malloc_guard)
1468 wrterror(pool, "guard size", NULL); 1475 wrterror(pool, "guard size");
1469 oldsz -= mopts.malloc_guard; 1476 oldsz -= mopts.malloc_guard;
1470 } 1477 }
1471 1478
@@ -1474,7 +1481,7 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1474 gnewsz += mopts.malloc_guard; 1481 gnewsz += mopts.malloc_guard;
1475 1482
1476 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p && 1483 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1477 !mopts.malloc_realloc) { 1484 !mopts.chunk_canaries && !mopts.malloc_realloc) {
1478 size_t roldsz = PAGEROUND(goldsz); 1485 size_t roldsz = PAGEROUND(goldsz);
1479 size_t rnewsz = PAGEROUND(gnewsz); 1486 size_t rnewsz = PAGEROUND(gnewsz);
1480 1487
@@ -1505,7 +1512,7 @@ gotit:
1505 goto done; 1512 goto done;
1506 } else if (q != MAP_FAILED) { 1513 } else if (q != MAP_FAILED) {
1507 if (munmap(q, needed)) 1514 if (munmap(q, needed))
1508 wrterror(pool, "munmap", q); 1515 wrterror(pool, "munmap %p", q);
1509 } 1516 }
1510 } 1517 }
1511 } else if (rnewsz < roldsz) { 1518 } else if (rnewsz < roldsz) {
@@ -1513,11 +1520,11 @@ gotit:
1513 if (mprotect((char *)p + roldsz - 1520 if (mprotect((char *)p + roldsz -
1514 mopts.malloc_guard, mopts.malloc_guard, 1521 mopts.malloc_guard, mopts.malloc_guard,
1515 PROT_READ | PROT_WRITE)) 1522 PROT_READ | PROT_WRITE))
1516 wrterror(pool, "mprotect", NULL); 1523 wrterror(pool, "mprotect");
1517 if (mprotect((char *)p + rnewsz - 1524 if (mprotect((char *)p + rnewsz -
1518 mopts.malloc_guard, mopts.malloc_guard, 1525 mopts.malloc_guard, mopts.malloc_guard,
1519 PROT_NONE)) 1526 PROT_NONE))
1520 wrterror(pool, "mprotect", NULL); 1527 wrterror(pool, "mprotect");
1521 } 1528 }
1522 unmap(pool, (char *)p + rnewsz, roldsz - rnewsz); 1529 unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
1523 r->size = gnewsz; 1530 r->size = gnewsz;
@@ -1578,7 +1585,7 @@ realloc(void *ptr, size_t size)
1578 d = getpool(); 1585 d = getpool();
1579 } 1586 }
1580 _MALLOC_LOCK(d->mutex); 1587 _MALLOC_LOCK(d->mutex);
1581 d->func = "realloc():"; 1588 d->func = "realloc";
1582 if (d->active++) { 1589 if (d->active++) {
1583 malloc_recurse(d); 1590 malloc_recurse(d);
1584 return NULL; 1591 return NULL;
@@ -1588,7 +1595,7 @@ realloc(void *ptr, size_t size)
1588 d->active--; 1595 d->active--;
1589 _MALLOC_UNLOCK(d->mutex); 1596 _MALLOC_UNLOCK(d->mutex);
1590 if (r == NULL && mopts.malloc_xmalloc) 1597 if (r == NULL && mopts.malloc_xmalloc)
1591 wrterror(d, "out of memory", NULL); 1598 wrterror(d, "out of memory");
1592 if (r != NULL) 1599 if (r != NULL)
1593 errno = saved_errno; 1600 errno = saved_errno;
1594 return r; 1601 return r;
@@ -1615,12 +1622,12 @@ calloc(size_t nmemb, size_t size)
1615 d = getpool(); 1622 d = getpool();
1616 } 1623 }
1617 _MALLOC_LOCK(d->mutex); 1624 _MALLOC_LOCK(d->mutex);
1618 d->func = "calloc():"; 1625 d->func = "calloc";
1619 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1626 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1620 nmemb > 0 && SIZE_MAX / nmemb < size) { 1627 nmemb > 0 && SIZE_MAX / nmemb < size) {
1621 _MALLOC_UNLOCK(d->mutex); 1628 _MALLOC_UNLOCK(d->mutex);
1622 if (mopts.malloc_xmalloc) 1629 if (mopts.malloc_xmalloc)
1623 wrterror(d, "out of memory", NULL); 1630 wrterror(d, "out of memory");
1624 errno = ENOMEM; 1631 errno = ENOMEM;
1625 return NULL; 1632 return NULL;
1626 } 1633 }
@@ -1636,7 +1643,7 @@ calloc(size_t nmemb, size_t size)
1636 d->active--; 1643 d->active--;
1637 _MALLOC_UNLOCK(d->mutex); 1644 _MALLOC_UNLOCK(d->mutex);
1638 if (r == NULL && mopts.malloc_xmalloc) 1645 if (r == NULL && mopts.malloc_xmalloc)
1639 wrterror(d, "out of memory", NULL); 1646 wrterror(d, "out of memory");
1640 if (r != NULL) 1647 if (r != NULL)
1641 errno = saved_errno; 1648 errno = saved_errno;
1642 return r; 1649 return r;
@@ -1649,9 +1656,9 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1649 char *p, *q; 1656 char *p, *q;
1650 1657
1651 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) 1658 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1652 wrterror(d, "mapalign bad alignment", NULL); 1659 wrterror(d, "mapalign bad alignment");
1653 if (sz != PAGEROUND(sz)) 1660 if (sz != PAGEROUND(sz))
1654 wrterror(d, "mapalign round", NULL); 1661 wrterror(d, "mapalign round");
1655 1662
1656 /* Allocate sz + alignment bytes of memory, which must include a 1663 /* Allocate sz + alignment bytes of memory, which must include a
1657 * subrange of size bytes that is properly aligned. Unmap the 1664 * subrange of size bytes that is properly aligned. Unmap the
@@ -1668,10 +1675,10 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1668 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1675 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1669 if (q != p) { 1676 if (q != p) {
1670 if (munmap(p, q - p)) 1677 if (munmap(p, q - p))
1671 wrterror(d, "munmap", p); 1678 wrterror(d, "munmap %p", p);
1672 } 1679 }
1673 if (munmap(q + sz, alignment - (q - p))) 1680 if (munmap(q + sz, alignment - (q - p)))
1674 wrterror(d, "munmap", q + sz); 1681 wrterror(d, "munmap %p", q + sz);
1675 STATS_SUB(d->malloc_used, alignment); 1682 STATS_SUB(d->malloc_used, alignment);
1676 1683
1677 return q; 1684 return q;
@@ -1716,7 +1723,7 @@ omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, voi
1716 if (mopts.malloc_guard) { 1723 if (mopts.malloc_guard) {
1717 if (mprotect((char *)p + psz - mopts.malloc_guard, 1724 if (mprotect((char *)p + psz - mopts.malloc_guard,
1718 mopts.malloc_guard, PROT_NONE)) 1725 mopts.malloc_guard, PROT_NONE))
1719 wrterror(pool, "mprotect", NULL); 1726 wrterror(pool, "mprotect");
1720 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1727 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1721 } 1728 }
1722 1729
@@ -1748,7 +1755,7 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
1748 d = getpool(); 1755 d = getpool();
1749 } 1756 }
1750 _MALLOC_LOCK(d->mutex); 1757 _MALLOC_LOCK(d->mutex);
1751 d->func = "posix_memalign():"; 1758 d->func = "posix_memalign";
1752 if (d->active++) { 1759 if (d->active++) {
1753 malloc_recurse(d); 1760 malloc_recurse(d);
1754 goto err; 1761 goto err;
@@ -1758,7 +1765,7 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
1758 _MALLOC_UNLOCK(d->mutex); 1765 _MALLOC_UNLOCK(d->mutex);
1759 if (r == NULL) { 1766 if (r == NULL) {
1760 if (mopts.malloc_xmalloc) 1767 if (mopts.malloc_xmalloc)
1761 wrterror(d, "out of memory", NULL); 1768 wrterror(d, "out of memory");
1762 goto err; 1769 goto err;
1763 } 1770 }
1764 errno = saved_errno; 1771 errno = saved_errno;
@@ -2007,7 +2014,7 @@ malloc_dump(int fd, struct dir_info *pool)
2007 continue; 2014 continue;
2008 r = find(pool, p); 2015 r = find(pool, p);
2009 if (r == NULL) 2016 if (r == NULL)
2010 wrterror(pool, "bogus pointer in malloc_dump", p); 2017 wrterror(pool, "bogus pointer in malloc_dump %p", p);
2011 free_bytes(pool, r, p); 2018 free_bytes(pool, r, p);
2012 pool->delayed_chunks[i] = NULL; 2019 pool->delayed_chunks[i] = NULL;
2013 } 2020 }