author	otto <>	2016-10-07 05:54:35 +0000
committer	otto <>	2016-10-07 05:54:35 +0000
commit	70ebdfcf371f9185bca702c2fd1564fe5cf5ab54 (patch)
tree	0afbe1812d042d5d560f4006bb03ba845f6ab1fa
parent	79c84c9b60422a1cb8f419883524d0b8dc7c9bb8 (diff)
Better implementation of chunk canaries: store size in chunk metadata
instead of the chunk itself; does not change actual allocated size; ok tedu@
-rw-r--r--	src/lib/libc/stdlib/malloc.c	124
1 file changed, 63 insertions, 61 deletions
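
The idea behind this change, in brief: the caller's requested size is recorded in the chunk's metadata, the slack between that size and the chunk slot is filled with SOME_JUNK, and up to CHUNK_CHECK_LENGTH of those bytes are verified when the chunk is freed. The following is a minimal, self-contained sketch of that scheme, not the actual malloc.c code; struct chunk_meta, canary_fill() and canary_check() are illustrative names.

/*
 * Sketch of the commit's approach (illustrative names): record the
 * requested size in per-chunk metadata, junk the slack at the end of
 * the chunk, and verify up to CHUNK_CHECK_LENGTH of those bytes on free.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SOME_JUNK		0xdb	/* junk fill byte, as in malloc.c */
#define CHUNK_CHECK_LENGTH	32	/* bytes of slack actually verified */

struct chunk_meta {			/* hypothetical stand-in for chunk_info */
	size_t	chunk_size;		/* size of the chunk slot */
	size_t	req_size;		/* size the caller asked for */
};

static void
canary_fill(struct chunk_meta *m, void *chunk, size_t req_size)
{
	size_t sz = m->chunk_size - req_size;

	m->req_size = req_size;		/* "store size in chunk metadata" */
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	memset((char *)chunk + req_size, SOME_JUNK, sz);
}

static void
canary_check(const struct chunk_meta *m, const void *chunk)
{
	const unsigned char *p = (const unsigned char *)chunk + m->req_size;
	size_t sz = m->chunk_size - m->req_size;
	size_t i;

	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (i = 0; i < sz; i++)
		if (p[i] != SOME_JUNK) {
			fprintf(stderr, "chunk canary corrupted at byte %zu\n",
			    m->req_size + i);
			abort();
		}
}

int
main(void)
{
	struct chunk_meta m = { .chunk_size = 64 };
	char chunk[64];

	canary_fill(&m, chunk, 23);	/* caller asked for 23 of 64 bytes */
	chunk[23] = 'X';		/* one-byte overrun past the request */
	canary_check(&m, chunk);	/* detects the overrun and aborts */
	return 0;
}

Because the recorded size lives in metadata rather than at the end of the chunk, the usable allocation no longer has to be shrunk by the canary width, which is why the size += mopts.malloc_canaries adjustments disappear from malloc(), calloc(), realloc() and posix_memalign() in the diff below.
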
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 20807a25b4..c5940ffad9 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.197 2016/09/21 04:38:56 guenther Exp $ */
+/* $OpenBSD: malloc.c,v 1.198 2016/10/07 05:54:35 otto Exp $ */
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -64,6 +64,7 @@
 #define MALLOC_INITIAL_REGIONS	512
 #define MALLOC_DEFAULT_CACHE	64
 #define MALLOC_CHUNK_LISTS	4
+#define CHUNK_CHECK_LENGTH	32
 
 /*
  * When the P option is active, we move allocations between half a page
@@ -178,14 +179,13 @@ struct malloc_readonly {
 	int	malloc_move;		/* move allocations to end of page? */
 	int	malloc_realloc;		/* always realloc? */
 	int	malloc_xmalloc;		/* xmalloc behaviour? */
-	size_t	malloc_canaries;	/* use canaries after chunks? */
+	int	chunk_canaries;		/* use canaries after chunks? */
 	size_t	malloc_guard;		/* use guard pages after allocations? */
 	u_int	malloc_cache;		/* free pages we cache */
 #ifdef MALLOC_STATS
 	int	malloc_stats;		/* dump statistics at end */
 #endif
 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
-	uintptr_t malloc_chunk_canary;
 };
 
 /* This object is mapped PROT_READ after initialisation to prevent tampering */
@@ -288,7 +288,7 @@ wrterror(struct dir_info *d, char *msg, void *p)
 	if (p == NULL)
 		iov[5].iov_len = 0;
 	else {
-		snprintf(buf, sizeof(buf), " %p", p);
+		snprintf(buf, sizeof(buf), " %010p", p);
 		iov[5].iov_len = strlen(buf);
 	}
 	iov[6].iov_base = "\n";
@@ -512,10 +512,10 @@ omalloc_parseopt(char opt)
 		/* ignored */
 		break;
 	case 'c':
-		mopts.malloc_canaries = 0;
+		mopts.chunk_canaries = 0;
 		break;
 	case 'C':
-		mopts.malloc_canaries = sizeof(void *);
+		mopts.chunk_canaries = 1;
 		break;
 #ifdef MALLOC_STATS
 	case 'd':
@@ -653,9 +653,6 @@ omalloc_init(void)
 
 	while ((mopts.malloc_canary = arc4random()) == 0)
 		;
-
-	arc4random_buf(&mopts.malloc_chunk_canary,
-	    sizeof(mopts.malloc_chunk_canary));
 }
 
 /*
@@ -763,6 +760,8 @@ alloc_chunk_info(struct dir_info *d, int bits)
 
 	size = howmany(count, MALLOC_BITS);
 	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
+	if (mopts.chunk_canaries)
+		size += count * sizeof(u_short);
 	size = ALIGN(size);
 
 	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
@@ -946,16 +945,19 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
  * Allocate a chunk
  */
 static void *
-malloc_bytes(struct dir_info *d, size_t size, void *f)
+malloc_bytes(struct dir_info *d, size_t argsize, void *f)
 {
 	int i, j, listnum;
-	size_t k;
+	size_t k, size;
 	u_short u, *lp;
 	struct chunk_info *bp;
 
 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
 	    d->canary1 != ~d->canary2)
 		wrterror(d, "internal struct corrupt", NULL);
+
+	size = argsize;
+
 	/* Don't bother with anything less than this */
 	/* unless we have a malloc(0) requests */
 	if (size != 0 && size < MALLOC_MINSIZE)
@@ -1021,22 +1023,28 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
 
 	/* Adjust to the real offset of that chunk */
 	k += (lp - bp->bits) * MALLOC_BITS;
+
+	if (mopts.chunk_canaries)
+		bp->bits[howmany(bp->total, MALLOC_BITS) + k] = argsize;
+
 	k <<= bp->shift;
 
-	if (mopts.malloc_canaries && bp->size > 0) {
-		char *end = (char *)bp->page + k + bp->size;
-		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
-		*canary = mopts.malloc_chunk_canary ^ hash(canary);
-	}
-
-	if (mopts.malloc_junk == 2 && bp->size > 0)
-		memset((char *)bp->page + k, SOME_JUNK,
-		    bp->size - mopts.malloc_canaries);
+	if (bp->size > 0) {
+		if (mopts.malloc_junk == 2)
+			memset((char *)bp->page + k, SOME_JUNK, bp->size);
+		else if (mopts.chunk_canaries) {
+			size_t sz = bp->size - argsize;
+
+			if (sz > CHUNK_CHECK_LENGTH)
+				sz = CHUNK_CHECK_LENGTH;
+			memset((char *)bp->page + k + argsize, SOME_JUNK, sz);
+		}
+	}
 	return ((char *)bp->page + k);
 }
 
 static uint32_t
-find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
+find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
 {
 	struct chunk_info *info;
 	uint32_t chunknum;
@@ -1045,15 +1053,25 @@ find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
 	if (info->canary != d->canary1)
 		wrterror(d, "chunk info corrupted", NULL);
 
-	if (mopts.malloc_canaries && info->size > 0) {
-		char *end = (char *)ptr + info->size;
-		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
-		if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
-			wrterror(d, "chunk canary corrupted", ptr);
-	}
-
 	/* Find the chunk number on the page */
 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
+	if (check && mopts.chunk_canaries && info->size > 0) {
+		size_t sz = info->bits[howmany(info->total, MALLOC_BITS) +
+		    chunknum];
+		size_t check_sz = info->size - sz;
+		u_char *p, *q;
+
+		if (check_sz > CHUNK_CHECK_LENGTH)
+			check_sz = CHUNK_CHECK_LENGTH;
+		p = (u_char *)ptr + sz;
+		q = p + check_sz;
+
+		while (p < q)
+			if (*p++ != SOME_JUNK) {
+				q = (void *)(sz << 16 | p - (u_char *)ptr - 1);
+				wrterror(d, "chunk canary corrupted: ", q);
+			}
+	}
 
 	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
 		wrterror(d, "modified chunk-pointer", ptr);
@@ -1075,8 +1093,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 	int listnum;
 
 	info = (struct chunk_info *)r->size;
-	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
-		return;
+	chunknum = find_chunknum(d, r, ptr, 0);
 
 	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
 	info->free++;
@@ -1169,7 +1186,7 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 		/* takes care of SOME_JUNK */
 		p = malloc_bytes(pool, sz, f);
 		if (zero_fill && p != NULL && sz > 0)
-			memset(p, 0, sz - mopts.malloc_canaries);
+			memset(p, 0, sz);
 	}
 
 	return p;
@@ -1251,8 +1268,6 @@ malloc(size_t size)
 		malloc_recurse(d);
 		return NULL;
 	}
-	if (size > 0 && size <= MALLOC_MAXCHUNK)
-		size += mopts.malloc_canaries;
 	r = omalloc(d, size, 0, CALLER);
 	d->active--;
 	_MALLOC_UNLOCK(d->mutex);
@@ -1275,10 +1290,8 @@ validate_junk(struct dir_info *pool, void *p) {
 	if (r == NULL)
 		wrterror(pool, "bogus pointer in validate_junk", p);
 	REALSIZE(sz, r);
-	if (sz > 0 && sz <= MALLOC_MAXCHUNK)
-		sz -= mopts.malloc_canaries;
-	if (sz > 32)
-		sz = 32;
+	if (sz > CHUNK_CHECK_LENGTH)
+		sz = CHUNK_CHECK_LENGTH;
 	for (byte = 0; byte < sz; byte++) {
 		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
 			wrterror(pool, "use after free", p);
@@ -1347,11 +1360,10 @@ ofree(struct dir_info *argpool, void *p)
 		void *tmp;
 		int i;
 
-		if (mopts.malloc_junk && sz > 0)
-			memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
 		if (!mopts.malloc_freenow) {
-			if (find_chunknum(pool, r, p) == -1)
-				goto done;
+			find_chunknum(pool, r, p, 1);
+			if (mopts.malloc_junk && sz > 0)
+				memset(p, SOME_FREEJUNK, sz);
 			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
 			tmp = p;
 			p = pool->delayed_chunks[i];
@@ -1360,6 +1372,9 @@ ofree(struct dir_info *argpool, void *p)
 			if (mopts.malloc_junk)
 				validate_junk(pool, p);
 			pool->delayed_chunks[i] = tmp;
+		} else {
+			if (mopts.malloc_junk && sz > 0)
+				memset(p, SOME_FREEJUNK, sz);
 		}
 		if (p != NULL) {
 			r = find(pool, p);
@@ -1516,28 +1531,21 @@ gotit:
 			goto done;
 		}
 	}
-	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
-		if (mopts.malloc_junk == 2 && newsz > 0) {
-			size_t usable_oldsz = oldsz;
-			if (oldsz <= MALLOC_MAXCHUNK)
-				usable_oldsz -= mopts.malloc_canaries;
-			if (newsz < usable_oldsz)
-				memset((char *)p + newsz, SOME_JUNK, usable_oldsz - newsz);
-		}
+	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.chunk_canaries &&
+	    !mopts.malloc_realloc) {
+		if (mopts.malloc_junk == 2 && newsz > 0)
+			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
 		STATS_SETF(r, f);
 		ret = p;
-	} else if (newsz != oldsz || mopts.malloc_realloc) {
+	} else if (newsz != oldsz || mopts.chunk_canaries ||
+	    mopts.malloc_realloc) {
 		q = omalloc(pool, newsz, 0, f);
 		if (q == NULL) {
 			ret = NULL;
 			goto done;
 		}
-		if (newsz != 0 && oldsz != 0) {
-			size_t copysz = oldsz < newsz ? oldsz : newsz;
-			if (copysz <= MALLOC_MAXCHUNK)
-				copysz -= mopts.malloc_canaries;
-			memcpy(q, p, copysz);
-		}
+		if (newsz != 0 && oldsz != 0)
+			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
 		ofree(pool, p);
 		ret = q;
 	} else {
@@ -1572,8 +1580,6 @@ realloc(void *ptr, size_t size)
 		malloc_recurse(d);
 		return NULL;
 	}
-	if (size > 0 && size <= MALLOC_MAXCHUNK)
-		size += mopts.malloc_canaries;
 	r = orealloc(d, ptr, size, CALLER);
 
 	d->active--;
@@ -1622,8 +1628,6 @@ calloc(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
-	if (size > 0 && size <= MALLOC_MAXCHUNK)
-		size += mopts.malloc_canaries;
 	r = omalloc(d, size, 1, CALLER);
 
 	d->active--;
@@ -1746,8 +1750,6 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 		malloc_recurse(d);
 		goto err;
 	}
-	if (size > 0 && size <= MALLOC_MAXCHUNK)
-		size += mopts.malloc_canaries;
 	r = omemalign(d, alignment, size, 0, CALLER);
 	d->active--;
 	_MALLOC_UNLOCK(d->mutex);