author:    tedu <>  2015-12-09 02:45:23 +0000
committer: tedu <>  2015-12-09 02:45:23 +0000
commit:    52aece27155bb299c87b2af8d286f2cef62fb70a
tree:      4f931f5fb5fe2e7c1f9ded575084b26eb2f3bf07
parent:    83d9cc3cde2fc32d582b308940d1ea7f4814503b
Integrate two patches originally from Daniel Micay.
1. Optionally add random "canaries" to the end of an allocation. This
requires increasing the internal size of the allocation slightly, which
probably results in a large effective increase with the current
power-of-two sizing. Therefore, this option is only enabled via 'C'.

2. When writing junk (0xdf) to freed chunks (the current default
behavior), check that the junk is still intact when finally freeing the
delayed chunk, to catch some potential use-after-free. This should be
pretty cheap, so there is no separate option to control it.

ok deraadt tb
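To make point 1 concrete, here is a minimal standalone sketch of the keyed-canary idea; it is not the malloc.c implementation. A per-process random secret is XORed with a hash of the canary's own address, so a canary value leaked from one chunk cannot be replayed at another location. The names (canary_secret, ptr_hash, canary_alloc, canary_free) and the integer mixer are illustrative assumptions; the real patch stores the canary in the chunk's rounding slack and keys it with mopts.malloc_chunk_canary and malloc's internal hash(). The sketch passes the usable size around explicitly because, unlike the allocator, it has no metadata to recover it from.

    /*
     * Standalone illustration only -- not the malloc.c code.
     * ptr_hash() is an assumed stand-in for malloc's internal hash().
     */
    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static uintptr_t canary_secret;     /* per-process random key */

    /* Assumed pointer mixer; any decent integer hash would do. */
    static uintptr_t
    ptr_hash(const void *p)
    {
        uintptr_t x = (uintptr_t)p;

        x ^= x >> 16;
        x *= (uintptr_t)0x45d9f3b;
        x ^= x >> 16;
        return x;
    }

    /* Reserve sizeof(uintptr_t) extra bytes; seal the end of the chunk. */
    static void *
    canary_alloc(size_t usable)
    {
        char *p = malloc(usable + sizeof(uintptr_t));
        uintptr_t v;

        if (p == NULL)
            return NULL;
        v = canary_secret ^ ptr_hash(p + usable);
        memcpy(p + usable, &v, sizeof(v));  /* tail may be unaligned */
        return p;
    }

    /* Recompute the expected value; any write past `usable` trips it. */
    static void
    canary_free(void *p, size_t usable)
    {
        uintptr_t v;

        if (p == NULL)
            return;
        memcpy(&v, (char *)p + usable, sizeof(v));
        if (v != (canary_secret ^ ptr_hash((char *)p + usable))) {
            fprintf(stderr, "chunk canary corrupted %p\n", p);
            abort();
        }
        free(p);
    }

    int
    main(void)
    {
        char *s;

        arc4random_buf(&canary_secret, sizeof(canary_secret));

        if ((s = canary_alloc(8)) == NULL)
            err(1, NULL);
        memcpy(s, "01234567", 8);   /* stays in bounds: passes */
        canary_free(s, 8);

        if ((s = canary_alloc(8)) == NULL)
            err(1, NULL);
        s[8] = 'X';                 /* one-byte heap overflow */
        canary_free(s, 8);          /* detected: aborts */
        return 0;
    }

This compiles as-is on OpenBSD; on other systems arc4random_buf() may need libbsd or a substitute CSPRNG.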
-rw-r--r--    src/lib/libc/stdlib/malloc.c    84
1 file changed, 76 insertions(+), 8 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index dd4c487241..36d2d717db 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.176 2015/09/13 20:29:23 guenther Exp $ */
+/* $OpenBSD: malloc.c,v 1.177 2015/12/09 02:45:23 tedu Exp $ */
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -185,12 +185,14 @@ struct malloc_readonly {
     int    malloc_move;        /* move allocations to end of page? */
     int    malloc_realloc;     /* always realloc? */
     int    malloc_xmalloc;     /* xmalloc behaviour? */
+    size_t malloc_canaries;    /* use canaries after chunks? */
     size_t malloc_guard;       /* use guard pages after allocations? */
     u_int  malloc_cache;       /* free pages we cache */
 #ifdef MALLOC_STATS
     int    malloc_stats;       /* dump statistics at end */
 #endif
     u_int32_t malloc_canary;   /* Matched against ones in malloc_pool */
+    uintptr_t malloc_chunk_canary;
 };
 
 /* This object is mapped PROT_READ after initialisation to prevent tampering */
@@ -526,6 +528,12 @@ omalloc_init(struct dir_info **dp)
         case 'A':
             mopts.malloc_abort = 1;
             break;
+        case 'c':
+            mopts.malloc_canaries = 0;
+            break;
+        case 'C':
+            mopts.malloc_canaries = sizeof(void *);
+            break;
 #ifdef MALLOC_STATS
         case 'd':
             mopts.malloc_stats = 0;
@@ -619,6 +627,9 @@ omalloc_init(struct dir_info **dp)
     while ((mopts.malloc_canary = arc4random()) == 0)
         ;
 
+    arc4random_buf(&mopts.malloc_chunk_canary,
+        sizeof(mopts.malloc_chunk_canary));
+
     /*
      * Allocate dir_info with a guard page on either side. Also
      * randomise offset inside the page at which the dir_info
@@ -984,8 +995,15 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
     k += (lp - bp->bits) * MALLOC_BITS;
     k <<= bp->shift;
 
+    if (mopts.malloc_canaries && bp->size > 0) {
+        char *end = (char *)bp->page + k + bp->size;
+        uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
+        *canary = mopts.malloc_chunk_canary ^ hash(canary);
+    }
+
     if (mopts.malloc_junk == 2 && bp->size > 0)
-        memset((char *)bp->page + k, SOME_JUNK, bp->size);
+        memset((char *)bp->page + k, SOME_JUNK,
+            bp->size - mopts.malloc_canaries);
     return ((char *)bp->page + k);
 }
 
@@ -999,6 +1017,13 @@ find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
     if (info->canary != d->canary1)
         wrterror("chunk info corrupted", NULL);
 
+    if (mopts.malloc_canaries && info->size > 0) {
+        char *end = (char *)ptr + info->size;
+        uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
+        if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
+            wrterror("chunk canary corrupted", ptr);
+    }
+
     /* Find the chunk number on the page */
     chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
 
@@ -1121,7 +1146,7 @@ omalloc(size_t sz, int zero_fill, void *f)
         /* takes care of SOME_JUNK */
         p = malloc_bytes(pool, sz, f);
         if (zero_fill && p != NULL && sz > 0)
-            memset(p, 0, sz);
+            memset(p, 0, sz - mopts.malloc_canaries);
     }
 
     return p;
@@ -1176,6 +1201,8 @@ malloc(size_t size)
         malloc_recurse();
         return NULL;
     }
+    if (size > 0 && size <= MALLOC_MAXCHUNK)
+        size += mopts.malloc_canaries;
     r = omalloc(size, 0, CALLER);
     malloc_active--;
     _MALLOC_UNLOCK();
@@ -1190,6 +1217,30 @@ malloc(size_t size)
 /*DEF_STRONG(malloc);*/
 
 static void
+validate_junk(void *p) {
+    struct region_info *r;
+    struct dir_info *pool = getpool();
+    size_t byte, sz;
+
+    if (p == NULL)
+        return;
+    r = find(pool, p);
+    if (r == NULL)
+        wrterror("bogus pointer in validate_junk", p);
+    REALSIZE(sz, r);
+    if (sz > 0 && sz <= MALLOC_MAXCHUNK)
+        sz -= mopts.malloc_canaries;
+    if (sz > 32)
+        sz = 32;
+    for (byte = 0; byte < sz; byte++) {
+        if (((unsigned char *)p)[byte] != SOME_FREEJUNK) {
+            wrterror("use after free", p);
+            return;
+        }
+    }
+}
+
+static void
 ofree(void *p)
 {
     struct dir_info *pool = getpool();
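A note on cost, then a toy model. validate_junk() above inspects at most the first 32 bytes of a delayed chunk, so the check is constant-time per free no matter how large the chunk is; that cap is what lets the commit message call it "pretty cheap" and skip a separate knob for it. The sketch below models only the delayed-free ring: DELAY_SLOTS, ring, and delayed_free are assumed toy names standing in for the real pool->delayed_chunks[] machinery, and the ring is shrunk to one slot so the demo is deterministic.

    #include <err.h>
    #include <stdlib.h>
    #include <string.h>

    #define SOME_FREEJUNK 0xdf  /* same fill byte as malloc.c */
    #define DELAY_SLOTS   1     /* 1 slot for a deterministic demo;
                                   the real ring holds 16 chunks */

    static struct { unsigned char *p; size_t sz; } ring[DELAY_SLOTS];

    /* Mirror of the patch's check: first 32 bytes must still be junk. */
    static void
    validate_junk(unsigned char *p, size_t sz)
    {
        size_t i, n = sz > 32 ? 32 : sz;

        for (i = 0; i < n; i++)
            if (p[i] != SOME_FREEJUNK)
                errx(1, "use after free %p", (void *)p);
    }

    /*
     * Junk the chunk and park it in a random slot; whatever chunk the
     * slot previously held has aged long enough to be checked and
     * actually released.
     */
    static void
    delayed_free(void *p, size_t sz)
    {
        unsigned int slot = arc4random_uniform(DELAY_SLOTS);
        unsigned char *old = ring[slot].p;
        size_t oldsz = ring[slot].sz;

        memset(p, SOME_FREEJUNK, sz);
        ring[slot].p = p;
        ring[slot].sz = sz;
        if (old != NULL) {
            validate_junk(old, oldsz);
            free(old);
        }
    }

    int
    main(void)
    {
        char *a = malloc(24);
        char *b = malloc(24);

        if (a == NULL || b == NULL)
            return 1;
        delayed_free(a, 24);    /* parks a, filled with 0xdf */
        a[0] = 'X';             /* dangling write into junked chunk */
        delayed_free(b, 24);    /* evicts a: errx reports the UAF */
        return 0;
    }

Any write through a dangling pointer between the memset() and the chunk's eventual departure from the ring flips a junk byte and is reported.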
@@ -1242,7 +1293,7 @@ ofree(void *p)
         int i;
 
         if (mopts.malloc_junk && sz > 0)
-            memset(p, SOME_FREEJUNK, sz);
+            memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
         if (!mopts.malloc_freenow) {
             if (find_chunknum(pool, r, p) == -1)
                 return;
@@ -1253,6 +1304,8 @@ ofree(void *p)
                 wrterror("double free", p);
                 return;
             }
+            if (mopts.malloc_junk)
+                validate_junk(p);
             pool->delayed_chunks[i] = tmp;
         }
         if (p != NULL) {
@@ -1386,16 +1439,25 @@ gotit:
         }
     }
     if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
-        if (mopts.malloc_junk == 2 && newsz > 0)
-            memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+        if (mopts.malloc_junk == 2 && newsz > 0) {
+            size_t usable_oldsz = oldsz;
+            if (oldsz <= MALLOC_MAXCHUNK)
+                usable_oldsz -= mopts.malloc_canaries;
+            if (newsz < usable_oldsz)
+                memset((char *)p + newsz, SOME_JUNK, usable_oldsz - newsz);
+        }
         STATS_SETF(r, f);
         return p;
     } else if (newsz != oldsz || mopts.malloc_realloc) {
         q = omalloc(newsz, 0, f);
         if (q == NULL)
             return NULL;
-        if (newsz != 0 && oldsz != 0)
-            memcpy(q, p, oldsz < newsz ? oldsz : newsz);
+        if (newsz != 0 && oldsz != 0) {
+            size_t copysz = oldsz < newsz ? oldsz : newsz;
+            if (copysz <= MALLOC_MAXCHUNK)
+                copysz -= mopts.malloc_canaries;
+            memcpy(q, p, copysz);
+        }
         ofree(p);
         return q;
     } else {
@@ -1420,6 +1482,8 @@ realloc(void *ptr, size_t size)
         malloc_recurse();
         return NULL;
     }
+    if (size > 0 && size <= MALLOC_MAXCHUNK)
+        size += mopts.malloc_canaries;
     r = orealloc(ptr, size, CALLER);
 
     malloc_active--;
@@ -1468,6 +1532,8 @@ calloc(size_t nmemb, size_t size)
     }
 
     size *= nmemb;
+    if (size > 0 && size <= MALLOC_MAXCHUNK)
+        size += mopts.malloc_canaries;
     r = omalloc(size, 1, CALLER);
 
     malloc_active--;
@@ -1595,6 +1661,8 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
         malloc_recurse();
         goto err;
     }
+    if (size > 0 && size <= MALLOC_MAXCHUNK)
+        size += mopts.malloc_canaries;
     r = omemalign(alignment, size, 0, CALLER);
     malloc_active--;
     _MALLOC_UNLOCK();
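Usage note, stated as background rather than as part of the patch: per the malloc(3) manual of that era, the option letters parsed in omalloc_init() come from the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable, and the malloc_options global, so the canaries can be tried for a single run with something like MALLOC_OPTIONS=C ./prog. The validate_junk() check needs no flag of its own: it rides on the junk-on-free behaviour that is already the default.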