author		otto <>	2017-01-21 07:47:42 +0000
committer	otto <>	2017-01-21 07:47:42 +0000
commit		2fab07685aec49694184f0859caa564a3f0db47b (patch)
tree		227b544524386f440e7e8e9a75ce954ea08bdfa4 /src
parent		85977a5fc077a3570226e3ec00bf221c6c1951cd (diff)
download	openbsd-2fab07685aec49694184f0859caa564a3f0db47b.tar.gz
		openbsd-2fab07685aec49694184f0859caa564a3f0db47b.tar.bz2
		openbsd-2fab07685aec49694184f0859caa564a3f0db47b.zip
1. When shrinking a chunk allocation, compare the size of the current
   allocation to the size of the new allocation (instead of the requested
   size).
2. Previously, realloc took the easy way and always reallocated if the "C"
   (chunk canaries) option was active. This commit fixes that by carefully
   updating the recorded requested size in all cases and writing the canary
   bytes in the proper location after reallocating; a standalone sketch of
   the canary scheme follows this list.
3. Introduce defines to test whether MALLOC_MOVE should be done and to
   compute the new value.
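
For readers unfamiliar with the canary scheme these changes build on: when
the "C" option is active, the bytes between the requested size and the end
of the chunk are filled with junk on allocation and checked on free. Below
is a minimal standalone sketch; fill_canary() matches the function added in
the diff, while the demo harness around it is an illustrative stand-in, not
the real allocator.

#include <stdio.h>
#include <string.h>

#define SOME_JUNK		0xdb	/* junk fill byte, as in malloc.c */
#define CHUNK_CHECK_LENGTH	32	/* canary length, as in malloc.c */

/* same body as the fill_canary() added in the diff below */
static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, SOME_JUNK, check_sz);
}

int
main(void)
{
	char chunk[64];		/* stand-in for a 64-byte chunk */
	size_t req = 17;	/* caller requested 17 bytes */

	fill_canary(chunk, req, sizeof(chunk));
	/* bytes 17..48 now hold SOME_JUNK; a later validate_canary()
	   pass can detect writes past the requested size */
	printf("first canary byte: 0x%02x\n", (unsigned char)chunk[req]);
	return 0;
}

The cap at CHUNK_CHECK_LENGTH bounds the cost of writing and checking the
canary for chunks much larger than the request.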
Diffstat (limited to 'src')
-rw-r--r--	src/lib/libc/stdlib/malloc.c	| 133
1 file changed, 87 insertions(+), 46 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index a353c933ef..30cfd48986 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.211 2016/11/04 09:11:20 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.212 2017/01/21 07:47:42 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -73,6 +73,12 @@
  * Set to zero to be the most strict.
  */
 #define MALLOC_LEEWAY	0
+#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
+				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
+#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
+				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
+				    ((sz) - mopts.malloc_guard)) &	\
+				    ~(MALLOC_MINSIZE - 1)))
 
 #define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
 
@@ -199,6 +205,7 @@ char *malloc_options; /* compile-time options */
 static u_char getrbyte(struct dir_info *d);
 static __dead void wrterror(struct dir_info *d, char *msg, ...)
     __attribute__((__format__ (printf, 2, 3)));
+static void fill_canary(char *ptr, size_t sz, size_t allocated);
 
 #ifdef MALLOC_STATS
 void malloc_dump(int, int, struct dir_info *);
@@ -209,8 +216,8 @@ static void malloc_exit(void);
 #define CALLER	NULL
 #endif
 
-/* low bits of r->p determine size: 0 means >= page size and p->size holding
- * real size, otherwise r->size is a shift count, or 1 for malloc(0)
+/* low bits of r->p determine size: 0 means >= page size and r->size holding
+ * real size, otherwise low bits are a shift count, or 1 for malloc(0)
  */
 #define REALSIZE(sz, r)						\
 	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
@@ -905,23 +912,10 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
 	return bp;
 }
 
-
-/*
- * Allocate a chunk
- */
-static void *
-malloc_bytes(struct dir_info *d, size_t argsize, void *f)
+static int
+find_chunksize(size_t size)
 {
-	int i, j, listnum;
-	size_t k, size;
-	u_short u, *lp;
-	struct chunk_info *bp;
-
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
-	    d->canary1 != ~d->canary2)
-		wrterror(d, "internal struct corrupt");
-
-	size = argsize;
+	int i, j;
 
 	/* Don't bother with anything less than this */
 	/* unless we have a malloc(0) requests */
@@ -937,6 +931,25 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
 		while (i >>= 1)
 			j++;
 	}
+	return j;
+}
+
+/*
+ * Allocate a chunk
+ */
+static void *
+malloc_bytes(struct dir_info *d, size_t size, void *f)
+{
+	int i, j, listnum;
+	size_t k;
+	u_short u, *lp;
+	struct chunk_info *bp;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror(d, "internal struct corrupt");
+
+	j = find_chunksize(size);
 
 	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
 	/* If it's empty, make a page more of that size chunks */
@@ -990,25 +1003,30 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
 	k += (lp - bp->bits) * MALLOC_BITS;
 
 	if (mopts.chunk_canaries)
-		bp->bits[bp->offset + k] = argsize;
+		bp->bits[bp->offset + k] = size;
 
 	k <<= bp->shift;
 
 	if (bp->size > 0) {
 		if (mopts.malloc_junk == 2)
 			memset((char *)bp->page + k, SOME_JUNK, bp->size);
-		else if (mopts.chunk_canaries) {
-			size_t sz = bp->size - argsize;
-
-			if (sz > CHUNK_CHECK_LENGTH)
-				sz = CHUNK_CHECK_LENGTH;
-			memset((char *)bp->page + k + argsize, SOME_JUNK, sz);
-		}
+		else if (mopts.chunk_canaries)
+			fill_canary((char *)bp->page + k, size, bp->size);
 	}
 	return ((char *)bp->page + k);
 }
 
 static void
+fill_canary(char *ptr, size_t sz, size_t allocated)
+{
+	size_t check_sz = allocated - sz;
+
+	if (check_sz > CHUNK_CHECK_LENGTH)
+		check_sz = CHUNK_CHECK_LENGTH;
+	memset(ptr + sz, SOME_JUNK, check_sz);
+}
+
+static void
 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
 {
 	size_t check_sz = allocated - sz;
@@ -1130,13 +1148,12 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
 		}
 
-		if (sz - mopts.malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
+		if (MALLOC_MOVE_COND(sz)) {
 			/* fill whole allocation */
 			if (mopts.malloc_junk == 2)
 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
 			/* shift towards the end */
-			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
-			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
+			p = MALLOC_MOVE(p, sz);
 			/* fill zeros if needed and overwritten above */
 			if (zero_fill && mopts.malloc_junk == 2)
 				memset(p, 0, sz - mopts.malloc_guard);
@@ -1149,14 +1166,9 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 				memset(p, SOME_JUNK,
 				    psz - mopts.malloc_guard);
 			}
-			else if (mopts.chunk_canaries) {
-				size_t csz = psz - sz;
-
-				if (csz > CHUNK_CHECK_LENGTH)
-					csz = CHUNK_CHECK_LENGTH;
-				memset((char *)p + sz - mopts.malloc_guard,
-				    SOME_JUNK, csz);
-			}
+			else if (mopts.chunk_canaries)
+				fill_canary(p, sz - mopts.malloc_guard,
+				    psz - mopts.malloc_guard);
 		}
 
 	} else {
@@ -1308,8 +1320,7 @@ ofree(struct dir_info *argpool, void *p)
 
 	REALSIZE(sz, r);
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
-		    MALLOC_LEEWAY) {
+		if (!MALLOC_MOVE_COND(sz)) {
 			if (r->p != p)
 				wrterror(pool, "bogus pointer %p", p);
 			if (mopts.chunk_canaries)
@@ -1410,9 +1421,11 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 {
 	struct dir_info *pool;
 	struct region_info *r;
+	struct chunk_info *info;
 	size_t oldsz, goldsz, gnewsz;
 	void *q, *ret;
 	int i;
+	uint32_t chunknum;
 
 	pool = argpool;
 
@@ -1445,6 +1458,11 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 	}
 
 	REALSIZE(oldsz, r);
+	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
+		chunknum = find_chunknum(pool, r, p, 0);
+		info = (struct chunk_info *)r->size;
+	}
+
 	goldsz = oldsz;
 	if (oldsz > MALLOC_MAXCHUNK) {
 		if (oldsz < mopts.malloc_guard)
@@ -1457,11 +1475,14 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 		gnewsz += mopts.malloc_guard;
 
 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
-	    !mopts.chunk_canaries && !mopts.malloc_realloc) {
+	    !mopts.malloc_realloc) {
+		/* First case: from n pages sized allocation to m pages sized
+		   allocation, no malloc_move in effect */
 		size_t roldsz = PAGEROUND(goldsz);
 		size_t rnewsz = PAGEROUND(gnewsz);
 
 		if (rnewsz > roldsz) {
+			/* try to extend existing region */
 			if (!mopts.malloc_guard) {
 				void *hint = (char *)p + roldsz;
 				size_t needed = rnewsz - roldsz;
@@ -1482,6 +1503,8 @@ gotit:
 				if (mopts.malloc_junk == 2)
 					memset(q, SOME_JUNK, needed);
 				r->size = newsz;
+				if (mopts.chunk_canaries)
+					fill_canary(p, newsz, PAGEROUND(newsz));
 				STATS_SETF(r, f);
 				STATS_INC(pool->cheap_reallocs);
 				ret = p;
@@ -1492,6 +1515,7 @@ gotit:
 				}
 			}
 		} else if (rnewsz < roldsz) {
+			/* shrink number of pages */
 			if (mopts.malloc_guard) {
 				if (mprotect((char *)p + roldsz -
 				    mopts.malloc_guard, mopts.malloc_guard,
@@ -1504,27 +1528,38 @@ gotit:
 			}
 			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
+			if (mopts.chunk_canaries)
+				fill_canary(p, newsz, PAGEROUND(newsz));
 			STATS_SETF(r, f);
 			ret = p;
 			goto done;
 		} else {
+			/* number of pages remains the same */
 			if (newsz > oldsz && mopts.malloc_junk == 2)
 				memset((char *)p + newsz, SOME_JUNK,
 				    rnewsz - mopts.malloc_guard - newsz);
 			r->size = gnewsz;
+			if (mopts.chunk_canaries)
+				fill_canary(p, newsz, PAGEROUND(newsz));
 			STATS_SETF(r, f);
 			ret = p;
 			goto done;
 		}
 	}
-	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.chunk_canaries &&
-	    !mopts.malloc_realloc) {
-		if (mopts.malloc_junk == 2 && newsz > 0)
+	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
+	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
+	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
+		/* do not reallocate if new size fits good in existing chunk */
+		if (mopts.malloc_junk == 2)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+		if (mopts.chunk_canaries) {
+			info->bits[info->offset + chunknum] = newsz;
+			fill_canary(p, newsz, info->size);
+		}
 		STATS_SETF(r, f);
 		ret = p;
-	} else if (newsz != oldsz || mopts.chunk_canaries ||
-	    mopts.malloc_realloc) {
+	} else if (newsz != oldsz || mopts.malloc_realloc) {
+		/* create new allocation */
 		q = omalloc(pool, newsz, 0, f);
 		if (q == NULL) {
 			ret = NULL;
@@ -1535,6 +1570,12 @@ gotit:
 		ofree(pool, p);
 		ret = q;
 	} else {
+		/* > page size allocation didnt change */
+		if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
+			info->bits[info->offset + chunknum] = newsz;
+			if (info->size > 0)
+				fill_canary(p, newsz, info->size);
+		}
 		STATS_SETF(r, f);
 		ret = p;
 	}
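
To make the new MALLOC_MOVE arithmetic concrete, here is a hedged,
standalone harness that evaluates the move offset outside the allocator.
The macro bodies match the diff above; the constants (4096-byte page,
16-byte MALLOC_MINSIZE, zero MALLOC_LEEWAY, no guard pages) and the mopts
stub are assumptions standing in for the real definitions in malloc.c.

#include <stdio.h>

#define MALLOC_PAGESIZE	((size_t)4096)	/* stand-in */
#define MALLOC_MINSIZE	((size_t)16)	/* stand-in */
#define MALLOC_LEEWAY	0

static struct { size_t malloc_guard; } mopts;	/* stub, guard disabled */

/* macro bodies as added by this commit */
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
				    ((sz) - mopts.malloc_guard)) &	\
				    ~(MALLOC_MINSIZE - 1)))

int
main(void)
{
	static char page[4096];	/* pretend this is the mmapped page */
	size_t sz = 3000;

	if (MALLOC_MOVE_COND(sz)) {
		size_t off = (size_t)(MALLOC_MOVE(page, sz) - page);

		/* 4096 - 3000 = 1096, rounded down to a multiple of 16 */
		printf("offset %zu, slack %zu\n",
		    off, MALLOC_PAGESIZE - off - sz);
	}
	return 0;
}

With sz = 3000 the allocation is moved to offset 1088 and ends 8 bytes
short of the page end: rounding the shift down to a MALLOC_MINSIZE boundary
keeps the pointer aligned while placing the object as close to the
(possibly guarded) end of the page as alignment allows.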