author     otto <>  2011-05-12 09:29:30 +0000
committer  otto <>  2011-05-12 09:29:30 +0000
commit     7cbee63d86258af6a5374f3aeb06adc3f93b8e24 (patch)
tree       6629d608d445ed3a7b896d6f6ba745427b34a304
parent     245579a6674cf45b87ca39127330004e8da22f80 (diff)
Introduce leak detection code for MALLOC_STATS
-rw-r--r--  src/lib/libc/stdlib/malloc.c  190
1 file changed, 165 insertions, 25 deletions
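
The commit threads a "caller" pointer (captured with __builtin_return_address(0) through the CALLER macro) from malloc(), calloc() and realloc() down into each region_info entry, then aggregates the still-outstanding allocations per call site in an RB tree (putleakinfo) and prints a per-site summary (dump_leaks) as part of malloc_dump(). The program below is a deliberately simplified, standalone sketch of that bookkeeping idea only; names such as struct site, record() and leaky_malloc() are invented for the example, a flat table stands in for the <sys/tree.h> RB tree used by the diff, and a GCC-compatible compiler is assumed for __builtin_return_address().

#include <stdio.h>
#include <stdlib.h>

/* One aggregation bucket per allocation site, like struct malloc_leak. */
struct site {
	void	*f;		/* call site (return address) */
	size_t	 total_size;	/* bytes attributed to this site */
	int	 count;		/* allocations attributed to this site */
};

static struct site sites[64];

/* Aggregate sz * cnt bytes under call site f, like putleakinfo(). */
static void
record(void *f, size_t sz, int cnt)
{
	int i;

	for (i = 0; i < 64; i++) {
		if (sites[i].f == f || sites[i].f == NULL) {
			sites[i].f = f;
			sites[i].total_size += sz * cnt;
			sites[i].count += cnt;
			return;
		}
	}
}

/* A malloc wrapper that notes where it was called from, like CALLER. */
static void *
leaky_malloc(size_t sz)
{
	void *p = malloc(sz);

	if (p != NULL)
		record(__builtin_return_address(0), sz, 1);
	return p;
}

int
main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		leaky_malloc(100);	/* never freed */

	/* Same column layout as dump_leaks(): f, sum, count, average. */
	for (i = 0; i < 64 && sites[i].f != NULL; i++)
		printf("%12p %7zu %6d %6zu\n", sites[i].f,
		    sites[i].total_size, sites[i].count,
		    sites[i].total_size / sites[i].count);
	return 0;
}
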
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 025508a335..d82c326914 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.131 2011/05/08 07:08:13 otto Exp $ */
+/* $OpenBSD: malloc.c,v 1.132 2011/05/12 09:29:30 otto Exp $ */
 /*
  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
@@ -43,6 +43,7 @@
 #include <unistd.h>
 
 #ifdef MALLOC_STATS
+#include <sys/tree.h>
 #include <fcntl.h>
 #endif
 
@@ -94,6 +95,9 @@
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
+#ifdef MALLOC_STATS
+	void *f;		/* where allocated from */
+#endif
 };
 
 LIST_HEAD(chunk_head, chunk_info);
@@ -125,9 +129,11 @@ struct dir_info {
 	size_t cheap_reallocs;
 #define STATS_INC(x) ((x)++)
 #define STATS_ZERO(x) ((x) = 0)
+#define STATS_SETF(x,y) ((x)->f = (y))
 #else
 #define STATS_INC(x) /* nothing */
 #define STATS_ZERO(x) /* nothing */
+#define STATS_SETF(x,y) /* nothing */
 #endif /* MALLOC_STATS */
 	u_int32_t canary2;
 };
@@ -195,6 +201,9 @@ extern char *__progname;
 #ifdef MALLOC_STATS
 void malloc_dump(int);
 static void malloc_exit(void);
+#define CALLER __builtin_return_address(0)
+#else
+#define CALLER NULL
 #endif
 
 /* low bits of r->p determine size: 0 means >= page size and p->size holding
@@ -696,7 +705,7 @@ alloc_chunk_info(struct dir_info *d)
 }
 
 static int
-insert(struct dir_info *d, void *p, size_t sz)
+insert(struct dir_info *d, void *p, size_t sz, void *f)
 {
 	size_t index;
 	size_t mask;
@@ -717,6 +726,9 @@ insert(struct dir_info *d, void *p, size_t sz)
 	}
 	d->r[index].p = p;
 	d->r[index].size = sz;
+#ifdef MALLOC_STATS
+	d->r[index].f = f;
+#endif
 	d->regions_free--;
 	return 0;
 }
@@ -839,7 +851,7 @@ omalloc_make_chunks(struct dir_info *d, int bits)
 	if ((uintptr_t)pp & bits)
 		wrterror("pp & bits", pp);
 
-	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
+	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
 	return bp;
 }
 
@@ -848,7 +860,7 @@ omalloc_make_chunks(struct dir_info *d, int bits)
  * Allocate a chunk
  */
 static void *
-malloc_bytes(struct dir_info *d, size_t size)
+malloc_bytes(struct dir_info *d, size_t size, void *f)
 {
 	int i, j;
 	size_t k;
@@ -908,6 +920,12 @@ malloc_bytes(struct dir_info *d, size_t size)
 		i = 0;
 	}
 	d->chunk_start += i + 1;
+#ifdef MALLOC_STATS
+	if (i == 0) {
+		struct region_info *r = find(d, bp->page);
+		r->f = f;
+	}
+#endif
 
 	*lp ^= u;
 
@@ -980,7 +998,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 
 
 static void *
-omalloc(size_t sz, int zero_fill)
+omalloc(size_t sz, int zero_fill, void *f)
 {
 	void *p;
 	size_t psz;
@@ -997,7 +1015,7 @@ omalloc(size_t sz, int zero_fill)
 		errno = ENOMEM;
 		return NULL;
 	}
-	if (insert(g_pool, p, sz)) {
+	if (insert(g_pool, p, sz, f)) {
 		unmap(g_pool, p, psz);
 		errno = ENOMEM;
 		return NULL;
@@ -1034,7 +1052,7 @@ omalloc(size_t sz, int zero_fill)
 
 	} else {
 		/* takes care of SOME_JUNK */
-		p = malloc_bytes(g_pool, sz);
+		p = malloc_bytes(g_pool, sz, f);
 		if (zero_fill && p != NULL && sz > 0)
 			memset(p, 0, sz);
 	}
@@ -1090,7 +1108,7 @@ malloc(size_t size)
 		malloc_recurse();
 		return NULL;
 	}
-	r = omalloc(size, mopts.malloc_zero);
+	r = omalloc(size, mopts.malloc_zero, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();
 	if (r == NULL && mopts.malloc_xmalloc) {
@@ -1198,14 +1216,14 @@ free(void *ptr)
 
 
 static void *
-orealloc(void *p, size_t newsz)
+orealloc(void *p, size_t newsz, void *f)
 {
 	struct region_info *r;
 	size_t oldsz, goldsz, gnewsz;
 	void *q;
 
 	if (p == NULL)
-		return omalloc(newsz, 0);
+		return omalloc(newsz, 0, f);
 
 	r = find(g_pool, p);
 	if (r == NULL) {
@@ -1245,6 +1263,7 @@ orealloc(void *p, size_t newsz)
 						memset(q, SOME_JUNK,
 						    rnewsz - roldsz);
 					r->size = newsz;
+					STATS_SETF(r, f);
 					STATS_INC(g_pool->cheap_reallocs);
 					return p;
 				} else if (q != MAP_FAILED)
@@ -1263,29 +1282,34 @@ orealloc(void *p, size_t newsz)
 			}
 			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
+			STATS_SETF(r, f);
 			return p;
 		} else {
 			if (newsz > oldsz && mopts.malloc_junk)
 				memset((char *)p + newsz, SOME_JUNK,
 				    rnewsz - mopts.malloc_guard - newsz);
 			r->size = gnewsz;
+			STATS_SETF(r, f);
 			return p;
 		}
 	}
 	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
 		if (mopts.malloc_junk && newsz > 0)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+		STATS_SETF(r, f);
 		return p;
 	} else if (newsz != oldsz || mopts.malloc_realloc) {
-		q = omalloc(newsz, 0);
+		q = omalloc(newsz, 0, f);
 		if (q == NULL)
 			return NULL;
 		if (newsz != 0 && oldsz != 0)
 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
 		ofree(p);
 		return q;
-	} else
+	} else {
+		STATS_SETF(r, f);
 		return p;
+	}
 }
 
 void *
@@ -1304,7 +1328,7 @@ realloc(void *ptr, size_t size)
 		malloc_recurse();
 		return NULL;
 	}
-	r = orealloc(ptr, size);
+	r = orealloc(ptr, size, CALLER);
 
 	malloc_active--;
 	_MALLOC_UNLOCK();
@@ -1347,7 +1371,7 @@ calloc(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
-	r = omalloc(size, 1);
+	r = omalloc(size, 1, CALLER);
 
 	malloc_active--;
 	_MALLOC_UNLOCK();
@@ -1386,17 +1410,110 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 }
 
 #ifdef MALLOC_STATS
+
+struct malloc_leak {
+	void (*f)();
+	size_t total_size;
+	int count;
+};
+
+struct leaknode {
+	RB_ENTRY(leaknode) entry;
+	struct malloc_leak d;
+};
+
+static int
+leakcmp(struct leaknode *e1, struct leaknode *e2)
+{
+	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
+}
+
+static RB_HEAD(leaktree, leaknode) leakhead;
+RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)
+
+static void
+putleakinfo(void *f, size_t sz, int cnt)
+{
+	struct leaknode key, *p;
+	static struct leaknode *page;
+	static int used;
+
+	if (cnt == 0)
+		return;
+
+	key.d.f = f;
+	p = RB_FIND(leaktree, &leakhead, &key);
+	if (p == NULL) {
+		if (page == NULL ||
+		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
+			page = MMAP(MALLOC_PAGESIZE);
+			if (page == MAP_FAILED)
+				return;
+			used = 0;
+		}
+		p = &page[used++];
+		p->d.f = f;
+		p->d.total_size = sz * cnt;
+		p->d.count = cnt;
+		RB_INSERT(leaktree, &leakhead, p);
+	} else {
+		p->d.total_size += sz * cnt;
+		p->d.count += cnt;
+	}
+}
+
+static struct malloc_leak *malloc_leaks;
+
+static void
+dump_leaks(int fd)
+{
+	struct leaknode *p;
+	char buf[64];
+	int i = 0;
+
+	snprintf(buf, sizeof(buf), "Leak report\n");
+	write(fd, buf, strlen(buf));
+	snprintf(buf, sizeof(buf), " f sum # avg\n");
+	write(fd, buf, strlen(buf));
+	/* XXX only one page of summary */
+	if (malloc_leaks == NULL)
+		malloc_leaks = MMAP(MALLOC_PAGESIZE);
+	if (malloc_leaks != MAP_FAILED)
+		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
+	RB_FOREACH(p, leaktree, &leakhead) {
+		snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f,
+		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
+		write(fd, buf, strlen(buf));
+		if (malloc_leaks == MAP_FAILED ||
+		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
+			continue;
+		malloc_leaks[i].f = p->d.f;
+		malloc_leaks[i].total_size = p->d.total_size;
+		malloc_leaks[i].count = p->d.count;
+		i++;
+	}
+}
+
 static void
-dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
+dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
 {
 	char buf[64];
 
 	while (p != NULL) {
-		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
-		    p->free, p->total, p->page);
+		snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n",
+		    p->page, ((p->bits[0] & 1) ? NULL : f),
+		    p->size, p->free, p->total);
 		write(fd, buf, strlen(buf));
-		if (!fromfreelist)
+		if (!fromfreelist) {
+			if (p->bits[0] & 1)
+				putleakinfo(NULL, p->size, p->total - p->free);
+			else {
+				putleakinfo(f, p->size, 1);
+				putleakinfo(NULL, p->size,
+				    p->total - p->free - 1);
+			}
 			break;
+		}
 		p = LIST_NEXT(p, entries);
 		if (p != NULL) {
			snprintf(buf, sizeof(buf), " ");
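
A note on how the report is attributed: malloc_bytes() records a caller for at most one chunk per page, so dump_chunk() credits a single in-use chunk to that caller and the remaining p->total - p->free - 1 chunks to the anonymous NULL bucket (or everything to NULL when the page's first chunk is free), while page-sized regions are attributed exactly through region_info.f. With the %12p %7zu %6u %6zu format used by dump_leaks(), each line shows the recorded call site, outstanding bytes, outstanding count and average size; the two sample lines below are invented purely to illustrate the layout, where the 0x0 line collects allocations whose caller was never recorded:

         0x0    7424     29    256
 0x1c0022345    4096      4   1024
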
@@ -1418,7 +1535,7 @@ dump_free_chunk_info(int fd, struct dir_info *d)
 		if (p != NULL) {
 			snprintf(buf, sizeof(buf), "%2d) ", i);
 			write(fd, buf, strlen(buf));
-			dump_chunk(fd, p, 1);
+			dump_chunk(fd, p, NULL, 1);
 		}
 	}
 
@@ -1472,35 +1589,58 @@ malloc_dump1(int fd, struct dir_info *d)
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
 	write(fd, buf, strlen(buf));
+	dump_free_chunk_info(fd, d);
+	dump_free_page_info(fd, d);
+	snprintf(buf, sizeof(buf),
+	    "slot) hash d type page f size [free/n]\n");
+	write(fd, buf, strlen(buf));
 	for (i = 0; i < d->regions_total; i++) {
 		if (d->r[i].p != NULL) {
 			size_t h = hash(d->r[i].p) &
 			    (d->regions_total - 1);
-			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
+			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
 			    i, h, h - i);
 			write(fd, buf, strlen(buf));
 			REALSIZE(realsize, &d->r[i]);
 			if (realsize > MALLOC_MAXCHUNK) {
+				putleakinfo(d->r[i].f, realsize, 1);
 				snprintf(buf, sizeof(buf),
-				    "%p: %zu\n", d->r[i].p, realsize);
+				    "pages %12p %12p %zu\n", d->r[i].p,
+				    d->r[i].f, realsize);
 				write(fd, buf, strlen(buf));
 			} else
 				dump_chunk(fd,
-				    (struct chunk_info *)d->r[i].size, 0);
+				    (struct chunk_info *)d->r[i].size,
+				    d->r[i].f, 0);
 		}
 	}
-	dump_free_chunk_info(fd, d);
-	dump_free_page_info(fd, d);
 	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
 	write(fd, buf, strlen(buf));
+	dump_leaks(fd);
+	write(fd, "\n", 1);
 }
 
-
 void
 malloc_dump(int fd)
 {
+	int i;
+	void *p;
+	struct region_info *r;
+
+	for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) {
+		p = g_pool->delayed_chunks[i];
+		if (p == NULL)
+			continue;
+		r = find(g_pool, p);
+		if (r == NULL)
+			wrterror("bogus pointer in malloc_dump", p);
+		free_bytes(g_pool, r, p);
+		g_pool->delayed_chunks[i] = NULL;
+	}
+	/* XXX leak when run multiple times */
+	RB_INIT(&leakhead);
 	malloc_dump1(fd, g_pool);
 }
 
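
For completeness: the leak report only exists in a libc built with MALLOC_STATS defined; such a build provides the malloc_dump(int) entry point seen above (and malloc_exit(), handled elsewhere in this file, can produce a dump at exit). Assuming such a build and assuming the symbol is reachable from the application, a throwaway test program could request the report directly; the prototype is repeated by hand below because no public header declares it.

#include <stdlib.h>
#include <unistd.h>

/* Only present in a MALLOC_STATS build of malloc.c. */
void malloc_dump(int);

int
main(void)
{
	void *p = malloc(123);		/* deliberately leaked */

	(void)p;
	malloc_dump(STDERR_FILENO);	/* leak report lands on stderr */
	return 0;
}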