author      otto <>  2023-04-16 19:46:17 +0000
committer   otto <>  2023-04-16 19:46:17 +0000
commit      11a95a5b8faa561cf3182b895a1cd7fc19d2fa34 (patch)
tree        e11ef197e01c1f2b19b11f987cbdb6e428fc6ad0 /src/lib
parent      32ccf96bd2ca5101f81e5f580e844876e75150d3 (diff)
download    openbsd-11a95a5b8faa561cf3182b895a1cd7fc19d2fa34.tar.gz
            openbsd-11a95a5b8faa561cf3182b895a1cd7fc19d2fa34.tar.bz2
            openbsd-11a95a5b8faa561cf3182b895a1cd7fc19d2fa34.zip
Dump (leak) info using utrace(2) and always compile the code in,
except for bootblocks. This way we have built-in leak detection always (if enabled by malloc flags). See man pages for details.
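
As a quick illustration of the new workflow (the program below is a hypothetical example, not part of this commit), a throwaway leaker can be traced with exactly the commands this change adds to malloc.3:

/*
 * leaker.c -- hypothetical example program, not part of this commit.
 * It allocates a few chunks and exits without freeing them, giving
 * the new D ("Dump") option something to report.
 *
 * Suggested usage, per the malloc.3 hunk below:
 *   $ cc -o leaker leaker.c
 *   $ MALLOC_OPTIONS=D ktrace -tu ./leaker
 *   $ kdump -u malloc
 */
#include <stdio.h>
#include <stdlib.h>

static void *keep[16];	/* keep results live so the calls are not optimized away */

int
main(void)
{
	int i;

	for (i = 0; i < 16; i++)
		keep[i] = malloc(100);	/* intentionally never freed */
	printf("leaked %d allocations of 100 bytes\n", i);
	return 0;
}

With the new V option documented below, MALLOC_OPTIONS=DV additionally records a verbose dump of malloc's internal state.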
Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/libc/stdlib/malloc.3 |  24
-rw-r--r--  src/lib/libc/stdlib/malloc.c | 323
2 files changed, 199 insertions, 148 deletions
diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3
index b35b9220f6..e69e7c065f 100644
--- a/src/lib/libc/stdlib/malloc.3
+++ b/src/lib/libc/stdlib/malloc.3
@@ -30,9 +30,9 @@
30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31.\" SUCH DAMAGE. 31.\" SUCH DAMAGE.
32.\" 32.\"
33.\" $OpenBSD: malloc.3,v 1.130 2023/04/01 18:47:51 otto Exp $ 33.\" $OpenBSD: malloc.3,v 1.131 2023/04/16 19:46:17 otto Exp $
34.\" 34.\"
35.Dd $Mdocdate: April 1 2023 $ 35.Dd $Mdocdate: April 16 2023 $
36.Dt MALLOC 3 36.Dt MALLOC 3
37.Os 37.Os
38.Sh NAME 38.Sh NAME
@@ -284,12 +284,17 @@ If it has been corrupted, the process is aborted.
284.It Cm D 284.It Cm D
285.Dq Dump . 285.Dq Dump .
286.Fn malloc 286.Fn malloc
287will dump statistics to the file 287will dump a leak report using
288.Pa ./malloc.out , 288.Xr utrace 2
289if it already exists,
290at exit. 289at exit.
291This option requires the library to have been compiled with -DMALLOC_STATS in 290To record the dump:
292order to have any effect. 291.Pp
292.Dl $ MALLOC_OPTIONS=D ktrace -tu program ...
293.Pp
294To view the leak report:
295.Pp
296.Dl $ kdump -u malloc ...
297.Pp
293.It Cm F 298.It Cm F
294.Dq Freecheck . 299.Dq Freecheck .
295Enable more extensive double free and use after free detection. 300Enable more extensive double free and use after free detection.
@@ -340,6 +345,11 @@ Enable all options suitable for security auditing.
340Enable use after free protection for larger allocations. 345Enable use after free protection for larger allocations.
341Unused pages on the freelist are read and write protected to 346Unused pages on the freelist are read and write protected to
342cause a segmentation fault upon access. 347cause a segmentation fault upon access.
348.It Cm V
349.Dq Verbose .
350Use with
351.Cm D
352to get a verbose dump of malloc's internal state.
343.It Cm X 353.It Cm X
344.Dq xmalloc . 354.Dq xmalloc .
345Rather than return failure, 355Rather than return failure,
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 0df1fe3e4a..a661b9d252 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,6 +1,6 @@
1/* $OpenBSD: malloc.c,v 1.280 2023/04/05 06:25:38 otto Exp $ */ 1/* $OpenBSD: malloc.c,v 1.281 2023/04/16 19:46:17 otto Exp $ */
2/* 2/*
3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org> 5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org> 6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
@@ -23,7 +23,9 @@
23 * can buy me a beer in return. Poul-Henning Kamp 23 * can buy me a beer in return. Poul-Henning Kamp
24 */ 24 */
25 25
26/* #define MALLOC_STATS */ 26#ifndef MALLOC_SMALL
27#define MALLOC_STATS
28#endif
27 29
28#include <sys/types.h> 30#include <sys/types.h>
29#include <sys/queue.h> 31#include <sys/queue.h>
@@ -39,8 +41,10 @@
39#include <unistd.h> 41#include <unistd.h>
40 42
41#ifdef MALLOC_STATS 43#ifdef MALLOC_STATS
44#include <sys/types.h>
42#include <sys/tree.h> 45#include <sys/tree.h>
43#include <fcntl.h> 46#include <sys/ktrace.h>
47#include <dlfcn.h>
44#endif 48#endif
45 49
46#include "thread_private.h" 50#include "thread_private.h"
@@ -224,11 +228,16 @@ struct malloc_readonly {
224 u_int junk_loc; /* variation in location of junk */ 228 u_int junk_loc; /* variation in location of junk */
225 size_t malloc_guard; /* use guard pages after allocations? */ 229 size_t malloc_guard; /* use guard pages after allocations? */
226#ifdef MALLOC_STATS 230#ifdef MALLOC_STATS
227 int malloc_stats; /* dump statistics at end */ 231 int malloc_stats; /* dump leak report at end */
232 int malloc_verbose; /* dump verbose statistics at end */
233#define DO_STATS mopts.malloc_stats
234#else
235#define DO_STATS 0
228#endif 236#endif
229 u_int32_t malloc_canary; /* Matched against ones in pool */ 237 u_int32_t malloc_canary; /* Matched against ones in pool */
230}; 238};
231 239
240
232/* This object is mapped PROT_READ after initialisation to prevent tampering */ 241/* This object is mapped PROT_READ after initialisation to prevent tampering */
233static union { 242static union {
234 struct malloc_readonly mopts; 243 struct malloc_readonly mopts;
@@ -243,15 +252,11 @@ static __dead void wrterror(struct dir_info *d, char *msg, ...)
243 __attribute__((__format__ (printf, 2, 3))); 252 __attribute__((__format__ (printf, 2, 3)));
244 253
245#ifdef MALLOC_STATS 254#ifdef MALLOC_STATS
246void malloc_dump(int, int, struct dir_info *); 255void malloc_dump(void);
247PROTO_NORMAL(malloc_dump); 256PROTO_NORMAL(malloc_dump);
248void malloc_gdump(int);
249PROTO_NORMAL(malloc_gdump);
250static void malloc_exit(void); 257static void malloc_exit(void);
251#define CALLER __builtin_return_address(0)
252#else
253#define CALLER NULL
254#endif 258#endif
259#define CALLER (DO_STATS ? __builtin_return_address(0) : NULL)
255 260
256/* low bits of r->p determine size: 0 means >= page size and r->size holding 261/* low bits of r->p determine size: 0 means >= page size and r->size holding
257 * real size, otherwise low bits is the bucket + 1 262 * real size, otherwise low bits is the bucket + 1
@@ -318,9 +323,9 @@ wrterror(struct dir_info *d, char *msg, ...)
318 dprintf(STDERR_FILENO, "\n"); 323 dprintf(STDERR_FILENO, "\n");
319 324
320#ifdef MALLOC_STATS 325#ifdef MALLOC_STATS
321 if (mopts.malloc_stats) 326 if (DO_STATS && mopts.malloc_verbose)
322 malloc_gdump(STDERR_FILENO); 327 malloc_dump();
323#endif /* MALLOC_STATS */ 328#endif
324 329
325 errno = saved_errno; 330 errno = saved_errno;
326 331
@@ -416,6 +421,14 @@ omalloc_parseopt(char opt)
416 case 'U': 421 case 'U':
417 mopts.malloc_freeunmap = 1; 422 mopts.malloc_freeunmap = 1;
418 break; 423 break;
424#ifdef MALLOC_STATS
425 case 'v':
426 mopts.malloc_verbose = 0;
427 break;
428 case 'V':
429 mopts.malloc_verbose = 1;
430 break;
431#endif /* MALLOC_STATS */
419 case 'x': 432 case 'x':
420 mopts.malloc_xmalloc = 0; 433 mopts.malloc_xmalloc = 0;
421 break; 434 break;
@@ -486,11 +499,11 @@ omalloc_init(void)
486 } 499 }
487 500
488#ifdef MALLOC_STATS 501#ifdef MALLOC_STATS
489 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) { 502 if (DO_STATS && (atexit(malloc_exit) == -1)) {
490 dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed." 503 dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
491 " Will not be able to dump stats on exit\n"); 504 " Will not be able to dump stats on exit\n");
492 } 505 }
493#endif /* MALLOC_STATS */ 506#endif
494 507
495 while ((mopts.malloc_canary = arc4random()) == 0) 508 while ((mopts.malloc_canary = arc4random()) == 0)
496 ; 509 ;
@@ -596,9 +609,7 @@ insert(struct dir_info *d, void *p, size_t sz, void *f)
596 } 609 }
597 d->r[index].p = p; 610 d->r[index].p = p;
598 d->r[index].size = sz; 611 d->r[index].size = sz;
599#ifdef MALLOC_STATS 612 STATS_SETF(&d->r[index], f);
600 d->r[index].f = f;
601#endif
602 d->regions_free--; 613 d->regions_free--;
603 return 0; 614 return 0;
604} 615}
@@ -1104,12 +1115,10 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
1104 } 1115 }
1105 } 1116 }
1106found: 1117found:
1107#ifdef MALLOC_STATS 1118 if (i == 0 && k == 0 && DO_STATS) {
1108 if (i == 0 && k == 0) {
1109 struct region_info *r = find(d, bp->page); 1119 struct region_info *r = find(d, bp->page);
1110 r->f = f; 1120 STATS_SETF(r, f);
1111 } 1121 }
1112#endif
1113 1122
1114 *lp ^= 1 << k; 1123 *lp ^= 1 << k;
1115 1124
@@ -1193,6 +1202,9 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1193 info = (struct chunk_info *)r->size; 1202 info = (struct chunk_info *)r->size;
1194 chunknum = find_chunknum(d, info, ptr, 0); 1203 chunknum = find_chunknum(d, info, ptr, 0);
1195 1204
1205 if (chunknum == 0)
1206 STATS_SETF(r, NULL);
1207
1196 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS); 1208 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1197 info->free++; 1209 info->free++;
1198 1210
@@ -1214,7 +1226,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1214 unmap(d, info->page, MALLOC_PAGESIZE, 0); 1226 unmap(d, info->page, MALLOC_PAGESIZE, 0);
1215 1227
1216 delete(d, r); 1228 delete(d, r);
1217 mp = &d->chunk_info_list[info->bucket]; 1229 mp = &d->chunk_info_list[info->bucket];
1218 LIST_INSERT_HEAD(mp, info, entries); 1230 LIST_INSERT_HEAD(mp, info, entries);
1219} 1231}
1220 1232
@@ -1694,9 +1706,11 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1694 r = findpool(p, *argpool, &pool, &saved_function); 1706 r = findpool(p, *argpool, &pool, &saved_function);
1695 1707
1696 REALSIZE(oldsz, r); 1708 REALSIZE(oldsz, r);
1697 if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) { 1709 if (oldsz <= MALLOC_MAXCHUNK) {
1698 info = (struct chunk_info *)r->size; 1710 if (DO_STATS || mopts.chunk_canaries) {
1699 chunknum = find_chunknum(pool, info, p, 0); 1711 info = (struct chunk_info *)r->size;
1712 chunknum = find_chunknum(pool, info, p, 0);
1713 }
1700 } 1714 }
1701 1715
1702 goldsz = oldsz; 1716 goldsz = oldsz;
@@ -1808,7 +1822,8 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1808 info->bits[info->offset + chunknum] = newsz; 1822 info->bits[info->offset + chunknum] = newsz;
1809 fill_canary(p, newsz, B2SIZE(info->bucket)); 1823 fill_canary(p, newsz, B2SIZE(info->bucket));
1810 } 1824 }
1811 STATS_SETF(r, f); 1825 if (DO_STATS && chunknum == 0)
1826 STATS_SETF(r, f);
1812 ret = p; 1827 ret = p;
1813 } else if (newsz != oldsz || forced) { 1828 } else if (newsz != oldsz || forced) {
1814 /* create new allocation */ 1829 /* create new allocation */
@@ -1825,7 +1840,8 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1825 /* oldsz == newsz */ 1840 /* oldsz == newsz */
1826 if (newsz != 0) 1841 if (newsz != 0)
1827 wrterror(pool, "realloc internal inconsistency"); 1842 wrterror(pool, "realloc internal inconsistency");
1828 STATS_SETF(r, f); 1843 if (DO_STATS && chunknum == 0)
1844 STATS_SETF(r, f);
1829 ret = p; 1845 ret = p;
1830 } 1846 }
1831done: 1847done:
@@ -2225,6 +2241,35 @@ aligned_alloc(size_t alignment, size_t size)
2225 2241
2226#ifdef MALLOC_STATS 2242#ifdef MALLOC_STATS
2227 2243
2244static void
2245ulog(const char *format, ...)
2246{
2247 va_list ap;
2248 static char* buf;
2249 static size_t filled;
2250 int len;
2251
2252 if (buf == NULL)
2253 buf = MMAP(KTR_USER_MAXLEN, 0);
2254 if (buf == MAP_FAILED)
2255 return;
2256
2257 va_start(ap, format);
2258 len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2259 va_end(ap);
2260 if (len < 0)
2261 return;
2262 if (len > KTR_USER_MAXLEN - filled)
2263 len = KTR_USER_MAXLEN - filled;
2264 filled += len;
2265 if (filled > 0) {
2266 if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2267 utrace("malloc", buf, filled);
2268 filled = 0;
2269 }
2270 }
2271}
2272
2228struct malloc_leak { 2273struct malloc_leak {
2229 void *f; 2274 void *f;
2230 size_t total_size; 2275 size_t total_size;
@@ -2242,12 +2287,12 @@ leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2242 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f; 2287 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2243} 2288}
2244 2289
2245static RBT_HEAD(leaktree, leaknode) leakhead; 2290RBT_HEAD(leaktree, leaknode);
2246RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp); 2291RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2247RBT_GENERATE(leaktree, leaknode, entry, leakcmp); 2292RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2248 2293
2249static void 2294static void
2250putleakinfo(void *f, size_t sz, int cnt) 2295putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2251{ 2296{
2252 struct leaknode key, *p; 2297 struct leaknode key, *p;
2253 static struct leaknode *page; 2298 static struct leaknode *page;
@@ -2257,7 +2302,7 @@ putleakinfo(void *f, size_t sz, int cnt)
2257 return; 2302 return;
2258 2303
2259 key.d.f = f; 2304 key.d.f = f;
2260 p = RBT_FIND(leaktree, &leakhead, &key); 2305 p = RBT_FIND(leaktree, leaks, &key);
2261 if (p == NULL) { 2306 if (p == NULL) {
2262 if (page == NULL || 2307 if (page == NULL ||
2263 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { 2308 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
@@ -2270,72 +2315,75 @@ putleakinfo(void *f, size_t sz, int cnt)
2270 p->d.f = f; 2315 p->d.f = f;
2271 p->d.total_size = sz * cnt; 2316 p->d.total_size = sz * cnt;
2272 p->d.count = cnt; 2317 p->d.count = cnt;
2273 RBT_INSERT(leaktree, &leakhead, p); 2318 RBT_INSERT(leaktree, leaks, p);
2274 } else { 2319 } else {
2275 p->d.total_size += sz * cnt; 2320 p->d.total_size += sz * cnt;
2276 p->d.count += cnt; 2321 p->d.count += cnt;
2277 } 2322 }
2278} 2323}
2279 2324
2280static struct malloc_leak *malloc_leaks;
2281
2282static void 2325static void
2283dump_leaks(int fd) 2326dump_leaks(struct leaktree *leaks)
2284{ 2327{
2285 struct leaknode *p; 2328 struct leaknode *p;
2286 unsigned int i = 0; 2329
2287 2330 ulog("Leak report:\n");
2288 dprintf(fd, "Leak report\n"); 2331 ulog(" f sum # avg\n");
2289 dprintf(fd, " f sum # avg\n"); 2332
2290 /* XXX only one page of summary */ 2333 RBT_FOREACH(p, leaktree, leaks) {
2291 if (malloc_leaks == NULL) 2334 Dl_info info;
2292 malloc_leaks = MMAP(MALLOC_PAGESIZE, 0); 2335 const char *caller = p->d.f;
2293 if (malloc_leaks != MAP_FAILED) 2336 const char *object = ".";
2294 memset(malloc_leaks, 0, MALLOC_PAGESIZE); 2337
2295 RBT_FOREACH(p, leaktree, &leakhead) { 2338 if (caller != NULL) {
2296 dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f, 2339 if (dladdr(p->d.f, &info) != 0) {
2297 p->d.total_size, p->d.count, p->d.total_size / p->d.count); 2340 caller -= (uintptr_t)info.dli_fbase;
2298 if (malloc_leaks == MAP_FAILED || 2341 object = info.dli_fname;
2299 i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak)) 2342 }
2300 continue; 2343 }
2301 malloc_leaks[i].f = p->d.f; 2344 ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n",
2302 malloc_leaks[i].total_size = p->d.total_size; 2345 p->d.f, p->d.total_size, p->d.count,
2303 malloc_leaks[i].count = p->d.count; 2346 p->d.total_size / p->d.count,
2304 i++; 2347 object, caller);
2305 } 2348 }
2306} 2349}
2307 2350
2308static void 2351static void
2309dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist) 2352dump_chunk(struct leaktree* leaks, struct chunk_info *p, void *f,
2353 int fromfreelist)
2310{ 2354{
2311 while (p != NULL) { 2355 while (p != NULL) {
2312 dprintf(fd, "chunk %18p %18p %4zu %d/%d\n", 2356 if (mopts.malloc_verbose)
2313 p->page, ((p->bits[0] & 1) ? NULL : f), 2357 ulog("chunk %18p %18p %4zu %d/%d\n",
2314 B2SIZE(p->bucket), p->free, p->total); 2358 p->page, ((p->bits[0] & 1) ? NULL : f),
2359 B2SIZE(p->bucket), p->free, p->total);
2315 if (!fromfreelist) { 2360 if (!fromfreelist) {
2316 size_t sz = B2SIZE(p->bucket); 2361 size_t sz = B2SIZE(p->bucket);
2317 if (p->bits[0] & 1) 2362 if (p->bits[0] & 1)
2318 putleakinfo(NULL, sz, p->total - p->free); 2363 putleakinfo(leaks, NULL, sz, p->total -
2364 p->free);
2319 else { 2365 else {
2320 putleakinfo(f, sz, 1); 2366 putleakinfo(leaks, f, sz, 1);
2321 putleakinfo(NULL, sz, 2367 putleakinfo(leaks, NULL, sz,
2322 p->total - p->free - 1); 2368 p->total - p->free - 1);
2323 } 2369 }
2324 break; 2370 break;
2325 } 2371 }
2326 p = LIST_NEXT(p, entries); 2372 p = LIST_NEXT(p, entries);
2327 if (p != NULL) 2373 if (mopts.malloc_verbose && p != NULL)
2328 dprintf(fd, " "); 2374 ulog(" ->");
2329 } 2375 }
2330} 2376}
2331 2377
2332static void 2378static void
2333dump_free_chunk_info(int fd, struct dir_info *d) 2379dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2334{ 2380{
2335 int i, j, count; 2381 int i, j, count;
2336 struct chunk_info *p; 2382 struct chunk_info *p;
2337 2383
2338 dprintf(fd, "Free chunk structs:\n"); 2384 ulog("Free chunk structs:\n");
2385 ulog("Bkt) #CI page"
2386 " f size free/n\n");
2339 for (i = 0; i <= BUCKETS; i++) { 2387 for (i = 0; i <= BUCKETS; i++) {
2340 count = 0; 2388 count = 0;
2341 LIST_FOREACH(p, &d->chunk_info_list[i], entries) 2389 LIST_FOREACH(p, &d->chunk_info_list[i], entries)
@@ -2345,99 +2393,100 @@ dump_free_chunk_info(int fd, struct dir_info *d)
2345 if (p == NULL && count == 0) 2393 if (p == NULL && count == 0)
2346 continue; 2394 continue;
2347 if (j == 0) 2395 if (j == 0)
2348 dprintf(fd, "%3d) %3d ", i, count); 2396 ulog("%3d) %3d ", i, count);
2349 else 2397 else
2350 dprintf(fd, " "); 2398 ulog(" ");
2351 if (p != NULL) 2399 if (p != NULL)
2352 dump_chunk(fd, p, NULL, 1); 2400 dump_chunk(leaks, p, NULL, 1);
2353 else 2401 else
2354 dprintf(fd, "\n"); 2402 ulog(".\n");
2355 } 2403 }
2356 } 2404 }
2357 2405
2358} 2406}
2359 2407
2360static void 2408static void
2361dump_free_page_info(int fd, struct dir_info *d) 2409dump_free_page_info(struct dir_info *d)
2362{ 2410{
2363 struct smallcache *cache; 2411 struct smallcache *cache;
2364 size_t i, total = 0; 2412 size_t i, total = 0;
2365 2413
2366 dprintf(fd, "Cached in small cache:\n"); 2414 ulog("Cached in small cache:\n");
2367 for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) { 2415 for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2368 cache = &d->smallcache[i]; 2416 cache = &d->smallcache[i];
2369 if (cache->length != 0) 2417 if (cache->length != 0)
2370 dprintf(fd, "%zu(%u): %u = %zu\n", i + 1, cache->max, 2418 ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2371 cache->length, cache->length * (i + 1)); 2419 cache->length, cache->length * (i + 1));
2372 total += cache->length * (i + 1); 2420 total += cache->length * (i + 1);
2373 } 2421 }
2374 2422
2375 dprintf(fd, "Cached in big cache: %zu/%zu\n", d->bigcache_used, 2423 ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2376 d->bigcache_size); 2424 d->bigcache_size);
2377 for (i = 0; i < d->bigcache_size; i++) { 2425 for (i = 0; i < d->bigcache_size; i++) {
2378 if (d->bigcache[i].psize != 0) 2426 if (d->bigcache[i].psize != 0)
2379 dprintf(fd, "%zu: %zu\n", i, d->bigcache[i].psize); 2427 ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2380 total += d->bigcache[i].psize; 2428 total += d->bigcache[i].psize;
2381 } 2429 }
2382 dprintf(fd, "Free pages cached: %zu\n", total); 2430 ulog("Free pages cached: %zu\n", total);
2383} 2431}
2384 2432
2385static void 2433static void
2386malloc_dump1(int fd, int poolno, struct dir_info *d) 2434malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2387{ 2435{
2388 size_t i, realsize; 2436 size_t i, realsize;
2389 2437
2390 dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d); 2438 if (mopts.malloc_verbose) {
2391 if (d == NULL) 2439 ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2392 return; 2440 ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk,
2393 dprintf(fd, "MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk, d->mmap_flag); 2441 d->mmap_flag);
2394 dprintf(fd, "Region slots free %zu/%zu\n", 2442 ulog("Region slots free %zu/%zu\n",
2395 d->regions_free, d->regions_total); 2443 d->regions_free, d->regions_total);
2396 dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions); 2444 ulog("Finds %zu/%zu\n", d->finds, d->find_collisions);
2397 dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions); 2445 ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2398 dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves); 2446 ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2399 dprintf(fd, "Cheap reallocs %zu/%zu\n", 2447 ulog("Cheap reallocs %zu/%zu\n",
2400 d->cheap_reallocs, d->cheap_realloc_tries); 2448 d->cheap_reallocs, d->cheap_realloc_tries);
2401 dprintf(fd, "Other pool searches %zu/%zu\n", 2449 ulog("Other pool searches %zu/%zu\n",
2402 d->other_pool, d->pool_searches); 2450 d->other_pool, d->pool_searches);
2403 dprintf(fd, "In use %zu\n", d->malloc_used); 2451 ulog("In use %zu\n", d->malloc_used);
2404 dprintf(fd, "Guarded %zu\n", d->malloc_guarded); 2452 ulog("Guarded %zu\n", d->malloc_guarded);
2405 dump_free_chunk_info(fd, d); 2453 dump_free_chunk_info(d, leaks);
2406 dump_free_page_info(fd, d); 2454 dump_free_page_info(d);
2407 dprintf(fd, 2455 ulog("Hash table:\n");
2408 "slot) hash d type page f " 2456 ulog("slot) hash d type page "
2409 "size [free/n]\n"); 2457 "f size [free/n]\n");
2458 }
2410 for (i = 0; i < d->regions_total; i++) { 2459 for (i = 0; i < d->regions_total; i++) {
2411 if (d->r[i].p != NULL) { 2460 if (d->r[i].p != NULL) {
2412 size_t h = hash(d->r[i].p) & 2461 size_t h = hash(d->r[i].p) &
2413 (d->regions_total - 1); 2462 (d->regions_total - 1);
2414 dprintf(fd, "%4zx) #%4zx %zd ", 2463 if (mopts.malloc_verbose)
2415 i, h, h - i); 2464 ulog("%4zx) #%4zx %zd ",
2465 i, h, h - i);
2416 REALSIZE(realsize, &d->r[i]); 2466 REALSIZE(realsize, &d->r[i]);
2417 if (realsize > MALLOC_MAXCHUNK) { 2467 if (realsize > MALLOC_MAXCHUNK) {
2418 putleakinfo(d->r[i].f, realsize, 1); 2468 putleakinfo(leaks, d->r[i].f, realsize, 1);
2419 dprintf(fd, 2469 if (mopts.malloc_verbose)
2420 "pages %18p %18p %zu\n", d->r[i].p, 2470 ulog("pages %18p %18p %zu\n", d->r[i].p,
2421 d->r[i].f, realsize); 2471 d->r[i].f, realsize);
2422 } else 2472 } else
2423 dump_chunk(fd, 2473 dump_chunk(leaks,
2424 (struct chunk_info *)d->r[i].size, 2474 (struct chunk_info *)d->r[i].size,
2425 d->r[i].f, 0); 2475 d->r[i].f, 0);
2426 } 2476 }
2427 } 2477 }
2428 dump_leaks(fd); 2478 if (mopts.malloc_verbose)
2429 dprintf(fd, "\n"); 2479 ulog("\n");
2430} 2480}
2431 2481
2432void 2482static void
2433malloc_dump(int fd, int poolno, struct dir_info *pool) 2483malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2434{ 2484{
2435 int i; 2485 int i;
2436 void *p; 2486 void *p;
2437 struct region_info *r; 2487 struct region_info *r;
2438 int saved_errno = errno;
2439 2488
2440 if (pool == NULL) 2489 if (pool == NULL || pool->r == NULL)
2441 return; 2490 return;
2442 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) { 2491 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2443 p = pool->delayed_chunks[i]; 2492 p = pool->delayed_chunks[i];
@@ -2449,52 +2498,44 @@ malloc_dump(int fd, int poolno, struct dir_info *pool)
2449 free_bytes(pool, r, p); 2498 free_bytes(pool, r, p);
2450 pool->delayed_chunks[i] = NULL; 2499 pool->delayed_chunks[i] = NULL;
2451 } 2500 }
2452 /* XXX leak when run multiple times */ 2501 malloc_dump1(poolno, pool, leaks);
2453 RBT_INIT(leaktree, &leakhead);
2454 malloc_dump1(fd, poolno, pool);
2455 errno = saved_errno;
2456} 2502}
2457DEF_WEAK(malloc_dump);
2458 2503
2459void 2504void
2460malloc_gdump(int fd) 2505malloc_dump(void)
2461{ 2506{
2462 int i; 2507 int i;
2463 int saved_errno = errno; 2508 int saved_errno = errno;
2464 2509
2510 /* XXX leak when run multiple times */
2511 struct leaktree leaks = RBT_INITIALIZER(&leaks);
2512
2465 for (i = 0; i < mopts.malloc_mutexes; i++) 2513 for (i = 0; i < mopts.malloc_mutexes; i++)
2466 malloc_dump(fd, i, mopts.malloc_pool[i]); 2514 malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2467 2515
2516 dump_leaks(&leaks);
2517 ulog("\n");
2468 errno = saved_errno; 2518 errno = saved_errno;
2469} 2519}
2470DEF_WEAK(malloc_gdump); 2520DEF_WEAK(malloc_dump);
2471 2521
2472static void 2522static void
2473malloc_exit(void) 2523malloc_exit(void)
2474{ 2524{
2475 int save_errno = errno, fd; 2525 int save_errno = errno;
2476 unsigned i; 2526
2477 2527 ulog("******** Start dump %s *******\n", __progname);
2478 fd = open("malloc.out", O_RDWR|O_APPEND); 2528 ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
2479 if (fd != -1) { 2529 "G=%zu\n",
2480 dprintf(fd, "******** Start dump %s *******\n", __progname); 2530 mopts.malloc_mutexes,
2481 dprintf(fd, 2531 mopts.internal_funcs, mopts.malloc_freecheck,
2482 "M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u " 2532 mopts.malloc_freeunmap, mopts.def_malloc_junk,
2483 "G=%zu\n", 2533 mopts.malloc_realloc, mopts.malloc_xmalloc,
2484 mopts.malloc_mutexes, 2534 mopts.chunk_canaries, mopts.def_maxcache,
2485 mopts.internal_funcs, mopts.malloc_freecheck, 2535 mopts.malloc_guard);
2486 mopts.malloc_freeunmap, mopts.def_malloc_junk, 2536
2487 mopts.malloc_realloc, mopts.malloc_xmalloc, 2537 malloc_dump();
2488 mopts.chunk_canaries, mopts.def_maxcache, 2538 ulog("******** End dump %s *******\n", __progname);
2489 mopts.malloc_guard);
2490
2491 for (i = 0; i < mopts.malloc_mutexes; i++)
2492 malloc_dump(fd, i, mopts.malloc_pool[i]);
2493 dprintf(fd, "******** End dump %s *******\n", __progname);
2494 close(fd);
2495 } else
2496 dprintf(STDERR_FILENO,
2497 "malloc() warning: Couldn't dump stats\n");
2498 errno = save_errno; 2539 errno = save_errno;
2499} 2540}
2500 2541