author	otto <>	2023-12-04 07:01:45 +0000
committer	otto <>	2023-12-04 07:01:45 +0000
commit	6e97e62c7adb0ca900dde49c141872924f78b41c (patch)
tree	0bb77dcd4ea5a7d1013a0219b5cfc2aff8f2bd4e
parent	872a37b58324189a9256ba4c78b1eabf7e497d47 (diff)
Save backtraces to show in leak dump. Depth of backtrace set by
malloc option D (aka 1), 2, 3 or 4. No performance impact if not used. ok asou@
-rw-r--r--	src/lib/libc/stdlib/malloc.3	  18
-rw-r--r--	src/lib/libc/stdlib/malloc.c	 253
2 files changed, 184 insertions, 87 deletions
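
For context on how the new option levels are exercised (not part of the commit): option D enables the leak report at exit, and options 2, 3 and 4 imply D while recording progressively deeper call stacks. Below is a minimal hypothetical test program; the do_leak() name is illustrative, and the ktrace/kdump invocation in the comment assumes the leak-report workflow that malloc.3 already documents.

/*
 * leaker.c - hypothetical leak test case.  Assuming the documented
 * workflow, something like:
 *
 *	$ MALLOC_OPTIONS=3 ktrace -tu ./leaker
 *	$ kdump -u malloc
 *
 * should attribute the leak to a three-deep backtrace through
 * do_leak() and main() instead of a single caller address.
 */
#include <stdlib.h>
#include <string.h>

static void *
do_leak(size_t n)
{
	void *p = malloc(n);		/* never freed */

	if (p != NULL)
		memset(p, 'A', n);
	return p;
}

int
main(void)
{
	int i;

	for (i = 0; i < 16; i++)
		do_leak(100);
	return 0;			/* the report runs via atexit(3) */
}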
diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3
index 3e7f7b9876..21464dc363 100644
--- a/src/lib/libc/stdlib/malloc.3
+++ b/src/lib/libc/stdlib/malloc.3
@@ -30,9 +30,9 @@
30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31.\" SUCH DAMAGE. 31.\" SUCH DAMAGE.
32.\" 32.\"
33.\" $OpenBSD: malloc.3,v 1.139 2023/10/22 14:04:52 jmc Exp $ 33.\" $OpenBSD: malloc.3,v 1.140 2023/12/04 07:01:45 otto Exp $
34.\" 34.\"
35.Dd $Mdocdate: October 22 2023 $ 35.Dd $Mdocdate: December 4 2023 $
36.Dt MALLOC 3 36.Dt MALLOC 3
37.Os 37.Os
38.Sh NAME 38.Sh NAME
@@ -298,11 +298,12 @@ To view the leak report:
 By default, the immediate caller of a
 .Nm
 function will be recorded.
-Use malloc options
-.Cm 2
-or
+Use malloc option
+.Cm 2 ,
 .Cm 3
-to record the caller one or two stack frames deeper instead.
+or
+.Cm 4
+to record deeper call stacks.
 These malloc options imply
 .Cm D .
 .It Cm F
@@ -843,9 +844,10 @@ to avoid these problems on
 .Ox .
 .Pp
 The mechanism to record caller functions when using malloc options
-.Cm 2
+.Cm 2 ,
+.Cm 3 ,
 or
-.Cm 3
+.Cm 4
 is not guaranteed to work for all platforms, compilers or compilation
 options,
 and might even crash your program.
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 9da180d814..7c226657df 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.293 2023/11/04 11:02:35 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.294 2023/12/04 07:01:45 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -139,6 +139,16 @@ struct bigcache {
 	size_t psize;
 };
 
+#ifdef MALLOC_STATS
+#define NUM_FRAMES 4
+struct btnode {
+	RBT_ENTRY(btnode) entry;
+	void *caller[NUM_FRAMES];
+};
+RBT_HEAD(btshead, btnode);
+RBT_PROTOTYPE(btshead, btnode, entry, btcmp);
+#endif /* MALLOC_STATS */
+
 struct dir_info {
 	u_int32_t canary1;
 	int active;			/* status of malloc */
@@ -175,6 +185,9 @@ struct dir_info {
 	size_t cheap_reallocs;
 	size_t malloc_used;		/* bytes allocated */
 	size_t malloc_guarded;		/* bytes used for guards */
+	struct btshead btraces;		/* backtraces seen */
+	struct btnode *btnodes;		/* store of backtrace nodes */
+	size_t btnodesused;
 #define STATS_ADD(x,y)	((x) += (y))
 #define STATS_SUB(x,y)	((x) -= (y))
 #define STATS_INC(x)	((x)++)
@@ -261,38 +274,52 @@ void malloc_dump(void);
 PROTO_NORMAL(malloc_dump);
 static void malloc_exit(void);
 static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
-#endif
+static void* store_caller(struct dir_info *, struct btnode *);
 
+/* below are the arches for which deeper caller info has been tested */
 #if defined(__aarch64__) || \
     defined(__amd64__) || \
-    defined(__arm__)
-static inline void* caller(void)
-{
-	void *p;
-
-	switch (DO_STATS) {
-	case 0:
-	default:
+    defined(__arm__) || \
+    defined(__i386__) || \
+    defined(__powerpc__)
+__attribute__((always_inline))
+static inline void*
+caller(struct dir_info *d)
+{
+	struct btnode p;
+	int level = DO_STATS;
+	if (level == 0)
 		return NULL;
-	case 1:
-		p = __builtin_return_address(0);
-		break;
-	case 2:
-		p = __builtin_return_address(1);
-		break;
-	case 3:
-		p = __builtin_return_address(2);
-		break;
-	}
-	return __builtin_extract_return_addr(p);
+
+	memset(&p.caller, 0, sizeof(p.caller));
+	if (level >= 1)
+		p.caller[0] = __builtin_extract_return_addr(
+		    __builtin_return_address(0));
+	if (p.caller[0] != NULL && level >= 2)
+		p.caller[1] = __builtin_extract_return_addr(
+		    __builtin_return_address(1));
+	if (p.caller[1] != NULL && level >= 3)
+		p.caller[2] = __builtin_extract_return_addr(
+		    __builtin_return_address(2));
+	if (p.caller[2] != NULL && level >= 4)
+		p.caller[3] = __builtin_extract_return_addr(
+		    __builtin_return_address(3));
+	return store_caller(d, &p);
 }
 #else
-static inline void* caller(void)
+__attribute__((always_inline))
+static inline void* caller(struct dir_info *d)
 {
-	return DO_STATS == 0 ? NULL :
-	    __builtin_extract_return_addr(__builtin_return_address(0));
+	struct btnode p;
+
+	if (DO_STATS == 0)
+		return NULL;
+	memset(&p.caller, 0, sizeof(p.caller));
+	p.caller[0] = __builtin_extract_return_addr(__builtin_return_address(0));
+	return store_caller(d, &p);
 }
 #endif
+#endif /* MALLOC_STATS */
 
 /* low bits of r->p determine size: 0 means >= page size and r->size holding
  * real size, otherwise low bits is the bucket + 1
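
An aside on the mechanism above: __builtin_return_address(N) reads the return address N frames up and __builtin_extract_return_addr() strips any platform-specific encoding; the guarded if-chain stops as soon as a frame yields NULL rather than walking past a missing frame. Here is a standalone sketch of the same fixed-depth capture, with illustrative names; as the malloc.3 BUGS entry amended above warns, nonzero depths are best-effort and not reliable on every platform or compiler.

#include <stdio.h>

#define NUM_FRAMES 4

/*
 * Best-effort capture of up to `depth' return addresses in the same
 * guarded style as caller() above.  always_inline matters: the frame
 * numbers are relative to whoever calls the inlined body.
 */
__attribute__((always_inline))
static inline void
capture(void *fr[NUM_FRAMES], int depth)
{
	fr[0] = fr[1] = fr[2] = fr[3] = NULL;
	if (depth >= 1)
		fr[0] = __builtin_extract_return_addr(
		    __builtin_return_address(0));
	if (fr[0] != NULL && depth >= 2)
		fr[1] = __builtin_extract_return_addr(
		    __builtin_return_address(1));
	/* levels 3 and 4 would continue the same pattern */
}

static void
leaf(void)
{
	void *fr[NUM_FRAMES];
	int i;

	capture(fr, 2);	/* fr[0] is leaf()'s return address inside main() */
	for (i = 0; i < NUM_FRAMES && fr[i] != NULL; i++)
		printf("frame %d: %p\n", i, fr[i]);
}

int
main(void)
{
	leaf();
	return 0;
}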
@@ -411,6 +438,9 @@ omalloc_parseopt(char opt)
 	case '3':
 		mopts.malloc_stats = 3;
 		break;
+	case '4':
+		mopts.malloc_stats = 4;
+		break;
 #endif /* MALLOC_STATS */
 	case 'f':
 		mopts.malloc_freecheck = 0;
@@ -525,7 +555,7 @@ omalloc_init(void)
 
 #ifdef MALLOC_STATS
 	if (DO_STATS && (atexit(malloc_exit) == -1)) {
-		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
+		dprintf(STDERR_FILENO, "malloc() warning: atexit(3) failed."
 		    " Will not be able to dump stats on exit\n");
 	}
 #endif
@@ -555,6 +585,9 @@ omalloc_poolinit(struct dir_info *d, int mmap_flag)
 	}
 	d->mmap_flag = mmap_flag;
 	d->malloc_junk = mopts.def_malloc_junk;
+#ifdef MALLOC_STATS
+	RBT_INIT(btshead, &d->btraces);
+#endif
 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
 	d->canary2 = ~d->canary1;
 }
@@ -1091,15 +1124,13 @@ bin_of(unsigned int size)
 	const unsigned int linear = 6;
 	const unsigned int subbin = 2;
 
-	unsigned int mask, range, rounded, sub_index, rounded_size;
+	unsigned int mask, rounded, rounded_size;
 	unsigned int n_bits, shift;
 
 	n_bits = lb(size | (1U << linear));
 	shift = n_bits - subbin;
 	mask = (1ULL << shift) - 1;
 	rounded = size + mask; /* XXX: overflow. */
-	sub_index = rounded >> shift;
-	range = n_bits - linear;
 
 	rounded_size = rounded & ~mask;
 	return rounded_size;
@@ -1135,6 +1166,7 @@ static void *
 malloc_bytes(struct dir_info *d, size_t size)
 {
 	u_int i, k, r, bucket, listnum;
+	int j;
 	u_short *lp;
 	struct chunk_info *bp;
 	void *p;
@@ -1155,7 +1187,7 @@ malloc_bytes(struct dir_info *d, size_t size)
 		return NULL;
 	}
 
-	if (bp->canary != (u_short)d->canary1)
+	if (bp->canary != (u_short)d->canary1 || bucket != bp->bucket)
 		wrterror(d, "chunk info corrupted");
 
 	/* bias, as bp->total is not a power of 2 */
@@ -1163,8 +1195,8 @@ malloc_bytes(struct dir_info *d, size_t size)
 
 	/* potentially start somewhere in a short */
 	lp = &bp->bits[i / MALLOC_BITS];
-	if (*lp) {
-		int j = i % MALLOC_BITS;	/* j must be signed */
+	j = i % MALLOC_BITS;		/* j must be signed */
+	if (*lp >> j) {
 		k = ffs(*lp >> j);
 		if (k != 0) {
 			k += j - 1;
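
A note on the reworked test above: the old code branched on *lp but then scanned ffs(*lp >> j), so a word whose only set bits sit below position j entered the branch and found nothing; testing *lp >> j makes the branch agree with what ffs() will actually see. A tiny standalone illustration of the scan, with illustrative names:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MALLOC_BITS 16	/* bits per bitmap word, as in malloc.c */

/*
 * First set (free) bit at or after position i in a bitmap word, the
 * way malloc_bytes() scans bp->bits[]: shift right by j so ffs() only
 * sees bits >= i, then add j back.  Returns -1 when nothing at or
 * after i is set.
 */
static int
first_set_at_or_after(unsigned short w, int i)
{
	int j = i % MALLOC_BITS;	/* j must be signed */
	int k = ffs(w >> j);

	return k == 0 ? -1 : k - 1 + j;
}

int
main(void)
{
	/* bit 2 is set but below i = 4; the first hit is bit 9 */
	printf("%d\n", first_set_at_or_after(0x0204, 4));	/* prints 9 */
	return 0;
}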
@@ -1200,13 +1232,13 @@ found:
 		STATS_SETFN(r, k, d->caller);
 	}
 
-	k *= B2ALLOC(bp->bucket);
+	k *= B2ALLOC(bucket);
 
 	p = (char *)bp->page + k;
-	if (bp->bucket > 0) {
-		validate_junk(d, p, B2SIZE(bp->bucket));
+	if (bucket > 0) {
+		validate_junk(d, p, B2SIZE(bucket));
 		if (mopts.chunk_canaries)
-			fill_canary(p, size, B2SIZE(bp->bucket));
+			fill_canary(p, size, B2SIZE(bucket));
 	}
 	return p;
 }
@@ -1511,7 +1543,7 @@ malloc(size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "malloc")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	r = omalloc(d, size, 0);
 	EPILOGUE()
 	return r;
@@ -1526,7 +1558,7 @@ malloc_conceal(size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	r = omalloc(d, size, 0);
 	EPILOGUE()
 	return r;
@@ -1937,7 +1969,7 @@ realloc(void *ptr, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "realloc")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	r = orealloc(&d, ptr, size);
 	EPILOGUE()
 	return r;
@@ -1958,7 +1990,7 @@ calloc(size_t nmemb, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(getpool(), "calloc")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
 		d->active--;
@@ -1984,7 +2016,7 @@ calloc_conceal(size_t nmemb, size_t size)
 	int saved_errno = errno;
 
 	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
 		d->active--;
@@ -2130,7 +2162,7 @@ recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
 		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
 
 	PROLOGUE(getpool(), "recallocarray")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 
 	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
@@ -2289,7 +2321,7 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 		malloc_recurse(d);
 		goto err;
 	}
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	r = omemalign(d, alignment, size, 0);
 	d->active--;
 	_MALLOC_UNLOCK(d->mutex);
@@ -2328,7 +2360,7 @@ aligned_alloc(size_t alignment, size_t size)
 	}
 
 	PROLOGUE(getpool(), "aligned_alloc")
-	SET_CALLER(d, caller());
+	SET_CALLER(d, caller(d));
 	r = omemalign(d, alignment, size, 0);
 	EPILOGUE()
 	return r;
@@ -2337,43 +2369,74 @@ DEF_STRONG(aligned_alloc);
 
 #ifdef MALLOC_STATS
 
+static int
+btcmp(const struct btnode *e1, const struct btnode *e2)
+{
+	return memcmp(e1->caller, e2->caller, sizeof(e1->caller));
+}
+
+RBT_GENERATE(btshead, btnode, entry, btcmp);
+
+static void*
+store_caller(struct dir_info *d, struct btnode *f)
+{
+	struct btnode *p;
+
+	if (DO_STATS == 0 || d->btnodes == MAP_FAILED)
+		return NULL;
+
+	p = RBT_FIND(btshead, &d->btraces, f);
+	if (p != NULL)
+		return p;
+	if (d->btnodes == NULL ||
+	    d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
+		d->btnodes = map(d, MALLOC_PAGESIZE, 0);
+		if (d->btnodes == MAP_FAILED)
+			return NULL;
+		d->btnodesused = 0;
+	}
+	p = &d->btnodes[d->btnodesused++];
+	memcpy(p->caller, f->caller, sizeof(p->caller[0]) * DO_STATS);
+	RBT_INSERT(btshead, &d->btraces, p);
+	return p;
+}
+
+static void fabstorel(const void *, char *, size_t);
+
 static void
-print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
+print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t index)
 {
 	struct region_info *r;
 	struct chunk_info *chunkinfo;
+	struct btnode* btnode;
 	uint32_t chunknum;
-	Dl_info info;
-	const char *caller, *pcaller = NULL;
-	const char *object = ".";
-	const char *pobject = ".";
+	int frame;
+	char buf1[128];
+	char buf2[128];
 	const char *msg = "";
 
 	r = find(pool, p);
 	chunkinfo = (struct chunk_info *)r->size;
 	chunknum = find_chunknum(pool, chunkinfo, p, 0);
-	caller = r->f[chunknum];
-	if (dladdr(caller, &info) != 0) {
-		caller -= (uintptr_t)info.dli_fbase;
-		object = info.dli_fname;
-	}
+	btnode = (struct btnode *)r->f[chunknum];
+	frame = DO_STATS - 1;
+	if (btnode != NULL)
+		fabstorel(btnode->caller[frame], buf1, sizeof(buf1));
+	strlcpy(buf2, ". 0x0", sizeof(buf2));
 	if (chunknum > 0) {
 		chunknum--;
-		pcaller = r->f[chunknum];
-		if (dladdr(pcaller, &info) != 0) {
-			pcaller -= (uintptr_t)info.dli_fbase;
-			pobject = info.dli_fname;
-		}
+		btnode = (struct btnode *)r->f[chunknum];
+		if (btnode != NULL)
+			fabstorel(btnode->caller[frame], buf2, sizeof(buf2));
 		if (CHUNK_FREE(chunkinfo, chunknum))
 			msg = " (now free)";
 	}
 
 	wrterror(pool,
-	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
-	    "(preceding chunk %p allocated at %s %p%s)",
-	    p, i * sizeof(uint64_t),
-	    (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
-	    pobject, pcaller, msg);
+	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s "
+	    "(preceding chunk %p allocated at %s%s)",
+	    p, index * sizeof(uint64_t), (index + 1) * sizeof(uint64_t) - 1,
+	    sz, buf1, p - sz, buf2, msg);
 }
 
 static void
@@ -2476,6 +2539,52 @@ putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
 }
 
 static void
+fabstorel(const void *f, char *buf, size_t size)
+{
+	Dl_info info;
+	const char *object = ".";
+	const char *caller;
+
+	caller = f;
+	if (caller != NULL && dladdr(f, &info) != 0) {
+		caller -= (uintptr_t)info.dli_fbase;
+		object = info.dli_fname;
+	}
+	snprintf(buf, size, "%s %p", object, caller);
+}
+
+static void
+dump_leak(struct leaknode *p)
+{
+	int i;
+	char buf[128];
+
+	if (p->d.f == NULL) {
+		fabstorel(NULL, buf, sizeof(buf));
+		ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
+		    p->d.f, p->d.total_size, p->d.count,
+		    p->d.total_size / p->d.count, buf);
+		return;
+	}
+
+	for (i = 0; i < DO_STATS; i++) {
+		const char *abscaller;
+
+		abscaller = ((struct btnode*)p->d.f)->caller[i];
+		if (abscaller == NULL)
+			break;
+		fabstorel(abscaller, buf, sizeof(buf));
+		if (i == 0)
+			ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
+			    abscaller, p->d.total_size, p->d.count,
+			    p->d.total_size / p->d.count, buf);
+		else
+			ulog("%*p %*s %6s %6s addr2line -e %s\n",
+			    i + 18, abscaller, 7 - i, "-", "-", "-", buf);
+	}
+}
+
+static void
 dump_leaks(struct leaktree *leaks)
 {
 	struct leaknode *p;
@@ -2483,22 +2592,8 @@ dump_leaks(struct leaktree *leaks)
 	ulog("Leak report:\n");
 	ulog("                 f     sum      #    avg\n");
 
-	RBT_FOREACH(p, leaktree, leaks) {
-		Dl_info info;
-		const char *caller = p->d.f;
-		const char *object = ".";
-
-		if (caller != NULL) {
-			if (dladdr(p->d.f, &info) != 0) {
-				caller -= (uintptr_t)info.dli_fbase;
-				object = info.dli_fname;
-			}
-		}
-		ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n",
-		    p->d.f, p->d.total_size, p->d.count,
-		    p->d.total_size / p->d.count,
-		    object, caller);
-	}
+	RBT_FOREACH(p, leaktree, leaks)
+		dump_leak(p);
 }
 
 static void