author    | otto <> | 2023-11-04 11:02:35 +0000
committer | otto <> | 2023-11-04 11:02:35 +0000
commit    | 3e34a07b0fe589c20ebaceaca4a5d4fe38674db2 (patch)
tree      | a5cb8aaf324b7ddce7eba7d8d3700828e8be4463
parent    | 9338f2e503bcf0185656636d62eec3005ed27254 (diff)
KNF plus fixed a few signed vs unsigned compares (that were actually not real problems)
-rw-r--r-- | src/lib/libc/stdlib/malloc.c | 55 |
1 file changed, 33 insertions, 22 deletions
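A note on the signed vs unsigned compares mentioned in the commit message: when a signed `int` is compared against an unsigned type such as `size_t`, the usual arithmetic conversions turn the `int` into an unsigned value, so a negative number would compare as enormous (this is what compilers flag under -Wsign-compare). The compares touched here were harmless in practice because the values are already known to be non-negative: `ulog()` rejects a negative `vsnprintf()` result before the comparison, and the loop counters switched to `u_int` only ever count upward from zero. Below is a minimal standalone sketch of the `ulog()`-style fix; it is not taken from malloc.c, and the names `append()` and `BUF_MAX` are made up for illustration.

```c
/*
 * Sketch of the signed vs unsigned comparison pattern: len is a
 * signed snprintf() result, filled and BUF_MAX are unsigned sizes.
 * Rejecting len < 0 first makes the (size_t) cast safe and silences
 * -Wsign-compare without changing behaviour.
 */
#include <stdio.h>

#define BUF_MAX 128			/* stand-in for KTR_USER_MAXLEN */

static void
append(char *buf, size_t *filled, const char *msg)
{
	int len;

	len = snprintf(buf + *filled, BUF_MAX - *filled, "%s", msg);
	if (len < 0)			/* output error: nothing written */
		return;
	/* cast is safe here: len is known to be non-negative */
	if ((size_t)len > BUF_MAX - *filled)
		len = BUF_MAX - *filled;	/* snprintf truncated */
	*filled += len;
}

int
main(void)
{
	char buf[BUF_MAX] = "";
	size_t filled = 0;

	append(buf, &filled, "hello ");
	append(buf, &filled, "world");
	printf("%s (%zu bytes used)\n", buf, filled);
	return 0;
}
```

With -Wsign-compare enabled, the same code without the `(size_t)` cast warns on the length comparison; with the cast it compiles silently and behaves identically.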
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 81ad79dfd3..9da180d814 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1 | /* $OpenBSD: malloc.c,v 1.292 2023/10/26 17:59:16 otto Exp $ */ | 1 | /* $OpenBSD: malloc.c,v 1.293 2023/11/04 11:02:35 otto Exp $ */ |
2 | /* | 2 | /* |
3 | * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net> | 3 | * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net> |
4 | * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> | 4 | * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> |
@@ -215,7 +215,8 @@ struct chunk_info {
215 | u_short bits[CHUNK_INFO_TAIL]; /* which chunks are free */ | 215 | u_short bits[CHUNK_INFO_TAIL]; /* which chunks are free */ |
216 | }; | 216 | }; |
217 | 217 | ||
218 | #define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS))) | 218 | #define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & \ |
219 | (1U << ((n) % MALLOC_BITS))) | ||
219 | 220 | ||
220 | struct malloc_readonly { | 221 | struct malloc_readonly { |
221 | /* Main bookkeeping information */ | 222 | /* Main bookkeeping information */ |
@@ -232,7 +233,7 @@ struct malloc_readonly {
232 | u_int junk_loc; /* variation in location of junk */ | 233 | u_int junk_loc; /* variation in location of junk */ |
233 | size_t malloc_guard; /* use guard pages after allocations? */ | 234 | size_t malloc_guard; /* use guard pages after allocations? */ |
234 | #ifdef MALLOC_STATS | 235 | #ifdef MALLOC_STATS |
235 | int malloc_stats; /* save callers, dump leak report at end */ | 236 | int malloc_stats; /* save callers, dump leak report */ |
236 | int malloc_verbose; /* dump verbose statistics at end */ | 237 | int malloc_verbose; /* dump verbose statistics at end */ |
237 | #define DO_STATS mopts.malloc_stats | 238 | #define DO_STATS mopts.malloc_stats |
238 | #else | 239 | #else |
@@ -542,7 +543,7 @@ omalloc_init(void)
542 | static void | 543 | static void |
543 | omalloc_poolinit(struct dir_info *d, int mmap_flag) | 544 | omalloc_poolinit(struct dir_info *d, int mmap_flag) |
544 | { | 545 | { |
545 | int i, j; | 546 | u_int i, j; |
546 | 547 | ||
547 | d->r = NULL; | 548 | d->r = NULL; |
548 | d->rbytesused = sizeof(d->rbytes); | 549 | d->rbytesused = sizeof(d->rbytes); |
@@ -597,7 +598,8 @@ omalloc_grow(struct dir_info *d)
597 | } | 598 | } |
598 | 599 | ||
599 | if (d->regions_total > 0) { | 600 | if (d->regions_total > 0) { |
600 | oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info)); | 601 | oldpsz = PAGEROUND(d->regions_total * |
602 | sizeof(struct region_info)); | ||
601 | /* clear to avoid meta info ending up in the cache */ | 603 | /* clear to avoid meta info ending up in the cache */ |
602 | unmap(d, d->r, oldpsz, oldpsz); | 604 | unmap(d, d->r, oldpsz, oldpsz); |
603 | } | 605 | } |
@@ -995,7 +997,8 @@ alloc_chunk_info(struct dir_info *d, u_int bucket)
995 | 997 | ||
996 | for (i = 0; i < count; i++, q += size) { | 998 | for (i = 0; i < count; i++, q += size) { |
997 | p = (struct chunk_info *)q; | 999 | p = (struct chunk_info *)q; |
998 | LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, entries); | 1000 | LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, |
1001 | entries); | ||
999 | } | 1002 | } |
1000 | } | 1003 | } |
1001 | p = LIST_FIRST(&d->chunk_info_list[bucket]); | 1004 | p = LIST_FIRST(&d->chunk_info_list[bucket]); |
@@ -1023,7 +1026,8 @@ omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
1023 | ff = map(d, MALLOC_PAGESIZE, 0); | 1026 | ff = map(d, MALLOC_PAGESIZE, 0); |
1024 | if (ff == MAP_FAILED) | 1027 | if (ff == MAP_FAILED) |
1025 | goto err; | 1028 | goto err; |
1026 | memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / B2ALLOC(bucket)); | 1029 | memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / |
1030 | B2ALLOC(bucket)); | ||
1027 | } | 1031 | } |
1028 | 1032 | ||
1029 | /* memory protect the page allocated in the malloc(0) case */ | 1033 | /* memory protect the page allocated in the malloc(0) case */ |
@@ -1405,14 +1409,14 @@ _malloc_init(int from_rthreads)
1405 | sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE; | 1409 | sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE; |
1406 | if ((p = MMAPNONE(sz, 0)) == MAP_FAILED) | 1410 | if ((p = MMAPNONE(sz, 0)) == MAP_FAILED) |
1407 | wrterror(NULL, "malloc_init mmap1 failed"); | 1411 | wrterror(NULL, "malloc_init mmap1 failed"); |
1408 | if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d), | 1412 | if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * |
1409 | PROT_READ | PROT_WRITE)) | 1413 | sizeof(*d), PROT_READ | PROT_WRITE)) |
1410 | wrterror(NULL, "malloc_init mprotect1 failed"); | 1414 | wrterror(NULL, "malloc_init mprotect1 failed"); |
1411 | if (mimmutable(p, sz)) | 1415 | if (mimmutable(p, sz)) |
1412 | wrterror(NULL, "malloc_init mimmutable1 failed"); | 1416 | wrterror(NULL, "malloc_init mimmutable1 failed"); |
1413 | d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) & | 1417 | d_avail = (((mopts.malloc_mutexes * sizeof(*d) + |
1414 | ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >> | 1418 | MALLOC_PAGEMASK) & ~MALLOC_PAGEMASK) - |
1415 | MALLOC_MINSHIFT; | 1419 | (mopts.malloc_mutexes * sizeof(*d))) >> MALLOC_MINSHIFT; |
1416 | d = (struct dir_info *)(p + MALLOC_PAGESIZE + | 1420 | d = (struct dir_info *)(p + MALLOC_PAGESIZE + |
1417 | (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); | 1421 | (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); |
1418 | STATS_ADD(d[1].malloc_used, sz); | 1422 | STATS_ADD(d[1].malloc_used, sz); |
@@ -1422,9 +1426,12 @@ _malloc_init(int from_rthreads)
1422 | if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) { | 1426 | if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) { |
1423 | if (mprotect(&malloc_readonly, sizeof(malloc_readonly), | 1427 | if (mprotect(&malloc_readonly, sizeof(malloc_readonly), |
1424 | PROT_READ)) | 1428 | PROT_READ)) |
1425 | wrterror(NULL, "malloc_init mprotect r/o failed"); | 1429 | wrterror(NULL, |
1426 | if (mimmutable(&malloc_readonly, sizeof(malloc_readonly))) | 1430 | "malloc_init mprotect r/o failed"); |
1427 | wrterror(NULL, "malloc_init mimmutable r/o failed"); | 1431 | if (mimmutable(&malloc_readonly, |
1432 | sizeof(malloc_readonly))) | ||
1433 | wrterror(NULL, | ||
1434 | "malloc_init mimmutable r/o failed"); | ||
1428 | } | 1435 | } |
1429 | } | 1436 | } |
1430 | 1437 | ||
@@ -1458,7 +1465,8 @@ _malloc_init(int from_rthreads)
1458 | wrterror(NULL, | 1465 | wrterror(NULL, |
1459 | "malloc_init mmap2 failed"); | 1466 | "malloc_init mmap2 failed"); |
1460 | if (mimmutable(p, sz)) | 1467 | if (mimmutable(p, sz)) |
1461 | wrterror(NULL, "malloc_init mimmutable2 failed"); | 1468 | wrterror(NULL, |
1469 | "malloc_init mimmutable2 failed"); | ||
1462 | for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) { | 1470 | for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) { |
1463 | d->smallcache[j].pages = p; | 1471 | d->smallcache[j].pages = p; |
1464 | p = (char *)p + d->smallcache[j].max * | 1472 | p = (char *)p + d->smallcache[j].max * |
@@ -1535,7 +1543,8 @@ findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1535 | if (r == NULL) { | 1543 | if (r == NULL) { |
1536 | u_int i, nmutexes; | 1544 | u_int i, nmutexes; |
1537 | 1545 | ||
1538 | nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2; | 1546 | nmutexes = mopts.malloc_pool[1]->malloc_mt ? |
1547 | mopts.malloc_mutexes : 2; | ||
1539 | for (i = 1; i < nmutexes; i++) { | 1548 | for (i = 1; i < nmutexes; i++) { |
1540 | u_int j = (argpool->mutex + i) & (nmutexes - 1); | 1549 | u_int j = (argpool->mutex + i) & (nmutexes - 1); |
1541 | 1550 | ||
@@ -1813,7 +1822,8 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz)
1813 | size_t needed = rnewsz - roldsz; | 1822 | size_t needed = rnewsz - roldsz; |
1814 | 1823 | ||
1815 | STATS_INC(pool->cheap_realloc_tries); | 1824 | STATS_INC(pool->cheap_realloc_tries); |
1816 | q = MMAPA(hint, needed, MAP_FIXED | __MAP_NOREPLACE | pool->mmap_flag); | 1825 | q = MMAPA(hint, needed, MAP_FIXED | |
1826 | __MAP_NOREPLACE | pool->mmap_flag); | ||
1817 | if (q == hint) { | 1827 | if (q == hint) { |
1818 | STATS_ADD(pool->malloc_used, needed); | 1828 | STATS_ADD(pool->malloc_used, needed); |
1819 | if (pool->malloc_junk == 2) | 1829 | if (pool->malloc_junk == 2) |
@@ -2030,7 +2040,8 @@ orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
2030 | wrterror(pool, "recorded size %zu < %zu", | 2040 | wrterror(pool, "recorded size %zu < %zu", |
2031 | sz - mopts.malloc_guard, oldsize); | 2041 | sz - mopts.malloc_guard, oldsize); |
2032 | if (oldsize < (sz - mopts.malloc_guard) / 2) | 2042 | if (oldsize < (sz - mopts.malloc_guard) / 2) |
2033 | wrterror(pool, "recorded size %zu inconsistent with %zu", | 2043 | wrterror(pool, |
2044 | "recorded size %zu inconsistent with %zu", | ||
2034 | sz - mopts.malloc_guard, oldsize); | 2045 | sz - mopts.malloc_guard, oldsize); |
2035 | } | 2046 | } |
2036 | 2047 | ||
@@ -2383,7 +2394,7 @@ ulog(const char *format, ...)
2383 | va_end(ap); | 2394 | va_end(ap); |
2384 | if (len < 0) | 2395 | if (len < 0) |
2385 | return; | 2396 | return; |
2386 | if (len > KTR_USER_MAXLEN - filled) | 2397 | if ((size_t)len > KTR_USER_MAXLEN - filled) |
2387 | len = KTR_USER_MAXLEN - filled; | 2398 | len = KTR_USER_MAXLEN - filled; |
2388 | filled += len; | 2399 | filled += len; |
2389 | if (filled > 0) { | 2400 | if (filled > 0) { |
@@ -2516,7 +2527,7 @@ dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
2516 | static void | 2527 | static void |
2517 | dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks) | 2528 | dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks) |
2518 | { | 2529 | { |
2519 | int i, j, count; | 2530 | u_int i, j, count; |
2520 | struct chunk_info *p; | 2531 | struct chunk_info *p; |
2521 | 2532 | ||
2522 | ulog("Free chunk structs:\n"); | 2533 | ulog("Free chunk structs:\n"); |
@@ -2639,7 +2650,7 @@ malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2639 | void | 2650 | void |
2640 | malloc_dump(void) | 2651 | malloc_dump(void) |
2641 | { | 2652 | { |
2642 | int i; | 2653 | u_int i; |
2643 | int saved_errno = errno; | 2654 | int saved_errno = errno; |
2644 | 2655 | ||
2645 | /* XXX leak when run multiple times */ | 2656 | /* XXX leak when run multiple times */ |