Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--   src/lib/libc/stdlib/malloc.c   146
1 file changed, 81 insertions(+), 65 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index e86f941aa2..ad0b187e03 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*    $OpenBSD: malloc.c,v 1.236 2017/11/02 14:01:50 otto Exp $    */
+/*    $OpenBSD: malloc.c,v 1.237 2017/12/27 10:05:23 otto Exp $    */
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -123,12 +123,12 @@ struct dir_info {
     /* free pages cache */
     struct region_info free_regions[MALLOC_MAXCACHE];
     /* delayed free chunk slots */
+    u_int rotor;
     void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
     size_t rbytesused;              /* random bytes used */
     char *func;                     /* current function */
     int mutex;
     u_char rbytes[32];              /* random bytes */
-    u_short chunk_start;
 #ifdef MALLOC_STATS
     size_t inserts;
     size_t insert_collisions;
@@ -166,14 +166,14 @@ struct dir_info {
 struct chunk_info {
     LIST_ENTRY(chunk_info) entries;
     void *page;                     /* pointer to the page */
-    u_int32_t canary;
+    u_short canary;
     u_short size;                   /* size of this page's chunks */
     u_short shift;                  /* how far to shift for this size */
     u_short free;                   /* how many free chunks */
     u_short total;                  /* how many chunks */
     u_short offset;                 /* requested size table offset */
-    /* which chunks are free */
-    u_short bits[1];
+    u_short rotor;                  /* randomization rotor */
+    u_short bits[1];                /* which chunks are free */
 };
 
 struct malloc_readonly {
@@ -416,7 +416,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
 {
     size_t psz = sz >> MALLOC_PAGESHIFT;
     struct region_info *r, *big = NULL;
-    u_int i, offset;
+    u_int i;
     void *p;
 
     if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
@@ -425,7 +425,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
     if (sz != PAGEROUND(sz))
         wrterror(d, "map round");
 
-    if (!hint && psz > d->free_regions_size) {
+    if (hint == NULL && psz > d->free_regions_size) {
         _MALLOC_LEAVE(d);
         p = MMAP(sz);
         _MALLOC_ENTER(d);
@@ -434,11 +434,10 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
         /* zero fill not needed */
         return p;
     }
-    offset = getrbyte(d);
     for (i = 0; i < mopts.malloc_cache; i++) {
-        r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+        r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
         if (r->p != NULL) {
-            if (hint && r->p != hint)
+            if (hint != NULL && r->p != hint)
                 continue;
             if (r->size == psz) {
                 p = r->p;
@@ -452,6 +451,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
                 else if (mopts.malloc_junk == 2 &&
                   mopts.malloc_freeunmap)
                     memset(p, SOME_FREEJUNK, sz);
+                d->rotor += i + 1;
                 return p;
             } else if (r->size > psz)
                 big = r;
@@ -471,7 +471,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
             memset(p, SOME_FREEJUNK, sz);
         return p;
     }
-    if (hint)
+    if (hint != NULL)
         return MAP_FAILED;
     if (d->free_regions_size > mopts.malloc_cache)
         wrterror(d, "malloc cache");
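
The map() hunks above drop the per-call offset = getrbyte(d) and instead keep a per-pool rotor in struct dir_info, advanced past the cache slot that satisfied the request, so successive mappings keep walking the free-region cache rather than re-randomizing the start point every time. Below is a minimal standalone sketch of that scan pattern, assuming a power-of-two cache; CACHE_SIZE, struct cache and cache_take() are illustrative names, not part of malloc.c.

#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 16                   /* must be a power of two */

struct region {
    void   *p;                          /* NULL means the slot is empty */
    size_t  psz;                        /* size in pages */
};

struct cache {
    struct region slots[CACHE_SIZE];
    unsigned int  rotor;                /* advances past the slot just used */
};

/* Return a cached region of exactly psz pages, or NULL if none is found. */
static void *
cache_take(struct cache *c, size_t psz)
{
    unsigned int i, slot;

    for (i = 0; i < CACHE_SIZE; i++) {
        slot = (i + c->rotor) & (CACHE_SIZE - 1);
        if (c->slots[slot].p != NULL && c->slots[slot].psz == psz) {
            void *p = c->slots[slot].p;

            c->slots[slot].p = NULL;
            c->rotor += i + 1;          /* next search starts past this hit */
            return p;
        }
    }
    return NULL;
}

int
main(void)
{
    struct cache c = { 0 };
    int a = 1, b = 2;

    c.slots[3] = (struct region){ &a, 4 };
    c.slots[9] = (struct region){ &b, 4 };
    printf("%p\n", cache_take(&c, 4));  /* finds &a in slot 3 */
    printf("%p\n", cache_take(&c, 4));  /* rotor moved on, finds &b in slot 9 */
    return 0;
}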
@@ -718,36 +718,35 @@ static struct chunk_info *
 alloc_chunk_info(struct dir_info *d, int bits)
 {
     struct chunk_info *p;
-    size_t size, count;
-
-    if (bits == 0)
-        count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
-    else
-        count = MALLOC_PAGESIZE >> bits;
-
-    size = howmany(count, MALLOC_BITS);
-    size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
-    if (mopts.chunk_canaries)
-        size += count * sizeof(u_short);
-    size = ALIGN(size);
 
     if (LIST_EMPTY(&d->chunk_info_list[bits])) {
+        size_t size, count, i;
         char *q;
-        size_t i;
+
+        if (bits == 0)
+            count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
+        else
+            count = MALLOC_PAGESIZE >> bits;
+
+        size = howmany(count, MALLOC_BITS);
+        size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
+        if (mopts.chunk_canaries)
+            size += count * sizeof(u_short);
+        size = ALIGN(size);
 
         q = MMAP(MALLOC_PAGESIZE);
         if (q == MAP_FAILED)
             return NULL;
         STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
         count = MALLOC_PAGESIZE / size;
-        for (i = 0; i < count; i++, q += size)
-            LIST_INSERT_HEAD(&d->chunk_info_list[bits],
-                (struct chunk_info *)q, entries);
+        for (i = 0; i < count; i++, q += size) {
+            p = (struct chunk_info *)q;
+            p->canary = (u_short)d->canary1;
+            LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
+        }
     }
     p = LIST_FIRST(&d->chunk_info_list[bits]);
     LIST_REMOVE(p, entries);
-    memset(p, 0, size);
-    p->canary = d->canary1;
     return p;
 }
 
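
In the reworked alloc_chunk_info() the size computation moves inside the refill branch, where it is actually needed, and every freshly carved header gets its (now u_short) canary stamped once at carve time, which is why the old per-allocation memset(p, 0, size) and canary reassignment are gone. A rough sketch of that carve-and-stamp pattern, under simplifying assumptions: a fixed header size, plain malloc() standing in for MMAP(), and made-up names HDR_SIZE, struct hdr and carve_page().

#include <stdlib.h>

#define PAGE_SIZE 4096
#define HDR_SIZE  64            /* aligned size of one header, fixed here */

struct hdr {
    struct hdr    *next;
    unsigned short canary;      /* checked on every use, set only here */
};

/* Carve one page into headers, stamp each canary once, return the list. */
static struct hdr *
carve_page(unsigned short canary)
{
    char *page, *q;
    struct hdr *head = NULL, *h;
    size_t i, count = PAGE_SIZE / HDR_SIZE;

    page = malloc(PAGE_SIZE);   /* stand-in for MMAP(MALLOC_PAGESIZE) */
    if (page == NULL)
        return NULL;
    for (i = 0, q = page; i < count; i++, q += HDR_SIZE) {
        h = (struct hdr *)q;
        h->canary = canary;     /* stamped at carve time, never cleared */
        h->next = head;
        head = h;
    }
    return head;
}

int
main(void)
{
    struct hdr *h = carve_page(0x5a5a);

    return (h != NULL && h->canary == 0x5a5a) ? 0 : 1;
}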
@@ -896,6 +895,8 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
     for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
         bp->bits[i / MALLOC_BITS] = (u_short)~0U;
 
+    if (i < k)
+        bp->bits[i / MALLOC_BITS] = 0;
     for (; i < k; i++)
         bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
 
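
Because chunk_info headers are no longer zeroed when handed out, omalloc_make_chunks() now has to clear the one partially used bitmap word itself before OR-ing the remaining free bits into it; whole words are still written as ~0 directly. A small sketch of that bitmap fill, with 16 bits per u_short as in malloc.c; mark_all_free() is an illustrative helper, not a malloc.c function.

#include <stdio.h>

#define MALLOC_BITS 16          /* bits per u_short, as in malloc.c */
typedef unsigned short u_short;
typedef unsigned int u_int;

/*
 * Mark chunks [0, total) free: whole shorts are written as ~0, and the
 * final, partially used short is cleared first, then filled bit by bit.
 */
static void
mark_all_free(u_short *bits, u_int total)
{
    u_int i = 0;

    for (; (total - i) >= MALLOC_BITS; i += MALLOC_BITS)
        bits[i / MALLOC_BITS] = (u_short)~0U;
    if (i < total)
        bits[i / MALLOC_BITS] = 0;      /* this word was never zeroed */
    for (; i < total; i++)
        bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
}

int
main(void)
{
    u_short bits[2] = { 0xdead, 0xdead };       /* deliberately dirty memory */

    mark_all_free(bits, 20);                    /* one whole short + 4 bits */
    printf("%04x %04x\n", bits[0], bits[1]);    /* prints: ffff 000f */
    return 0;
}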
@@ -937,10 +938,12 @@ find_chunksize(size)
 static void *
 malloc_bytes(struct dir_info *d, size_t size, void *f)
 {
-    int i, j, listnum;
+    u_int i, r;
+    int j, listnum;
     size_t k;
-    u_short u, *lp;
+    u_short u, b, *lp;
     struct chunk_info *bp;
+    void *p;
 
     if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
       d->canary1 != ~d->canary2)
@@ -948,7 +951,8 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
 
     j = find_chunksize(size);
 
-    listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
+    r = getrbyte(d);
+    listnum = r % MALLOC_CHUNK_LISTS;
     /* If it's empty, make a page more of that size chunks */
     if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
         bp = omalloc_make_chunks(d, j, listnum);
@@ -956,35 +960,47 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
             return NULL;
     }
 
-    if (bp->canary != d->canary1)
+    if (bp->canary != (u_short)d->canary1)
         wrterror(d, "chunk info corrupted");
 
-    i = d->chunk_start;
     if (bp->free > 1)
-        i += getrbyte(d);
-    if (i >= bp->total)
-        i &= bp->total - 1;
-    for (;;) {
-        for (;;) {
-            lp = &bp->bits[i / MALLOC_BITS];
-            if (!*lp) {
-                i += MALLOC_BITS;
-                i &= ~(MALLOC_BITS - 1);
-                if (i >= bp->total)
-                    i = 0;
-            } else
-                break;
-        }
+        bp->rotor += r;
+    i = bp->rotor++ & (bp->total - 1);
+
+    /* start somewhere in a short */
+    lp = &bp->bits[i / MALLOC_BITS];
+    if (*lp) {
+        b = *lp;
         k = i % MALLOC_BITS;
         u = 1 << k;
-        if (*lp & u)
-            break;
-        if (++i >= bp->total)
+        while (k < MALLOC_BITS) {
+            if (b & u)
+                goto found;
+            k++;
+            u <<= 1;
+        }
+    }
+    /* no bit halfway, go to next full short */
+    i /= MALLOC_BITS;
+    for (;;) {
+        if (++i >= bp->total / MALLOC_BITS)
             i = 0;
+        lp = &bp->bits[i];
+        if (*lp) {
+            b = *lp;
+            k = 0;
+            u = 1;
+            for (;;) {
+                if (b & u)
+                    goto found;
+                k++;
+                u <<= 1;
+            }
+        }
     }
-    d->chunk_start += i + 1;
+found:
 #ifdef MALLOC_STATS
-    if (i == 0) {
+    if (i == 0 && k == 0) {
         struct region_info *r = find(d, bp->page);
         r->f = f;
     }
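
The rewritten search in malloc_bytes() starts at a per-page rotor position, first finishes the u_short that position falls into, and only then walks whole shorts until a set bit turns up; the caller guarantees at least one free chunk, so the search terminates. The sketch below reproduces that two-phase scan in isolation, under the assumption that total is a power of two and at least one bit is set; find_free_chunk() and the driver in main() are illustrative, not malloc.c code.

#include <stdio.h>

#define MALLOC_BITS 16                  /* bits per u_short, as in malloc.c */
typedef unsigned short u_short;
typedef unsigned int u_int;

/*
 * Pick a free chunk from a bitmap of total bits (total is a power of two),
 * starting at *rotor and advancing it; at least one bit must be set.
 */
static u_int
find_free_chunk(u_short *bits, u_int total, u_short *rotor)
{
    u_int i, k;
    u_short b, u, *lp;

    i = (*rotor)++ & (total - 1);

    /* phase 1: finish the short the rotor landed in */
    lp = &bits[i / MALLOC_BITS];
    if (*lp) {
        b = *lp;
        k = i % MALLOC_BITS;
        u = 1 << k;
        while (k < MALLOC_BITS) {
            if (b & u)
                goto found;
            k++;
            u <<= 1;
        }
    }
    /* phase 2: walk whole shorts until a set bit turns up */
    i /= MALLOC_BITS;
    for (;;) {
        if (++i >= total / MALLOC_BITS)
            i = 0;
        lp = &bits[i];
        if (*lp) {
            b = *lp;
            k = 0;
            u = 1;
            for (;;) {
                if (b & u)
                    goto found;
                k++;
                u <<= 1;
            }
        }
    }
found:
    *lp &= ~u;                          /* claim the chunk */
    return (u_int)(lp - bits) * MALLOC_BITS + k;
}

int
main(void)
{
    u_short bits[4] = { 0, 0x0100, 0, 0x8000 };         /* chunks 24 and 63 free */
    u_short rotor = 40;

    printf("%u\n", find_free_chunk(bits, 64, &rotor));  /* prints 63 */
    printf("%u\n", find_free_chunk(bits, 64, &rotor));  /* prints 24 */
    return 0;
}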
@@ -1004,13 +1020,14 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
 
     k <<= bp->shift;
 
+    p = (char *)bp->page + k;
     if (bp->size > 0) {
         if (mopts.malloc_junk == 2)
-            memset((char *)bp->page + k, SOME_JUNK, bp->size);
+            memset(p, SOME_JUNK, bp->size);
         else if (mopts.chunk_canaries)
-            fill_canary((char *)bp->page + k, size, bp->size);
+            fill_canary(p, size, bp->size);
     }
-    return ((char *)bp->page + k);
+    return p;
 }
 
 static void
@@ -1045,13 +1062,11 @@ validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
 }
 
 static uint32_t
-find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
+find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
 {
-    struct chunk_info *info;
     uint32_t chunknum;
 
-    info = (struct chunk_info *)r->size;
-    if (info->canary != d->canary1)
+    if (info->canary != (u_short)d->canary1)
         wrterror(d, "chunk info corrupted");
 
     /* Find the chunk number on the page */
@@ -1081,7 +1096,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
     int listnum;
 
     info = (struct chunk_info *)r->size;
-    chunknum = find_chunknum(d, r, ptr, 0);
+    chunknum = find_chunknum(d, info, ptr, 0);
 
     info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
     info->free++;
@@ -1326,7 +1341,7 @@ ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
             struct chunk_info *info =
                 (struct chunk_info *)r->size;
             uint32_t chunknum =
-                find_chunknum(pool, r, p, 0);
+                find_chunknum(pool, info, p, 0);
 
             if (info->bits[info->offset + chunknum] <
                 argsz)
@@ -1373,7 +1388,8 @@ ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
         delete(pool, r);
     } else {
         /* Validate and optionally canary check */
-        find_chunknum(pool, r, p, mopts.chunk_canaries);
+        struct chunk_info *info = (struct chunk_info *)r->size;
+        find_chunknum(pool, info, p, mopts.chunk_canaries);
         if (!clear) {
             void *tmp;
             int i;
@@ -1520,8 +1536,8 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 
     REALSIZE(oldsz, r);
     if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
-        chunknum = find_chunknum(pool, r, p, 0);
         info = (struct chunk_info *)r->size;
+        chunknum = find_chunknum(pool, info, p, 0);
     }
 
     goldsz = oldsz;
@@ -1788,7 +1804,7 @@ orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
     if (sz <= MALLOC_MAXCHUNK) {
         if (mopts.chunk_canaries && sz > 0) {
             struct chunk_info *info = (struct chunk_info *)r->size;
-            uint32_t chunknum = find_chunknum(pool, r, p, 0);
+            uint32_t chunknum = find_chunknum(pool, info, p, 0);
 
             if (info->bits[info->offset + chunknum] != oldsize)
                 wrterror(pool, "recorded old size %hu != %zu",
