author     otto <> 2021-03-09 07:39:28 +0000
committer  otto <> 2021-03-09 07:39:28 +0000
commit     9108b7f38107e9f7ce1aaa33e615a7935b057ad0 (patch)
tree       7293a3edc4dc9855d760361a27cc21da1b1563c6
parent     95b5c687013cd519d1c1d97da227d8a466fc0e22 (diff)
Change the implementation of the malloc cache to keep lists of
regions of a given size. In snaps for a while, committing since
no issues were reported and a wider audience is good. ok deraadt@
 src/lib/libc/stdlib/malloc.c | 270 +++++++++++-------------
 1 file changed, 118 insertions(+), 152 deletions(-)
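The change is easiest to read as the replacement of one data structure. The old cache was a single array of variable-sized free regions (free_regions[], scanned with a rotor); the new one is an array of fixed-size buckets, one per region size in pages, with random eviction and random reuse. The standalone toy model below illustrates the policy; it is a sketch, not the committed code: it substitutes malloc/free for mmap/munmap and arc4random_uniform for the pool-local getrbyte, and omits the junking, mprotect and statistics handling visible in the diff.

	#include <stdlib.h>

	#define MAX_CACHEABLE_SIZE	32	/* in pages; larger regions bypass the cache */
	#define BUCKET_CAP		64	/* stands in for mopts.def_maxcache */

	struct bucket {
		void		*pages[BUCKET_CAP];	/* cached regions, all the same size */
		unsigned short	 length;		/* slots currently filled */
		unsigned short	 max;			/* capacity; 0 disables this bucket */
	};

	/* cache[psz - 1] holds regions of exactly psz pages */
	static struct bucket cache[MAX_CACHEABLE_SIZE];

	/* free path: cache a region of psz pages, evicting a random victim when full */
	static void
	cache_put(size_t psz, void *p)
	{
		struct bucket *b;

		if (psz == 0 || psz > MAX_CACHEABLE_SIZE || cache[psz - 1].max == 0) {
			free(p);			/* really munmap() */
			return;
		}
		b = &cache[psz - 1];
		if (b->length == b->max) {
			unsigned short i = arc4random_uniform(b->max);

			free(b->pages[i]);		/* really munmap() */
			b->pages[i] = b->pages[--b->length];
		}
		b->pages[b->length++] = p;
	}

	/* allocation path: pop a random cached region, or NULL to fall back to mmap */
	static void *
	cache_get(size_t psz)
	{
		struct bucket *b;
		unsigned short i;
		void *p;

		if (psz == 0 || psz > MAX_CACHEABLE_SIZE)
			return NULL;
		b = &cache[psz - 1];
		if (b->length == 0)
			return NULL;
		i = arc4random_uniform(b->length);
		p = b->pages[i];
		b->pages[i] = b->pages[--b->length];
		return p;
	}

	int
	main(void)
	{
		size_t j;

		for (j = 0; j < MAX_CACHEABLE_SIZE; j++)
			cache[j].max = BUCKET_CAP >> (j / 8);	/* same schedule as _malloc_init() */
		cache_put(1, malloc(4096));	/* pretend a one-page region was freed */
		free(cache_get(1));		/* and immediately reused */
		return 0;
	}

Random slot choice on both paths replaces the old rotor scan, which also makes the order in which freed pages come back harder to predict.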
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 46b07ff77d..9a4aacc3f9 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.268 2021/02/25 15:20:18 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.269 2021/03/09 07:39:28 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -113,29 +113,33 @@ struct region_info {
 
 LIST_HEAD(chunk_head, chunk_info);
 
+#define MAX_CACHEABLE_SIZE	32
+struct cache {
+	void *pages[MALLOC_MAXCACHE];
+	ushort length;
+	ushort max;
+};
+
 struct dir_info {
 	u_int32_t canary1;
 	int active;			/* status of malloc */
 	struct region_info *r;		/* region slots */
 	size_t regions_total;		/* number of region slots */
 	size_t regions_free;		/* number of free slots */
-	size_t free_regions_size;	/* free pages cached */
 	size_t rbytesused;		/* random bytes used */
 	char *func;			/* current function */
-	u_int malloc_cache;		/* # of free pages we cache */
 	int malloc_junk;		/* junk fill? */
 	int mmap_flag;			/* extra flag for mmap */
-	u_int rotor;
 	int mutex;
 	/* lists of free chunk info structs */
 	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
 	/* lists of chunks with free slots */
 	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
-	/* free pages cache */
-	struct region_info free_regions[MALLOC_MAXCACHE];
 	/* delayed free chunk slots */
 	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
 	u_char rbytes[32];		/* random bytes */
+	/* free pages cache */
+	struct cache cache[MAX_CACHEABLE_SIZE];
 #ifdef MALLOC_STATS
 	size_t inserts;
 	size_t insert_collisions;
@@ -166,6 +170,8 @@ struct dir_info {
 #define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
 			~MALLOC_PAGEMASK)
 
+static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
+
 /*
  * This structure describes a page worth of chunks.
  *
@@ -196,7 +202,7 @@ struct malloc_readonly {
 	int	malloc_xmalloc;		/* xmalloc behaviour? */
 	u_int	chunk_canaries;		/* use canaries after chunks? */
 	int	internal_funcs;		/* use better recallocarray/freezero? */
-	u_int	def_malloc_cache;	/* free pages we cache */
+	u_int	def_maxcache;		/* free pages we cache */
 	size_t	malloc_guard;		/* use guard pages after allocations? */
 #ifdef MALLOC_STATS
 	int	malloc_stats;		/* dump statistics at end */
@@ -335,12 +341,12 @@ omalloc_parseopt(char opt)
 		mopts.malloc_mutexes = 2;
 		break;
 	case '>':
-		mopts.def_malloc_cache <<= 1;
-		if (mopts.def_malloc_cache > MALLOC_MAXCACHE)
-			mopts.def_malloc_cache = MALLOC_MAXCACHE;
+		mopts.def_maxcache <<= 1;
+		if (mopts.def_maxcache > MALLOC_MAXCACHE)
+			mopts.def_maxcache = MALLOC_MAXCACHE;
 		break;
 	case '<':
-		mopts.def_malloc_cache >>= 1;
+		mopts.def_maxcache >>= 1;
 		break;
 	case 'c':
 		mopts.chunk_canaries = 0;
@@ -416,7 +422,7 @@ omalloc_init(void)
 	 */
 	mopts.malloc_mutexes = 8;
 	mopts.def_malloc_junk = 1;
-	mopts.def_malloc_cache = MALLOC_DEFAULT_CACHE;
+	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
 
 	for (i = 0; i < 3; i++) {
 		switch (i) {
@@ -445,12 +451,12 @@ omalloc_init(void)
 		case 'S':
 			for (q = "CFGJ"; *q != '\0'; q++)
 				omalloc_parseopt(*q);
-			mopts.def_malloc_cache = 0;
+			mopts.def_maxcache = 0;
 			break;
 		case 's':
 			for (q = "cfgj"; *q != '\0'; q++)
 				omalloc_parseopt(*q);
-			mopts.def_malloc_cache = MALLOC_DEFAULT_CACHE;
+			mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
 			break;
 		default:
 			omalloc_parseopt(*p);
@@ -512,7 +518,6 @@ omalloc_poolinit(struct dir_info **dp, int mmap_flag)
 	STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
 	d->mmap_flag = mmap_flag;
 	d->malloc_junk = mopts.def_malloc_junk;
-	d->malloc_cache = mopts.def_malloc_cache;
 	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
 	d->canary2 = ~d->canary1;
 
@@ -525,16 +530,17 @@ omalloc_grow(struct dir_info *d)
 	size_t newtotal;
 	size_t newsize;
 	size_t mask;
-	size_t i;
+	size_t i, oldpsz;
 	struct region_info *p;
 
 	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
 		return 1;
 
 	newtotal = d->regions_total * 2;
-	newsize = newtotal * sizeof(struct region_info);
+	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
 	mask = newtotal - 1;
 
+	/* Don't use cache here, we don't want user uaf touch this */
 	p = MMAP(newsize, d->mmap_flag);
 	if (p == MAP_FAILED)
 		return 1;
@@ -554,13 +560,11 @@ omalloc_grow(struct dir_info *d)
 			p[index] = d->r[i];
 		}
 	}
-	/* avoid pages containing meta info to end up in cache */
-	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
-		wrterror(d, "munmap %p", (void *)d->r);
-	else
-		STATS_SUB(d->malloc_used,
-		    d->regions_total * sizeof(struct region_info));
-	d->regions_free = d->regions_free + d->regions_total;
+
+	oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
+	/* clear to avoid meta info ending up in the cache */
+	unmap(d, d->r, oldpsz, oldpsz);
+	d->regions_free += d->regions_total;
 	d->regions_total = newtotal;
 	d->r = p;
 	return 0;
@@ -700,9 +704,7 @@ validate_junk(struct dir_info *pool, void *p, size_t sz)
 
 
 /*
- * Cache maintenance. We keep at most malloc_cache pages cached.
- * If the cache is becoming full, unmap pages in the cache for real,
- * and then add the region to the cache
+ * Cache maintenance.
  * Opposed to the regular region data structure, the sizes in the
  * cache are in MALLOC_PAGESIZE units.
  */
@@ -710,139 +712,100 @@ static void
 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
 {
 	size_t psz = sz >> MALLOC_PAGESHIFT;
-	size_t rsz;
-	struct region_info *r;
-	u_int i, offset, mask;
+	void *r;
+	u_short i;
+	struct cache *cache;
 
-	if (sz != PAGEROUND(sz))
+	if (sz != PAGEROUND(sz) || psz == 0)
 		wrterror(d, "munmap round");
 
-	rsz = d->malloc_cache - d->free_regions_size;
-
-	/*
-	 * normally the cache holds recently freed regions, but if the region
-	 * to unmap is larger than the cache size or we're clearing and the
-	 * cache is full, just munmap
-	 */
-	if (psz > d->malloc_cache || (clear > 0 && rsz == 0)) {
-		i = munmap(p, sz);
-		if (i)
+	if (psz > MAX_CACHEABLE_SIZE || d->cache[psz - 1].max == 0) {
+		if (munmap(p, sz))
 			wrterror(d, "munmap %p", p);
 		STATS_SUB(d->malloc_used, sz);
 		return;
 	}
-	offset = getrbyte(d);
-	mask = d->malloc_cache - 1;
-	if (psz > rsz) {
-		size_t tounmap = psz - rsz;
-		for (i = 0; ; i++) {
-			r = &d->free_regions[(i + offset) & mask];
-			if (r->p != NULL) {
-				rsz = r->size << MALLOC_PAGESHIFT;
-				if (!mopts.malloc_freeunmap)
-					validate_junk(d, r->p, rsz);
-				if (munmap(r->p, rsz))
-					wrterror(d, "munmap %p", r->p);
-				r->p = NULL;
-				if (tounmap > r->size)
-					tounmap -= r->size;
-				else
-					tounmap = 0;
-				d->free_regions_size -= r->size;
-				STATS_SUB(d->malloc_used, rsz);
-				if (tounmap == 0) {
-					offset = i;
-					break;
-				}
-			}
-		}
-	}
-	for (i = 0; ; i++) {
-		r = &d->free_regions[(i + offset) & mask];
-		if (r->p == NULL) {
-			if (clear > 0)
-				memset(p, 0, clear);
-			if (mopts.malloc_freeunmap)
-				mprotect(p, sz, PROT_NONE);
-			else
-				junk_free(d->malloc_junk, p,
-				    psz << MALLOC_PAGESHIFT);
-			r->p = p;
-			r->size = psz;
-			d->free_regions_size += psz;
-			break;
-		}
-	}
-	if (d->free_regions_size > d->malloc_cache)
-		wrterror(d, "malloc cache overflow");
+	cache = &d->cache[psz - 1];
+	if (cache->length == cache->max) {
+		/* use a random slot */
+		i = getrbyte(d) % cache->max;
+		r = cache->pages[i];
+		if (!mopts.malloc_freeunmap)
+			validate_junk(d, r, sz);
+		if (munmap(r, sz))
+			wrterror(d, "munmap %p", r);
+		STATS_SUB(d->malloc_used, sz);
+		cache->length--;
+	} else
+		i = cache->length;
+
+	/* fill slot */
+	if (clear > 0)
+		memset(p, 0, clear);
+	if (mopts.malloc_freeunmap)
+		mprotect(p, sz, PROT_NONE);
+	else
+		junk_free(d->malloc_junk, p, sz);
+	cache->pages[i] = p;
+	cache->length++;
 }
 
 static void *
 map(struct dir_info *d, size_t sz, int zero_fill)
 {
-	size_t psz = sz >> MALLOC_PAGESHIFT;
-	struct region_info *r, *big = NULL;
-	u_int i;
+	size_t i, psz = sz >> MALLOC_PAGESHIFT;
 	void *p;
+	struct cache *cache;
 
 	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
 	    d->canary1 != ~d->canary2)
 		wrterror(d, "internal struct corrupt");
-	if (sz != PAGEROUND(sz))
+	if (sz != PAGEROUND(sz) || psz == 0)
 		wrterror(d, "map round");
 
-	if (psz > d->free_regions_size) {
-		_MALLOC_LEAVE(d);
-		p = MMAP(sz, d->mmap_flag);
-		_MALLOC_ENTER(d);
-		if (p != MAP_FAILED)
-			STATS_ADD(d->malloc_used, sz);
-		/* zero fill not needed */
-		return p;
-	}
-	for (i = 0; i < d->malloc_cache; i++) {
-		r = &d->free_regions[(i + d->rotor) & (d->malloc_cache - 1)];
-		if (r->p != NULL) {
-			if (r->size == psz) {
-				p = r->p;
-				if (!mopts.malloc_freeunmap)
-					validate_junk(d, p,
-					    psz << MALLOC_PAGESHIFT);
-				r->p = NULL;
-				d->free_regions_size -= psz;
-				if (mopts.malloc_freeunmap)
-					mprotect(p, sz, PROT_READ | PROT_WRITE);
-				if (zero_fill)
-					memset(p, 0, sz);
-				else if (mopts.malloc_freeunmap)
-					junk_free(d->malloc_junk, p, sz);
-				d->rotor += i + 1;
-				return p;
-			} else if (r->size > psz)
-				big = r;
-		}
-	}
-	if (big != NULL) {
-		r = big;
-		p = r->p;
-		if (!mopts.malloc_freeunmap)
-			validate_junk(d, p, r->size << MALLOC_PAGESHIFT);
-		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
-		r->size -= psz;
-		d->free_regions_size -= psz;
-		if (mopts.malloc_freeunmap)
-			mprotect(p, sz, PROT_READ | PROT_WRITE);
-		else
-			junk_free(d->malloc_junk, r->p,
-			    r->size << MALLOC_PAGESHIFT);
-		if (zero_fill)
-			memset(p, 0, sz);
-		else if (mopts.malloc_freeunmap)
-			junk_free(d->malloc_junk, p, sz);
-		return p;
-	}
-	if (d->free_regions_size > d->malloc_cache)
-		wrterror(d, "malloc cache");
+
+	if (psz <= MAX_CACHEABLE_SIZE && d->cache[psz - 1].max > 0) {
+		cache = &d->cache[psz - 1];
+		if (cache->length > 0) {
+			if (cache->length == 1)
+				p = cache->pages[--cache->length];
+			else {
+				i = getrbyte(d) % cache->length;
+				p = cache->pages[i];
+				cache->pages[i] = cache->pages[--cache->length];
+			}
+			if (!mopts.malloc_freeunmap)
+				validate_junk(d, p, sz);
+			if (mopts.malloc_freeunmap)
+				mprotect(p, sz, PROT_READ | PROT_WRITE);
+			if (zero_fill)
+				memset(p, 0, sz);
+			else if (mopts.malloc_freeunmap)
+				junk_free(d->malloc_junk, p, sz);
+			return p;
+		}
+		if (psz <= 1) {
+			_MALLOC_LEAVE(d);
+			p = MMAP(cache->max * sz, d->mmap_flag);
+			_MALLOC_ENTER(d);
+			if (p != MAP_FAILED) {
+				STATS_ADD(d->malloc_used, cache->max * sz);
+				cache->length = cache->max - 1;
+				for (i = 0; i < cache->max - 1; i++) {
+					void *q = (char*)p + i * sz;
+					cache->pages[i] = q;
+					if (!mopts.malloc_freeunmap)
+						junk_free(d->malloc_junk, q, sz);
+				}
+				if (mopts.malloc_freeunmap)
+					mprotect(p, (cache->max - 1) * sz, PROT_NONE);
+				p = (char*)p + (cache->max - 1) * sz;
+				/* zero fill not needed */
+				return p;
+			}
+		}
+
+	}
 	_MALLOC_LEAVE(d);
 	p = MMAP(sz, d->mmap_flag);
 	_MALLOC_ENTER(d);
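Two behavioral points in the rewritten pair above are easy to miss. First, both the eviction victim in unmap() and the region handed back by map() are chosen with getrbyte(d), so cached pages are recycled in random order rather than rotor order. Second, when a single-page request (psz <= 1) misses an enabled but empty bucket, map() allocates a whole bucket's worth in one system call: assuming the common 4 KiB MALLOC_PAGESIZE and a bucket capacity of 64 (both configuration-dependent), MMAP(cache->max * sz, ...) maps 256 KiB at once, parks the first 63 pages in the bucket (cache->length = cache->max - 1), and returns the last one, saving 63 later mmap calls while the cache warms up.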
@@ -896,6 +859,7 @@ alloc_chunk_info(struct dir_info *d, int bits)
 		size += count * sizeof(u_short);
 	size = _ALIGN(size);
 
+	/* Don't use cache here, we don't want user uaf touch this */
 	q = MMAP(MALLOC_PAGESIZE, d->mmap_flag);
 	if (q == MAP_FAILED)
 		return NULL;
@@ -1239,7 +1203,7 @@ malloc_recurse(struct dir_info *d)
 void
 _malloc_init(int from_rthreads)
 {
-	u_int i, nmutexes;
+	u_int i, j, nmutexes;
 	struct dir_info *d;
 
 	_MALLOC_LOCK(1);
@@ -1260,11 +1224,13 @@ _malloc_init(int from_rthreads)
 		if (i == 0) {
 			omalloc_poolinit(&d, MAP_CONCEAL);
 			d->malloc_junk = 2;
-			d->malloc_cache = 0;
+			for (j = 0; j < MAX_CACHEABLE_SIZE; j++)
+				d->cache[j].max = 0;
 		} else {
 			omalloc_poolinit(&d, 0);
 			d->malloc_junk = mopts.def_malloc_junk;
-			d->malloc_cache = mopts.def_malloc_cache;
+			for (j = 0; j < MAX_CACHEABLE_SIZE; j++)
+				d->cache[j].max = mopts.def_maxcache >> (j / 8);
 		}
 		d->mutex = i;
 		mopts.malloc_pool[i] = d;
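The initialization above sets the capacity schedule cache[j].max = def_maxcache >> (j / 8): capacity halves for every eight size classes. Assuming def_maxcache keeps its default of 64 (MALLOC_DEFAULT_CACHE, defined elsewhere in malloc.c), the schedule works out to:

	region size		bucket capacity
	1 .. 8 pages		64
	9 .. 16 pages		32
	17 .. 24 pages		16
	25 .. 32 pages		8

so frequently recycled small regions get deep buckets while large ones stay on a short leash. Pool 0, mapped with MAP_CONCEAL, sets every max to 0 and therefore never caches, matching the old malloc_cache = 0 behavior.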
@@ -1591,7 +1557,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
 		size_t rnewsz = PAGEROUND(gnewsz);
 
 		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
-		    roldsz - rnewsz < pool->malloc_cache * MALLOC_PAGESIZE &&
+		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
 		    !mopts.malloc_guard) {
 
 			ret = p;
@@ -2227,16 +2193,17 @@ dump_free_chunk_info(int fd, struct dir_info *d)
 static void
 dump_free_page_info(int fd, struct dir_info *d)
 {
-	int i;
+	struct cache *cache;
+	size_t i, total = 0;
 
-	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
-	for (i = 0; i < d->malloc_cache; i++) {
-		if (d->free_regions[i].p != NULL) {
-			dprintf(fd, "%2d) ", i);
-			dprintf(fd, "free at %p: %zu\n",
-			    d->free_regions[i].p, d->free_regions[i].size);
-		}
+	dprintf(fd, "Cached:\n");
+	for (i = 0; i < MAX_CACHEABLE_SIZE; i++) {
+		cache = &d->cache[i];
+		if (cache->length != 0)
+			dprintf(fd, "%zu(%u): %u = %zu\n", i + 1, cache->max, cache->length, cache->length * (i + 1));
+		total += cache->length * (i + 1);
 	}
+	dprintf(fd, "Free pages cached: %zu\n", total);
 }
 
 static void
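The reworked dump prints one line per non-empty bucket in the form pages(capacity): entries = cached pages, followed by the page total. A hypothetical dump with a few cached regions (illustrative numbers only) would read:

	Cached:
	1(64): 13 = 13
	2(64): 4 = 8
	5(64): 1 = 5
	Free pages cached: 26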
@@ -2247,8 +2214,7 @@ malloc_dump1(int fd, int poolno, struct dir_info *d)
 	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
 	if (d == NULL)
 		return;
-	dprintf(fd, "J=%d cache=%u Fl=%x\n",
-	    d->malloc_junk, d->malloc_cache, d->mmap_flag);
+	dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
 	dprintf(fd, "Region slots free %zu/%zu\n",
 	    d->regions_free, d->regions_total);
 	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
@@ -2340,7 +2306,7 @@ malloc_exit(void)
 	    mopts.internal_funcs, mopts.malloc_freecheck,
 	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
 	    mopts.malloc_realloc, mopts.malloc_xmalloc,
-	    mopts.chunk_canaries, mopts.def_malloc_cache,
+	    mopts.chunk_canaries, mopts.def_maxcache,
 	    mopts.malloc_guard);
 
 	for (i = 0; i < mopts.malloc_mutexes; i++)