summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorotto <>2020-09-06 06:41:03 +0000
committerotto <>2020-09-06 06:41:03 +0000
commit385a0acb8b98c52710c935ff179ce287cc60d7ae (patch)
tree5ce667fce922963fd1b9912e3f2e6db8ddb2574c
parent00da28bc3a07739175ba381dd144857e78835eea (diff)
downloadopenbsd-385a0acb8b98c52710c935ff179ce287cc60d7ae.tar.gz
openbsd-385a0acb8b98c52710c935ff179ce287cc60d7ae.tar.bz2
openbsd-385a0acb8b98c52710c935ff179ce287cc60d7ae.zip
For page-sized and larger allocations do not put the pages we're
shaving off into the cache but unmap them. Pages in the cache get re-used and then a future grow of the first allocation will be hampered. Also make realloc a no-op for small shrinkage. ok deraadt@
-rw-r--r--src/lib/libc/stdlib/malloc.c39
1 files changed, 18 insertions, 21 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 7d49438b7b..1755f896f0 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: malloc.c,v 1.262 2019/06/28 13:32:42 deraadt Exp $ */ 1/* $OpenBSD: malloc.c,v 1.263 2020/09/06 06:41:03 otto Exp $ */
2/* 2/*
3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -749,7 +749,7 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
749} 749}
750 750
751static void * 751static void *
752map(struct dir_info *d, void *hint, size_t sz, int zero_fill) 752map(struct dir_info *d, size_t sz, int zero_fill)
753{ 753{
754 size_t psz = sz >> MALLOC_PAGESHIFT; 754 size_t psz = sz >> MALLOC_PAGESHIFT;
755 struct region_info *r, *big = NULL; 755 struct region_info *r, *big = NULL;
@@ -762,7 +762,7 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
762 if (sz != PAGEROUND(sz)) 762 if (sz != PAGEROUND(sz))
763 wrterror(d, "map round"); 763 wrterror(d, "map round");
764 764
765 if (hint == NULL && psz > d->free_regions_size) { 765 if (psz > d->free_regions_size) {
766 _MALLOC_LEAVE(d); 766 _MALLOC_LEAVE(d);
767 p = MMAP(sz, d->mmap_flag); 767 p = MMAP(sz, d->mmap_flag);
768 _MALLOC_ENTER(d); 768 _MALLOC_ENTER(d);
@@ -774,8 +774,6 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
774 for (i = 0; i < d->malloc_cache; i++) { 774 for (i = 0; i < d->malloc_cache; i++) {
775 r = &d->free_regions[(i + d->rotor) & (d->malloc_cache - 1)]; 775 r = &d->free_regions[(i + d->rotor) & (d->malloc_cache - 1)];
776 if (r->p != NULL) { 776 if (r->p != NULL) {
777 if (hint != NULL && r->p != hint)
778 continue;
779 if (r->size == psz) { 777 if (r->size == psz) {
780 p = r->p; 778 p = r->p;
781 r->p = NULL; 779 r->p = NULL;
@@ -807,8 +805,6 @@ map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
807 memset(p, SOME_FREEJUNK, sz); 805 memset(p, SOME_FREEJUNK, sz);
808 return p; 806 return p;
809 } 807 }
810 if (hint != NULL)
811 return MAP_FAILED;
812 if (d->free_regions_size > d->malloc_cache) 808 if (d->free_regions_size > d->malloc_cache)
813 wrterror(d, "malloc cache"); 809 wrterror(d, "malloc cache");
814 _MALLOC_LEAVE(d); 810 _MALLOC_LEAVE(d);
@@ -892,7 +888,7 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
892 void *pp; 888 void *pp;
893 889
894 /* Allocate a new bucket */ 890 /* Allocate a new bucket */
895 pp = map(d, NULL, MALLOC_PAGESIZE, 0); 891 pp = map(d, MALLOC_PAGESIZE, 0);
896 if (pp == MAP_FAILED) 892 if (pp == MAP_FAILED)
897 return NULL; 893 return NULL;
898 894
@@ -1136,7 +1132,7 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1136 } 1132 }
1137 sz += mopts.malloc_guard; 1133 sz += mopts.malloc_guard;
1138 psz = PAGEROUND(sz); 1134 psz = PAGEROUND(sz);
1139 p = map(pool, NULL, psz, zero_fill); 1135 p = map(pool, psz, zero_fill);
1140 if (p == MAP_FAILED) { 1136 if (p == MAP_FAILED) {
1141 errno = ENOMEM; 1137 errno = ENOMEM;
1142 return NULL; 1138 return NULL;
@@ -1576,6 +1572,14 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1576 size_t roldsz = PAGEROUND(goldsz); 1572 size_t roldsz = PAGEROUND(goldsz);
1577 size_t rnewsz = PAGEROUND(gnewsz); 1573 size_t rnewsz = PAGEROUND(gnewsz);
1578 1574
1575 if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1576 roldsz - rnewsz < pool->malloc_cache * MALLOC_PAGESIZE &&
1577 !mopts.malloc_guard) {
1578
1579 ret = p;
1580 goto done;
1581 }
1582
1579 if (rnewsz > roldsz) { 1583 if (rnewsz > roldsz) {
1580 /* try to extend existing region */ 1584 /* try to extend existing region */
1581 if (!mopts.malloc_guard) { 1585 if (!mopts.malloc_guard) {
@@ -1583,10 +1587,7 @@ orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1583 size_t needed = rnewsz - roldsz; 1587 size_t needed = rnewsz - roldsz;
1584 1588
1585 STATS_INC(pool->cheap_realloc_tries); 1589 STATS_INC(pool->cheap_realloc_tries);
1586 q = map(pool, hint, needed, 0); 1590 //zapcacheregion(pool, hint, needed);
1587 if (q == hint)
1588 goto gotit;
1589 zapcacheregion(pool, hint, needed);
1590 q = MQUERY(hint, needed, pool->mmap_flag); 1591 q = MQUERY(hint, needed, pool->mmap_flag);
1591 if (q == hint) 1592 if (q == hint)
1592 q = MMAPA(hint, needed, pool->mmap_flag); 1593 q = MMAPA(hint, needed, pool->mmap_flag);
@@ -1618,17 +1619,13 @@ gotit:
1618 } else if (rnewsz < roldsz) { 1619 } else if (rnewsz < roldsz) {
1619 /* shrink number of pages */ 1620 /* shrink number of pages */
1620 if (mopts.malloc_guard) { 1621 if (mopts.malloc_guard) {
1621 if (mprotect((char *)r->p + roldsz -
1622 mopts.malloc_guard, mopts.malloc_guard,
1623 PROT_READ | PROT_WRITE))
1624 wrterror(pool, "mprotect");
1625 if (mprotect((char *)r->p + rnewsz - 1622 if (mprotect((char *)r->p + rnewsz -
1626 mopts.malloc_guard, mopts.malloc_guard, 1623 mopts.malloc_guard, mopts.malloc_guard,
1627 PROT_NONE)) 1624 PROT_NONE))
1628 wrterror(pool, "mprotect"); 1625 wrterror(pool, "mprotect");
1629 } 1626 }
1630 unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0, 1627 if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1631 pool->malloc_junk); 1628 wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
1632 r->size = gnewsz; 1629 r->size = gnewsz;
1633 if (MALLOC_MOVE_COND(gnewsz)) { 1630 if (MALLOC_MOVE_COND(gnewsz)) {
1634 void *pp = MALLOC_MOVE(r->p, gnewsz); 1631 void *pp = MALLOC_MOVE(r->p, gnewsz);
@@ -1800,7 +1797,7 @@ orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1800 info->bits[info->offset + chunknum], 1797 info->bits[info->offset + chunknum],
1801 oldsize); 1798 oldsize);
1802 } 1799 }
1803 } else if (oldsize != sz - mopts.malloc_guard) 1800 } else if (oldsize < (sz - mopts.malloc_guard) / 2)
1804 wrterror(pool, "recorded old size %zu != %zu", 1801 wrterror(pool, "recorded old size %zu != %zu",
1805 sz - mopts.malloc_guard, oldsize); 1802 sz - mopts.malloc_guard, oldsize);
1806 1803
@@ -1937,7 +1934,7 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1937 if (alignment > SIZE_MAX - sz) 1934 if (alignment > SIZE_MAX - sz)
1938 return MAP_FAILED; 1935 return MAP_FAILED;
1939 1936
1940 p = map(d, NULL, sz + alignment, zero_fill); 1937 p = map(d, sz + alignment, zero_fill);
1941 if (p == MAP_FAILED) 1938 if (p == MAP_FAILED)
1942 return MAP_FAILED; 1939 return MAP_FAILED;
1943 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1940 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));