summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
author	tedu <>	2015-04-06 09:18:51 +0000
committer	tedu <>	2015-04-06 09:18:51 +0000
commit	624f175c4f67570150d01364c1e08bdbeeb38eb9 (patch)
tree	1e82d87580934cbf818e432392d9a0e76d21d9ea /src/lib
parent	3964b43b86a41724b5c18a4d8e594c5274eb25ee (diff)
download	openbsd-624f175c4f67570150d01364c1e08bdbeeb38eb9.tar.gz
	openbsd-624f175c4f67570150d01364c1e08bdbeeb38eb9.tar.bz2
	openbsd-624f175c4f67570150d01364c1e08bdbeeb38eb9.zip
Improve realloc: when expanding a region, actually use the free page cache
instead of simply zapping it. This can save many syscalls in a program that
repeatedly grows and shrinks a buffer, as observed in the wild.
Diffstat (limited to 'src/lib')
-rw-r--r--	src/lib/libc/stdlib/malloc.c	23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 69ae877aba..5e5dafbd17 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
1/* $OpenBSD: malloc.c,v 1.173 2015/01/16 16:48:51 deraadt Exp $ */ 1/* $OpenBSD: malloc.c,v 1.174 2015/04/06 09:18:51 tedu Exp $ */
2/* 2/*
3 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net> 3 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -391,7 +391,7 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
391} 391}
392 392
393static void * 393static void *
394map(struct dir_info *d, size_t sz, int zero_fill) 394map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
395{ 395{
396 size_t psz = sz >> MALLOC_PAGESHIFT; 396 size_t psz = sz >> MALLOC_PAGESHIFT;
397 struct region_info *r, *big = NULL; 397 struct region_info *r, *big = NULL;
@@ -405,7 +405,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
405 wrterror("map round", NULL); 405 wrterror("map round", NULL);
406 return MAP_FAILED; 406 return MAP_FAILED;
407 } 407 }
408 if (psz > d->free_regions_size) { 408 if (!hint && psz > d->free_regions_size) {
409 _MALLOC_LEAVE(); 409 _MALLOC_LEAVE();
410 p = MMAP(sz); 410 p = MMAP(sz);
411 _MALLOC_ENTER(); 411 _MALLOC_ENTER();
@@ -418,6 +418,8 @@ map(struct dir_info *d, size_t sz, int zero_fill)
418 for (i = 0; i < mopts.malloc_cache; i++) { 418 for (i = 0; i < mopts.malloc_cache; i++) {
419 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 419 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
420 if (r->p != NULL) { 420 if (r->p != NULL) {
421 if (hint && r->p != hint)
422 continue;
421 if (r->size == psz) { 423 if (r->size == psz) {
422 p = r->p; 424 p = r->p;
423 r->p = NULL; 425 r->p = NULL;
@@ -439,7 +441,8 @@ map(struct dir_info *d, size_t sz, int zero_fill)
439 } 441 }
440 if (big != NULL) { 442 if (big != NULL) {
441 r = big; 443 r = big;
442 p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT); 444 p = r->p;
445 r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
443 if (mopts.malloc_freeunmap) 446 if (mopts.malloc_freeunmap)
444 mprotect(p, sz, PROT_READ | PROT_WRITE); 447 mprotect(p, sz, PROT_READ | PROT_WRITE);
445 if (mopts.malloc_hint) 448 if (mopts.malloc_hint)
@@ -452,6 +455,8 @@ map(struct dir_info *d, size_t sz, int zero_fill)
452 memset(p, SOME_FREEJUNK, sz); 455 memset(p, SOME_FREEJUNK, sz);
453 return p; 456 return p;
454 } 457 }
458 if (hint)
459 return MAP_FAILED;
455 if (d->free_regions_size > mopts.malloc_cache) 460 if (d->free_regions_size > mopts.malloc_cache)
456 wrterror("malloc cache", NULL); 461 wrterror("malloc cache", NULL);
457 _MALLOC_LEAVE(); 462 _MALLOC_LEAVE();
@@ -842,7 +847,7 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
842 int i, k; 847 int i, k;
843 848
844 /* Allocate a new bucket */ 849 /* Allocate a new bucket */
845 pp = map(d, MALLOC_PAGESIZE, 0); 850 pp = map(d, NULL, MALLOC_PAGESIZE, 0);
846 if (pp == MAP_FAILED) 851 if (pp == MAP_FAILED)
847 return NULL; 852 return NULL;
848 853
@@ -1071,7 +1076,7 @@ omalloc(size_t sz, int zero_fill, void *f)
1071 } 1076 }
1072 sz += mopts.malloc_guard; 1077 sz += mopts.malloc_guard;
1073 psz = PAGEROUND(sz); 1078 psz = PAGEROUND(sz);
1074 p = map(pool, psz, zero_fill); 1079 p = map(pool, NULL, psz, zero_fill);
1075 if (p == MAP_FAILED) { 1080 if (p == MAP_FAILED) {
1076 errno = ENOMEM; 1081 errno = ENOMEM;
1077 return NULL; 1082 return NULL;
@@ -1330,6 +1335,9 @@ orealloc(void *p, size_t newsz, void *f)
1330 size_t needed = rnewsz - roldsz; 1335 size_t needed = rnewsz - roldsz;
1331 1336
1332 STATS_INC(pool->cheap_realloc_tries); 1337 STATS_INC(pool->cheap_realloc_tries);
1338 q = map(pool, hint, needed, 0);
1339 if (q == hint)
1340 goto gotit;
1333 zapcacheregion(pool, hint, needed); 1341 zapcacheregion(pool, hint, needed);
1334 q = MQUERY(hint, needed); 1342 q = MQUERY(hint, needed);
1335 if (q == hint) 1343 if (q == hint)
@@ -1337,6 +1345,7 @@ orealloc(void *p, size_t newsz, void *f)
1337 else 1345 else
1338 q = MAP_FAILED; 1346 q = MAP_FAILED;
1339 if (q == hint) { 1347 if (q == hint) {
1348gotit:
1340 STATS_ADD(pool->malloc_used, needed); 1349 STATS_ADD(pool->malloc_used, needed);
1341 if (mopts.malloc_junk == 2) 1350 if (mopts.malloc_junk == 2)
1342 memset(q, SOME_JUNK, needed); 1351 memset(q, SOME_JUNK, needed);
@@ -1491,7 +1500,7 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1491 if (alignment > SIZE_MAX - sz) 1500 if (alignment > SIZE_MAX - sz)
1492 return MAP_FAILED; 1501 return MAP_FAILED;
1493 1502
1494 p = map(d, sz + alignment, zero_fill); 1503 p = map(d, NULL, sz + alignment, zero_fill);
1495 if (p == MAP_FAILED) 1504 if (p == MAP_FAILED)
1496 return MAP_FAILED; 1505 return MAP_FAILED;
1497 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1506 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));