author	tedu <>	2014-08-18 14:34:58 +0000
committer	tedu <>	2014-08-18 14:34:58 +0000
commit	a904076a7b693c8512f298bacec36b1e7e3ff538 (patch)
tree	77af9b4c96ac96aed4cb4137da8f8db2e3eb3cee
parent	cd072a630de628b8b117efa93c814542270d5ebe (diff)
download	openbsd-a904076a7b693c8512f298bacec36b1e7e3ff538.tar.gz
	openbsd-a904076a7b693c8512f298bacec36b1e7e3ff538.tar.bz2
	openbsd-a904076a7b693c8512f298bacec36b1e7e3ff538.zip
a small tweak to improve malloc in multithreaded programs. we don't need
to hold the malloc lock across mmap syscalls in all cases. dropping it allows another thread to access the existing chunk cache if necessary. could be improved to be a bit more aggressive, but i've been testing this simple diff for some time now with good results.
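A minimal standalone sketch of the lock-drop pattern this commit introduces, assuming a pthread mutex stands in for _MALLOC_LOCK()/_MALLOC_UNLOCK() and a plain mmap() call stands in for the MMAP() macro; the names lock and map_region are hypothetical, and the real allocator additionally tracks malloc_active and only drops the lock when __isthreaded is set:

	#include <sys/mman.h>
	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Called with the lock held; returns with the lock held. */
	static void *
	map_region(size_t sz)
	{
		void *p;

		/*
		 * The mmap syscall itself does not touch allocator state,
		 * so the lock can be released around it.  Another thread
		 * may then enter the allocator and satisfy its request
		 * from the cached free regions instead of blocking.
		 */
		pthread_mutex_unlock(&lock);
		p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		pthread_mutex_lock(&lock);

		return p;	/* MAP_FAILED on error, as with mmap itself */
	}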
-rw-r--r--	src/lib/libc/stdlib/malloc.c	28
1 file changed, 21 insertions, 7 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index a800ea17be..165ad70031 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.170 2014/07/09 19:11:00 tedu Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.171 2014/08/18 14:34:58 tedu Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -93,6 +93,15 @@
 #define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)
 
+#define KERNENTER() if (__isthreaded) do { \
+	malloc_active--; \
+	_MALLOC_UNLOCK(); \
+} while (0)
+#define KERNEXIT() if (__isthreaded) do { \
+	_MALLOC_LOCK(); \
+	malloc_active++; \
+} while (0)
+
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
@@ -312,7 +321,8 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 
 	if (psz > mopts.malloc_cache) {
-		if (munmap(p, sz))
+		i = munmap(p, sz);
+		if (i)
 			wrterror("munmap", p);
 		STATS_SUB(d->malloc_used, sz);
 		return;
@@ -396,7 +406,9 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		return MAP_FAILED;
 	}
 	if (psz > d->free_regions_size) {
+		KERNENTER();
 		p = MMAP(sz);
+		KERNEXIT();
 		if (p != MAP_FAILED)
 			STATS_ADD(d->malloc_used, sz);
 		/* zero fill not needed */
@@ -408,13 +420,13 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		if (r->p != NULL) {
 			if (r->size == psz) {
 				p = r->p;
+				r->p = NULL;
+				r->size = 0;
+				d->free_regions_size -= psz;
 				if (mopts.malloc_freeunmap)
 					mprotect(p, sz, PROT_READ | PROT_WRITE);
 				if (mopts.malloc_hint)
 					madvise(p, sz, MADV_NORMAL);
-				r->p = NULL;
-				r->size = 0;
-				d->free_regions_size -= psz;
 				if (zero_fill)
 					memset(p, 0, sz);
 				else if (mopts.malloc_junk == 2 &&
@@ -440,11 +452,13 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 			memset(p, SOME_FREEJUNK, sz);
 		return p;
 	}
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror("malloc cache", NULL);
+	KERNENTER();
 	p = MMAP(sz);
+	KERNEXIT();
 	if (p != MAP_FAILED)
 		STATS_ADD(d->malloc_used, sz);
-	if (d->free_regions_size > mopts.malloc_cache)
-		wrterror("malloc cache", NULL);
 	/* zero fill not needed */
 	return p;
 }