diff options
author | tedu <> | 2016-06-27 15:33:40 +0000 |
---|---|---|
committer | tedu <> | 2016-06-27 15:33:40 +0000 |
commit | 97835ed91473716ba1fc9d9c313df25fabd42fd8 (patch) | |
tree | fc5dabeefe9459075ac91c320febe558e80f100a /src/lib | |
parent | 6efa0a2980814718dcbd6d8e266811cc4ca8adc3 (diff) | |
download | openbsd-97835ed91473716ba1fc9d9c313df25fabd42fd8.tar.gz openbsd-97835ed91473716ba1fc9d9c313df25fabd42fd8.tar.bz2 openbsd-97835ed91473716ba1fc9d9c313df25fabd42fd8.zip |
Defer munmap until after unlocking malloc. munmap can (unfortunately) be an
expensive syscall, and we don't want to tie up other threads. There's no
need to hold the lock for it, so defer the call until afterwards.
from Michael McConville
ok deraadt
Diffstat (limited to 'src/lib')
-rw-r--r-- | src/lib/libc/stdlib/malloc.c | 55 |
1 files changed, 32 insertions, 23 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c index b9f692ebb7..97092a2e0d 100644 --- a/src/lib/libc/stdlib/malloc.c +++ b/src/lib/libc/stdlib/malloc.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* $OpenBSD: malloc.c,v 1.188 2016/04/12 18:14:02 otto Exp $ */ | 1 | /* $OpenBSD: malloc.c,v 1.189 2016/06/27 15:33:40 tedu Exp $ */ |
2 | /* | 2 | /* |
3 | * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net> | 3 | * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net> |
4 | * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> | 4 | * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> |
@@ -122,6 +122,8 @@ struct dir_info { | |||
122 | char *func; /* current function */ | 122 | char *func; /* current function */ |
123 | u_char rbytes[32]; /* random bytes */ | 123 | u_char rbytes[32]; /* random bytes */ |
124 | u_short chunk_start; | 124 | u_short chunk_start; |
125 | void *unmap_me; | ||
126 | size_t unmap_me_sz; | ||
125 | #ifdef MALLOC_STATS | 127 | #ifdef MALLOC_STATS |
126 | size_t inserts; | 128 | size_t inserts; |
127 | size_t insert_collisions; | 129 | size_t insert_collisions; |
@@ -218,10 +220,16 @@ static void malloc_exit(void); | |||
218 | static inline void | 220 | static inline void |
219 | _MALLOC_LEAVE(struct dir_info *d) | 221 | _MALLOC_LEAVE(struct dir_info *d) |
220 | { | 222 | { |
223 | void *unmap_me = d->unmap_me; | ||
224 | size_t unmap_me_sz = d->unmap_me_sz; | ||
225 | |||
221 | if (__isthreaded) { | 226 | if (__isthreaded) { |
222 | d->active--; | 227 | d->active--; |
223 | _MALLOC_UNLOCK(); | 228 | _MALLOC_UNLOCK(); |
224 | } | 229 | } |
230 | |||
231 | if (unmap_me != NULL) | ||
232 | munmap(unmap_me, unmap_me_sz); | ||
225 | } | 233 | } |
226 | 234 | ||
227 | static inline void | 235 | static inline void |
@@ -231,6 +239,8 @@ _MALLOC_ENTER(struct dir_info *d) | |||
231 | _MALLOC_LOCK(); | 239 | _MALLOC_LOCK(); |
232 | d->active++; | 240 | d->active++; |
233 | } | 241 | } |
242 | d->unmap_me = NULL; | ||
243 | d->unmap_me_sz = 0; | ||
234 | } | 244 | } |
235 | 245 | ||
236 | static inline size_t | 246 | static inline size_t |
@@ -295,6 +305,16 @@ wrterror(struct dir_info *d, char *msg, void *p) | |||
295 | abort(); | 305 | abort(); |
296 | } | 306 | } |
297 | 307 | ||
308 | static inline void | ||
309 | _MUNMAP(struct dir_info *d, void *p, size_t sz) | ||
310 | { | ||
311 | if (d->unmap_me == NULL) { | ||
312 | d->unmap_me = p; | ||
313 | d->unmap_me_sz = sz; | ||
314 | } else if (munmap(p, sz) == -1) | ||
315 | wrterror(d, "munmap", p); | ||
316 | } | ||
317 | |||
298 | static void | 318 | static void |
299 | rbytes_init(struct dir_info *d) | 319 | rbytes_init(struct dir_info *d) |
300 | { | 320 | { |
@@ -335,9 +355,7 @@ unmap(struct dir_info *d, void *p, size_t sz) | |||
335 | } | 355 | } |
336 | 356 | ||
337 | if (psz > mopts.malloc_cache) { | 357 | if (psz > mopts.malloc_cache) { |
338 | i = munmap(p, sz); | 358 | _MUNMAP(d, p, sz); |
339 | if (i) | ||
340 | wrterror(d, "munmap", p); | ||
341 | STATS_SUB(d->malloc_used, sz); | 359 | STATS_SUB(d->malloc_used, sz); |
342 | return; | 360 | return; |
343 | } | 361 | } |
@@ -350,8 +368,7 @@ unmap(struct dir_info *d, void *p, size_t sz) | |||
350 | r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; | 368 | r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; |
351 | if (r->p != NULL) { | 369 | if (r->p != NULL) { |
352 | rsz = r->size << MALLOC_PAGESHIFT; | 370 | rsz = r->size << MALLOC_PAGESHIFT; |
353 | if (munmap(r->p, rsz)) | 371 | _MUNMAP(d, r->p, rsz); |
354 | wrterror(d, "munmap", r->p); | ||
355 | r->p = NULL; | 372 | r->p = NULL; |
356 | if (tounmap > r->size) | 373 | if (tounmap > r->size) |
357 | tounmap -= r->size; | 374 | tounmap -= r->size; |
@@ -394,8 +411,7 @@ zapcacheregion(struct dir_info *d, void *p, size_t len) | |||
394 | r = &d->free_regions[i]; | 411 | r = &d->free_regions[i]; |
395 | if (r->p >= p && r->p <= (void *)((char *)p + len)) { | 412 | if (r->p >= p && r->p <= (void *)((char *)p + len)) { |
396 | rsz = r->size << MALLOC_PAGESHIFT; | 413 | rsz = r->size << MALLOC_PAGESHIFT; |
397 | if (munmap(r->p, rsz)) | 414 | _MUNMAP(d, r->p, rsz); |
398 | wrterror(d, "munmap", r->p); | ||
399 | r->p = NULL; | 415 | r->p = NULL; |
400 | d->free_regions_size -= r->size; | 416 | d->free_regions_size -= r->size; |
401 | r->size = 0; | 417 | r->size = 0; |
@@ -727,11 +743,9 @@ omalloc_grow(struct dir_info *d) | |||
727 | } | 743 | } |
728 | } | 744 | } |
729 | /* avoid pages containing meta info to end up in cache */ | 745 | /* avoid pages containing meta info to end up in cache */ |
730 | if (munmap(d->r, d->regions_total * sizeof(struct region_info))) | 746 | _MUNMAP(d, d->r, d->regions_total * sizeof(struct region_info)); |
731 | wrterror(d, "munmap", d->r); | 747 | STATS_SUB(d->malloc_used, |
732 | else | 748 | d->regions_total * sizeof(struct region_info)); |
733 | STATS_SUB(d->malloc_used, | ||
734 | d->regions_total * sizeof(struct region_info)); | ||
735 | d->regions_free = d->regions_free + d->regions_total; | 749 | d->regions_free = d->regions_free + d->regions_total; |
736 | d->regions_total = newtotal; | 750 | d->regions_total = newtotal; |
737 | d->r = p; | 751 | d->r = p; |
@@ -1428,10 +1442,8 @@ gotit: | |||
1428 | STATS_SETF(r, f); | 1442 | STATS_SETF(r, f); |
1429 | STATS_INC(pool->cheap_reallocs); | 1443 | STATS_INC(pool->cheap_reallocs); |
1430 | return p; | 1444 | return p; |
1431 | } else if (q != MAP_FAILED) { | 1445 | } else if (q != MAP_FAILED) |
1432 | if (munmap(q, needed)) | 1446 | _MUNMAP(pool, q, needed); |
1433 | wrterror(pool, "munmap", q); | ||
1434 | } | ||
1435 | } | 1447 | } |
1436 | } else if (rnewsz < roldsz) { | 1448 | } else if (rnewsz < roldsz) { |
1437 | if (mopts.malloc_guard) { | 1449 | if (mopts.malloc_guard) { |
@@ -1600,12 +1612,9 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) | |||
1600 | if (p == MAP_FAILED) | 1612 | if (p == MAP_FAILED) |
1601 | return MAP_FAILED; | 1613 | return MAP_FAILED; |
1602 | q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); | 1614 | q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); |
1603 | if (q != p) { | 1615 | if (q != p) |
1604 | if (munmap(p, q - p)) | 1616 | _MUNMAP(d, p, q - p); |
1605 | wrterror(d, "munmap", p); | 1617 | _MUNMAP(d, q + sz, alignment - (q - p)); |
1606 | } | ||
1607 | if (munmap(q + sz, alignment - (q - p))) | ||
1608 | wrterror(d, "munmap", q + sz); | ||
1609 | STATS_SUB(d->malloc_used, alignment); | 1618 | STATS_SUB(d->malloc_used, alignment); |
1610 | 1619 | ||
1611 | return q; | 1620 | return q; |