| author    | tb <> | 2016-06-28 06:40:11 +0000 |
|-----------|-------|---------------------------|
| committer | tb <> | 2016-06-28 06:40:11 +0000 |
| commit    | 9d2bb8c7482e2b12e62a4f44bb58bd1209fd904b | |
| tree      | a34c4eabe259a4b68cafade8b625de70320660b1 /src/lib/libc | |
| parent    | eb96992beb353c72a78e4497e44accb61c54c733 | |
Back out previous; otto saw a potential race that could lead to a
double unmap, and I experienced a much more unstable firefox.
Discussed with otto on icb.
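The mechanism being backed out is easiest to see in isolation: `_MUNMAP()` parked a region in the pool instead of unmapping it under the lock, and `_MALLOC_LEAVE()` issued the real `munmap()` only after `_MALLOC_UNLOCK()`, keeping the syscall out of the critical section. Below is a condensed sketch of that shape, not the committed code: the pthread mutex and the `pool_enter`/`defer_munmap`/`pool_leave` names are stand-ins for `_MALLOC_LOCK`/`_MALLOC_UNLOCK`, `_MALLOC_ENTER`/`_MALLOC_LEAVE`, and `_MUNMAP`, and the struct is pared down from `dir_info`. The commit message does not spell out the exact interleaving otto saw; the comments only mark where the unlocked window sits.

```c
#include <stddef.h>
#include <pthread.h>
#include <sys/mman.h>

/* Hypothetical stand-ins: a single global lock and a pared-down pool. */
static pthread_mutex_t malloc_lock = PTHREAD_MUTEX_INITIALIZER;

struct pool {
	void	*unmap_me;	/* region parked for a deferred munmap */
	size_t	 unmap_me_sz;
};

/* On the way in (cf. _MALLOC_ENTER): take the lock, clear the slot. */
static void
pool_enter(struct pool *d)
{
	pthread_mutex_lock(&malloc_lock);
	d->unmap_me = NULL;
	d->unmap_me_sz = 0;
}

/*
 * With the lock held (cf. _MUNMAP): park the region instead of
 * unmapping it, so the syscall can happen after the unlock.
 */
static void
defer_munmap(struct pool *d, void *p, size_t sz)
{
	if (d->unmap_me == NULL) {
		d->unmap_me = p;
		d->unmap_me_sz = sz;
	} else
		munmap(p, sz);	/* slot already taken: unmap immediately */
}

/* On the way out (cf. _MALLOC_LEAVE): drop the lock first, then unmap. */
static void
pool_leave(struct pool *d)
{
	void	*p = d->unmap_me;
	size_t	 sz = d->unmap_me_sz;

	pthread_mutex_unlock(&malloc_lock);
	/*
	 * Unlocked window: the parked pages are still mapped, but the
	 * allocator's lock no longer protects them.  If the pool's
	 * bookkeeping ever lets two paths reach the same parked region,
	 * both call munmap() on it -- the double unmap the commit
	 * message warns about.
	 */
	if (p != NULL)
		munmap(p, sz);
}
```

The revert trades that shorter critical section for simpler invariants: after this commit, every `munmap()` runs with the lock still held, and a failing `munmap()` is fatal via `wrterror()`.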
Diffstat (limited to 'src/lib/libc')

    -rw-r--r--  src/lib/libc/stdlib/malloc.c | 55

1 file changed, 23 insertions(+), 32 deletions(-)
```diff
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 97092a2e0d..a0b2a7df57 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.189 2016/06/27 15:33:40 tedu Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.190 2016/06/28 06:40:11 tb Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -122,8 +122,6 @@ struct dir_info {
 	char *func;			/* current function */
 	u_char rbytes[32];		/* random bytes */
 	u_short chunk_start;
-	void *unmap_me;
-	size_t unmap_me_sz;
 #ifdef MALLOC_STATS
 	size_t inserts;
 	size_t insert_collisions;
@@ -220,16 +218,10 @@ static void malloc_exit(void);
 static inline void
 _MALLOC_LEAVE(struct dir_info *d)
 {
-	void *unmap_me = d->unmap_me;
-	size_t unmap_me_sz = d->unmap_me_sz;
-
 	if (__isthreaded) {
 		d->active--;
 		_MALLOC_UNLOCK();
 	}
-
-	if (unmap_me != NULL)
-		munmap(unmap_me, unmap_me_sz);
 }
 
 static inline void
@@ -239,8 +231,6 @@ _MALLOC_ENTER(struct dir_info *d)
 		_MALLOC_LOCK();
 		d->active++;
 	}
-	d->unmap_me = NULL;
-	d->unmap_me_sz = 0;
 }
 
 static inline size_t
@@ -305,16 +295,6 @@ wrterror(struct dir_info *d, char *msg, void *p)
 	abort();
 }
 
-static inline void
-_MUNMAP(struct dir_info *d, void *p, size_t sz)
-{
-	if (d->unmap_me == NULL) {
-		d->unmap_me = p;
-		d->unmap_me_sz = sz;
-	} else if (munmap(p, sz) == -1)
-		wrterror(d, "munmap", p);
-}
-
 static void
 rbytes_init(struct dir_info *d)
 {
@@ -355,7 +335,9 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 
 	if (psz > mopts.malloc_cache) {
-		_MUNMAP(d, p, sz);
+		i = munmap(p, sz);
+		if (i)
+			wrterror(d, "munmap", p);
 		STATS_SUB(d->malloc_used, sz);
 		return;
 	}
@@ -368,7 +350,8 @@ unmap(struct dir_info *d, void *p, size_t sz)
 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
 			rsz = r->size << MALLOC_PAGESHIFT;
-			_MUNMAP(d, r->p, rsz);
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap", r->p);
 			r->p = NULL;
 			if (tounmap > r->size)
 				tounmap -= r->size;
@@ -411,7 +394,8 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
 		r = &d->free_regions[i];
 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
 			rsz = r->size << MALLOC_PAGESHIFT;
-			_MUNMAP(d, r->p, rsz);
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap", r->p);
 			r->p = NULL;
 			d->free_regions_size -= r->size;
 			r->size = 0;
@@ -743,9 +727,11 @@ omalloc_grow(struct dir_info *d)
 		}
 	}
 	/* avoid pages containing meta info to end up in cache */
-	_MUNMAP(d, d->r, d->regions_total * sizeof(struct region_info));
-	STATS_SUB(d->malloc_used,
-	    d->regions_total * sizeof(struct region_info));
+	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
+		wrterror(d, "munmap", d->r);
+	else
+		STATS_SUB(d->malloc_used,
+		    d->regions_total * sizeof(struct region_info));
 	d->regions_free = d->regions_free + d->regions_total;
 	d->regions_total = newtotal;
 	d->r = p;
@@ -1442,8 +1428,10 @@ gotit:
 					STATS_SETF(r, f);
 					STATS_INC(pool->cheap_reallocs);
 					return p;
-				} else if (q != MAP_FAILED)
-					_MUNMAP(pool, q, needed);
+				} else if (q != MAP_FAILED) {
+					if (munmap(q, needed))
+						wrterror(pool, "munmap", q);
+				}
 			}
 		} else if (rnewsz < roldsz) {
 			if (mopts.malloc_guard) {
@@ -1612,9 +1600,12 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
 	if (p == MAP_FAILED)
 		return MAP_FAILED;
 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
-	if (q != p)
-		_MUNMAP(d, p, q - p);
-	_MUNMAP(d, q + sz, alignment - (q - p));
+	if (q != p) {
+		if (munmap(p, q - p))
+			wrterror(d, "munmap", p);
+	}
+	if (munmap(q + sz, alignment - (q - p)))
+		wrterror(d, "munmap", q + sz);
 	STATS_SUB(d->malloc_used, alignment);
 
 	return q;
```
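The last hunk is easier to read knowing how `mapalign()` gets an aligned region in the first place: it over-maps by `alignment` bytes, rounds the base up to the boundary, and unmaps the slack on both sides, which is why there are two `munmap()` calls to convert. Here is a standalone sketch of just that arithmetic, assuming `alignment` is a nonzero power of two and a multiple of the page size (`map_aligned` is a made-up name; the committed code additionally handles zero fill, guard pages, and stats, and calls `wrterror()` when a `munmap()` fails):

```c
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Sketch of the trimming scheme in mapalign(): map sz + alignment
 * bytes, round the base up to the boundary, then unmap the slack
 * before and after the aligned region.  Error handling is reduced
 * to returning MAP_FAILED; the munmap() results are deliberately
 * ignored here, unlike in the committed code.
 */
static void *
map_aligned(size_t alignment, size_t sz)
{
	char *p, *q;

	p = mmap(NULL, sz + alignment, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return MAP_FAILED;

	/* Round the base up to the next multiple of alignment. */
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));

	if (q != p)
		munmap(p, q - p);		/* leading slack */
	munmap(q + sz, alignment - (q - p));	/* trailing slack */
	return q;
}
```

If `p` already lands on the boundary, the leading trim is skipped and the entire `alignment` bytes of slack sit at the tail, so the trailing `munmap()` always has something to release.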
