author		djm <>	2008-12-29 22:25:50 +0000
committer	djm <>	2008-12-29 22:25:50 +0000
commit		c2f9a0271169166b38060ef5e36ef203765e45dd (patch)
tree		98faf6e6f7899c0929ab660551964d3931213b49
parent		83483f908c4b6c5c812c6388c7929c6f10a8ef28 (diff)
extra paranoia for malloc(3):
Move all runtime options into a structure that is made read-only (via
mprotect) after initialisation to protect against attacks that overwrite
options to turn off malloc protections (e.g. use-after-free).

Allocate the main bookkeeping data (struct dir_info) using mmap(), thereby
giving it an unpredictable address. Place a PROT_NONE guard page on either
side to further frustrate attacks on it.

Add a new 'L' option that maps struct dir_info PROT_NONE except when in the
allocator code itself. Makes attacks on it basically impossible.

feedback tedu deraadt otto canacar
ok otto
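The first paragraph above describes a simple pattern: keep every runtime
option in one page-aligned, page-sized object and mprotect() it read-only
once option parsing is done, so a later memory-corruption bug can no longer
flip a flag and silently disable a protection. A minimal stand-alone sketch
of that pattern follows; it is not the code from this commit — the option
names, the helper names, and the hard-coded 4096-byte page size are
illustrative assumptions only.

#include <sys/mman.h>
#include <err.h>
#include <stdio.h>

struct ro_options {
	int abort_on_error;		/* similar in spirit to the 'A' flag */
	int junk_fill;			/* similar in spirit to the 'J' flag */
};

/*
 * Pad to one whole page and page-align it, so mprotect() covers exactly
 * this object and nothing else shares the protected page.
 */
static union {
	struct ro_options opts;
	unsigned char pad[4096];	/* assumes a 4 KiB page */
} ro __attribute__((aligned(4096)));
#define ropts	ro.opts

static void
options_init(void)
{
	ropts.abort_on_error = 1;	/* defaults, set while still writable */
	ropts.junk_fill = 1;
	/* From here on, any write to the options faults instead of
	 * silently turning a protection off. */
	if (mprotect(&ro, sizeof(ro), PROT_READ) == -1)
		err(1, "mprotect");
}

int
main(void)
{
	options_init();
	printf("junk_fill=%d\n", ropts.junk_fill);
	/* ropts.junk_fill = 0; */	/* would now deliver SIGSEGV */
	return 0;
}

The union padding is the same reason the diff below pads struct
malloc_readonly out to PAGE_SIZE before protecting it.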
-rw-r--r--	src/lib/libc/stdlib/malloc.3	 11
-rw-r--r--	src/lib/libc/stdlib/malloc.c	432
2 files changed, 278 insertions, 165 deletions
diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3
index edcd748ed9..2458834302 100644
--- a/src/lib/libc/stdlib/malloc.3
+++ b/src/lib/libc/stdlib/malloc.3
@@ -30,9 +30,9 @@
30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31.\" SUCH DAMAGE. 31.\" SUCH DAMAGE.
32.\" 32.\"
33.\" $OpenBSD: malloc.3,v 1.58 2008/11/26 12:06:54 pedro Exp $ 33.\" $OpenBSD: malloc.3,v 1.59 2008/12/29 22:25:50 djm Exp $
34.\" 34.\"
35.Dd $Mdocdate: November 26 2008 $ 35.Dd $Mdocdate: December 29 2008 $
36.Dt MALLOC 3 36.Dt MALLOC 3
37.Os 37.Os
38.Sh NAME 38.Sh NAME
@@ -249,6 +249,13 @@ Currently junk is bytes of 0xd0 when allocating; this is pronounced
 .Dq Duh .
 \&:-)
 Freed chunks are filled with 0xdf.
+.It Cm L
+.Dq Lock .
+Lock critical data structures using
+.Xr mprotect 2
+to protect against modification except by
+.Nm
+and related routines.
 .It Cm P
 .Dq Move allocations within a page.
 Allocations larger than half a page but smaller than a page
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index ee4bf9876a..e15a64ac72 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.111 2008/12/15 19:47:49 otto Exp $ */
+/* $OpenBSD: malloc.c,v 1.112 2008/12/29 22:25:50 djm Exp $ */
 /*
  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
@@ -88,6 +88,23 @@
 #define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
     MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
 
+/* Protect and unprotect g_pool structure as we enter/exit the allocator */
+#define DIR_INFO_RSZ	((sizeof(struct dir_info) + PAGE_MASK) & ~PAGE_MASK)
+#define PROTECT_G_POOL()	\
+	do { \
+		if (g_pool != NULL && mopts.malloc_poolprot) {	\
+			mprotect((void *)((uintptr_t)g_pool & ~PAGE_MASK), \
+			    DIR_INFO_RSZ, PROT_NONE);	\
+		}	\
+	} while (0)
+#define UNPROTECT_G_POOL()	\
+	do { \
+		if (g_pool != NULL && mopts.malloc_poolprot) {	\
+			mprotect((void *)((uintptr_t)g_pool & ~PAGE_MASK), \
+			    DIR_INFO_RSZ, PROT_READ | PROT_WRITE);	\
+		}	\
+	} while (0)
+
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
@@ -145,29 +162,41 @@ struct chunk_info {
 	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
 };
 
-static struct dir_info g_pool;
-static char *malloc_func;	/* current function */
+struct malloc_readonly {
+	struct dir_info *g_pool;	/* Main bookkeeping information */
+	int	malloc_abort;		/* abort() on error */
+	int	malloc_poolprot;	/* mprotect heap PROT_NONE? */
+	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
+	int	malloc_hint;		/* call madvice on free pages? */
+	int	malloc_junk;		/* junk fill? */
+	int	malloc_move;		/* move allocations to end of page? */
+	int	malloc_realloc;		/* always realloc? */
+	int	malloc_xmalloc;		/* xmalloc behaviour? */
+	int	malloc_zero;		/* zero fill? */
+	size_t	malloc_guard;		/* use guard pages after allocations? */
+	u_int	malloc_cache;		/* free pages we cache */
+#ifdef MALLOC_STATS
+	int	malloc_stats;		/* dump statistics at end */
+#endif
+	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
+};
+
+/* This object is mapped PROT_READ after initialisation to prevent tampering */
+static union {
+	struct malloc_readonly mopts;
+	u_char _pad[PAGE_SIZE];
+} malloc_readonly __attribute__((aligned(PAGE_SIZE)));
+#define mopts	malloc_readonly.mopts
+#define g_pool	mopts.g_pool
+
 char *malloc_options;		/* compile-time options */
 
-static int malloc_abort = 1;	/* abort() on error */
+static char *malloc_func;	/* current function */
 static int malloc_active;	/* status of malloc */
-static int malloc_freeprot;	/* mprotect free pages PROT_NONE? */
-static int malloc_hint;		/* call madvice on free pages? */
-static int malloc_junk;		/* junk fill? */
-static int malloc_move = 1;	/* move allocations to end of page? */
-static int malloc_realloc;	/* always realloc? */
-static int malloc_xmalloc;	/* xmalloc behaviour? */
-static int malloc_zero;		/* zero fill? */
-static size_t malloc_guard;	/* use guard pages after allocations? */
-
-static u_int malloc_cache = 64;	/* free pages we cache */
+
 static size_t malloc_guarded;	/* bytes used for guards */
 static size_t malloc_used;	/* bytes allocated */
 
-#ifdef MALLOC_STATS
-static int malloc_stats;	/* dump statistics at end */
-#endif
-
 static size_t rbytesused;	/* random bytes used */
 static u_char rbytes[512];	/* random bytes */
 static u_char getrbyte(void);
@@ -247,7 +276,7 @@ dump_free_page_info(int fd, struct dir_info *d)
 	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
 	    d->free_regions_size);
 	write(fd, buf, strlen(buf));
-	for (i = 0; i < malloc_cache; i++) {
+	for (i = 0; i < mopts.malloc_cache; i++) {
 		if (d->free_regions[i].p != NULL) {
 			snprintf(buf, sizeof(buf), "%2d) ", i);
 			write(fd, buf, strlen(buf));
@@ -266,6 +295,8 @@ malloc_dump1(int fd, struct dir_info *d)
 
 	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
 	write(fd, buf, strlen(buf));
+	if (d == NULL)
+		return;
 	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
@@ -313,7 +344,7 @@ malloc_dump1(int fd, struct dir_info *d)
 void
 malloc_dump(int fd)
 {
-	malloc_dump1(fd, &g_pool);
+	malloc_dump1(fd, g_pool);
 }
 
 static void
@@ -353,11 +384,11 @@ wrterror(char *p)
 	writev(STDERR_FILENO, iov, 5);
 
 #ifdef MALLOC_STATS
-	if (malloc_stats)
+	if (mopts.malloc_stats)
 		malloc_dump(STDERR_FILENO);
 #endif /* MALLOC_STATS */
 	//malloc_active--;
-	if (malloc_abort)
+	if (mopts.malloc_abort)
 		abort();
 }
 
@@ -381,19 +412,19 @@ unmap(struct dir_info *d, void *p, size_t sz)
 		return;
 	}
 
-	if (psz > malloc_cache) {
+	if (psz > mopts.malloc_cache) {
 		if (munmap(p, sz))
 			wrterror("munmap");
 		malloc_used -= sz;
 		return;
 	}
 	tounmap = 0;
-	rsz = malloc_cache - d->free_regions_size;
+	rsz = mopts.malloc_cache - d->free_regions_size;
 	if (psz > rsz)
 		tounmap = psz - rsz;
 	offset = getrbyte();
-	for (i = 0; tounmap > 0 && i < malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (malloc_cache - 1)];
+	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
 			rsz = r->size << MALLOC_PAGESHIFT;
 			if (munmap(r->p, rsz))
@@ -410,12 +441,12 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 	if (tounmap > 0)
 		wrterror("malloc cache underflow");
-	for (i = 0; i < malloc_cache; i++) {
+	for (i = 0; i < mopts.malloc_cache; i++) {
 		r = &d->free_regions[i];
 		if (r->p == NULL) {
-			if (malloc_hint)
+			if (mopts.malloc_hint)
 				madvise(p, sz, MADV_FREE);
-			if (malloc_freeprot)
+			if (mopts.malloc_freeprot)
 				mprotect(p, sz, PROT_NONE);
 			r->p = p;
 			r->size = psz;
@@ -423,9 +454,9 @@ unmap(struct dir_info *d, void *p, size_t sz)
 			break;
 		}
 	}
-	if (i == malloc_cache)
+	if (i == mopts.malloc_cache)
 		wrterror("malloc free slot lost");
-	if (d->free_regions_size > malloc_cache)
+	if (d->free_regions_size > mopts.malloc_cache)
 		wrterror("malloc cache overflow");
 }
 
@@ -436,7 +467,7 @@ zapcacheregion(struct dir_info *d, void *p)
 	struct region_info *r;
 	size_t rsz;
 
-	for (i = 0; i < malloc_cache; i++) {
+	for (i = 0; i < mopts.malloc_cache; i++) {
 		r = &d->free_regions[i];
 		if (r->p == p) {
 			rsz = r->size << MALLOC_PAGESHIFT;
@@ -458,6 +489,9 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	u_int i, offset;
 	void *p;
 
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror("internal struct corrupt");
 	if (sz != PAGEROUND(sz)) {
 		wrterror("map round");
 		return NULL;
@@ -470,21 +504,22 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		return p;
 	}
 	offset = getrbyte();
-	for (i = 0; i < malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (malloc_cache - 1)];
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
 			if (r->size == psz) {
 				p = r->p;
-				if (malloc_freeprot)
+				if (mopts.malloc_freeprot)
 					mprotect(p, sz, PROT_READ | PROT_WRITE);
-				if (malloc_hint)
+				if (mopts.malloc_hint)
 					madvise(p, sz, MADV_NORMAL);
 				r->p = NULL;
 				r->size = 0;
 				d->free_regions_size -= psz;
 				if (zero_fill)
 					memset(p, 0, sz);
-				else if (malloc_junk && malloc_freeprot)
+				else if (mopts.malloc_junk &&
+				    mopts.malloc_freeprot)
 					memset(p, SOME_FREEJUNK, sz);
 				return p;
 			} else if (r->size > psz)
@@ -494,9 +529,9 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	if (big != NULL) {
 		r = big;
 		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
-		if (malloc_freeprot)
+		if (mopts.malloc_freeprot)
 			mprotect(p, sz, PROT_READ | PROT_WRITE);
-		if (malloc_hint)
+		if (mopts.malloc_hint)
 			madvise(p, sz, MADV_NORMAL);
 		r->size -= psz;
 		d->free_regions_size -= psz;
@@ -507,7 +542,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	p = MMAP(sz);
 	if (p != MAP_FAILED)
 		malloc_used += sz;
-	if (d->free_regions_size > malloc_cache)
+	if (d->free_regions_size > mopts.malloc_cache)
 		wrterror("malloc cache");
 	/* zero fill not needed */
 	return p;
@@ -532,14 +567,22 @@ getrbyte(void)
  * Initialize a dir_info, which should have been cleared by caller
  */
 static int
-omalloc_init(struct dir_info *d)
+omalloc_init(struct dir_info **dp)
 {
 	char *p, b[64];
 	int i, j;
-	size_t regioninfo_size;
+	size_t d_avail, regioninfo_size;
+	struct dir_info *d;
 
 	rbytes_init();
 
+	/*
+	 * Default options
+	 */
+	mopts.malloc_abort = 1;
+	mopts.malloc_move = 1;
+	mopts.malloc_cache = 64;
+
 	for (i = 0; i < 3; i++) {
 		switch (i) {
 		case 0:
@@ -565,77 +608,83 @@ omalloc_init(struct dir_info *d)
 	for (; p != NULL && *p != '\0'; p++) {
 		switch (*p) {
 		case '>':
-			malloc_cache <<= 1;
-			if (malloc_cache > MALLOC_MAXCACHE)
-				malloc_cache = MALLOC_MAXCACHE;
+			mopts.malloc_cache <<= 1;
+			if (mopts.malloc_cache > MALLOC_MAXCACHE)
+				mopts.malloc_cache = MALLOC_MAXCACHE;
 			break;
 		case '<':
-			malloc_cache >>= 1;
+			mopts.malloc_cache >>= 1;
 			break;
 		case 'a':
-			malloc_abort = 0;
+			mopts.malloc_abort = 0;
 			break;
 		case 'A':
-			malloc_abort = 1;
+			mopts.malloc_abort = 1;
 			break;
 #ifdef MALLOC_STATS
 		case 'd':
-			malloc_stats = 0;
+			mopts.malloc_stats = 0;
 			break;
 		case 'D':
-			malloc_stats = 1;
+			mopts.malloc_stats = 1;
 			break;
 #endif /* MALLOC_STATS */
 		case 'f':
-			malloc_freeprot = 0;
+			mopts.malloc_freeprot = 0;
 			break;
 		case 'F':
-			malloc_freeprot = 1;
+			mopts.malloc_freeprot = 1;
 			break;
 		case 'g':
-			malloc_guard = 0;
+			mopts.malloc_guard = 0;
 			break;
 		case 'G':
-			malloc_guard = MALLOC_PAGESIZE;
+			mopts.malloc_guard = MALLOC_PAGESIZE;
 			break;
 		case 'h':
-			malloc_hint = 0;
+			mopts.malloc_hint = 0;
 			break;
 		case 'H':
-			malloc_hint = 1;
+			mopts.malloc_hint = 1;
 			break;
 		case 'j':
-			malloc_junk = 0;
+			mopts.malloc_junk = 0;
 			break;
 		case 'J':
-			malloc_junk = 1;
+			mopts.malloc_junk = 1;
+			break;
+		case 'l':
+			mopts.malloc_poolprot = 0;
+			break;
+		case 'L':
+			mopts.malloc_poolprot = 1;
 			break;
 		case 'n':
 		case 'N':
 			break;
 		case 'p':
-			malloc_move = 0;
+			mopts.malloc_move = 0;
 			break;
 		case 'P':
-			malloc_move = 1;
+			mopts.malloc_move = 1;
 			break;
 		case 'r':
-			malloc_realloc = 0;
+			mopts.malloc_realloc = 0;
 			break;
 		case 'R':
-			malloc_realloc = 1;
+			mopts.malloc_realloc = 1;
 			break;
 		case 'x':
-			malloc_xmalloc = 0;
+			mopts.malloc_xmalloc = 0;
 			break;
 		case 'X':
-			malloc_xmalloc = 1;
+			mopts.malloc_xmalloc = 1;
 			break;
 		case 'z':
-			malloc_zero = 0;
+			mopts.malloc_zero = 0;
 			break;
 		case 'Z':
-			malloc_zero = 1;
+			mopts.malloc_zero = 1;
 			break;
 		default: {
 			static const char q[] = "malloc() warning: "
@@ -651,17 +700,33 @@ omalloc_init(struct dir_info *d)
 	 * We want junk in the entire allocation, and zero only in the part
 	 * the user asked for.
 	 */
-	if (malloc_zero)
-		malloc_junk = 1;
+	if (mopts.malloc_zero)
+		mopts.malloc_junk = 1;
 
 #ifdef MALLOC_STATS
-	if (malloc_stats && (atexit(malloc_exit) == -1)) {
+	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
 		static const char q[] = "malloc() warning: atexit(2) failed."
 		    " Will not be able to dump stats on exit\n";
 		write(STDERR_FILENO, q, sizeof(q) - 1);
 	}
 #endif /* MALLOC_STATS */
 
+	while ((mopts.malloc_canary = arc4random()) == 0)
+		;
+
+	/*
+	 * Allocate dir_info with a guard page on either side. Also
+	 * randomise offset inside the page at which the dir_info
+	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
+	 */
+	if ((p = MMAP(PAGE_SIZE + DIR_INFO_RSZ + PAGE_SIZE)) == NULL)
+		return -1;
+	mprotect(p, PAGE_SIZE, PROT_NONE);
+	mprotect(p + PAGE_SIZE + DIR_INFO_RSZ, PAGE_SIZE, PROT_NONE);
+	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
+	d = (struct dir_info *)(p + PAGE_SIZE +
+	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
+
 	d->regions_bits = 9;
 	d->regions_free = d->regions_total = 1 << d->regions_bits;
 	regioninfo_size = d->regions_total * sizeof(struct region_info);
@@ -673,8 +738,18 @@ omalloc_init(struct dir_info *d)
 	}
 	malloc_used += regioninfo_size;
 	memset(d->r, 0, regioninfo_size);
-	d->canary1 = arc4random();
+	d->canary1 = mopts.malloc_canary ^ (u_int32_t)d;
 	d->canary2 = ~d->canary1;
+
+	*dp = d;
+
+	/*
+	 * Options have been set and will never be reset.
+	 * Prevent further tampering with them.
+	 */
+	if (((uintptr_t)&malloc_readonly & PAGE_MASK) == 0)
+		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
+
 	return 0;
 }
 
@@ -792,7 +867,8 @@ find(struct dir_info *d, void *p)
 	size_t mask = d->regions_total - 1;
 	void *q, *r;
 
-	if (d->canary1 != ~d->canary2)
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
+	    d->canary1 != ~d->canary2)
 		wrterror("internal struct corrupt");
 	p = MASK_POINTER(p);
 	index = hash(p) & mask;
@@ -818,7 +894,7 @@ delete(struct dir_info *d, struct region_info *ri)
 	if (d->regions_total & (d->regions_total - 1))
 		wrterror("regions_total not 2^x");
 	d->regions_free++;
-	STATS_INC(g_pool.deletes);
+	STATS_INC(g_pool->deletes);
 
 	i = ri - d->r;
 	for (;;) {
@@ -834,7 +910,7 @@ delete(struct dir_info *d, struct region_info *ri)
 		    (j < i && i <= r))
 			continue;
 		d->r[j] = d->r[i];
-		STATS_INC(g_pool.delete_moves);
+		STATS_INC(g_pool->delete_moves);
 		break;
 	}
 
@@ -919,6 +995,9 @@ malloc_bytes(struct dir_info *d, size_t size)
 	u_long u, *lp;
 	struct chunk_info *bp;
 
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror("internal struct corrupt");
 	/* Don't bother with anything less than this */
 	/* unless we have a malloc(0) requests */
 	if (size != 0 && size < MALLOC_MINSIZE)
@@ -983,7 +1062,7 @@ malloc_bytes(struct dir_info *d, size_t size)
 	k += (lp - bp->bits) * MALLOC_BITS;
 	k <<= bp->shift;
 
-	if (malloc_junk && bp->size > 0)
+	if (mopts.malloc_junk && bp->size > 0)
 		memset((char *)bp->page + k, SOME_JUNK, bp->size);
 	return ((char *)bp->page + k);
 }
@@ -1047,7 +1126,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 	}
 	*mp = info->next;
 
-	if (info->size == 0 && !malloc_freeprot)
+	if (info->size == 0 && !mopts.malloc_freeprot)
 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
 	unmap(d, info->page, MALLOC_PAGESIZE);
 
@@ -1064,54 +1143,55 @@ omalloc(size_t sz, int zero_fill)
 	size_t psz;
 
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) {
+		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
 			errno = ENOMEM;
 			return NULL;
 		}
-		sz += malloc_guard;
+		sz += mopts.malloc_guard;
 		psz = PAGEROUND(sz);
-		p = map(&g_pool, psz, zero_fill);
+		p = map(g_pool, psz, zero_fill);
 		if (p == MAP_FAILED) {
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (insert(&g_pool, p, sz)) {
-			unmap(&g_pool, p, psz);
+		if (insert(g_pool, p, sz)) {
+			unmap(g_pool, p, psz);
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (malloc_guard) {
-			if (mprotect((char *)p + psz - malloc_guard,
-			    malloc_guard, PROT_NONE))
+		if (mopts.malloc_guard) {
+			if (mprotect((char *)p + psz - mopts.malloc_guard,
+			    mopts.malloc_guard, PROT_NONE))
 				wrterror("mprotect");
-			malloc_guarded += malloc_guard;
+			malloc_guarded += mopts.malloc_guard;
 		}
 
-		if (malloc_move &&
-		    sz - malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
+		if (mopts.malloc_move &&
+		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
+		    MALLOC_LEEWAY) {
 			/* fill whole allocation */
-			if (malloc_junk)
-				memset(p, SOME_JUNK, psz - malloc_guard);
+			if (mopts.malloc_junk)
+				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
 			/* shift towards the end */
 			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
-			    (sz - malloc_guard)) & ~(MALLOC_MINSIZE-1));
+			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
 			/* fill zeros if needed and overwritten above */
-			if (zero_fill && malloc_junk)
-				memset(p, 0, sz - malloc_guard);
+			if (zero_fill && mopts.malloc_junk)
+				memset(p, 0, sz - mopts.malloc_guard);
 		} else {
-			if (malloc_junk) {
+			if (mopts.malloc_junk) {
 				if (zero_fill)
-					memset(p + sz - malloc_guard,
+					memset(p + sz - mopts.malloc_guard,
 					    SOME_JUNK, psz - sz);
 				else
-					memset(p,
-					    SOME_JUNK, psz - malloc_guard);
+					memset(p, SOME_JUNK,
+					    psz - mopts.malloc_guard);
 			}
 		}
 
 	} else {
 		/* takes care of SOME_JUNK */
-		p = malloc_bytes(&g_pool, sz);
+		p = malloc_bytes(g_pool, sz);
 		if (zero_fill && p != NULL && sz > 0)
 			memset(p, 0, sz);
 	}
@@ -1134,10 +1214,33 @@ malloc_recurse(void)
1134 wrterror("recursive call"); 1214 wrterror("recursive call");
1135 } 1215 }
1136 malloc_active--; 1216 malloc_active--;
1217 PROTECT_G_POOL();
1137 _MALLOC_UNLOCK(); 1218 _MALLOC_UNLOCK();
1138 errno = EDEADLK; 1219 errno = EDEADLK;
1139} 1220}
1140 1221
1222static void
1223malloc_global_corrupt(void)
1224{
1225 wrterror("global malloc data corrupt");
1226 PROTECT_G_POOL();
1227 _MALLOC_UNLOCK();
1228 errno = EINVAL;
1229}
1230
1231static int
1232malloc_init(void)
1233{
1234 if (omalloc_init(&g_pool)) {
1235 _MALLOC_UNLOCK();
1236 if (mopts.malloc_xmalloc)
1237 wrterror("out of memory");
1238 errno = ENOMEM;
1239 return -1;
1240 }
1241 return 0;
1242}
1243
1141void * 1244void *
1142malloc(size_t size) 1245malloc(size_t size)
1143{ 1246{
@@ -1145,24 +1248,21 @@ malloc(size_t size)
 	int saved_errno = errno;
 
 	_MALLOC_LOCK();
+	UNPROTECT_G_POOL();
 	malloc_func = " in malloc():";
-	if (!g_pool.regions_total) {
-		if (omalloc_init(&g_pool)) {
-			_MALLOC_UNLOCK();
-			if (malloc_xmalloc)
-				wrterror("out of memory");
-			errno = ENOMEM;
+	if (g_pool == NULL) {
+		if (malloc_init() != 0)
 			return NULL;
-		}
 	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return NULL;
 	}
-	r = omalloc(size, malloc_zero);
+	r = omalloc(size, mopts.malloc_zero);
 	malloc_active--;
+	PROTECT_G_POOL();
 	_MALLOC_UNLOCK();
-	if (r == NULL && malloc_xmalloc) {
+	if (r == NULL && mopts.malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1177,14 +1277,15 @@ ofree(void *p)
 	struct region_info *r;
 	size_t sz;
 
-	r = find(&g_pool, p);
+	r = find(g_pool, p);
 	if (r == NULL) {
 		wrterror("bogus pointer (double free?)");
 		return;
 	}
 	REALSIZE(sz, r);
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz - malloc_guard >= MALLOC_PAGESIZE - MALLOC_LEEWAY) {
+		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
+		    MALLOC_LEEWAY) {
 			if (r->p != p) {
 				wrterror("bogus pointer");
 				return;
@@ -1193,46 +1294,47 @@ ofree(void *p)
 #if notyetbecause_of_realloc
 		/* shifted towards the end */
 		if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
-		    MALLOC_MINSIZE - sz - malloc_guard) &
+		    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
 		    ~(MALLOC_MINSIZE-1))) {
 		}
 #endif
 			p = r->p;
 		}
-		if (malloc_guard) {
-			if (sz < malloc_guard)
+		if (mopts.malloc_guard) {
+			if (sz < mopts.malloc_guard)
 				wrterror("guard size");
-			if (!malloc_freeprot) {
+			if (!mopts.malloc_freeprot) {
 				if (mprotect((char *)p + PAGEROUND(sz) -
-				    malloc_guard, malloc_guard,
+				    mopts.malloc_guard, mopts.malloc_guard,
 				    PROT_READ | PROT_WRITE))
 					wrterror("mprotect");
 			}
-			malloc_guarded -= malloc_guard;
+			malloc_guarded -= mopts.malloc_guard;
 		}
-		if (malloc_junk && !malloc_freeprot)
-			memset(p, SOME_FREEJUNK, PAGEROUND(sz) - malloc_guard);
-		unmap(&g_pool, p, PAGEROUND(sz));
-		delete(&g_pool, r);
+		if (mopts.malloc_junk && !mopts.malloc_freeprot)
+			memset(p, SOME_FREEJUNK,
+			    PAGEROUND(sz) - mopts.malloc_guard);
+		unmap(g_pool, p, PAGEROUND(sz));
+		delete(g_pool, r);
 	} else {
 		void *tmp;
 		int i;
 
-		if (malloc_junk && sz > 0)
+		if (mopts.malloc_junk && sz > 0)
 			memset(p, SOME_FREEJUNK, sz);
-		if (!malloc_freeprot) {
+		if (!mopts.malloc_freeprot) {
 			i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
 			tmp = p;
-			p = g_pool.delayed_chunks[i];
-			g_pool.delayed_chunks[i] = tmp;
+			p = g_pool->delayed_chunks[i];
+			g_pool->delayed_chunks[i] = tmp;
 		}
 		if (p != NULL) {
-			r = find(&g_pool, p);
+			r = find(g_pool, p);
 			if (r == NULL) {
 				wrterror("bogus pointer (double free?)");
 				return;
 			}
-			free_bytes(&g_pool, r, p);
+			free_bytes(g_pool, r, p);
 		}
 	}
 }
@@ -1247,13 +1349,20 @@ free(void *ptr)
 		return;
 
 	_MALLOC_LOCK();
+	UNPROTECT_G_POOL();
 	malloc_func = " in free():";
+	if (g_pool == NULL) {
+		_MALLOC_UNLOCK();
+		wrterror("free() called before allocation");
+		return;
+	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return;
 	}
 	ofree(ptr);
 	malloc_active--;
+	PROTECT_G_POOL();
 	_MALLOC_UNLOCK();
 	errno = saved_errno;
 }
@@ -1269,12 +1378,12 @@ orealloc(void *p, size_t newsz)
 	if (p == NULL)
 		return omalloc(newsz, 0);
 
-	r = find(&g_pool, p);
+	r = find(g_pool, p);
 	if (r == NULL) {
 		wrterror("bogus pointer (double free?)");
 		return NULL;
 	}
-	if (newsz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) {
+	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
 		errno = ENOMEM;
 		return NULL;
 	}
@@ -1282,61 +1391,63 @@ orealloc(void *p, size_t newsz)
 	REALSIZE(oldsz, r);
 	goldsz = oldsz;
 	if (oldsz > MALLOC_MAXCHUNK) {
-		if (oldsz < malloc_guard)
+		if (oldsz < mopts.malloc_guard)
 			wrterror("guard size");
-		oldsz -= malloc_guard;
+		oldsz -= mopts.malloc_guard;
 	}
 
 	gnewsz = newsz;
 	if (gnewsz > MALLOC_MAXCHUNK)
-		gnewsz += malloc_guard;
+		gnewsz += mopts.malloc_guard;
 
 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
-	    !malloc_realloc) {
+	    !mopts.malloc_realloc) {
 		size_t roldsz = PAGEROUND(goldsz);
 		size_t rnewsz = PAGEROUND(gnewsz);
 
 		if (rnewsz > roldsz) {
-			if (!malloc_guard) {
+			if (!mopts.malloc_guard) {
 				STATS_INC(g_pool.cheap_realloc_tries);
-				zapcacheregion(&g_pool, p + roldsz);
+				zapcacheregion(g_pool, p + roldsz);
 				q = MMAPA(p + roldsz, rnewsz - roldsz);
 				if (q == p + roldsz) {
 					malloc_used += rnewsz - roldsz;
-					if (malloc_junk)
+					if (mopts.malloc_junk)
 						memset(q, SOME_JUNK,
 						    rnewsz - roldsz);
 					r->size = newsz;
-					STATS_INC(g_pool.cheap_reallocs);
+					STATS_INC(g_pool->cheap_reallocs);
 					return p;
 				} else if (q != MAP_FAILED)
 					munmap(q, rnewsz - roldsz);
 			}
 		} else if (rnewsz < roldsz) {
-			if (malloc_guard) {
-				if (mprotect((char *)p + roldsz - malloc_guard,
-				    malloc_guard, PROT_READ | PROT_WRITE))
+			if (mopts.malloc_guard) {
+				if (mprotect((char *)p + roldsz -
+				    mopts.malloc_guard, mopts.malloc_guard,
+				    PROT_READ | PROT_WRITE))
 					wrterror("mprotect");
-				if (mprotect((char *)p + rnewsz - malloc_guard,
-				    malloc_guard, PROT_NONE))
+				if (mprotect((char *)p + rnewsz -
+				    mopts.malloc_guard, mopts.malloc_guard,
+				    PROT_NONE))
 					wrterror("mprotect");
 			}
-			unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz);
+			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
 			return p;
 		} else {
-			if (newsz > oldsz && malloc_junk)
+			if (newsz > oldsz && mopts.malloc_junk)
 				memset((char *)p + newsz, SOME_JUNK,
-				    rnewsz - malloc_guard - newsz);
+				    rnewsz - mopts.malloc_guard - newsz);
 			r->size = gnewsz;
 			return p;
 		}
 	}
-	if (newsz <= oldsz && newsz > oldsz / 2 && !malloc_realloc) {
-		if (malloc_junk && newsz > 0)
+	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
+		if (mopts.malloc_junk && newsz > 0)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
 		return p;
-	} else if (newsz != oldsz || malloc_realloc) {
+	} else if (newsz != oldsz || mopts.malloc_realloc) {
 		q = omalloc(newsz, 0);
 		if (q == NULL)
 			return NULL;
@@ -1355,26 +1466,22 @@ realloc(void *ptr, size_t size)
 	int saved_errno = errno;
 
 	_MALLOC_LOCK();
+	UNPROTECT_G_POOL();
 	malloc_func = " in realloc():";
-	if (!g_pool.regions_total) {
-		if (omalloc_init(&g_pool)) {
-			_MALLOC_UNLOCK();
-			if (malloc_xmalloc)
-				wrterror("out of memory");
-			errno = ENOMEM;
+	if (g_pool == NULL) {
+		if (malloc_init() != 0)
 			return NULL;
-		}
 	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return NULL;
 	}
-
 	r = orealloc(ptr, size);
 
 	malloc_active--;
+	PROTECT_G_POOL();
 	_MALLOC_UNLOCK();
-	if (r == NULL && malloc_xmalloc) {
+	if (r == NULL && mopts.malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1393,20 +1500,17 @@ calloc(size_t nmemb, size_t size)
 	int saved_errno = errno;
 
 	_MALLOC_LOCK();
+	UNPROTECT_G_POOL();
 	malloc_func = " in calloc():";
-	if (!g_pool.regions_total) {
-		if (omalloc_init(&g_pool)) {
-			_MALLOC_UNLOCK();
-			if (malloc_xmalloc)
-				wrterror("out of memory");
-			errno = ENOMEM;
+	if (g_pool == NULL) {
+		if (malloc_init() != 0)
 			return NULL;
-		}
 	}
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
-		_MALLOC_UNLOCK();
-		if (malloc_xmalloc)
+		PROTECT_G_POOL();
+		_MALLOC_UNLOCK();
+		if (mopts.malloc_xmalloc)
 			wrterror("out of memory");
 		errno = ENOMEM;
 		return NULL;
@@ -1421,8 +1525,9 @@ calloc(size_t nmemb, size_t size)
 		r = omalloc(size, 1);
 
 	malloc_active--;
+	PROTECT_G_POOL();
 	_MALLOC_UNLOCK();
-	if (r == NULL && malloc_xmalloc) {
+	if (r == NULL && mopts.malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1430,3 +1535,4 @@ calloc(size_t nmemb, size_t size)
 	errno = saved_errno;
 	return r;
 }
+
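For reference, the guard-page layout that the new omalloc_init() builds
around struct dir_info can be pictured with a small stand-alone sketch:
one PROT_NONE page before and one after the page-rounded region, with the
structure dropped at a random 16-byte-aligned offset inside it. Everything
below (the bookkeeping structure, the 16-byte alignment shift, the helper
name alloc_guarded) is an illustrative assumption, not code taken from
malloc.c.

#include <sys/mman.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

struct bookkeeping {
	uint32_t canary1;
	uint32_t canary2;
	size_t used;
};

static struct bookkeeping *
alloc_guarded(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	size_t rsz = (sizeof(struct bookkeeping) + pg - 1) & ~(size_t)(pg - 1);
	unsigned char *p;
	size_t slots, off;

	/* guard page + page-rounded payload + guard page */
	p = mmap(NULL, pg + rsz + pg, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	mprotect(p, pg, PROT_NONE);		/* leading guard */
	mprotect(p + pg + rsz, pg, PROT_NONE);	/* trailing guard */

	/* random, 16-byte aligned offset inside the writable region */
	slots = (rsz - sizeof(struct bookkeeping)) >> 4;
	off = slots ? ((size_t)arc4random_uniform(slots) << 4) : 0;
	return (struct bookkeeping *)(p + pg + off);
}

int
main(void)
{
	struct bookkeeping *b = alloc_guarded();

	if (b == NULL)
		return 1;
	b->canary1 = arc4random();
	b->canary2 = ~b->canary1;
	b->used = 0;
	/* a linear overflow into the pages just outside the region faults */
	return 0;
}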