Diffstat (limited to 'src')
-rw-r--r--	src/lib/libc/stdlib/malloc.c	399
1 file changed, 164 insertions, 235 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3d2e3dd29a..d46207360c 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.113 2008/12/30 07:44:51 djm Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.114 2008/12/31 05:21:46 deraadt Exp $	*/
 /*
  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
@@ -125,7 +125,7 @@ struct dir_info {
 #endif /* MALLOC_STATS */
 	u_int32_t canary2;
 };
-#define DIR_INFO_RSZ	((sizeof(struct dir_info) + PAGE_MASK) & ~PAGE_MASK)
+
 
 /*
  * This structure describes a page worth of chunks.
@@ -145,40 +145,29 @@ struct chunk_info {
 	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
 };
 
-struct malloc_readonly {
-	struct dir_info *g_pool;	/* Main bookkeeping information */
-	int	malloc_abort;		/* abort() on error */
-	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
-	int	malloc_hint;		/* call madvice on free pages? */
-	int	malloc_junk;		/* junk fill? */
-	int	malloc_move;		/* move allocations to end of page? */
-	int	malloc_realloc;		/* always realloc? */
-	int	malloc_xmalloc;		/* xmalloc behaviour? */
-	int	malloc_zero;		/* zero fill? */
-	size_t	malloc_guard;		/* use guard pages after allocations? */
-	u_int	malloc_cache;		/* free pages we cache */
-#ifdef MALLOC_STATS
-	int	malloc_stats;		/* dump statistics at end */
-#endif
-	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
-};
-
-/* This object is mapped PROT_READ after initialisation to prevent tampering */
-static union {
-	struct malloc_readonly mopts;
-	u_char _pad[PAGE_SIZE];
-} malloc_readonly __attribute__((aligned(PAGE_SIZE)));
-#define mopts	malloc_readonly.mopts
-#define g_pool	mopts.g_pool
-
+static struct dir_info	g_pool;
+static char		*malloc_func;	/* current function */
 char	*malloc_options;	/* compile-time options */
 
-static char	*malloc_func;	/* current function */
+static int	malloc_abort = 1;	/* abort() on error */
 static int	malloc_active;	/* status of malloc */
-
+static int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
+static int	malloc_hint;		/* call madvice on free pages? */
+static int	malloc_junk;		/* junk fill? */
+static int	malloc_move = 1;	/* move allocations to end of page? */
+static int	malloc_realloc;		/* always realloc? */
+static int	malloc_xmalloc;		/* xmalloc behaviour? */
+static int	malloc_zero;		/* zero fill? */
+static size_t	malloc_guard;		/* use guard pages after allocations? */
+
+static u_int	malloc_cache = 64;	/* free pages we cache */
 static size_t	malloc_guarded;	/* bytes used for guards */
 static size_t	malloc_used;	/* bytes allocated */
 
+#ifdef MALLOC_STATS
+static int	malloc_stats;	/* dump statistics at end */
+#endif
+
 static size_t	rbytesused;	/* random bytes used */
 static u_char	rbytes[512];	/* random bytes */
 static u_char	getrbyte(void);
@@ -258,7 +247,7 @@ dump_free_page_info(int fd, struct dir_info *d)
 	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
 	    d->free_regions_size);
 	write(fd, buf, strlen(buf));
-	for (i = 0; i < mopts.malloc_cache; i++) {
+	for (i = 0; i < malloc_cache; i++) {
 		if (d->free_regions[i].p != NULL) {
 			snprintf(buf, sizeof(buf), "%2d) ", i);
 			write(fd, buf, strlen(buf));
@@ -277,8 +266,6 @@ malloc_dump1(int fd, struct dir_info *d)
 
 	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
 	write(fd, buf, strlen(buf));
-	if (d == NULL)
-		return;
 	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
@@ -326,7 +313,7 @@ malloc_dump1(int fd, struct dir_info *d)
 void
 malloc_dump(int fd)
 {
-	malloc_dump1(fd, g_pool);
+	malloc_dump1(fd, &g_pool);
 }
 
 static void
@@ -366,11 +353,11 @@ wrterror(char *p)
 	writev(STDERR_FILENO, iov, 5);
 
 #ifdef MALLOC_STATS
-	if (mopts.malloc_stats)
+	if (malloc_stats)
 		malloc_dump(STDERR_FILENO);
 #endif /* MALLOC_STATS */
 	//malloc_active--;
-	if (mopts.malloc_abort)
+	if (malloc_abort)
 		abort();
 }
 
@@ -394,19 +381,19 @@ unmap(struct dir_info *d, void *p, size_t sz)
 		return;
 	}
 
-	if (psz > mopts.malloc_cache) {
+	if (psz > malloc_cache) {
 		if (munmap(p, sz))
 			wrterror("munmap");
 		malloc_used -= sz;
 		return;
 	}
 	tounmap = 0;
-	rsz = mopts.malloc_cache - d->free_regions_size;
+	rsz = malloc_cache - d->free_regions_size;
 	if (psz > rsz)
 		tounmap = psz - rsz;
 	offset = getrbyte();
-	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+	for (i = 0; tounmap > 0 && i < malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (malloc_cache - 1)];
 		if (r->p != NULL) {
 			rsz = r->size << MALLOC_PAGESHIFT;
 			if (munmap(r->p, rsz))
@@ -423,12 +410,12 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 	if (tounmap > 0)
 		wrterror("malloc cache underflow");
-	for (i = 0; i < mopts.malloc_cache; i++) {
+	for (i = 0; i < malloc_cache; i++) {
 		r = &d->free_regions[i];
 		if (r->p == NULL) {
-			if (mopts.malloc_hint)
+			if (malloc_hint)
 				madvise(p, sz, MADV_FREE);
-			if (mopts.malloc_freeprot)
+			if (malloc_freeprot)
 				mprotect(p, sz, PROT_NONE);
 			r->p = p;
 			r->size = psz;
@@ -436,9 +423,9 @@ unmap(struct dir_info *d, void *p, size_t sz)
 			break;
 		}
 	}
-	if (i == mopts.malloc_cache)
+	if (i == malloc_cache)
 		wrterror("malloc free slot lost");
-	if (d->free_regions_size > mopts.malloc_cache)
+	if (d->free_regions_size > malloc_cache)
 		wrterror("malloc cache overflow");
 }
 
@@ -449,7 +436,7 @@ zapcacheregion(struct dir_info *d, void *p)
 	struct region_info *r;
 	size_t rsz;
 
-	for (i = 0; i < mopts.malloc_cache; i++) {
+	for (i = 0; i < malloc_cache; i++) {
 		r = &d->free_regions[i];
 		if (r->p == p) {
 			rsz = r->size << MALLOC_PAGESHIFT;
@@ -471,9 +458,6 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	u_int i, offset;
 	void *p;
 
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
-	    d->canary1 != ~d->canary2)
-		wrterror("internal struct corrupt");
 	if (sz != PAGEROUND(sz)) {
 		wrterror("map round");
 		return NULL;
@@ -486,22 +470,21 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		return p;
 	}
 	offset = getrbyte();
-	for (i = 0; i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+	for (i = 0; i < malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (malloc_cache - 1)];
 		if (r->p != NULL) {
 			if (r->size == psz) {
 				p = r->p;
-				if (mopts.malloc_freeprot)
+				if (malloc_freeprot)
 					mprotect(p, sz, PROT_READ | PROT_WRITE);
-				if (mopts.malloc_hint)
+				if (malloc_hint)
 					madvise(p, sz, MADV_NORMAL);
 				r->p = NULL;
 				r->size = 0;
 				d->free_regions_size -= psz;
 				if (zero_fill)
 					memset(p, 0, sz);
-				else if (mopts.malloc_junk &&
-				    mopts.malloc_freeprot)
+				else if (malloc_junk && malloc_freeprot)
 					memset(p, SOME_FREEJUNK, sz);
 				return p;
 			} else if (r->size > psz)
@@ -511,9 +494,9 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	if (big != NULL) {
 		r = big;
 		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
-		if (mopts.malloc_freeprot)
+		if (malloc_freeprot)
 			mprotect(p, sz, PROT_READ | PROT_WRITE);
-		if (mopts.malloc_hint)
+		if (malloc_hint)
 			madvise(p, sz, MADV_NORMAL);
 		r->size -= psz;
 		d->free_regions_size -= psz;
@@ -524,7 +507,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 	p = MMAP(sz);
 	if (p != MAP_FAILED)
 		malloc_used += sz;
-	if (d->free_regions_size > mopts.malloc_cache)
+	if (d->free_regions_size > malloc_cache)
 		wrterror("malloc cache");
 	/* zero fill not needed */
 	return p;
@@ -549,22 +532,14 @@ getrbyte(void)
  * Initialize a dir_info, which should have been cleared by caller
  */
 static int
-omalloc_init(struct dir_info **dp)
+omalloc_init(struct dir_info *d)
 {
 	char *p, b[64];
 	int i, j;
-	size_t d_avail, regioninfo_size;
-	struct dir_info *d;
+	size_t regioninfo_size;
 
 	rbytes_init();
 
-	/*
-	 * Default options
-	 */
-	mopts.malloc_abort = 1;
-	mopts.malloc_move = 1;
-	mopts.malloc_cache = 64;
-
 	for (i = 0; i < 3; i++) {
 		switch (i) {
 		case 0:
@@ -590,77 +565,77 @@ omalloc_init(struct dir_info **dp)
 	for (; p != NULL && *p != '\0'; p++) {
 		switch (*p) {
 		case '>':
-			mopts.malloc_cache <<= 1;
-			if (mopts.malloc_cache > MALLOC_MAXCACHE)
-				mopts.malloc_cache = MALLOC_MAXCACHE;
+			malloc_cache <<= 1;
+			if (malloc_cache > MALLOC_MAXCACHE)
+				malloc_cache = MALLOC_MAXCACHE;
 			break;
 		case '<':
-			mopts.malloc_cache >>= 1;
+			malloc_cache >>= 1;
 			break;
 		case 'a':
-			mopts.malloc_abort = 0;
+			malloc_abort = 0;
 			break;
 		case 'A':
-			mopts.malloc_abort = 1;
+			malloc_abort = 1;
 			break;
 #ifdef MALLOC_STATS
 		case 'd':
-			mopts.malloc_stats = 0;
+			malloc_stats = 0;
 			break;
 		case 'D':
-			mopts.malloc_stats = 1;
+			malloc_stats = 1;
 			break;
 #endif /* MALLOC_STATS */
 		case 'f':
-			mopts.malloc_freeprot = 0;
+			malloc_freeprot = 0;
 			break;
 		case 'F':
-			mopts.malloc_freeprot = 1;
+			malloc_freeprot = 1;
 			break;
 		case 'g':
-			mopts.malloc_guard = 0;
+			malloc_guard = 0;
 			break;
 		case 'G':
-			mopts.malloc_guard = MALLOC_PAGESIZE;
+			malloc_guard = MALLOC_PAGESIZE;
 			break;
 		case 'h':
-			mopts.malloc_hint = 0;
+			malloc_hint = 0;
 			break;
 		case 'H':
-			mopts.malloc_hint = 1;
+			malloc_hint = 1;
 			break;
 		case 'j':
-			mopts.malloc_junk = 0;
+			malloc_junk = 0;
 			break;
 		case 'J':
-			mopts.malloc_junk = 1;
+			malloc_junk = 1;
 			break;
 		case 'n':
 		case 'N':
 			break;
 		case 'p':
-			mopts.malloc_move = 0;
+			malloc_move = 0;
 			break;
 		case 'P':
-			mopts.malloc_move = 1;
+			malloc_move = 1;
 			break;
 		case 'r':
-			mopts.malloc_realloc = 0;
+			malloc_realloc = 0;
 			break;
 		case 'R':
-			mopts.malloc_realloc = 1;
+			malloc_realloc = 1;
 			break;
 		case 'x':
-			mopts.malloc_xmalloc = 0;
+			malloc_xmalloc = 0;
 			break;
 		case 'X':
-			mopts.malloc_xmalloc = 1;
+			malloc_xmalloc = 1;
 			break;
 		case 'z':
-			mopts.malloc_zero = 0;
+			malloc_zero = 0;
 			break;
 		case 'Z':
-			mopts.malloc_zero = 1;
+			malloc_zero = 1;
 			break;
 		default: {
 			static const char q[] = "malloc() warning: "
@@ -676,33 +651,17 @@ omalloc_init(struct dir_info **dp)
 	 * We want junk in the entire allocation, and zero only in the part
 	 * the user asked for.
 	 */
-	if (mopts.malloc_zero)
-		mopts.malloc_junk = 1;
+	if (malloc_zero)
+		malloc_junk = 1;
 
 #ifdef MALLOC_STATS
-	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
+	if (malloc_stats && (atexit(malloc_exit) == -1)) {
 		static const char q[] = "malloc() warning: atexit(2) failed."
 		    " Will not be able to dump stats on exit\n";
 		write(STDERR_FILENO, q, sizeof(q) - 1);
 	}
 #endif /* MALLOC_STATS */
 
-	while ((mopts.malloc_canary = arc4random()) == 0)
-		;
-
-	/*
-	 * Allocate dir_info with a guard page on either side. Also
-	 * randomise offset inside the page at which the dir_info
-	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
-	 */
-	if ((p = MMAP(PAGE_SIZE + DIR_INFO_RSZ + PAGE_SIZE)) == NULL)
-		return -1;
-	mprotect(p, PAGE_SIZE, PROT_NONE);
-	mprotect(p + PAGE_SIZE + DIR_INFO_RSZ, PAGE_SIZE, PROT_NONE);
-	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
-	d = (struct dir_info *)(p + PAGE_SIZE +
-	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
-
 	d->regions_bits = 9;
 	d->regions_free = d->regions_total = 1 << d->regions_bits;
 	regioninfo_size = d->regions_total * sizeof(struct region_info);
@@ -714,18 +673,8 @@ omalloc_init(struct dir_info **dp)
 	}
 	malloc_used += regioninfo_size;
 	memset(d->r, 0, regioninfo_size);
-	d->canary1 = mopts.malloc_canary ^ (u_int32_t)d;
+	d->canary1 = arc4random();
 	d->canary2 = ~d->canary1;
-
-	*dp = d;
-
-	/*
-	 * Options have been set and will never be reset.
-	 * Prevent further tampering with them.
-	 */
-	if (((uintptr_t)&malloc_readonly & PAGE_MASK) == 0)
-		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
-
 	return 0;
 }
 
@@ -843,8 +792,7 @@ find(struct dir_info *d, void *p)
 	size_t mask = d->regions_total - 1;
 	void *q, *r;
 
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
-	    d->canary1 != ~d->canary2)
+	if (d->canary1 != ~d->canary2)
 		wrterror("internal struct corrupt");
 	p = MASK_POINTER(p);
 	index = hash(p) & mask;
@@ -870,7 +818,7 @@ delete(struct dir_info *d, struct region_info *ri)
 	if (d->regions_total & (d->regions_total - 1))
 		wrterror("regions_total not 2^x");
 	d->regions_free++;
-	STATS_INC(g_pool->deletes);
+	STATS_INC(g_pool.deletes);
 
 	i = ri - d->r;
 	for (;;) {
@@ -886,7 +834,7 @@ delete(struct dir_info *d, struct region_info *ri)
 		    (j < i && i <= r))
 			continue;
 		d->r[j] = d->r[i];
-		STATS_INC(g_pool->delete_moves);
+		STATS_INC(g_pool.delete_moves);
 		break;
 	}
 
@@ -971,9 +919,6 @@ malloc_bytes(struct dir_info *d, size_t size)
 	u_long u, *lp;
 	struct chunk_info *bp;
 
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
-	    d->canary1 != ~d->canary2)
-		wrterror("internal struct corrupt");
 	/* Don't bother with anything less than this */
 	/* unless we have a malloc(0) requests */
 	if (size != 0 && size < MALLOC_MINSIZE)
@@ -1038,7 +983,7 @@ malloc_bytes(struct dir_info *d, size_t size)
 	k += (lp - bp->bits) * MALLOC_BITS;
 	k <<= bp->shift;
 
-	if (mopts.malloc_junk && bp->size > 0)
+	if (malloc_junk && bp->size > 0)
 		memset((char *)bp->page + k, SOME_JUNK, bp->size);
 	return ((char *)bp->page + k);
 }
@@ -1102,7 +1047,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 	}
 	*mp = info->next;
 
-	if (info->size == 0 && !mopts.malloc_freeprot)
+	if (info->size == 0 && !malloc_freeprot)
 		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
 	unmap(d, info->page, MALLOC_PAGESIZE);
 
@@ -1119,55 +1064,54 @@ omalloc(size_t sz, int zero_fill)
 	size_t psz;
 
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
+		if (sz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) {
 			errno = ENOMEM;
 			return NULL;
 		}
-		sz += mopts.malloc_guard;
+		sz += malloc_guard;
 		psz = PAGEROUND(sz);
-		p = map(g_pool, psz, zero_fill);
+		p = map(&g_pool, psz, zero_fill);
 		if (p == MAP_FAILED) {
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (insert(g_pool, p, sz)) {
-			unmap(g_pool, p, psz);
+		if (insert(&g_pool, p, sz)) {
+			unmap(&g_pool, p, psz);
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (mopts.malloc_guard) {
-			if (mprotect((char *)p + psz - mopts.malloc_guard,
-			    mopts.malloc_guard, PROT_NONE))
+		if (malloc_guard) {
+			if (mprotect((char *)p + psz - malloc_guard,
+			    malloc_guard, PROT_NONE))
 				wrterror("mprotect");
-			malloc_guarded += mopts.malloc_guard;
+			malloc_guarded += malloc_guard;
 		}
 
-		if (mopts.malloc_move &&
-		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
-		    MALLOC_LEEWAY) {
+		if (malloc_move &&
+		    sz - malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
 			/* fill whole allocation */
-			if (mopts.malloc_junk)
-				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
+			if (malloc_junk)
+				memset(p, SOME_JUNK, psz - malloc_guard);
 			/* shift towards the end */
 			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
-			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
+			    (sz - malloc_guard)) & ~(MALLOC_MINSIZE-1));
 			/* fill zeros if needed and overwritten above */
-			if (zero_fill && mopts.malloc_junk)
-				memset(p, 0, sz - mopts.malloc_guard);
+			if (zero_fill && malloc_junk)
+				memset(p, 0, sz - malloc_guard);
 		} else {
-			if (mopts.malloc_junk) {
+			if (malloc_junk) {
 				if (zero_fill)
-					memset(p + sz - mopts.malloc_guard,
+					memset(p + sz - malloc_guard,
 					    SOME_JUNK, psz - sz);
 				else
-					memset(p, SOME_JUNK,
-					    psz - mopts.malloc_guard);
+					memset(p,
+					    SOME_JUNK, psz - malloc_guard);
 			}
 		}
 
 	} else {
 		/* takes care of SOME_JUNK */
-		p = malloc_bytes(g_pool, sz);
+		p = malloc_bytes(&g_pool, sz);
 		if (zero_fill && p != NULL && sz > 0)
 			memset(p, 0, sz);
 	}
@@ -1194,27 +1138,6 @@ malloc_recurse(void)
 	errno = EDEADLK;
 }
 
-static void
-malloc_global_corrupt(void)
-{
-	wrterror("global malloc data corrupt");
-	_MALLOC_UNLOCK();
-	errno = EINVAL;
-}
-
-static int
-malloc_init(void)
-{
-	if (omalloc_init(&g_pool)) {
-		_MALLOC_UNLOCK();
-		if (mopts.malloc_xmalloc)
-			wrterror("out of memory");
-		errno = ENOMEM;
-		return -1;
-	}
-	return 0;
-}
-
 void *
 malloc(size_t size)
 {
@@ -1223,18 +1146,23 @@ malloc(size_t size)
 
 	_MALLOC_LOCK();
 	malloc_func = " in malloc():";
-	if (g_pool == NULL) {
-		if (malloc_init() != 0)
+	if (!g_pool.regions_total) {
+		if (omalloc_init(&g_pool)) {
+			_MALLOC_UNLOCK();
+			if (malloc_xmalloc)
+				wrterror("out of memory");
+			errno = ENOMEM;
 			return NULL;
+		}
 	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return NULL;
 	}
-	r = omalloc(size, mopts.malloc_zero);
+	r = omalloc(size, malloc_zero);
 	malloc_active--;
 	_MALLOC_UNLOCK();
-	if (r == NULL && mopts.malloc_xmalloc) {
+	if (r == NULL && malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1249,15 +1177,14 @@ ofree(void *p)
 	struct region_info *r;
 	size_t sz;
 
-	r = find(g_pool, p);
+	r = find(&g_pool, p);
 	if (r == NULL) {
 		wrterror("bogus pointer (double free?)");
 		return;
 	}
 	REALSIZE(sz, r);
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
-		    MALLOC_LEEWAY) {
+		if (sz - malloc_guard >= MALLOC_PAGESIZE - MALLOC_LEEWAY) {
 			if (r->p != p) {
 				wrterror("bogus pointer");
 				return;
@@ -1266,47 +1193,46 @@ ofree(void *p)
 #if notyetbecause_of_realloc
 			/* shifted towards the end */
 			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
-			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
+			    MALLOC_MINSIZE - sz - malloc_guard) &
 			    ~(MALLOC_MINSIZE-1))) {
 			}
 #endif
 			p = r->p;
 		}
-		if (mopts.malloc_guard) {
-			if (sz < mopts.malloc_guard)
+		if (malloc_guard) {
+			if (sz < malloc_guard)
 				wrterror("guard size");
-			if (!mopts.malloc_freeprot) {
+			if (!malloc_freeprot) {
 				if (mprotect((char *)p + PAGEROUND(sz) -
-				    mopts.malloc_guard, mopts.malloc_guard,
+				    malloc_guard, malloc_guard,
 				    PROT_READ | PROT_WRITE))
 					wrterror("mprotect");
 			}
-			malloc_guarded -= mopts.malloc_guard;
+			malloc_guarded -= malloc_guard;
 		}
-		if (mopts.malloc_junk && !mopts.malloc_freeprot)
-			memset(p, SOME_FREEJUNK,
-			    PAGEROUND(sz) - mopts.malloc_guard);
-		unmap(g_pool, p, PAGEROUND(sz));
-		delete(g_pool, r);
+		if (malloc_junk && !malloc_freeprot)
+			memset(p, SOME_FREEJUNK, PAGEROUND(sz) - malloc_guard);
+		unmap(&g_pool, p, PAGEROUND(sz));
+		delete(&g_pool, r);
 	} else {
 		void *tmp;
 		int i;
 
-		if (mopts.malloc_junk && sz > 0)
+		if (malloc_junk && sz > 0)
 			memset(p, SOME_FREEJUNK, sz);
-		if (!mopts.malloc_freeprot) {
+		if (!malloc_freeprot) {
 			i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
 			tmp = p;
-			p = g_pool->delayed_chunks[i];
-			g_pool->delayed_chunks[i] = tmp;
+			p = g_pool.delayed_chunks[i];
+			g_pool.delayed_chunks[i] = tmp;
 		}
 		if (p != NULL) {
-			r = find(g_pool, p);
+			r = find(&g_pool, p);
 			if (r == NULL) {
 				wrterror("bogus pointer (double free?)");
 				return;
 			}
-			free_bytes(g_pool, r, p);
+			free_bytes(&g_pool, r, p);
 		}
 	}
 }
@@ -1322,11 +1248,6 @@ free(void *ptr)
 
 	_MALLOC_LOCK();
 	malloc_func = " in free():";
-	if (g_pool == NULL) {
-		_MALLOC_UNLOCK();
-		wrterror("free() called before allocation");
-		return;
-	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return;
@@ -1348,12 +1269,12 @@ orealloc(void *p, size_t newsz)
 	if (p == NULL)
 		return omalloc(newsz, 0);
 
-	r = find(g_pool, p);
+	r = find(&g_pool, p);
 	if (r == NULL) {
 		wrterror("bogus pointer (double free?)");
 		return NULL;
 	}
-	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
+	if (newsz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) {
 		errno = ENOMEM;
 		return NULL;
 	}
@@ -1361,63 +1282,61 @@ orealloc(void *p, size_t newsz)
 	REALSIZE(oldsz, r);
 	goldsz = oldsz;
 	if (oldsz > MALLOC_MAXCHUNK) {
-		if (oldsz < mopts.malloc_guard)
+		if (oldsz < malloc_guard)
 			wrterror("guard size");
-		oldsz -= mopts.malloc_guard;
+		oldsz -= malloc_guard;
 	}
 
 	gnewsz = newsz;
 	if (gnewsz > MALLOC_MAXCHUNK)
-		gnewsz += mopts.malloc_guard;
+		gnewsz += malloc_guard;
 
 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
-	    !mopts.malloc_realloc) {
+	    !malloc_realloc) {
 		size_t roldsz = PAGEROUND(goldsz);
 		size_t rnewsz = PAGEROUND(gnewsz);
 
 		if (rnewsz > roldsz) {
-			if (!mopts.malloc_guard) {
+			if (!malloc_guard) {
 				STATS_INC(g_pool.cheap_realloc_tries);
-				zapcacheregion(g_pool, p + roldsz);
+				zapcacheregion(&g_pool, p + roldsz);
 				q = MMAPA(p + roldsz, rnewsz - roldsz);
 				if (q == p + roldsz) {
 					malloc_used += rnewsz - roldsz;
-					if (mopts.malloc_junk)
+					if (malloc_junk)
 						memset(q, SOME_JUNK,
 						    rnewsz - roldsz);
 					r->size = newsz;
-					STATS_INC(g_pool->cheap_reallocs);
+					STATS_INC(g_pool.cheap_reallocs);
 					return p;
 				} else if (q != MAP_FAILED)
 					munmap(q, rnewsz - roldsz);
 			}
 		} else if (rnewsz < roldsz) {
-			if (mopts.malloc_guard) {
-				if (mprotect((char *)p + roldsz -
-				    mopts.malloc_guard, mopts.malloc_guard,
-				    PROT_READ | PROT_WRITE))
+			if (malloc_guard) {
+				if (mprotect((char *)p + roldsz - malloc_guard,
+				    malloc_guard, PROT_READ | PROT_WRITE))
 					wrterror("mprotect");
-				if (mprotect((char *)p + rnewsz -
-				    mopts.malloc_guard, mopts.malloc_guard,
-				    PROT_NONE))
+				if (mprotect((char *)p + rnewsz - malloc_guard,
+				    malloc_guard, PROT_NONE))
 					wrterror("mprotect");
 			}
-			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
+			unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
 			return p;
 		} else {
-			if (newsz > oldsz && mopts.malloc_junk)
+			if (newsz > oldsz && malloc_junk)
 				memset((char *)p + newsz, SOME_JUNK,
-				    rnewsz - mopts.malloc_guard - newsz);
+				    rnewsz - malloc_guard - newsz);
 			r->size = gnewsz;
 			return p;
 		}
 	}
-	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
-		if (mopts.malloc_junk && newsz > 0)
+	if (newsz <= oldsz && newsz > oldsz / 2 && !malloc_realloc) {
+		if (malloc_junk && newsz > 0)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
 		return p;
-	} else if (newsz != oldsz || mopts.malloc_realloc) {
+	} else if (newsz != oldsz || malloc_realloc) {
 		q = omalloc(newsz, 0);
 		if (q == NULL)
 			return NULL;
@@ -1437,19 +1356,25 @@ realloc(void *ptr, size_t size)
 
 	_MALLOC_LOCK();
 	malloc_func = " in realloc():";
-	if (g_pool == NULL) {
-		if (malloc_init() != 0)
+	if (!g_pool.regions_total) {
+		if (omalloc_init(&g_pool)) {
+			_MALLOC_UNLOCK();
+			if (malloc_xmalloc)
+				wrterror("out of memory");
+			errno = ENOMEM;
 			return NULL;
+		}
 	}
 	if (malloc_active++) {
 		malloc_recurse();
 		return NULL;
 	}
+
 	r = orealloc(ptr, size);
 
 	malloc_active--;
 	_MALLOC_UNLOCK();
-	if (r == NULL && mopts.malloc_xmalloc) {
+	if (r == NULL && malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1469,14 +1394,19 @@ calloc(size_t nmemb, size_t size)
 
 	_MALLOC_LOCK();
 	malloc_func = " in calloc():";
-	if (g_pool == NULL) {
-		if (malloc_init() != 0)
+	if (!g_pool.regions_total) {
+		if (omalloc_init(&g_pool)) {
+			_MALLOC_UNLOCK();
+			if (malloc_xmalloc)
+				wrterror("out of memory");
+			errno = ENOMEM;
 			return NULL;
+		}
 	}
 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
 		_MALLOC_UNLOCK();
-		if (mopts.malloc_xmalloc)
+		if (malloc_xmalloc)
 			wrterror("out of memory");
 		errno = ENOMEM;
 		return NULL;
@@ -1492,7 +1422,7 @@ calloc(size_t nmemb, size_t size)
 
 	malloc_active--;
 	_MALLOC_UNLOCK();
-	if (r == NULL && mopts.malloc_xmalloc) {
+	if (r == NULL && malloc_xmalloc) {
 		wrterror("out of memory");
 		errno = ENOMEM;
 	}
@@ -1500,4 +1430,3 @@ calloc(size_t nmemb, size_t size)
 	errno = saved_errno;
 	return r;
 }
-
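A quick way to exercise the flag parsing shown in omalloc_init() above is through the MALLOC_OPTIONS environment variable. The program below is a hypothetical test, not part of this commit; it simply touches the junk-fill ('J') and guard-page ('G') paths that the revert moves back to plain static variables.

/*
 * Hypothetical test program, not from this commit.  Run as:
 *	env MALLOC_OPTIONS=GJ ./a.out
 * 'J' makes malloc()/free() fill memory with junk bytes; 'G' places a
 * PROT_NONE guard page after page-sized allocations.
 */
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *p;

	p = malloc(100);		/* chunk allocation path */
	if (p == NULL)
		return 1;
	memset(p, 'x', 100);
	free(p);			/* with 'J', freed bytes are overwritten */

	p = malloc(8192);		/* page-sized path, guarded with 'G' */
	if (p == NULL)
		return 1;
	free(p);
	return 0;
}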