author		otto <>	2018-01-01 12:41:48 +0000
committer	otto <>	2018-01-01 12:41:48 +0000
commit		628289c5f2ded1394b54b1e9cb24164d8a514269 (patch)
tree		a4e1211c0adad212b15d5c88389e5284921cf602
parent		634412e48e5bb8a467c81c0e1ca56c3b5b343fed (diff)
Only init chunk_info once, plus some moving of code to group related functions.
-rw-r--r--	src/lib/libc/stdlib/malloc.c	540
1 file changed, 267 insertions, 273 deletions
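The heart of the change: chunk_info structs handed out by alloc_chunk_info used to be (re)initialized on every omalloc_make_chunks call; after this commit a zero shift field marks a struct that has never been set up, so init_chunk_info runs only the first time one is taken off the free list (fresh structs come from zero-filled MMAP pages). Below is a minimal standalone sketch of that lazy-init pattern; the node/freelist names are illustrative stand-ins, not the malloc.c types.

#include <stddef.h>
#include <stdio.h>

struct node {
	int shift;		/* 0 means "never initialized" */
	struct node *next;	/* freelist link */
};

static struct node pool[4];	/* zero-filled, like an MMAP'd page */
static struct node *freelist;

static void
init_node(struct node *n, int bits)
{
	/* expensive setup; a nonzero shift marks it done */
	n->shift = bits ? bits : 1;
	printf("init %p\n", (void *)n);
}

static struct node *
alloc_node(int bits)
{
	struct node *n = freelist;

	if (n == NULL)
		return NULL;
	freelist = n->next;
	if (n->shift == 0)	/* the commit's "only init once" test */
		init_node(n, bits);
	return n;
}

static void
free_node(struct node *n)
{
	n->next = freelist;	/* fields survive for the next user */
	freelist = n;
}

int
main(void)
{
	struct node *n;
	size_t i;

	for (i = 0; i < 4; i++)
		free_node(&pool[i]);

	n = alloc_node(5);	/* prints "init ..." */
	free_node(n);
	n = alloc_node(5);	/* silent: already initialized */
	return (n == NULL);
}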
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index ad0b187e03..02349d9ef9 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.237 2017/12/27 10:05:23 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.238 2018/01/01 12:41:48 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -203,10 +203,8 @@ static union {
 
 char *malloc_options;	/* compile-time options */
 
-static u_char getrbyte(struct dir_info *d);
 static __dead void wrterror(struct dir_info *d, char *msg, ...)
     __attribute__((__format__ (printf, 2, 3)));
-static void fill_canary(char *ptr, size_t sz, size_t allocated);
 
 #ifdef MALLOC_STATS
 void malloc_dump(int, int, struct dir_info *);
@@ -312,178 +310,6 @@ getrbyte(struct dir_info *d)
 	return x;
 }
 
-/*
- * Cache maintenance. We keep at most malloc_cache pages cached.
- * If the cache is becoming full, unmap pages in the cache for real,
- * and then add the region to the cache
- * Opposed to the regular region data structure, the sizes in the
- * cache are in MALLOC_PAGESIZE units.
- */
-static void
-unmap(struct dir_info *d, void *p, size_t sz, int clear)
-{
-	size_t psz = sz >> MALLOC_PAGESHIFT;
-	size_t rsz, tounmap;
-	struct region_info *r;
-	u_int i, offset;
-
-	if (sz != PAGEROUND(sz))
-		wrterror(d, "munmap round");
-
-	rsz = mopts.malloc_cache - d->free_regions_size;
-
-	/*
-	 * normally the cache holds recently freed regions, but if the region
-	 * to unmap is larger than the cache size or we're clearing and the
-	 * cache is full, just munmap
-	 */
-	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
-		i = munmap(p, sz);
-		if (i)
-			wrterror(d, "munmap %p", p);
-		STATS_SUB(d->malloc_used, sz);
-		return;
-	}
-	tounmap = 0;
-	if (psz > rsz)
-		tounmap = psz - rsz;
-	offset = getrbyte(d);
-	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
-		if (r->p != NULL) {
-			rsz = r->size << MALLOC_PAGESHIFT;
-			if (munmap(r->p, rsz))
-				wrterror(d, "munmap %p", r->p);
-			r->p = NULL;
-			if (tounmap > r->size)
-				tounmap -= r->size;
-			else
-				tounmap = 0;
-			d->free_regions_size -= r->size;
-			r->size = 0;
-			STATS_SUB(d->malloc_used, rsz);
-		}
-	}
-	if (tounmap > 0)
-		wrterror(d, "malloc cache underflow");
-	for (i = 0; i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
-		if (r->p == NULL) {
-			if (clear)
-				memset(p, 0, sz - mopts.malloc_guard);
-			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
-				size_t amt = mopts.malloc_junk == 1 ?
-				    MALLOC_MAXCHUNK : sz;
-				memset(p, SOME_FREEJUNK, amt);
-			}
-			if (mopts.malloc_freeunmap)
-				mprotect(p, sz, PROT_NONE);
-			r->p = p;
-			r->size = psz;
-			d->free_regions_size += psz;
-			break;
-		}
-	}
-	if (i == mopts.malloc_cache)
-		wrterror(d, "malloc free slot lost");
-	if (d->free_regions_size > mopts.malloc_cache)
-		wrterror(d, "malloc cache overflow");
-}
-
-static void
-zapcacheregion(struct dir_info *d, void *p, size_t len)
-{
-	u_int i;
-	struct region_info *r;
-	size_t rsz;
-
-	for (i = 0; i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[i];
-		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
-			rsz = r->size << MALLOC_PAGESHIFT;
-			if (munmap(r->p, rsz))
-				wrterror(d, "munmap %p", r->p);
-			r->p = NULL;
-			d->free_regions_size -= r->size;
-			r->size = 0;
-			STATS_SUB(d->malloc_used, rsz);
-		}
-	}
-}
-
-static void *
-map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
-{
-	size_t psz = sz >> MALLOC_PAGESHIFT;
-	struct region_info *r, *big = NULL;
-	u_int i;
-	void *p;
-
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
-	    d->canary1 != ~d->canary2)
-		wrterror(d, "internal struct corrupt");
-	if (sz != PAGEROUND(sz))
-		wrterror(d, "map round");
-
-	if (hint == NULL && psz > d->free_regions_size) {
-		_MALLOC_LEAVE(d);
-		p = MMAP(sz);
-		_MALLOC_ENTER(d);
-		if (p != MAP_FAILED)
-			STATS_ADD(d->malloc_used, sz);
-		/* zero fill not needed */
-		return p;
-	}
-	for (i = 0; i < mopts.malloc_cache; i++) {
-		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
-		if (r->p != NULL) {
-			if (hint != NULL && r->p != hint)
-				continue;
-			if (r->size == psz) {
-				p = r->p;
-				r->p = NULL;
-				r->size = 0;
-				d->free_regions_size -= psz;
-				if (mopts.malloc_freeunmap)
-					mprotect(p, sz, PROT_READ | PROT_WRITE);
-				if (zero_fill)
-					memset(p, 0, sz);
-				else if (mopts.malloc_junk == 2 &&
-				    mopts.malloc_freeunmap)
-					memset(p, SOME_FREEJUNK, sz);
-				d->rotor += i + 1;
-				return p;
-			} else if (r->size > psz)
-				big = r;
-		}
-	}
-	if (big != NULL) {
-		r = big;
-		p = r->p;
-		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
-		if (mopts.malloc_freeunmap)
-			mprotect(p, sz, PROT_READ | PROT_WRITE);
-		r->size -= psz;
-		d->free_regions_size -= psz;
-		if (zero_fill)
-			memset(p, 0, sz);
-		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
-			memset(p, SOME_FREEJUNK, sz);
-		return p;
-	}
-	if (hint != NULL)
-		return MAP_FAILED;
-	if (d->free_regions_size > mopts.malloc_cache)
-		wrterror(d, "malloc cache");
-	_MALLOC_LEAVE(d);
-	p = MMAP(sz);
-	_MALLOC_ENTER(d);
-	if (p != MAP_FAILED)
-		STATS_ADD(d->malloc_used, sz);
-	/* zero fill not needed */
-	return p;
-}
-
 static void
 omalloc_parseopt(char opt)
 {
@@ -714,43 +540,6 @@ omalloc_grow(struct dir_info *d)
 	return 0;
 }
 
-static struct chunk_info *
-alloc_chunk_info(struct dir_info *d, int bits)
-{
-	struct chunk_info *p;
-
-	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
-		size_t size, count, i;
-		char *q;
-
-		if (bits == 0)
-			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
-		else
-			count = MALLOC_PAGESIZE >> bits;
-
-		size = howmany(count, MALLOC_BITS);
-		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
-		if (mopts.chunk_canaries)
-			size += count * sizeof(u_short);
-		size = ALIGN(size);
-
-		q = MMAP(MALLOC_PAGESIZE);
-		if (q == MAP_FAILED)
-			return NULL;
-		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
-		count = MALLOC_PAGESIZE / size;
-		for (i = 0; i < count; i++, q += size) {
-			p = (struct chunk_info *)q;
-			p->canary = (u_short)d->canary1;
-			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
-		}
-	}
-	p = LIST_FIRST(&d->chunk_info_list[bits]);
-	LIST_REMOVE(p, entries);
-	return p;
-}
-
-
 /*
  * The hashtable uses the assumption that p is never NULL. This holds since
  * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
@@ -842,72 +631,277 @@ delete(struct dir_info *d, struct region_info *ri)
 }
 
 /*
- * Allocate a page of chunks
+ * Cache maintenance. We keep at most malloc_cache pages cached.
+ * If the cache is becoming full, unmap pages in the cache for real,
+ * and then add the region to the cache
+ * Opposed to the regular region data structure, the sizes in the
+ * cache are in MALLOC_PAGESIZE units.
  */
-static struct chunk_info *
-omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
+static void
+unmap(struct dir_info *d, void *p, size_t sz, int clear)
 {
-	struct chunk_info *bp;
-	void *pp;
-	int i, k;
+	size_t psz = sz >> MALLOC_PAGESHIFT;
+	size_t rsz, tounmap;
+	struct region_info *r;
+	u_int i, offset;
 
-	/* Allocate a new bucket */
-	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
-	if (pp == MAP_FAILED)
-		return NULL;
+	if (sz != PAGEROUND(sz))
+		wrterror(d, "munmap round");
 
-	bp = alloc_chunk_info(d, bits);
-	if (bp == NULL) {
-		unmap(d, pp, MALLOC_PAGESIZE, 0);
-		return NULL;
+	rsz = mopts.malloc_cache - d->free_regions_size;
+
+	/*
+	 * normally the cache holds recently freed regions, but if the region
+	 * to unmap is larger than the cache size or we're clearing and the
+	 * cache is full, just munmap
+	 */
+	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
+		i = munmap(p, sz);
+		if (i)
+			wrterror(d, "munmap %p", p);
+		STATS_SUB(d->malloc_used, sz);
+		return;
+	}
+	tounmap = 0;
+	if (psz > rsz)
+		tounmap = psz - rsz;
+	offset = getrbyte(d);
+	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+		if (r->p != NULL) {
+			rsz = r->size << MALLOC_PAGESHIFT;
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap %p", r->p);
+			r->p = NULL;
+			if (tounmap > r->size)
+				tounmap -= r->size;
+			else
+				tounmap = 0;
+			d->free_regions_size -= r->size;
+			r->size = 0;
+			STATS_SUB(d->malloc_used, rsz);
+		}
 	}
+	if (tounmap > 0)
+		wrterror(d, "malloc cache underflow");
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+		if (r->p == NULL) {
+			if (clear)
+				memset(p, 0, sz - mopts.malloc_guard);
+			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
+				size_t amt = mopts.malloc_junk == 1 ?
+				    MALLOC_MAXCHUNK : sz;
+				memset(p, SOME_FREEJUNK, amt);
+			}
+			if (mopts.malloc_freeunmap)
+				mprotect(p, sz, PROT_NONE);
+			r->p = p;
+			r->size = psz;
+			d->free_regions_size += psz;
+			break;
+		}
+	}
+	if (i == mopts.malloc_cache)
+		wrterror(d, "malloc free slot lost");
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror(d, "malloc cache overflow");
+}
+
+static void
+zapcacheregion(struct dir_info *d, void *p, size_t len)
+{
+	u_int i;
+	struct region_info *r;
+	size_t rsz;
+
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[i];
+		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
+			rsz = r->size << MALLOC_PAGESHIFT;
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap %p", r->p);
+			r->p = NULL;
+			d->free_regions_size -= r->size;
+			r->size = 0;
+			STATS_SUB(d->malloc_used, rsz);
+		}
+	}
+}
+
+static void *
+map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
+{
+	size_t psz = sz >> MALLOC_PAGESHIFT;
+	struct region_info *r, *big = NULL;
+	u_int i;
+	void *p;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror(d, "internal struct corrupt");
+	if (sz != PAGEROUND(sz))
+		wrterror(d, "map round");
+
+	if (hint == NULL && psz > d->free_regions_size) {
+		_MALLOC_LEAVE(d);
+		p = MMAP(sz);
+		_MALLOC_ENTER(d);
+		if (p != MAP_FAILED)
+			STATS_ADD(d->malloc_used, sz);
+		/* zero fill not needed */
+		return p;
+	}
+	for (i = 0; i < mopts.malloc_cache; i++) {
+		r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)];
+		if (r->p != NULL) {
+			if (hint != NULL && r->p != hint)
+				continue;
+			if (r->size == psz) {
+				p = r->p;
+				r->p = NULL;
+				r->size = 0;
+				d->free_regions_size -= psz;
+				if (mopts.malloc_freeunmap)
+					mprotect(p, sz, PROT_READ | PROT_WRITE);
+				if (zero_fill)
+					memset(p, 0, sz);
+				else if (mopts.malloc_junk == 2 &&
+				    mopts.malloc_freeunmap)
+					memset(p, SOME_FREEJUNK, sz);
+				d->rotor += i + 1;
+				return p;
+			} else if (r->size > psz)
+				big = r;
+		}
+	}
+	if (big != NULL) {
+		r = big;
+		p = r->p;
+		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
+		if (mopts.malloc_freeunmap)
+			mprotect(p, sz, PROT_READ | PROT_WRITE);
+		r->size -= psz;
+		d->free_regions_size -= psz;
+		if (zero_fill)
+			memset(p, 0, sz);
+		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
+			memset(p, SOME_FREEJUNK, sz);
+		return p;
+	}
+	if (hint != NULL)
+		return MAP_FAILED;
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror(d, "malloc cache");
+	_MALLOC_LEAVE(d);
+	p = MMAP(sz);
+	_MALLOC_ENTER(d);
+	if (p != MAP_FAILED)
+		STATS_ADD(d->malloc_used, sz);
+	/* zero fill not needed */
+	return p;
+}
+
+static void
+init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
+{
+	int i;
 
-	/* memory protect the page allocated in the malloc(0) case */
 	if (bits == 0) {
-		bp->size = 0;
-		bp->shift = 1;
+		p->shift = 1;
 		i = MALLOC_MINSIZE - 1;
 		while (i >>= 1)
-			bp->shift++;
-		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
-		bp->offset = 0xdead;
-		bp->page = pp;
-
-		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
-		if (k < 0) {
-			unmap(d, pp, MALLOC_PAGESIZE, 0);
-			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
-			return NULL;
-		}
+			p->shift++;
+		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
+		p->size = 0;
+		p->offset = 0xdead;
 	} else {
-		bp->size = 1U << bits;
-		bp->shift = bits;
-		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
-		bp->offset = howmany(bp->total, MALLOC_BITS);
-		bp->page = pp;
+		p->shift = bits;
+		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
+		p->size = 1U << bits;
+		p->offset = howmany(p->total, MALLOC_BITS);
 	}
+	p->canary = (u_short)d->canary1;
 
 	/* set all valid bits in the bitmap */
-	k = bp->total;
-	i = 0;
+	for (i = 0; p->total - i >= MALLOC_BITS; i += MALLOC_BITS)
+		p->bits[i / MALLOC_BITS] = (u_short)~0U;
 
-	/* Do a bunch at a time */
-	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
-		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
+	if (i < p->total)
+		p->bits[i / MALLOC_BITS] = 0;
+	for (; i < p->total; i++)
+		p->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
+}
 
-	if (i < k)
-		bp->bits[i / MALLOC_BITS] = 0;
-	for (; i < k; i++)
-		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
+static struct chunk_info *
+alloc_chunk_info(struct dir_info *d, int bits)
+{
+	struct chunk_info *p;
 
-	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
+	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
+		size_t size, count, i;
+		char *q;
+
+		if (bits == 0)
+			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
+		else
+			count = MALLOC_PAGESIZE >> bits;
 
-	bits++;
-	if ((uintptr_t)pp & bits)
-		wrterror(d, "pp & bits %p", pp);
+		size = howmany(count, MALLOC_BITS);
+		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
+		if (mopts.chunk_canaries)
+			size += count * sizeof(u_short);
+		size = ALIGN(size);
 
-	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
+		q = MMAP(MALLOC_PAGESIZE);
+		if (q == MAP_FAILED)
+			return NULL;
+		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
+		count = MALLOC_PAGESIZE / size;
+
+		for (i = 0; i < count; i++, q += size) {
+			p = (struct chunk_info *)q;
+			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
+		}
+	}
+	p = LIST_FIRST(&d->chunk_info_list[bits]);
+	LIST_REMOVE(p, entries);
+	if (p->shift == 0)
+		init_chunk_info(d, p, bits);
+	return p;
+}
+
+/*
+ * Allocate a page of chunks
+ */
+static struct chunk_info *
+omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
+{
+	struct chunk_info *bp;
+	void *pp;
+
+	/* Allocate a new bucket */
+	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
+	if (pp == MAP_FAILED)
+		return NULL;
+
+	/* memory protect the page allocated in the malloc(0) case */
+	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0)
+		goto err;
+
+	bp = alloc_chunk_info(d, bits);
+	if (bp == NULL)
+		goto err;
+	bp->page = pp;
+
+	if (insert(d, (void *)((uintptr_t)pp | bits + 1), (uintptr_t)bp, NULL))
+		goto err;
+	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
 	return bp;
+
+err:
+	unmap(d, pp, MALLOC_PAGESIZE, 0);
+	return NULL;
 }
 
 static int
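A side note on the bitmap setup done by init_chunk_info in the hunk above: the free-chunk bitmap is filled a whole u_short at a time, then any trailing partial word is cleared and refilled bit by bit. A standalone sketch of the same fill pattern follows; BITS_PER_WORD and the other names are illustrative stand-ins for MALLOC_BITS and the malloc.c fields, not the real definitions.

#include <stdio.h>

#define BITS_PER_WORD	16	/* bits in a u_short, like MALLOC_BITS */

static unsigned short bits[8];

static void
fill_bitmap(int total)
{
	int i;

	/* whole words first */
	for (i = 0; total - i >= BITS_PER_WORD; i += BITS_PER_WORD)
		bits[i / BITS_PER_WORD] = (unsigned short)~0U;

	/* partial trailing word: clear it, then set only the valid bits */
	if (i < total)
		bits[i / BITS_PER_WORD] = 0;
	for (; i < total; i++)
		bits[i / BITS_PER_WORD] |= (unsigned short)1U << (i % BITS_PER_WORD);
}

int
main(void)
{
	int i;

	fill_bitmap(24);	/* 24 chunks: one full word plus 8 bits */
	for (i = 0; i < 2; i++)
		printf("word %d: 0x%04x\n", i, bits[i]);
	/* prints 0xffff, then 0x00ff */
	return 0;
}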
@@ -932,16 +926,26 @@ find_chunksize(size_t size)
 	return j;
 }
 
+static void
+fill_canary(char *ptr, size_t sz, size_t allocated)
+{
+	size_t check_sz = allocated - sz;
+
+	if (check_sz > CHUNK_CHECK_LENGTH)
+		check_sz = CHUNK_CHECK_LENGTH;
+	memset(ptr + sz, SOME_JUNK, check_sz);
+}
+
 /*
  * Allocate a chunk
  */
 static void *
 malloc_bytes(struct dir_info *d, size_t size, void *f)
 {
 	u_int i, r;
 	int j, listnum;
 	size_t k;
 	u_short u, b, *lp;
 	struct chunk_info *bp;
 	void *p;
 
@@ -1031,16 +1035,6 @@ found:
 }
 
 static void
-fill_canary(char *ptr, size_t sz, size_t allocated)
-{
-	size_t check_sz = allocated - sz;
-
-	if (check_sz > CHUNK_CHECK_LENGTH)
-		check_sz = CHUNK_CHECK_LENGTH;
-	memset(ptr + sz, SOME_JUNK, check_sz);
-}
-
-static void
 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
 {
 	size_t check_sz = allocated - sz;
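fill_canary, moved next to its callers by these last two hunks, junk-fills the slack between the requested and the allocated size, capped at CHUNK_CHECK_LENGTH; validate_canary later verifies those bytes to catch small overflows. A self-contained sketch of the pair under assumed constants (JUNK_BYTE and CHECK_LEN stand in for SOME_JUNK and CHUNK_CHECK_LENGTH; the real values differ):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define JUNK_BYTE	0xdb	/* stand-in for SOME_JUNK */
#define CHECK_LEN	32	/* stand-in for CHUNK_CHECK_LENGTH */

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHECK_LEN)
		check_sz = CHECK_LEN;
	memset(ptr + sz, JUNK_BYTE, check_sz);
}

static int
validate_canary(unsigned char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	unsigned char *p, *q;

	if (check_sz > CHECK_LEN)
		check_sz = CHECK_LEN;
	for (p = ptr + sz, q = p + check_sz; p < q; p++)
		if (*p != JUNK_BYTE)
			return 0;	/* something wrote past sz */
	return 1;
}

int
main(void)
{
	size_t sz = 10, allocated = 64;
	char *buf = malloc(allocated);

	if (buf == NULL)
		return 1;
	fill_canary(buf, sz, allocated);
	buf[sz + 3] = 'X';	/* simulate a small overflow */
	printf("canary intact: %d\n",
	    validate_canary((unsigned char *)buf, sz, allocated));
	free(buf);
	return 0;
}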