Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--  src/lib/libc/stdlib/malloc.c  1305
1 files changed, 1305 insertions, 0 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
new file mode 100644
index 0000000000..4e90ce402e
--- /dev/null
+++ b/src/lib/libc/stdlib/malloc.c
@@ -0,0 +1,1305 @@
1/*
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 */
9
10#if defined(LIBC_SCCS) && !defined(lint)
11static char rcsid[] = "$OpenBSD: malloc.c,v 1.48 2002/05/27 03:13:23 deraadt Exp $";
12#endif /* LIBC_SCCS and not lint */
13
14/*
15 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are
16 * related to internal conditions and consistency in malloc.c. This has
17 * a noticeable runtime performance hit, and generally will not do you
18 * any good unless you fiddle with the internals of malloc or want
19 * to catch random pointer corruption as early as possible.
20 */
21#ifndef MALLOC_EXTRA_SANITY
22#undef MALLOC_EXTRA_SANITY
23#endif
24
25/*
26 * Defining MALLOC_STATS will enable you to call malloc_dump() and set
27 * the [dD] options in the MALLOC_OPTIONS environment variable.
28 * It has no run-time performance hit, but does pull in stdio...
29 */
30#ifndef MALLOC_STATS
31#undef MALLOC_STATS
32#endif
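/*
 * Example (a minimal sketch of how the stats hooks are used, assuming
 * the library was compiled with -DMALLOC_STATS): the 'D' option makes
 * the allocator register an atexit() handler that appends a dump to
 * "malloc.out", and malloc_dump() may also be called directly:
 *
 *	$ MALLOC_OPTIONS=D ./prog		(via the environment)
 *
 *	extern char *malloc_options;
 *	void malloc_dump(FILE *);
 *	...
 *	malloc_options = "D";			(set before the first malloc)
 *	malloc_dump(stderr);			(explicit dump at any point)
 */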
33
34/*
35 * What to use for junk: the byte value we fill memory with
36 * when the 'J' option is enabled.
37 */
38#define SOME_JUNK 0xd0 /* as in "Duh" :-) */
39
40#include <sys/types.h>
41#include <sys/param.h>
42#include <sys/mman.h>
43#include <sys/uio.h>
44#include <stdio.h>
45#include <stdlib.h>
46#include <string.h>
47#include <unistd.h>
48#include <fcntl.h>
49#include <errno.h>
50
51/*
52 * The basic parameters you can tweak.
53 *
54 * malloc_pageshift pagesize = 1 << malloc_pageshift
55 * It's probably best if this is the native
56 * page size, but it shouldn't have to be.
57 *
58 * malloc_minsize minimum size of an allocation in bytes.
59 * If this is too small, allocations become too much
60 * work to manage. This is also the smallest
61 * unit of alignment used for the storage
62 * returned by malloc/realloc.
63 *
64 */
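/*
 * Worked example (assuming a machine where PGSHIFT is 12, so pages are
 * 4096 bytes):
 *
 *	malloc_pagesize = 1 << 12   = 4096	bytes per page
 *	malloc_maxsize  = 4096 >> 1 = 2048	largest chunk allocation
 *	malloc_minsize  = 16			smallest chunk / alignment
 *
 * Requests larger than malloc_maxsize are served as whole pages;
 * smaller requests are rounded up to a power of two and carved out of
 * a chunk page.
 */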
65
66#if defined(__OpenBSD__) && defined(__sparc__)
67# define malloc_pageshift 13U
68#endif /* __OpenBSD__ */
69
70#ifdef _THREAD_SAFE
71# include "thread_private.h"
72# if 0
73 /* kernel threads */
74# include <pthread.h>
75 static pthread_mutex_t malloc_lock;
76# define THREAD_LOCK() pthread_mutex_lock(&malloc_lock)
77# define THREAD_UNLOCK() pthread_mutex_unlock(&malloc_lock)
78# define THREAD_LOCK_INIT() pthread_mutex_init(&malloc_lock, 0);
79# else
80 /* user threads */
81# include "spinlock.h"
82 static spinlock_t malloc_lock = _SPINLOCK_INITIALIZER;
83# define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&malloc_lock)
84# define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&malloc_lock)
85# define THREAD_LOCK_INIT()
86 /*
87 * Malloc can't use the wrapped write() if it fails very early, so
88 * we use the unwrapped syscall _thread_sys_write()
89 */
90# define write _thread_sys_write
91 ssize_t write(int, const void *, size_t);
92# undef malloc
93# undef realloc
94# undef free
95# endif
96#else
97 /* no threads */
98# define THREAD_LOCK()
99# define THREAD_UNLOCK()
100# define THREAD_LOCK_INIT()
101#endif
102
103/*
104 * No user serviceable parts behind this point.
105 *
106 * This structure describes a page worth of chunks.
107 */
108
109struct pginfo {
110 struct pginfo *next; /* next on the free list */
111 void *page; /* Pointer to the page */
112 u_short size; /* size of this page's chunks */
113 u_short shift; /* How far to shift for chunks of this size */
114 u_short free; /* How many free chunks */
115 u_short total; /* How many chunks in total */
116 u_long bits[1]; /* Which chunks are free */
117};
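/*
 * Illustration (a sketch only, not code the allocator relies on): the
 * chunk index of a pointer within its page, and the test for "chunk i
 * is free" (a set bit means free), look like this:
 *
 *	i = ((u_long)ptr & malloc_pagemask) >> pi->shift;
 *	free = (pi->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) != 0;
 *
 * A 4096-byte page of 16-byte chunks holds 256 chunks, so bits[] spans
 * 256 / MALLOC_BITS u_longs (8 words with 32-bit u_longs).
 */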
118
119/*
120 * This structure describes a run of contiguous free pages.
121 */
122
123struct pgfree {
124 struct pgfree *next; /* next run of free pages */
125 struct pgfree *prev; /* prev run of free pages */
126 void *page; /* pointer to free pages */
127 void *end; /* pointer to end of free pages */
128 u_long size; /* number of bytes free */
129};
130
131/*
132 * How many bits per u_long in the bitmap.
133 * Change only if not 8 bits/byte
134 */
135#define MALLOC_BITS (8*sizeof(u_long))
136
137/*
138 * Magic values to put in the page_directory
139 */
140#define MALLOC_NOT_MINE ((struct pginfo*) 0)
141#define MALLOC_FREE ((struct pginfo*) 1)
142#define MALLOC_FIRST ((struct pginfo*) 2)
143#define MALLOC_FOLLOW ((struct pginfo*) 3)
144#define MALLOC_MAGIC ((struct pginfo*) 4)
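/*
 * Any page_directory entry that compares below MALLOC_MAGIC is one of
 * the magic values above; anything at or above it is a real
 * (struct pginfo *) describing a page of chunks.  ifree() dispatches on
 * exactly that comparison:
 *
 *	info = page_dir[ptr2index(ptr)];
 *	if (info < MALLOC_MAGIC)
 *		free_pages(ptr, index, info);	(whole-page allocation)
 *	else
 *		free_bytes(ptr, index, info);	(chunk within a page)
 */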
145
146#ifndef malloc_pageshift
147#define malloc_pageshift (PGSHIFT)
148#endif
149
150#ifndef malloc_minsize
151#define malloc_minsize 16U
152#endif
153
154#ifndef malloc_pageshift
155#error "malloc_pageshift undefined"
156#endif
157
158#if !defined(malloc_pagesize)
159#define malloc_pagesize (1UL<<malloc_pageshift)
160#endif
161
162#if ((1UL<<malloc_pageshift) != malloc_pagesize)
163#error "(1UL<<malloc_pageshift) != malloc_pagesize"
164#endif
165
166#ifndef malloc_maxsize
167#define malloc_maxsize ((malloc_pagesize)>>1)
168#endif
169
170/* A mask for the offset inside a page. */
171#define malloc_pagemask ((malloc_pagesize)-1)
172
173#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
174#define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
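/*
 * Worked example (assuming 4096-byte pages, i.e. malloc_pagemask == 0xfff):
 *
 *	pageround(0x12345) == 0x13000		rounded up to the next page
 *	pageround(0x13000) == 0x13000		already page aligned
 *
 * ptr2index(p) is the page number of p minus malloc_origo, i.e. the
 * slot in page_dir[] that describes the page p lives on.
 */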
175
176/* fd of /dev/zero */
177#ifdef USE_DEV_ZERO
178static int fdzero;
179#define MMAP_FD fdzero
180#define INIT_MMAP() \
181 { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
182 wrterror("open of /dev/zero"); }
183#else
184#define MMAP_FD (-1)
185#define INIT_MMAP()
186#endif
187
188/* Set when initialization has been done */
189static unsigned int malloc_started;
190
191/* Number of free pages we cache */
192static unsigned int malloc_cache = 16;
193
194/* The offset from pagenumber to index into the page directory */
195static u_long malloc_origo;
196
197/* The last index in the page directory we care about */
198static u_long last_index;
199
200/* Pointer to page directory. Allocated "as if with" malloc */
201static struct pginfo **page_dir;
202
203/* How many slots in the page directory */
204static size_t malloc_ninfo;
205
206/* Free pages line up here */
207static struct pgfree free_list;
208
209/* abort() on problems, since the user doesn't handle them. */
210static int malloc_abort;
211
212/* Are we trying to die ? */
213static int suicide;
214
215#ifdef MALLOC_STATS
216/* dump statistics */
217static int malloc_stats;
218#endif
219
220/* avoid outputting warnings? */
221static int malloc_silent;
222
223/* always realloc ? */
224static int malloc_realloc;
225
226#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
227/* pass the kernel a hint on free pages ? */
228static int malloc_hint;
229#endif
230
231/* xmalloc behaviour ? */
232static int malloc_xmalloc;
233
234/* zero fill ? */
235static int malloc_zero;
236
237/* junk fill ? */
238static int malloc_junk;
239
240#ifdef __FreeBSD__
241/* utrace ? */
242static int malloc_utrace;
243
244struct ut { void *p; size_t s; void *r; };
245
246void utrace(struct ut *, int);
247
248#define UTRACE(a, b, c) \
249 if (malloc_utrace) \
250 {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
251#else /* !__FreeBSD__ */
252#define UTRACE(a,b,c)
253#endif
254
255/* my last break. */
256static void *malloc_brk;
257
258/* one location cache for free-list holders */
259static struct pgfree *px;
260
261/* compile-time options */
262char *malloc_options;
263
264/* Name of the current public function */
265static char *malloc_func;
266
267/* Macro for mmap */
268#define MMAP(size) \
269 mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
270 MMAP_FD, (off_t)0);
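/*
 * MMAP(n) yields a private, read/write, zero-filled mapping of n bytes,
 * backed by MAP_ANON or by /dev/zero when USE_DEV_ZERO is defined.
 * For example, the page directory below is created with
 * MMAP(malloc_pagesize) and grown with MMAP(i * malloc_pagesize) in
 * extend_pgdir().
 */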
271
272/*
273 * Necessary function declarations
274 */
275static int extend_pgdir(u_long index);
276static void *imalloc(size_t size);
277static void ifree(void *ptr);
278static void *irealloc(void *ptr, size_t size);
279static void *malloc_bytes(size_t size);
280
281#ifdef MALLOC_STATS
282void
283malloc_dump(fd)
284 FILE *fd;
285{
286 struct pginfo **pd;
287 struct pgfree *pf;
288 int j;
289
290 pd = page_dir;
291
292 /* print out all the pages */
293 for(j=0;j<=last_index;j++) {
294 fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j);
295 if (pd[j] == MALLOC_NOT_MINE) {
296 for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++)
297 ;
298 j--;
299 fprintf(fd, ".. %5d not mine\n", j);
300 } else if (pd[j] == MALLOC_FREE) {
301 for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++)
302 ;
303 j--;
304 fprintf(fd, ".. %5d free\n", j);
305 } else if (pd[j] == MALLOC_FIRST) {
306 for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++)
307 ;
308 j--;
309 fprintf(fd, ".. %5d in use\n", j);
310 } else if (pd[j] < MALLOC_MAGIC) {
311 fprintf(fd, "(%p)\n", pd[j]);
312 } else {
313 fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n",
314 pd[j], pd[j]->free, pd[j]->total,
315 pd[j]->size, pd[j]->page, pd[j]->next);
316 }
317 }
318
319 for(pf=free_list.next; pf; pf=pf->next) {
320 fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n",
321 pf, pf->page, pf->end, pf->size, pf->prev, pf->next);
322 if (pf == pf->next) {
323 fprintf(fd, "Free_list loops.\n");
324 break;
325 }
326 }
327
328 /* print out various info */
329 fprintf(fd, "Minsize\t%d\n", malloc_minsize);
330 fprintf(fd, "Maxsize\t%d\n", malloc_maxsize);
331 fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize);
332 fprintf(fd, "Pageshift\t%d\n", malloc_pageshift);
333 fprintf(fd, "FirstPage\t%ld\n", malloc_origo);
334 fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift,
335 (last_index + malloc_pageshift) << malloc_pageshift);
336 fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift);
337}
338#endif /* MALLOC_STATS */
339
340extern char *__progname;
341
342static void
343wrterror(p)
344 char *p;
345{
346 char *q = " error: ";
347 struct iovec iov[4];
348
349 iov[0].iov_base = __progname;
350 iov[0].iov_len = strlen(__progname);
351 iov[1].iov_base = malloc_func;
352 iov[1].iov_len = strlen(malloc_func);
353 iov[2].iov_base = q;
354 iov[2].iov_len = strlen(q);
355 iov[3].iov_base = p;
356 iov[3].iov_len = strlen(p);
357 writev(STDERR_FILENO, iov, 4);
358
359 suicide = 1;
360#ifdef MALLOC_STATS
361 if (malloc_stats)
362 malloc_dump(stderr);
363#endif /* MALLOC_STATS */
364 abort();
365}
366
367static void
368wrtwarning(p)
369 char *p;
370{
371 char *q = " warning: ";
372 struct iovec iov[4];
373
374 if (malloc_abort)
375 wrterror(p);
376 else if (malloc_silent)
377 return;
378
379 iov[0].iov_base = __progname;
380 iov[0].iov_len = strlen(__progname);
381 iov[1].iov_base = malloc_func;
382 iov[1].iov_len = strlen(malloc_func);
383 iov[2].iov_base = q;
384 iov[2].iov_len = strlen(q);
385 iov[3].iov_base = p;
386 iov[3].iov_len = strlen(p);
387 writev(STDERR_FILENO, iov, 4);
388}
389
390#ifdef MALLOC_STATS
391static void
392malloc_exit()
393{
394 FILE *fd = fopen("malloc.out", "a");
395 char *q = "malloc() warning: Couldn't dump stats.\n";
396 if (fd) {
397 malloc_dump(fd);
398 fclose(fd);
399 } else
400 write(2, q, strlen(q));
401}
402#endif /* MALLOC_STATS */
403
404
405/*
406 * Allocate a number of pages from the OS
407 */
408static void *
409map_pages(pages)
410 int pages;
411{
412 caddr_t result, tail;
413
414 result = (caddr_t)pageround((u_long)sbrk(0));
415 tail = result + (pages << malloc_pageshift);
416
417 if (brk(tail)) {
418#ifdef MALLOC_EXTRA_SANITY
419 wrterror("(ES): map_pages fails\n");
420#endif /* MALLOC_EXTRA_SANITY */
421 return 0;
422 }
423
424 last_index = ptr2index(tail) - 1;
425 malloc_brk = tail;
426
427 if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
428 return 0;
429
430 return result;
431}
432
433/*
434 * Extend page directory
435 */
436static int
437extend_pgdir(index)
438 u_long index;
439{
440 struct pginfo **new, **old;
441 size_t i, oldlen;
442
443 /* Make it this many pages */
444 i = index * sizeof *page_dir;
445 i /= malloc_pagesize;
446 i += 2;
447
448 /* remember the old mapping size */
449 oldlen = malloc_ninfo * sizeof *page_dir;
450
451 /*
452 * NOTE: we allocate new pages and copy the directory rather than tempt
453 * fate by trying to "grow" the region.. There is nothing to prevent
454 * us from accidentally re-mapping space that's been allocated by our caller
455 * via dlopen() or other mmap().
456 *
457 * The copy problem is not too bad, as there is 4K of page index per
458 * 4MB of malloc arena.
459 *
460 * We can totally avoid the copy if we open a file descriptor to associate
461 * the anon mappings with. Then, when we remap the pages at the new
462 * address, the old pages will be "magically" remapped.. But this means
463 * keeping open a "secret" file descriptor.....
464 */
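 /*
  * For example, with 4096-byte pages and 4-byte pointers one directory
  * page holds 1024 entries and therefore indexes 1024 * 4096 bytes ==
  * 4MB of arena, which is where the "4K of page index per 4MB" figure
  * above comes from.
  */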
465
466 /* Get new pages */
467 new = (struct pginfo**) MMAP(i * malloc_pagesize);
468 if (new == MAP_FAILED)
469 return 0;
470
471 /* Copy the old stuff */
472 memcpy(new, page_dir,
473 malloc_ninfo * sizeof *page_dir);
474
475 /* register the new size */
476 malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;
477
478 /* swap the pointers */
479 old = page_dir;
480 page_dir = new;
481
482 /* Now free the old stuff */
483 munmap(old, oldlen);
484 return 1;
485}
486
487/*
488 * Initialize the world
489 */
490static void
491malloc_init ()
492{
493 char *p, b[64];
494 int i, j;
495 int save_errno = errno;
496
497 THREAD_LOCK_INIT();
498
499 INIT_MMAP();
500
501#ifdef MALLOC_EXTRA_SANITY
502 malloc_junk = 1;
503#endif /* MALLOC_EXTRA_SANITY */
504
505 for (i = 0; i < 3; i++) {
506 if (i == 0) {
507 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
508 if (j <= 0)
509 continue;
510 b[j] = '\0';
511 p = b;
512 } else if (i == 1) {
513 if (issetugid() == 0)
514 p = getenv("MALLOC_OPTIONS");
515 else
516 continue;
517 } else if (i == 2) {
518 p = malloc_options;
519 }
520 for (; p && *p; p++) {
521 switch (*p) {
522 case '>': malloc_cache <<= 1; break;
523 case '<': malloc_cache >>= 1; break;
524 case 'a': malloc_abort = 0; break;
525 case 'A': malloc_abort = 1; break;
526#ifdef MALLOC_STATS
527 case 'd': malloc_stats = 0; break;
528 case 'D': malloc_stats = 1; break;
529#endif /* MALLOC_STATS */
530#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
531 case 'h': malloc_hint = 0; break;
532 case 'H': malloc_hint = 1; break;
533#endif /* __FreeBSD__ */
534 case 'r': malloc_realloc = 0; break;
535 case 'R': malloc_realloc = 1; break;
536 case 'j': malloc_junk = 0; break;
537 case 'J': malloc_junk = 1; break;
538 case 'n': malloc_silent = 0; break;
539 case 'N': malloc_silent = 1; break;
540#ifdef __FreeBSD__
541 case 'u': malloc_utrace = 0; break;
542 case 'U': malloc_utrace = 1; break;
543#endif /* __FreeBSD__ */
544 case 'x': malloc_xmalloc = 0; break;
545 case 'X': malloc_xmalloc = 1; break;
546 case 'z': malloc_zero = 0; break;
547 case 'Z': malloc_zero = 1; break;
548 default:
549 j = malloc_abort;
550 malloc_abort = 0;
551 wrtwarning("unknown char in MALLOC_OPTIONS\n");
552 malloc_abort = j;
553 break;
554 }
555 }
556 }
557
558 UTRACE(0, 0, 0);
559
560 /*
561 * We want junk in the entire allocation, and zero only in the part
562 * the user asked for.
563 */
564 if (malloc_zero)
565 malloc_junk=1;
566
567#ifdef MALLOC_STATS
568 if (malloc_stats)
569 atexit(malloc_exit);
570#endif /* MALLOC_STATS */
571
572 /* Allocate one page for the page directory */
573 page_dir = (struct pginfo **) MMAP(malloc_pagesize);
574
575 if (page_dir == MAP_FAILED)
576 wrterror("mmap(2) failed, check limits.\n");
577
578 /*
579 * We need a maximum of malloc_pageshift buckets, so we steal them
580 * from the front of the page_directory.
581 */
582 malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
583 malloc_origo -= malloc_pageshift;
584
585 malloc_ninfo = malloc_pagesize / sizeof *page_dir;
586
587 /* Been here, done that */
588 malloc_started++;
589
590 /* Recalculate the cache size in bytes, and make sure it's nonzero */
591
592 if (!malloc_cache)
593 malloc_cache++;
594
595 malloc_cache <<= malloc_pageshift;
596
597 /*
598 * This is a nice hack from Kaleb Keithley (kaleb@x.org).
599 * We can sbrk(2) further back when we keep this on a low address.
600 */
601 px = (struct pgfree *) imalloc (sizeof *px);
602 errno = save_errno;
603}
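/*
 * Example of how the three option sources above combine: if
 * /etc/malloc.conf is a symlink to "A", the environment carries
 * MALLOC_OPTIONS=aJ and the program sets malloc_options = "Z", the
 * strings are parsed in that order, so the net effect is abort off
 * ('a' overrides 'A'), junk fill on ('J') and zero fill on ('Z', which
 * forces junk fill anyway as noted above).
 */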
604
605/*
606 * Allocate a number of complete pages
607 */
608static void *
609malloc_pages(size)
610 size_t size;
611{
612 void *p, *delay_free = 0;
613 int i;
614 struct pgfree *pf;
615 u_long index;
616
617 size = pageround(size);
618
619 p = 0;
620 /* Look for free pages before asking for more */
621 for(pf = free_list.next; pf; pf = pf->next) {
622
623#ifdef MALLOC_EXTRA_SANITY
624 if (pf->size & malloc_pagemask)
625 wrterror("(ES): junk length entry on free_list\n");
626 if (!pf->size)
627 wrterror("(ES): zero length entry on free_list\n");
628 if (pf->page == pf->end)
629 wrterror("(ES): zero entry on free_list\n");
630 if (pf->page > pf->end)
631 wrterror("(ES): sick entry on free_list\n");
632 if ((void*)pf->page >= (void*)sbrk(0))
633 wrterror("(ES): entry on free_list past brk\n");
634 if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
635 wrterror("(ES): non-free first page on free-list\n");
636 if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
637 wrterror("(ES): non-free last page on free-list\n");
638#endif /* MALLOC_EXTRA_SANITY */
639
640 if (pf->size < size)
641 continue;
642
643 if (pf->size == size) {
644 p = pf->page;
645 if (pf->next)
646 pf->next->prev = pf->prev;
647 pf->prev->next = pf->next;
648 delay_free = pf;
649 break;
650 }
651
652 p = pf->page;
653 pf->page = (char *)pf->page + size;
654 pf->size -= size;
655 break;
656 }
657
658#ifdef MALLOC_EXTRA_SANITY
659 if (p && page_dir[ptr2index(p)] != MALLOC_FREE)
660 wrterror("(ES): allocated non-free page on free-list\n");
661#endif /* MALLOC_EXTRA_SANITY */
662
663 size >>= malloc_pageshift;
664
665 /* Map new pages */
666 if (!p)
667 p = map_pages(size);
668
669 if (p) {
670
671 index = ptr2index(p);
672 page_dir[index] = MALLOC_FIRST;
673 for (i=1;i<size;i++)
674 page_dir[index+i] = MALLOC_FOLLOW;
675
676 if (malloc_junk)
677 memset(p, SOME_JUNK, size << malloc_pageshift);
678 }
679
680 if (delay_free) {
681 if (!px)
682 px = delay_free;
683 else
684 ifree(delay_free);
685 }
686
687 return p;
688}
689
690/*
691 * Allocate a page of fragments
692 */
693
694static __inline__ int
695malloc_make_chunks(bits)
696 int bits;
697{
698 struct pginfo *bp;
699 void *pp;
700 int i, k, l;
701
702 /* Allocate a new bucket */
703 pp = malloc_pages((size_t)malloc_pagesize);
704 if (!pp)
705 return 0;
706
707 /* Find length of admin structure */
708 l = sizeof *bp - sizeof(u_long);
709 l += sizeof(u_long) *
710 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
711
712 /* Don't waste more than two chunks on this */
713 /*
714 * If we are to allocate a memory protected page for the malloc(0)
715 * case (when bits=0), it must be from a different page than the
716 * pginfo page.
717 * --> Treat it like the big chunk alloc, get a second data page.
718 */
719 if (bits != 0 && (1UL<<(bits)) <= l+l) {
720 bp = (struct pginfo *)pp;
721 } else {
722 bp = (struct pginfo *)imalloc(l);
723 if (!bp) {
724 ifree(pp);
725 return 0;
726 }
727 }
728
729 /* memory protect the page allocated in the malloc(0) case */
730 if (bits == 0) {
731
732 bp->size = 0;
733 bp->shift = 1;
734 i = malloc_minsize-1;
735 while (i >>= 1)
736 bp->shift++;
737 bp->total = bp->free = malloc_pagesize >> bp->shift;
738 bp->page = pp;
739
740 k = mprotect(pp, malloc_pagesize, PROT_NONE);
741 if (k < 0) {
742 ifree(pp);
743 ifree(bp);
744 return 0;
745 }
746 } else {
747 bp->size = (1UL<<bits);
748 bp->shift = bits;
749 bp->total = bp->free = malloc_pagesize >> bits;
750 bp->page = pp;
751 }
752
753 /* set all valid bits in the bitmap */
754 k = bp->total;
755 i = 0;
756
757 /* Do a bunch at a time */
758 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
759 bp->bits[i / MALLOC_BITS] = ~0UL;
760
761 for(; i < k; i++)
762 bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
763
764 if (bp == bp->page) {
765 /* Mark the ones we stole for ourselves */
766 for(i=0;l > 0;i++) {
767 bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS));
768 bp->free--;
769 bp->total--;
770 l -= (1 << bits);
771 }
772 }
773
774 /* MALLOC_LOCK */
775
776 page_dir[ptr2index(pp)] = bp;
777
778 bp->next = page_dir[bits];
779 page_dir[bits] = bp;
780
781 /* MALLOC_UNLOCK */
782
783 return 1;
784}
785
786/*
787 * Allocate a fragment
788 */
789static void *
790malloc_bytes(size)
791 size_t size;
792{
793 int i,j;
794 u_long u;
795 struct pginfo *bp;
796 int k;
797 u_long *lp;
798
799 /* Don't bother with anything less than this, */
800 /* unless we have a malloc(0) request */
801 if (size != 0 && size < malloc_minsize)
802 size = malloc_minsize;
803
804 /* Find the right bucket */
805 if (size == 0)
806 j=0;
807 else {
808 j = 1;
809 i = size-1;
810 while (i >>= 1)
811 j++;
812 }
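 /*
  * Example: a request for 24 bytes gives i = 23, which can be shifted
  * right five times before reaching zero, so j = 5 and the request is
  * served from the 1<<5 == 32-byte bucket.  Requests of 1..16 bytes
  * end up in the 16-byte bucket (j = 4).
  */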
813
814 /* If the bucket is empty, make another page of chunks of that size */
815 if (!page_dir[j] && !malloc_make_chunks(j))
816 return 0;
817
818 bp = page_dir[j];
819
820 /* Find first word of bitmap which isn't empty */
821 for (lp = bp->bits; !*lp; lp++)
822 ;
823
824 /* Find that bit, and tweak it */
825 u = 1;
826 k = 0;
827 while (!(*lp & u)) {
828 u += u;
829 k++;
830 }
831 *lp ^= u;
832
833 /* If there are no more free chunks, remove the page from this bucket's list */
834 if (!--bp->free) {
835 page_dir[j] = bp->next;
836 bp->next = 0;
837 }
838
839 /* Adjust to the real offset of that chunk */
840 k += (lp-bp->bits)*MALLOC_BITS;
841 k <<= bp->shift;
842
843 if (malloc_junk && bp->size != 0)
844 memset((char *)bp->page + k, SOME_JUNK, bp->size);
845
846 return (u_char *)bp->page + k;
847}
848
849/*
850 * Allocate a piece of memory
851 */
852static void *
853imalloc(size)
854 size_t size;
855{
856 void *result;
857
858 if (!malloc_started)
859 malloc_init();
860
861 if (suicide)
862 abort();
863
864 if ((size + malloc_pagesize) < size) /* Check for overflow */
865 result = 0;
866 else if (size <= malloc_maxsize)
867 result = malloc_bytes(size);
868 else
869 result = malloc_pages(size);
870
871 if (malloc_abort && !result)
872 wrterror("allocation failed.\n");
873
874 if (malloc_zero && result)
875 memset(result, 0, size);
876
877 return result;
878}
879
880/*
881 * Change the size of an allocation.
882 */
883static void *
884irealloc(ptr, size)
885 void *ptr;
886 size_t size;
887{
888 void *p;
889 u_long osize, index;
890 struct pginfo **mp;
891 int i;
892
893 if (suicide)
894 abort();
895
896 if (!malloc_started) {
897 wrtwarning("malloc() has never been called.\n");
898 return 0;
899 }
900
901 index = ptr2index(ptr);
902
903 if (index < malloc_pageshift) {
904 wrtwarning("junk pointer, too low to make sense.\n");
905 return 0;
906 }
907
908 if (index > last_index) {
909 wrtwarning("junk pointer, too high to make sense.\n");
910 return 0;
911 }
912
913 mp = &page_dir[index];
914
915 if (*mp == MALLOC_FIRST) { /* Page allocation */
916
917 /* Check the pointer */
918 if ((u_long)ptr & malloc_pagemask) {
919 wrtwarning("modified (page-) pointer.\n");
920 return 0;
921 }
922
923 /* Find the size in bytes */
924 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
925 osize += malloc_pagesize;
926
927 if (!malloc_realloc && /* unless we have to, */
928 size <= osize && /* .. or are too small, */
929 size > (osize - malloc_pagesize)) { /* .. or can free a page, */
930 return ptr; /* don't do anything. */
931 }
932
933 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
934
935 /* Check the pointer for sane values */
936 if ((u_long)ptr & ((1UL<<((*mp)->shift))-1)) {
937 wrtwarning("modified (chunk-) pointer.\n");
938 return 0;
939 }
940
941 /* Find the chunk index in the page */
942 i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;
943
944 /* Verify that it isn't a free chunk already */
945 if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
946 wrtwarning("chunk is already free.\n");
947 return 0;
948 }
949
950 osize = (*mp)->size;
951
952 if (!malloc_realloc && /* Unless we have to, */
953 size < osize && /* ..or are too small, */
954 (size > osize/2 || /* ..or could use a smaller size, */
955 osize == malloc_minsize)) { /* ..(if there is one) */
956 return ptr; /* ..Don't do anything */
957 }
958
959 } else {
960 wrtwarning("pointer to wrong page.\n");
961 return 0;
962 }
963
964 p = imalloc(size);
965
966 if (p) {
967 /* copy the lesser of the two sizes, and free the old one */
968 /* Don't move from/to 0 sized region !!! */
969 if (osize != 0 && size != 0) {
970 if (osize < size)
971 memcpy(p, ptr, osize);
972 else
973 memcpy(p, ptr, size);
974 }
975 ifree(ptr);
976 }
977 return p;
978}
979
980/*
981 * Free a sequence of pages
982 */
983
984static __inline__ void
985free_pages(ptr, index, info)
986 void *ptr;
987 int index;
988 struct pginfo *info;
989{
990 int i;
991 struct pgfree *pf, *pt=0;
992 u_long l;
993 void *tail;
994
995 if (info == MALLOC_FREE) {
996 wrtwarning("page is already free.\n");
997 return;
998 }
999
1000 if (info != MALLOC_FIRST) {
1001 wrtwarning("pointer to wrong page.\n");
1002 return;
1003 }
1004
1005 if ((u_long)ptr & malloc_pagemask) {
1006 wrtwarning("modified (page-) pointer.\n");
1007 return;
1008 }
1009
1010 /* Count how many pages and mark them free at the same time */
1011 page_dir[index] = MALLOC_FREE;
1012 for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
1013 page_dir[index + i] = MALLOC_FREE;
1014
1015 l = i << malloc_pageshift;
1016
1017 if (malloc_junk)
1018 memset(ptr, SOME_JUNK, l);
1019
1020#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
1021 if (malloc_hint)
1022 madvise(ptr, l, MADV_FREE);
1023#endif
1024
1025 tail = (char *)ptr+l;
1026
1027 /* add to free-list */
1028 if (!px)
1029 px = imalloc(sizeof *px); /* This cannot fail... */
1030 px->page = ptr;
1031 px->end = tail;
1032 px->size = l;
1033 if (!free_list.next) {
1034
1035 /* Nothing on free list, put this at head */
1036 px->next = free_list.next;
1037 px->prev = &free_list;
1038 free_list.next = px;
1039 pf = px;
1040 px = 0;
1041
1042 } else {
1043
1044 /* Find the right spot, leave pf pointing to the modified entry. */
1045 tail = (char *)ptr+l;
1046
1047 for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
1048 ; /* Race ahead here */
1049
1050 if (pf->page > tail) {
1051 /* Insert before entry */
1052 px->next = pf;
1053 px->prev = pf->prev;
1054 pf->prev = px;
1055 px->prev->next = px;
1056 pf = px;
1057 px = 0;
1058 } else if (pf->end == ptr ) {
1059 /* Append to the previous entry */
1060 pf->end = (char *)pf->end + l;
1061 pf->size += l;
1062 if (pf->next && pf->end == pf->next->page ) {
1063 /* And collapse the next too. */
1064 pt = pf->next;
1065 pf->end = pt->end;
1066 pf->size += pt->size;
1067 pf->next = pt->next;
1068 if (pf->next)
1069 pf->next->prev = pf;
1070 }
1071 } else if (pf->page == tail) {
1072 /* Prepend to entry */
1073 pf->size += l;
1074 pf->page = ptr;
1075 } else if (!pf->next) {
1076 /* Append at tail of chain */
1077 px->next = 0;
1078 px->prev = pf;
1079 pf->next = px;
1080 pf = px;
1081 px = 0;
1082 } else {
1083 wrterror("freelist is destroyed.\n");
1084 }
1085 }
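 /*
  * Sketch of the four cases above, with the freed run written as
  * [ptr .. tail):
  *
  *	pf->page > tail  :  [ptr..tail) ... [pf)   new entry linked before pf
  *	pf->end == ptr   :  [pf)[ptr..tail)        pf grows forward (and may
  *	                                           merge with pf->next)
  *	pf->page == tail :  [ptr..tail)[pf)        pf grows backward
  *	no pf->next      :  [pf) ... [ptr..tail)   new entry appended at tail
  */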
1086
1087 /* Return something to OS ? */
1088 if (!pf->next && /* If we're the last one, */
1089 pf->size > malloc_cache && /* ..and the cache is full, */
1090 pf->end == malloc_brk && /* ..and none behind us, */
1091 malloc_brk == sbrk(0)) { /* ..and it's OK to do... */
1092
1093 /*
1094 * Keep the cache intact. Notice that the '>' above guarantees that
1095 * the pf will always have at least one page afterwards.
1096 */
1097 pf->end = (char *)pf->page + malloc_cache;
1098 pf->size = malloc_cache;
1099
1100 brk(pf->end);
1101 malloc_brk = pf->end;
1102
1103 index = ptr2index(pf->end);
1104
1105 for(i=index;i <= last_index;)
1106 page_dir[i++] = MALLOC_NOT_MINE;
1107 last_index = index - 1;
1108
1109 /* XXX: We could realloc/shrink the pagedir here I guess. */
1110 }
1111 if (pt)
1112 ifree(pt);
1113}
1114
1115/*
1116 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1117 */
1118
1119/* ARGSUSED */
1120static __inline__ void
1121free_bytes(ptr, index, info)
1122 void *ptr;
1123 int index;
1124 struct pginfo *info;
1125{
1126 int i;
1127 struct pginfo **mp;
1128 void *vp;
1129
1130 /* Find the chunk number on the page */
1131 i = ((u_long)ptr & malloc_pagemask) >> info->shift;
1132
1133 if ((u_long)ptr & ((1UL<<(info->shift))-1)) {
1134 wrtwarning("modified (chunk-) pointer.\n");
1135 return;
1136 }
1137
1138 if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
1139 wrtwarning("chunk is already free.\n");
1140 return;
1141 }
1142
1143 if (malloc_junk && info->size != 0)
1144 memset(ptr, SOME_JUNK, info->size);
1145
1146 info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
1147 info->free++;
1148
1149 if (info->size != 0)
1150 mp = page_dir + info->shift;
1151 else
1152 mp = page_dir;
1153
1154 if (info->free == 1) {
1155
1156 /* Page became non-full */
1157
1158 /* Insert in address order */
1159 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1160 mp = &(*mp)->next;
1161 info->next = *mp;
1162 *mp = info;
1163 return;
1164 }
1165
1166 if (info->free != info->total)
1167 return;
1168
1169 /* Find & remove this page in the queue */
1170 while (*mp != info) {
1171 mp = &((*mp)->next);
1172#ifdef MALLOC_EXTRA_SANITY
1173 if (!*mp)
1174 wrterror("(ES): Not on queue\n");
1175#endif /* MALLOC_EXTRA_SANITY */
1176 }
1177 *mp = info->next;
1178
1179 /* Free the page & the info structure if need be */
1180 page_dir[ptr2index(info->page)] = MALLOC_FIRST;
1181
1182 /* If the page was mprotected, unprotect it before releasing it */
1183 if (info->size == 0) {
1184 mprotect(info->page, malloc_pagesize, PROT_READ|PROT_WRITE);
1185 /* Do we have to care if mprotect succeeds here ? */
1186 }
1187
1188 vp = info->page; /* Order is important ! */
1189 if(vp != (void*)info)
1190 ifree(info);
1191 ifree(vp);
1192}
1193
1194static void
1195ifree(ptr)
1196 void *ptr;
1197{
1198 struct pginfo *info;
1199 int index;
1200
1201 /* This is legal */
1202 if (!ptr)
1203 return;
1204
1205 if (!malloc_started) {
1206 wrtwarning("malloc() has never been called.\n");
1207 return;
1208 }
1209
1210 /* If we're already sinking, don't make matters any worse. */
1211 if (suicide)
1212 return;
1213
1214 index = ptr2index(ptr);
1215
1216 if (index < malloc_pageshift) {
1217 wrtwarning("junk pointer, too low to make sense.\n");
1218 return;
1219 }
1220
1221 if (index > last_index) {
1222 wrtwarning("junk pointer, too high to make sense.\n");
1223 return;
1224 }
1225
1226 info = page_dir[index];
1227
1228 if (info < MALLOC_MAGIC)
1229 free_pages(ptr, index, info);
1230 else
1231 free_bytes(ptr, index, info);
1232 return;
1233}
1234
1235/*
1236 * These are the public exported interface routines.
1237 */
1238
1239static int malloc_active;
1240
1241void *
1242malloc(size_t size)
1243{
1244 register void *r;
1245
1246 malloc_func = " in malloc():";
1247 THREAD_LOCK();
1248 if (malloc_active++) {
1249 wrtwarning("recursive call.\n");
1250 malloc_active--;
1251 THREAD_UNLOCK();
1252 return (0);
1253 }
1254 r = imalloc(size);
1255 UTRACE(0, size, r);
1256 malloc_active--;
1257 THREAD_UNLOCK();
1258 if (malloc_xmalloc && !r)
1259 wrterror("out of memory.\n");
1260 return (r);
1261}
1262
1263void
1264free(void *ptr)
1265{
1266 malloc_func = " in free():";
1267 THREAD_LOCK();
1268 if (malloc_active++) {
1269 wrtwarning("recursive call.\n");
1270 malloc_active--;
1271 THREAD_UNLOCK();
1272 return;
1273 }
1274 ifree(ptr);
1275 UTRACE(ptr, 0, 0);
1276 malloc_active--;
1277 THREAD_UNLOCK();
1278 return;
1279}
1280
1281void *
1282realloc(void *ptr, size_t size)
1283{
1284 register void *r;
1285
1286 malloc_func = " in realloc():";
1287 THREAD_LOCK();
1288 if (malloc_active++) {
1289 wrtwarning("recursive call.\n");
1290 malloc_active--;
1291 THREAD_UNLOCK();
1292 return (0);
1293 }
1294 if (!ptr) {
1295 r = imalloc(size);
1296 } else {
1297 r = irealloc(ptr, size);
1298 }
1299 UTRACE(ptr, size, r);
1300 malloc_active--;
1301 THREAD_UNLOCK();
1302 if (malloc_xmalloc && !r)
1303 wrterror("out of memory.\n");
1304 return (r);
1305}