Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--   src/lib/libc/stdlib/malloc.c   1529
1 files changed, 1175 insertions, 354 deletions

diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3c57fad024..d1d8759791 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,421 +1,1242 @@
1 | /* | 1 | /* |
2 | * Copyright (c) 1983 Regents of the University of California. | 2 | * ---------------------------------------------------------------------------- |
3 | * All rights reserved. | 3 | * "THE BEER-WARE LICENSE" (Revision 42): |
4 | * | 4 | * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you |
5 | * Redistribution and use in source and binary forms, with or without | 5 | * can do whatever you want with this stuff. If we meet some day, and you think |
6 | * modification, are permitted provided that the following conditions | 6 | * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp |
7 | * are met: | 7 | * ---------------------------------------------------------------------------- |
8 | * 1. Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * 3. All advertising materials mentioning features or use of this software | ||
14 | * must display the following acknowledgement: | ||
15 | * This product includes software developed by the University of | ||
16 | * California, Berkeley and its contributors. | ||
17 | * 4. Neither the name of the University nor the names of its contributors | ||
18 | * may be used to endorse or promote products derived from this software | ||
19 | * without specific prior written permission. | ||
20 | * | ||
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | ||
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
31 | * SUCH DAMAGE. | ||
32 | */ | 8 | */ |
33 | 9 | ||
34 | #if defined(LIBC_SCCS) && !defined(lint) | 10 | #if defined(LIBC_SCCS) && !defined(lint) |
35 | /*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ | 11 | static char rcsid[] = "$OpenBSD: malloc.c,v 1.32 1998/08/06 16:26:32 millert Exp $"; |
36 | static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $"; | ||
37 | #endif /* LIBC_SCCS and not lint */ | 12 | #endif /* LIBC_SCCS and not lint */ |
38 | 13 | ||
39 | /* | 14 | /* |
40 | * malloc.c (Caltech) 2/21/82 | 15 | * Defining MALLOC_EXTRA_SANITY will enable extra checks which are |
41 | * Chris Kingsley, kingsley@cit-20. | 16 | * related to internal conditions and consistency in malloc.c. This has |
42 | * | 17 | * a noticeable runtime performance hit, and generally will not do you |
43 | * This is a very fast storage allocator. It allocates blocks of a small | 18 | * any good unless you fiddle with the internals of malloc or want |
44 | * number of different sizes, and keeps free lists of each size. Blocks that | 19 | * to catch random pointer corruption as early as possible. |
45 | * don't exactly fit are passed up to the next larger size. In this | ||
46 | * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. | ||
47 | * This is designed for use in a virtual memory environment. | ||
48 | */ | 20 | */ |
21 | #ifndef MALLOC_EXTRA_SANITY | ||
22 | #undef MALLOC_EXTRA_SANITY | ||
23 | #endif | ||
49 | 24 | ||
50 | #include <sys/types.h> | 25 | /* |
26 | * Defining MALLOC_STATS will enable you to call malloc_dump() and set | ||
27 | * the [dD] options in the MALLOC_OPTIONS environment variable. | ||
28 | * It has no run-time performance hit, but does pull in stdio... | ||
29 | */ | ||
30 | #ifndef MALLOC_STATS | ||
31 | #undef MALLOC_STATS | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * What to use for Junk. This is the byte value we use to fill with | ||
36 | * when the 'J' option is enabled. | ||
37 | */ | ||
38 | #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ | ||
39 | |||
40 | #include <stdio.h> | ||
51 | #include <stdlib.h> | 41 | #include <stdlib.h> |
52 | #include <string.h> | 42 | #include <string.h> |
53 | #include <unistd.h> | 43 | #include <unistd.h> |
44 | #include <fcntl.h> | ||
45 | #include <errno.h> | ||
46 | #include <sys/types.h> | ||
47 | #include <sys/param.h> | ||
48 | #include <sys/mman.h> | ||
49 | |||
50 | /* | ||
51 | * The basic parameters you can tweak. | ||
52 | * | ||
53 | * malloc_pageshift pagesize = 1 << malloc_pageshift | ||
54 | * It's probably best if this is the native | ||
55 | * page size, but it shouldn't have to be. | ||
56 | * | ||
57 | * malloc_minsize minimum size of an allocation in bytes. | ||
58 | * If this is too small it's too much work | ||
59 | * to manage them. This is also the smallest | ||
60 | * unit of alignment used for the storage | ||
61 | * returned by malloc/realloc. | ||
62 | * | ||
63 | */ | ||
64 | |||
65 | #if defined(__i386__) && defined(__FreeBSD__) | ||
66 | # define malloc_pageshift 12U | ||
67 | # define malloc_minsize 16U | ||
68 | #endif /* __i386__ && __FreeBSD__ */ | ||
54 | 69 | ||
55 | #define NULL 0 | 70 | #if defined(__sparc__) && !defined(__OpenBSD__) |
71 | # define malloc_pageshift 12U | ||
72 | # define malloc_minsize 16U | ||
73 | # define MAP_ANON (0) | ||
74 | # define USE_DEV_ZERO | ||
75 | # define MADV_FREE MADV_DONTNEED | ||
76 | #endif /* __sparc__ */ | ||
56 | 77 | ||
57 | static void morecore(); | 78 | /* Insert your combination here... */ |
58 | static int findbucket(); | 79 | #if defined(__FOOCPU__) && defined(__BAROS__) |
80 | # define malloc_pageshift 12U | ||
81 | # define malloc_minsize 16U | ||
82 | #endif /* __FOOCPU__ && __BAROS__ */ | ||
83 | |||
84 | #if defined(__OpenBSD__) && !defined(__sparc__) | ||
85 | # define malloc_pageshift (PGSHIFT) | ||
86 | # define malloc_minsize 16U | ||
87 | #endif /* __OpenBSD__ */ | ||
88 | |||
89 | #ifdef _THREAD_SAFE | ||
90 | #include <pthread.h> | ||
91 | static pthread_mutex_t malloc_lock; | ||
92 | #define THREAD_LOCK() pthread_mutex_lock(&malloc_lock) | ||
93 | #define THREAD_UNLOCK() pthread_mutex_unlock(&malloc_lock) | ||
94 | #define THREAD_LOCK_INIT() pthread_mutex_init(&malloc_lock, 0); | ||
95 | #else | ||
96 | #define THREAD_LOCK() | ||
97 | #define THREAD_UNLOCK() | ||
98 | #define THREAD_LOCK_INIT() | ||
99 | #endif | ||
59 | 100 | ||
60 | /* | 101 | /* |
61 | * The overhead on a block is at least 4 bytes. When free, this space | 102 | * No user serviceable parts behind this point. |
62 | * contains a pointer to the next free block, and the bottom two bits must | 103 | * |
63 | * be zero. When in use, the first byte is set to MAGIC, and the second | 104 | * This structure describes a page worth of chunks. |
64 | * byte is the size index. The remaining bytes are for alignment. | ||
65 | * If range checking is enabled then a second word holds the size of the | ||
66 | * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). | ||
67 | * The order of elements is critical: ov_magic must overlay the low order | ||
68 | * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. | ||
69 | */ | 105 | */ |
70 | union overhead { | 106 | |
71 | union overhead *ov_next; /* when free */ | 107 | struct pginfo { |
72 | struct { | 108 | struct pginfo *next; /* next on the free list */ |
73 | u_char ovu_magic; /* magic number */ | 109 | void *page; /* Pointer to the page */ |
74 | u_char ovu_index; /* bucket # */ | 110 | u_short size; /* size of this page's chunks */ |
75 | #ifdef RCHECK | 111 | u_short shift; /* How far to shift for this size chunks */ |
76 | u_short ovu_rmagic; /* range magic number */ | 112 | u_short free; /* How many free chunks */ |
77 | u_long ovu_size; /* actual block size */ | 113 | u_short total; /* How many chunks */ |
78 | #endif | 114 | u_long bits[1]; /* Which chunks are free */ |
79 | } ovu; | 115 | }; |
80 | #define ov_magic ovu.ovu_magic | 116 | |
81 | #define ov_index ovu.ovu_index | 117 | /* |
82 | #define ov_rmagic ovu.ovu_rmagic | 118 | * This structure describes a number of free pages. |
83 | #define ov_size ovu.ovu_size | 119 | */ |
120 | |||
121 | struct pgfree { | ||
122 | struct pgfree *next; /* next run of free pages */ | ||
123 | struct pgfree *prev; /* prev run of free pages */ | ||
124 | void *page; /* pointer to free pages */ | ||
125 | void *end; /* pointer to end of free pages */ | ||
126 | u_long size; /* number of bytes free */ | ||
84 | }; | 127 | }; |
85 | 128 | ||
86 | #define MAGIC 0xef /* magic # on accounting info */ | 129 | /* |
87 | #define RMAGIC 0x5555 /* magic # on range info */ | 130 | * How many bits per u_long in the bitmap. |
131 | * Change only if not 8 bits/byte | ||
132 | */ | ||
133 | #define MALLOC_BITS (8*sizeof(u_long)) | ||
134 | |||
135 | /* | ||
136 | * Magic values to put in the page_directory | ||
137 | */ | ||
138 | #define MALLOC_NOT_MINE ((struct pginfo*) 0) | ||
139 | #define MALLOC_FREE ((struct pginfo*) 1) | ||
140 | #define MALLOC_FIRST ((struct pginfo*) 2) | ||
141 | #define MALLOC_FOLLOW ((struct pginfo*) 3) | ||
142 | #define MALLOC_MAGIC ((struct pginfo*) 4) | ||
143 | |||
144 | #ifndef malloc_pageshift | ||
145 | #define malloc_pageshift 12U | ||
146 | #endif | ||
147 | |||
148 | #ifndef malloc_minsize | ||
149 | #define malloc_minsize 16U | ||
150 | #endif | ||
151 | |||
152 | #ifndef malloc_pageshift | ||
153 | #error "malloc_pageshift undefined" | ||
154 | #endif | ||
155 | |||
156 | #if !defined(malloc_pagesize) | ||
157 | #define malloc_pagesize (1UL<<malloc_pageshift) | ||
158 | #endif | ||
159 | |||
160 | #if ((1UL<<malloc_pageshift) != malloc_pagesize) | ||
161 | #error "(1UL<<malloc_pageshift) != malloc_pagesize" | ||
162 | #endif | ||
163 | |||
164 | #ifndef malloc_maxsize | ||
165 | #define malloc_maxsize ((malloc_pagesize)>>1) | ||
166 | #endif | ||
167 | |||
168 | /* A mask for the offset inside a page. */ | ||
169 | #define malloc_pagemask ((malloc_pagesize)-1) | ||
170 | |||
171 | #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) | ||
172 | #define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo) | ||
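To make the page arithmetic concrete, the following standalone sketch (an illustration, not part of the diff) mirrors what pageround() and ptr2index() compute, assuming 4 KB pages (malloc_pageshift == 12); the names below are hypothetical stand-ins for the real globals.

    /* Illustration only: page rounding and page-directory indexing
     * for an assumed 12-bit page shift (4096-byte pages). */
    #include <stdio.h>

    #define PAGESHIFT 12UL
    #define PAGESIZE  (1UL << PAGESHIFT)
    #define PAGEMASK  (PAGESIZE - 1)
    #define PAGEROUND(x) (((x) + PAGEMASK) & ~PAGEMASK)

    int main(void)
    {
        unsigned long origo = 0x80;       /* stand-in for malloc_origo     */
        unsigned long addr  = 0x123456;   /* some address inside the arena */

        printf("pageround(5000) = %lu\n", PAGEROUND(5000UL));       /* 8192 */
        printf("index = %lu\n", (addr >> PAGESHIFT) - origo);       /* 0x123 - 0x80 = 163 */
        return 0;
    }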
88 | 173 | ||
89 | #ifdef RCHECK | 174 | /* fd of /dev/zero */ |
90 | #define RSLOP sizeof (u_short) | 175 | #ifdef USE_DEV_ZERO |
176 | static int fdzero; | ||
177 | #define MMAP_FD fdzero | ||
178 | #define INIT_MMAP() \ | ||
179 | { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \ | ||
180 | wrterror("open of /dev/zero"); } | ||
91 | #else | 181 | #else |
92 | #define RSLOP 0 | 182 | #define MMAP_FD (-1) |
183 | #define INIT_MMAP() | ||
93 | #endif | 184 | #endif |
94 | 185 | ||
186 | /* Set when initialization has been done */ | ||
187 | static unsigned malloc_started; | ||
188 | |||
189 | /* Number of free pages we cache */ | ||
190 | static unsigned malloc_cache = 16; | ||
191 | |||
192 | /* The offset from pagenumber to index into the page directory */ | ||
193 | static u_long malloc_origo; | ||
194 | |||
195 | /* The last index in the page directory we care about */ | ||
196 | static u_long last_index; | ||
197 | |||
198 | /* Pointer to page directory. Allocated "as if with" malloc */ | ||
199 | static struct pginfo **page_dir; | ||
200 | |||
201 | /* How many slots in the page directory */ | ||
202 | static size_t malloc_ninfo; | ||
203 | |||
204 | /* Free pages line up here */ | ||
205 | static struct pgfree free_list; | ||
206 | |||
207 | /* Abort(), user doesn't handle problems. */ | ||
208 | static int malloc_abort; | ||
209 | |||
210 | /* Are we trying to die ? */ | ||
211 | static int suicide; | ||
212 | |||
213 | #ifdef MALLOC_STATS | ||
214 | /* dump statistics */ | ||
215 | static int malloc_stats; | ||
216 | #endif | ||
217 | |||
218 | /* avoid outputting warnings? */ | ||
219 | static int malloc_silent; | ||
220 | |||
221 | /* always realloc ? */ | ||
222 | static int malloc_realloc; | ||
223 | |||
224 | #ifdef __FreeBSD__ | ||
225 | /* pass the kernel a hint on free pages ? */ | ||
226 | static int malloc_hint; | ||
227 | #endif | ||
228 | |||
229 | /* xmalloc behaviour ? */ | ||
230 | static int malloc_xmalloc; | ||
231 | |||
232 | /* zero fill ? */ | ||
233 | static int malloc_zero; | ||
234 | |||
235 | /* junk fill ? */ | ||
236 | static int malloc_junk; | ||
237 | |||
238 | #ifdef __FreeBSD__ | ||
239 | /* utrace ? */ | ||
240 | static int malloc_utrace; | ||
241 | |||
242 | struct ut { void *p; size_t s; void *r; }; | ||
243 | |||
244 | void utrace __P((struct ut *, int)); | ||
245 | |||
246 | #define UTRACE(a, b, c) \ | ||
247 | if (malloc_utrace) \ | ||
248 | {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);} | ||
249 | #else /* !__FreeBSD__ */ | ||
250 | #define UTRACE(a,b,c) | ||
251 | #endif | ||
252 | |||
253 | /* my last break. */ | ||
254 | static void *malloc_brk; | ||
255 | |||
256 | /* one location cache for free-list holders */ | ||
257 | static struct pgfree *px; | ||
258 | |||
259 | /* compile-time options */ | ||
260 | char *malloc_options; | ||
261 | |||
262 | /* Name of the current public function */ | ||
263 | static char *malloc_func; | ||
264 | |||
265 | /* Macro for mmap */ | ||
266 | #define MMAP(size) \ | ||
267 | mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ | ||
268 | MMAP_FD, (off_t)0); | ||
269 | |||
95 | /* | 270 | /* |
96 | * nextf[i] is the pointer to the next free block of size 2^(i+3). The | 271 | * Necessary function declarations |
97 | * smallest allocatable block is 8 bytes. The overhead information | ||
98 | * precedes the data area returned to the user. | ||
99 | */ | 272 | */ |
100 | #define NBUCKETS 30 | 273 | static int extend_pgdir(u_long index); |
101 | static union overhead *nextf[NBUCKETS]; | 274 | static void *imalloc(size_t size); |
102 | extern char *sbrk(); | 275 | static void ifree(void *ptr); |
276 | static void *irealloc(void *ptr, size_t size); | ||
277 | static void *malloc_bytes(size_t size); | ||
278 | |||
279 | #ifdef MALLOC_STATS | ||
280 | void | ||
281 | malloc_dump(fd) | ||
282 | FILE *fd; | ||
283 | { | ||
284 | struct pginfo **pd; | ||
285 | struct pgfree *pf; | ||
286 | int j; | ||
287 | |||
288 | pd = page_dir; | ||
289 | |||
290 | /* print out all the pages */ | ||
291 | for(j=0;j<=last_index;j++) { | ||
292 | fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j); | ||
293 | if (pd[j] == MALLOC_NOT_MINE) { | ||
294 | for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++) | ||
295 | ; | ||
296 | j--; | ||
297 | fprintf(fd, ".. %5d not mine\n", j); | ||
298 | } else if (pd[j] == MALLOC_FREE) { | ||
299 | for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++) | ||
300 | ; | ||
301 | j--; | ||
302 | fprintf(fd, ".. %5d free\n", j); | ||
303 | } else if (pd[j] == MALLOC_FIRST) { | ||
304 | for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++) | ||
305 | ; | ||
306 | j--; | ||
307 | fprintf(fd, ".. %5d in use\n", j); | ||
308 | } else if (pd[j] < MALLOC_MAGIC) { | ||
309 | fprintf(fd, "(%p)\n", pd[j]); | ||
310 | } else { | ||
311 | fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n", | ||
312 | pd[j], pd[j]->free, pd[j]->total, | ||
313 | pd[j]->size, pd[j]->page, pd[j]->next); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | for(pf=free_list.next; pf; pf=pf->next) { | ||
318 | fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n", | ||
319 | pf, pf->page, pf->end, pf->size, pf->prev, pf->next); | ||
320 | if (pf == pf->next) { | ||
321 | fprintf(fd, "Free_list loops.\n"); | ||
322 | break; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | /* print out various info */ | ||
327 | fprintf(fd, "Minsize\t%d\n", malloc_minsize); | ||
328 | fprintf(fd, "Maxsize\t%d\n", malloc_maxsize); | ||
329 | fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize); | ||
330 | fprintf(fd, "Pageshift\t%d\n", malloc_pageshift); | ||
331 | fprintf(fd, "FirstPage\t%ld\n", malloc_origo); | ||
332 | fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift, | ||
333 | (last_index + malloc_pageshift) << malloc_pageshift); | ||
334 | fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift); | ||
335 | } | ||
336 | #endif /* MALLOC_STATS */ | ||
337 | |||
338 | extern char *__progname; | ||
339 | |||
340 | static void | ||
341 | wrterror(p) | ||
342 | char *p; | ||
343 | { | ||
344 | char *q = " error: "; | ||
345 | write(2, __progname, strlen(__progname)); | ||
346 | write(2, malloc_func, strlen(malloc_func)); | ||
347 | write(2, q, strlen(q)); | ||
348 | write(2, p, strlen(p)); | ||
349 | suicide = 1; | ||
350 | #ifdef MALLOC_STATS | ||
351 | if (malloc_stats) | ||
352 | malloc_dump(stderr); | ||
353 | #endif /* MALLOC_STATS */ | ||
354 | abort(); | ||
355 | } | ||
356 | |||
357 | static void | ||
358 | wrtwarning(p) | ||
359 | char *p; | ||
360 | { | ||
361 | char *q = " warning: "; | ||
362 | if (malloc_abort) | ||
363 | wrterror(p); | ||
364 | else if (malloc_silent) | ||
365 | return; | ||
366 | write(2, __progname, strlen(__progname)); | ||
367 | write(2, malloc_func, strlen(malloc_func)); | ||
368 | write(2, q, strlen(q)); | ||
369 | write(2, p, strlen(p)); | ||
370 | } | ||
371 | |||
372 | #ifdef MALLOC_STATS | ||
373 | static void | ||
374 | malloc_exit() | ||
375 | { | ||
376 | FILE *fd = fopen("malloc.out", "a"); | ||
377 | char *q = "malloc() warning: Couldn't dump stats.\n"; | ||
378 | if (fd) { | ||
379 | malloc_dump(fd); | ||
380 | fclose(fd); | ||
381 | } else | ||
382 | write(2, q, strlen(q)); | ||
383 | } | ||
384 | #endif /* MALLOC_STATS */ | ||
103 | 385 | ||
104 | static int pagesz; /* page size */ | ||
105 | static int pagebucket; /* page size bucket */ | ||
106 | 386 | ||
107 | #ifdef MSTATS | ||
108 | /* | 387 | /* |
109 | * nmalloc[i] is the difference between the number of mallocs and frees | 388 | * Allocate a number of pages from the OS |
110 | * for a given block size. | ||
111 | */ | 389 | */ |
112 | static u_int nmalloc[NBUCKETS]; | 390 | static void * |
113 | #include <stdio.h> | 391 | map_pages(pages) |
114 | #endif | 392 | int pages; |
393 | { | ||
394 | caddr_t result, tail; | ||
115 | 395 | ||
116 | #if defined(DEBUG) || defined(RCHECK) | 396 | result = (caddr_t)pageround((u_long)sbrk(0)); |
117 | #define ASSERT(p) if (!(p)) botch("p") | 397 | tail = result + (pages << malloc_pageshift); |
118 | #include <stdio.h> | 398 | |
119 | static | 399 | if (brk(tail)) { |
120 | botch(s) | 400 | #ifdef MALLOC_EXTRA_SANITY |
121 | char *s; | 401 | wrterror("(ES): map_pages fails\n"); |
402 | #endif /* MALLOC_EXTRA_SANITY */ | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | last_index = ptr2index(tail) - 1; | ||
407 | malloc_brk = tail; | ||
408 | |||
409 | if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index)) | ||
410 | return 0; | ||
411 | |||
412 | return result; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * Extend page directory | ||
417 | */ | ||
418 | static int | ||
419 | extend_pgdir(index) | ||
420 | u_long index; | ||
122 | { | 421 | { |
123 | fprintf(stderr, "\r\nassertion botched: %s\r\n", s); | 422 | struct pginfo **new, **old; |
124 | (void) fflush(stderr); /* just in case user buffered it */ | 423 | size_t i, oldlen; |
125 | abort(); | 424 | |
425 | /* Make it this many pages */ | ||
426 | i = index * sizeof *page_dir; | ||
427 | i /= malloc_pagesize; | ||
428 | i += 2; | ||
429 | |||
430 | /* remember the old mapping size */ | ||
431 | oldlen = malloc_ninfo * sizeof *page_dir; | ||
432 | |||
433 | /* | ||
434 | * NOTE: we allocate new pages and copy the directory rather than tempt | ||
435 | * fate by trying to "grow" the region.. There is nothing to prevent | ||
436 | us from accidentally re-mapping space that's been allocated by our caller | ||
437 | * via dlopen() or other mmap(). | ||
438 | * | ||
439 | * The copy problem is not too bad, as there is 4K of page index per | ||
440 | * 4MB of malloc arena. | ||
441 | * | ||
442 | * We can totally avoid the copy if we open a file descriptor to associate | ||
443 | * the anon mappings with. Then, when we remap the pages at the new | ||
444 | * address, the old pages will be "magically" remapped.. But this means | ||
445 | * keeping open a "secret" file descriptor..... | ||
446 | */ | ||
447 | |||
448 | /* Get new pages */ | ||
449 | new = (struct pginfo**) MMAP(i * malloc_pagesize); | ||
450 | if (new == (struct pginfo **)-1) | ||
451 | return 0; | ||
452 | |||
453 | /* Copy the old stuff */ | ||
454 | memcpy(new, page_dir, | ||
455 | malloc_ninfo * sizeof *page_dir); | ||
456 | |||
457 | /* register the new size */ | ||
458 | malloc_ninfo = i * malloc_pagesize / sizeof *page_dir; | ||
459 | |||
460 | /* swap the pointers */ | ||
461 | old = page_dir; | ||
462 | page_dir = new; | ||
463 | |||
464 | /* Now free the old stuff */ | ||
465 | munmap(old, oldlen); | ||
466 | return 1; | ||
126 | } | 467 | } |
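As a quick check of the "4K of page index per 4MB of malloc arena" remark in the comment above, under the sizes this code was written for: with 4096-byte pages and 4-byte pointers, one directory page holds 4096 / 4 = 1024 pginfo pointers, and the 1024 arena pages they describe span 1024 * 4096 bytes = 4 MB. A throwaway sketch of that arithmetic, with both sizes assumed rather than taken from the diff:

    /* Illustration only: how much arena one page of page_dir can describe,
     * assuming 4096-byte pages and 4-byte (ILP32) pointers. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pagesize = 4096;                 /* assumed          */
        unsigned long ptrsize  = 4;                    /* assumed          */
        unsigned long entries  = pagesize / ptrsize;   /* 1024 entries     */
        unsigned long arena    = entries * pagesize;   /* 4194304 = 4 MB   */

        printf("one directory page covers %lu bytes of arena\n", arena);
        return 0;
    }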
127 | #else | ||
128 | #define ASSERT(p) | ||
129 | #endif | ||
130 | 468 | ||
131 | void * | 469 | /* |
132 | malloc(nbytes) | 470 | * Initialize the world |
133 | size_t nbytes; | 471 | */ |
472 | static void | ||
473 | malloc_init () | ||
134 | { | 474 | { |
135 | register union overhead *op; | 475 | char *p, b[64]; |
136 | register long bucket, n; | 476 | int i, j; |
137 | register unsigned amt; | 477 | int save_errno = errno; |
138 | 478 | ||
139 | /* | 479 | THREAD_LOCK_INIT(); |
140 | * First time malloc is called, setup page size and | 480 | |
141 | * align break pointer so all data will be page aligned. | 481 | INIT_MMAP(); |
142 | */ | 482 | |
143 | if (pagesz == 0) { | 483 | #ifdef MALLOC_EXTRA_SANITY |
144 | pagesz = n = getpagesize(); | 484 | malloc_junk = 1; |
145 | op = (union overhead *)sbrk(0); | 485 | #endif /* MALLOC_EXTRA_SANITY */ |
146 | n = n - sizeof (*op) - ((long)op & (n - 1)); | 486 | |
147 | if (n < 0) | 487 | for (i = 0; i < 3; i++) { |
148 | n += pagesz; | 488 | if (i == 0) { |
149 | if (n) { | 489 | j = readlink("/etc/malloc.conf", b, sizeof b - 1); |
150 | if (sbrk(n) == (char *)-1) | 490 | if (j <= 0) |
151 | return (NULL); | 491 | continue; |
152 | } | 492 | b[j] = '\0'; |
153 | bucket = 0; | 493 | p = b; |
154 | amt = 8; | 494 | } else if (i == 1) { |
155 | while (pagesz > amt) { | 495 | if (issetugid() == 0) |
156 | amt <<= 1; | 496 | p = getenv("MALLOC_OPTIONS"); |
157 | bucket++; | 497 | else |
158 | } | 498 | continue; |
159 | pagebucket = bucket; | 499 | } else if (i == 2) { |
500 | p = malloc_options; | ||
160 | } | 501 | } |
161 | /* | 502 | for (; p && *p; p++) { |
162 | * Convert amount of memory requested into closest block size | 503 | switch (*p) { |
163 | * stored in hash buckets which satisfies request. | 504 | case '>': malloc_cache <<= 1; break; |
164 | * Account for space used per block for accounting. | 505 | case '<': malloc_cache >>= 1; break; |
165 | */ | 506 | case 'a': malloc_abort = 0; break; |
166 | if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { | 507 | case 'A': malloc_abort = 1; break; |
167 | #ifndef RCHECK | 508 | #ifdef MALLOC_STATS |
168 | amt = 8; /* size of first bucket */ | 509 | case 'd': malloc_stats = 0; break; |
169 | bucket = 0; | 510 | case 'D': malloc_stats = 1; break; |
170 | #else | 511 | #endif /* MALLOC_STATS */ |
171 | amt = 16; /* size of first bucket */ | 512 | #ifdef __FreeBSD__ |
172 | bucket = 1; | 513 | case 'h': malloc_hint = 0; break; |
173 | #endif | 514 | case 'H': malloc_hint = 1; break; |
174 | n = -((long)sizeof (*op) + RSLOP); | 515 | #endif /* __FreeBSD__ */ |
175 | } else { | 516 | case 'r': malloc_realloc = 0; break; |
176 | amt = pagesz; | 517 | case 'R': malloc_realloc = 1; break; |
177 | bucket = pagebucket; | 518 | case 'j': malloc_junk = 0; break; |
519 | case 'J': malloc_junk = 1; break; | ||
520 | case 'n': malloc_silent = 0; break; | ||
521 | case 'N': malloc_silent = 1; break; | ||
522 | #ifdef __FreeBSD__ | ||
523 | case 'u': malloc_utrace = 0; break; | ||
524 | case 'U': malloc_utrace = 1; break; | ||
525 | #endif /* __FreeBSD__ */ | ||
526 | case 'x': malloc_xmalloc = 0; break; | ||
527 | case 'X': malloc_xmalloc = 1; break; | ||
528 | case 'z': malloc_zero = 0; break; | ||
529 | case 'Z': malloc_zero = 1; break; | ||
530 | default: | ||
531 | j = malloc_abort; | ||
532 | malloc_abort = 0; | ||
533 | wrtwarning("unknown char in MALLOC_OPTIONS\n"); | ||
534 | malloc_abort = j; | ||
535 | break; | ||
536 | } | ||
178 | } | 537 | } |
179 | while (nbytes > amt + n) { | 538 | } |
180 | amt <<= 1; | 539 | |
181 | if (amt == 0) | 540 | UTRACE(0, 0, 0); |
182 | return (NULL); | 541 | |
183 | bucket++; | 542 | /* |
543 | * We want junk in the entire allocation, and zero only in the part | ||
544 | * the user asked for. | ||
545 | */ | ||
546 | if (malloc_zero) | ||
547 | malloc_junk=1; | ||
548 | |||
549 | #ifdef MALLOC_STATS | ||
550 | if (malloc_stats) | ||
551 | atexit(malloc_exit); | ||
552 | #endif /* MALLOC_STATS */ | ||
553 | |||
554 | /* Allocate one page for the page directory */ | ||
555 | page_dir = (struct pginfo **) MMAP(malloc_pagesize); | ||
556 | |||
557 | if (page_dir == (struct pginfo **) -1) | ||
558 | wrterror("mmap(2) failed, check limits.\n"); | ||
559 | |||
560 | /* | ||
561 | * We need a maximum of malloc_pageshift buckets, steal these from the | ||
562 | * front of the page_directory; | ||
563 | */ | ||
564 | malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift; | ||
565 | malloc_origo -= malloc_pageshift; | ||
566 | |||
567 | malloc_ninfo = malloc_pagesize / sizeof *page_dir; | ||
568 | |||
569 | /* Been here, done that */ | ||
570 | malloc_started++; | ||
571 | |||
572 | /* Recalculate the cache size in bytes, and make sure it's nonzero */ | ||
573 | |||
574 | if (!malloc_cache) | ||
575 | malloc_cache++; | ||
576 | |||
577 | malloc_cache <<= malloc_pageshift; | ||
578 | |||
579 | /* | ||
580 | * This is a nice hack from Kaleb Keithly (kaleb@x.org). | ||
581 | * We can sbrk(2) further back when we keep this on a low address. | ||
582 | */ | ||
583 | px = (struct pgfree *) imalloc (sizeof *px); | ||
584 | errno = save_errno; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * Allocate a number of complete pages | ||
589 | */ | ||
590 | static void * | ||
591 | malloc_pages(size) | ||
592 | size_t size; | ||
593 | { | ||
594 | void *p, *delay_free = 0; | ||
595 | int i; | ||
596 | struct pgfree *pf; | ||
597 | u_long index; | ||
598 | |||
599 | size = pageround(size); | ||
600 | |||
601 | p = 0; | ||
602 | /* Look for free pages before asking for more */ | ||
603 | for(pf = free_list.next; pf; pf = pf->next) { | ||
604 | |||
605 | #ifdef MALLOC_EXTRA_SANITY | ||
606 | if (pf->size & malloc_pagemask) | ||
607 | wrterror("(ES): junk length entry on free_list\n"); | ||
608 | if (!pf->size) | ||
609 | wrterror("(ES): zero length entry on free_list\n"); | ||
610 | if (pf->page == pf->end) | ||
611 | wrterror("(ES): zero entry on free_list\n"); | ||
612 | if (pf->page > pf->end) | ||
613 | wrterror("(ES): sick entry on free_list\n"); | ||
614 | if ((void*)pf->page >= (void*)sbrk(0)) | ||
615 | wrterror("(ES): entry on free_list past brk\n"); | ||
616 | if (page_dir[ptr2index(pf->page)] != MALLOC_FREE) | ||
617 | wrterror("(ES): non-free first page on free-list\n"); | ||
618 | if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE) | ||
619 | wrterror("(ES): non-free last page on free-list\n"); | ||
620 | #endif /* MALLOC_EXTRA_SANITY */ | ||
621 | |||
622 | if (pf->size < size) | ||
623 | continue; | ||
624 | |||
625 | if (pf->size == size) { | ||
626 | p = pf->page; | ||
627 | if (pf->next) | ||
628 | pf->next->prev = pf->prev; | ||
629 | pf->prev->next = pf->next; | ||
630 | delay_free = pf; | ||
631 | break; | ||
632 | } | ||
633 | |||
634 | p = pf->page; | ||
635 | pf->page = (char *)pf->page + size; | ||
636 | pf->size -= size; | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | #ifdef MALLOC_EXTRA_SANITY | ||
641 | if (p && page_dir[ptr2index(p)] != MALLOC_FREE) | ||
642 | wrterror("(ES): allocated non-free page on free-list\n"); | ||
643 | #endif /* MALLOC_EXTRA_SANITY */ | ||
644 | |||
645 | size >>= malloc_pageshift; | ||
646 | |||
647 | /* Map new pages */ | ||
648 | if (!p) | ||
649 | p = map_pages(size); | ||
650 | |||
651 | if (p) { | ||
652 | |||
653 | index = ptr2index(p); | ||
654 | page_dir[index] = MALLOC_FIRST; | ||
655 | for (i=1;i<size;i++) | ||
656 | page_dir[index+i] = MALLOC_FOLLOW; | ||
657 | |||
658 | if (malloc_junk) | ||
659 | memset(p, SOME_JUNK, size << malloc_pageshift); | ||
660 | } | ||
661 | |||
662 | if (delay_free) { | ||
663 | if (!px) | ||
664 | px = delay_free; | ||
665 | else | ||
666 | ifree(delay_free); | ||
667 | } | ||
668 | |||
669 | return p; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * Allocate a page of fragments | ||
674 | */ | ||
675 | |||
676 | static __inline__ int | ||
677 | malloc_make_chunks(bits) | ||
678 | int bits; | ||
679 | { | ||
680 | struct pginfo *bp; | ||
681 | void *pp; | ||
682 | int i, k, l; | ||
683 | |||
684 | /* Allocate a new bucket */ | ||
685 | pp = malloc_pages((size_t)malloc_pagesize); | ||
686 | if (!pp) | ||
687 | return 0; | ||
688 | |||
689 | /* Find length of admin structure */ | ||
690 | l = sizeof *bp - sizeof(u_long); | ||
691 | l += sizeof(u_long) * | ||
692 | (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); | ||
693 | |||
694 | /* Don't waste more than two chunks on this */ | ||
695 | if ((1UL<<(bits)) <= l+l) { | ||
696 | bp = (struct pginfo *)pp; | ||
697 | } else { | ||
698 | bp = (struct pginfo *)imalloc(l); | ||
699 | if (!bp) { | ||
700 | ifree(pp); | ||
701 | return 0; | ||
184 | } | 702 | } |
185 | /* | 703 | } |
186 | * If nothing in hash bucket right now, | 704 | |
187 | * request more memory from the system. | 705 | bp->size = (1UL<<bits); |
188 | */ | 706 | bp->shift = bits; |
189 | if ((op = nextf[bucket]) == NULL) { | 707 | bp->total = bp->free = malloc_pagesize >> bits; |
190 | morecore(bucket); | 708 | bp->page = pp; |
191 | if ((op = nextf[bucket]) == NULL) | 709 | |
192 | return (NULL); | 710 | /* set all valid bits in the bitmap */ |
711 | k = bp->total; | ||
712 | i = 0; | ||
713 | |||
714 | /* Do a bunch at a time */ | ||
715 | for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) | ||
716 | bp->bits[i / MALLOC_BITS] = ~0UL; | ||
717 | |||
718 | for(; i < k; i++) | ||
719 | bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS); | ||
720 | |||
721 | if (bp == bp->page) { | ||
722 | /* Mark the ones we stole for ourselves */ | ||
723 | for(i=0;l > 0;i++) { | ||
724 | bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS)); | ||
725 | bp->free--; | ||
726 | bp->total--; | ||
727 | l -= (1 << bits); | ||
193 | } | 728 | } |
194 | /* remove from linked list */ | 729 | } |
195 | nextf[bucket] = op->ov_next; | 730 | |
196 | op->ov_magic = MAGIC; | 731 | /* MALLOC_LOCK */ |
197 | op->ov_index = bucket; | 732 | |
198 | #ifdef MSTATS | 733 | page_dir[ptr2index(pp)] = bp; |
199 | nmalloc[bucket]++; | 734 | |
200 | #endif | 735 | bp->next = page_dir[bits]; |
201 | #ifdef RCHECK | 736 | page_dir[bits] = bp; |
202 | /* | 737 | |
203 | * Record allocated size of block and | 738 | /* MALLOC_UNLOCK */ |
204 | * bound space with magic numbers. | 739 | |
205 | */ | 740 | return 1; |
206 | op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); | ||
207 | op->ov_rmagic = RMAGIC; | ||
208 | *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; | ||
209 | #endif | ||
210 | return ((char *)(op + 1)); | ||
211 | } | 741 | } |
212 | 742 | ||
213 | /* | 743 | /* |
214 | * Allocate more memory to the indicated bucket. | 744 | * Allocate a fragment |
215 | */ | 745 | */ |
216 | static void | 746 | static void * |
217 | morecore(bucket) | 747 | malloc_bytes(size) |
218 | int bucket; | 748 | size_t size; |
219 | { | 749 | { |
220 | register union overhead *op; | 750 | int i,j; |
221 | register long sz; /* size of desired block */ | 751 | u_long u; |
222 | long amt; /* amount to allocate */ | 752 | struct pginfo *bp; |
223 | int nblks; /* how many blocks we get */ | 753 | int k; |
754 | u_long *lp; | ||
224 | 755 | ||
225 | /* | 756 | /* Don't bother with anything less than this */ |
226 | * sbrk_size <= 0 only for big, FLUFFY, requests (about | 757 | if (size < malloc_minsize) |
227 | * 2^30 bytes on a VAX, I think) or for a negative arg. | 758 | size = malloc_minsize; |
228 | */ | 759 | |
229 | sz = 1 << (bucket + 3); | 760 | /* Find the right bucket */ |
230 | #ifdef DEBUG | 761 | j = 1; |
231 | ASSERT(sz > 0); | 762 | i = size-1; |
232 | #else | 763 | while (i >>= 1) |
233 | if (sz <= 0) | 764 | j++; |
234 | return; | 765 | |
235 | #endif | 766 | /* If it's empty, make a page more of that size chunks */ |
236 | if (sz < pagesz) { | 767 | if (!page_dir[j] && !malloc_make_chunks(j)) |
237 | amt = pagesz; | 768 | return 0; |
238 | nblks = amt / sz; | 769 | |
239 | } else { | 770 | bp = page_dir[j]; |
240 | amt = sz + pagesz; | 771 | |
241 | nblks = 1; | 772 | /* Find first word of bitmap which isn't empty */ |
242 | } | 773 | for (lp = bp->bits; !*lp; lp++) |
243 | op = (union overhead *)sbrk(amt); | 774 | ; |
244 | /* no more room! */ | 775 | |
245 | if ((long)op == -1) | 776 | /* Find that bit, and tweak it */ |
246 | return; | 777 | u = 1; |
247 | /* | 778 | k = 0; |
248 | * Add new memory allocated to that on | 779 | while (!(*lp & u)) { |
249 | * free list for this hash bucket. | 780 | u += u; |
250 | */ | 781 | k++; |
251 | nextf[bucket] = op; | 782 | } |
252 | while (--nblks > 0) { | 783 | *lp ^= u; |
253 | op->ov_next = (union overhead *)((caddr_t)op + sz); | 784 | |
254 | op = (union overhead *)((caddr_t)op + sz); | 785 | /* If there are no more free, remove from free-list */ |
255 | } | 786 | if (!--bp->free) { |
787 | page_dir[j] = bp->next; | ||
788 | bp->next = 0; | ||
789 | } | ||
790 | |||
791 | /* Adjust to the real offset of that chunk */ | ||
792 | k += (lp-bp->bits)*MALLOC_BITS; | ||
793 | k <<= bp->shift; | ||
794 | |||
795 | if (malloc_junk) | ||
796 | memset((char *)bp->page + k, SOME_JUNK, bp->size); | ||
797 | |||
798 | return (u_char *)bp->page + k; | ||
256 | } | 799 | } |
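To trace the bucket search and bitmap bookkeeping used above, here is an illustrative sketch (not code from the diff): a 100-byte request lands in bucket j = 7, i.e. 128-byte chunks, and chunk number i within a page is tracked by bit i % MALLOC_BITS of word bits[i / MALLOC_BITS].

    /* Illustration only: the size-to-bucket loop from malloc_bytes() and
     * the bit addressing used for the pginfo free-chunk bitmap. */
    #include <stdio.h>

    #define MALLOC_BITS (8 * sizeof(unsigned long))

    int main(void)
    {
        size_t size = 100;                /* request, already >= minsize    */
        size_t i = size - 1;
        int j = 1;
        while (i >>= 1)                   /* same loop as in malloc_bytes() */
            j++;
        printf("bucket %d -> %lu-byte chunks\n", j, 1UL << j);  /* 7 -> 128 */

        int chunk = 37;                   /* a pretend chunk index in a page */
        unsigned long word = chunk / MALLOC_BITS;
        unsigned long mask = 1UL << (chunk % MALLOC_BITS);
        printf("chunk %d -> bits[%lu] & 0x%lx\n", chunk, word, mask);
        return 0;
    }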
257 | 800 | ||
258 | void | 801 | /* |
259 | free(cp) | 802 | * Allocate a piece of memory |
260 | void *cp; | 803 | */ |
261 | { | 804 | static void * |
262 | register long size; | 805 | imalloc(size) |
263 | register union overhead *op; | 806 | size_t size; |
264 | 807 | { | |
265 | if (cp == NULL) | 808 | void *result; |
266 | return; | 809 | |
267 | op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); | 810 | if (!malloc_started) |
268 | #ifdef DEBUG | 811 | malloc_init(); |
269 | ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ | 812 | |
270 | #else | 813 | if (suicide) |
271 | if (op->ov_magic != MAGIC) | 814 | abort(); |
272 | return; /* sanity */ | 815 | |
273 | #endif | 816 | if ((size + malloc_pagesize) < size) /* Check for overflow */ |
274 | #ifdef RCHECK | 817 | result = 0; |
275 | ASSERT(op->ov_rmagic == RMAGIC); | 818 | else if (size <= malloc_maxsize) |
276 | ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); | 819 | result = malloc_bytes(size); |
277 | #endif | 820 | else |
278 | size = op->ov_index; | 821 | result = malloc_pages(size); |
279 | ASSERT(size < NBUCKETS); | 822 | |
280 | op->ov_next = nextf[size]; /* also clobbers ov_magic */ | 823 | if (malloc_abort && !result) |
281 | nextf[size] = op; | 824 | wrterror("allocation failed.\n"); |
282 | #ifdef MSTATS | 825 | |
283 | nmalloc[size]--; | 826 | if (malloc_zero && result) |
284 | #endif | 827 | memset(result, 0, size); |
828 | |||
829 | return result; | ||
285 | } | 830 | } |
286 | 831 | ||
287 | /* | 832 | /* |
288 | * When a program attempts "storage compaction" as mentioned in the | 833 | * Change the size of an allocation. |
289 | * old malloc man page, it realloc's an already freed block. Usually | ||
290 | * this is the last block it freed; occasionally it might be farther | ||
291 | * back. We have to search all the free lists for the block in order | ||
292 | * to determine its bucket: 1st we make one pass thru the lists | ||
293 | * checking only the first block in each; if that fails we search | ||
294 | * ``realloc_srchlen'' blocks in each list for a match (the variable | ||
295 | * is extern so the caller can modify it). If that fails we just copy | ||
296 | * however many bytes was given to realloc() and hope it's not huge. | ||
297 | */ | 834 | */ |
298 | int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ | 835 | static void * |
836 | irealloc(ptr, size) | ||
837 | void *ptr; | ||
838 | size_t size; | ||
839 | { | ||
840 | void *p; | ||
841 | u_long osize, index; | ||
842 | struct pginfo **mp; | ||
843 | int i; | ||
299 | 844 | ||
300 | void * | 845 | if (suicide) |
301 | realloc(cp, nbytes) | 846 | abort(); |
302 | void *cp; | 847 | |
303 | size_t nbytes; | 848 | if (!malloc_started) { |
304 | { | 849 | wrtwarning("malloc() has never been called.\n"); |
305 | register u_long onb; | 850 | return 0; |
306 | register long i; | 851 | } |
307 | union overhead *op; | 852 | |
308 | char *res; | 853 | index = ptr2index(ptr); |
309 | int was_alloced = 0; | 854 | |
310 | 855 | if (index < malloc_pageshift) { | |
311 | if (cp == NULL) | 856 | wrtwarning("junk pointer, too low to make sense.\n"); |
312 | return (malloc(nbytes)); | 857 | return 0; |
313 | op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); | 858 | } |
314 | if (op->ov_magic == MAGIC) { | 859 | |
315 | was_alloced++; | 860 | if (index > last_index) { |
316 | i = op->ov_index; | 861 | wrtwarning("junk pointer, too high to make sense.\n"); |
317 | } else { | 862 | return 0; |
318 | /* | 863 | } |
319 | * Already free, doing "compaction". | 864 | |
320 | * | 865 | mp = &page_dir[index]; |
321 | * Search for the old block of memory on the | 866 | |
322 | * free list. First, check the most common | 867 | if (*mp == MALLOC_FIRST) { /* Page allocation */ |
323 | * case (last element free'd), then (this failing) | 868 | |
324 | * the last ``realloc_srchlen'' items free'd. | 869 | /* Check the pointer */ |
325 | * If all lookups fail, then assume the size of | 870 | if ((u_long)ptr & malloc_pagemask) { |
326 | * the memory block being realloc'd is the | 871 | wrtwarning("modified (page-) pointer.\n"); |
327 | * largest possible (so that all "nbytes" of new | 872 | return 0; |
328 | * memory are copied into). Note that this could cause | 873 | } |
329 | * a memory fault if the old area was tiny, and the moon | 874 | |
330 | * is gibbous. However, that is very unlikely. | 875 | /* Find the size in bytes */ |
331 | */ | 876 | for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;) |
332 | if ((i = findbucket(op, 1)) < 0 && | 877 | osize += malloc_pagesize; |
333 | (i = findbucket(op, realloc_srchlen)) < 0) | 878 | |
334 | i = NBUCKETS; | 879 | if (!malloc_realloc && /* unless we have to, */ |
880 | size <= osize && /* .. or are too small, */ | ||
881 | size > (osize - malloc_pagesize)) { /* .. or can free a page, */ | ||
882 | return ptr; /* don't do anything. */ | ||
883 | } | ||
884 | |||
885 | } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */ | ||
886 | |||
887 | /* Check the pointer for sane values */ | ||
888 | if (((u_long)ptr & ((*mp)->size-1))) { | ||
889 | wrtwarning("modified (chunk-) pointer.\n"); | ||
890 | return 0; | ||
335 | } | 891 | } |
336 | onb = 1 << (i + 3); | 892 | |
337 | if (onb < pagesz) | 893 | /* Find the chunk index in the page */ |
338 | onb -= sizeof (*op) + RSLOP; | 894 | i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift; |
895 | |||
896 | /* Verify that it isn't a free chunk already */ | ||
897 | if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) { | ||
898 | wrtwarning("chunk is already free.\n"); | ||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | osize = (*mp)->size; | ||
903 | |||
904 | if (!malloc_realloc && /* Unless we have to, */ | ||
905 | size < osize && /* ..or are too small, */ | ||
906 | (size > osize/2 || /* ..or could use a smaller size, */ | ||
907 | osize == malloc_minsize)) { /* ..(if there is one) */ | ||
908 | return ptr; /* ..Don't do anything */ | ||
909 | } | ||
910 | |||
911 | } else { | ||
912 | wrtwarning("pointer to wrong page.\n"); | ||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | p = imalloc(size); | ||
917 | |||
918 | if (p) { | ||
919 | /* copy the lesser of the two sizes, and free the old one */ | ||
920 | if (osize < size) | ||
921 | memcpy(p, ptr, osize); | ||
339 | else | 922 | else |
340 | onb += pagesz - sizeof (*op) - RSLOP; | 923 | memcpy(p, ptr, size); |
341 | /* avoid the copy if same size block */ | 924 | ifree(ptr); |
342 | if (was_alloced) { | 925 | } |
343 | if (i) { | 926 | return p; |
344 | i = 1 << (i + 2); | 927 | } |
345 | if (i < pagesz) | 928 | |
346 | i -= sizeof (*op) + RSLOP; | 929 | /* |
347 | else | 930 | * Free a sequence of pages |
348 | i += pagesz - sizeof (*op) - RSLOP; | 931 | */ |
349 | } | 932 | |
350 | if (nbytes <= onb && nbytes > i) { | 933 | static __inline__ void |
351 | #ifdef RCHECK | 934 | free_pages(ptr, index, info) |
352 | op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); | 935 | void *ptr; |
353 | *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; | 936 | int index; |
937 | struct pginfo *info; | ||
938 | { | ||
939 | int i; | ||
940 | struct pgfree *pf, *pt=0; | ||
941 | u_long l; | ||
942 | void *tail; | ||
943 | |||
944 | if (info == MALLOC_FREE) { | ||
945 | wrtwarning("page is already free.\n"); | ||
946 | return; | ||
947 | } | ||
948 | |||
949 | if (info != MALLOC_FIRST) { | ||
950 | wrtwarning("pointer to wrong page.\n"); | ||
951 | return; | ||
952 | } | ||
953 | |||
954 | if ((u_long)ptr & malloc_pagemask) { | ||
955 | wrtwarning("modified (page-) pointer.\n"); | ||
956 | return; | ||
957 | } | ||
958 | |||
959 | /* Count how many pages and mark them free at the same time */ | ||
960 | page_dir[index] = MALLOC_FREE; | ||
961 | for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++) | ||
962 | page_dir[index + i] = MALLOC_FREE; | ||
963 | |||
964 | l = i << malloc_pageshift; | ||
965 | |||
966 | if (malloc_junk) | ||
967 | memset(ptr, SOME_JUNK, l); | ||
968 | |||
969 | #ifdef __FreeBSD__ | ||
970 | if (malloc_hint) | ||
971 | madvise(ptr, l, MADV_FREE); | ||
354 | #endif | 972 | #endif |
355 | return(cp); | 973 | |
356 | } else | 974 | tail = (char *)ptr+l; |
357 | free(cp); | 975 | |
976 | /* add to free-list */ | ||
977 | if (!px) | ||
978 | px = imalloc(sizeof *px); /* This cannot fail... */ | ||
979 | px->page = ptr; | ||
980 | px->end = tail; | ||
981 | px->size = l; | ||
982 | if (!free_list.next) { | ||
983 | |||
984 | /* Nothing on free list, put this at head */ | ||
985 | px->next = free_list.next; | ||
986 | px->prev = &free_list; | ||
987 | free_list.next = px; | ||
988 | pf = px; | ||
989 | px = 0; | ||
990 | |||
991 | } else { | ||
992 | |||
993 | /* Find the right spot, leave pf pointing to the modified entry. */ | ||
994 | tail = (char *)ptr+l; | ||
995 | |||
996 | for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next) | ||
997 | ; /* Race ahead here */ | ||
998 | |||
999 | if (pf->page > tail) { | ||
1000 | /* Insert before entry */ | ||
1001 | px->next = pf; | ||
1002 | px->prev = pf->prev; | ||
1003 | pf->prev = px; | ||
1004 | px->prev->next = px; | ||
1005 | pf = px; | ||
1006 | px = 0; | ||
1007 | } else if (pf->end == ptr ) { | ||
1008 | /* Append to the previous entry */ | ||
1009 | pf->end = (char *)pf->end + l; | ||
1010 | pf->size += l; | ||
1011 | if (pf->next && pf->end == pf->next->page ) { | ||
1012 | /* And collapse the next too. */ | ||
1013 | pt = pf->next; | ||
1014 | pf->end = pt->end; | ||
1015 | pf->size += pt->size; | ||
1016 | pf->next = pt->next; | ||
1017 | if (pf->next) | ||
1018 | pf->next->prev = pf; | ||
1019 | } | ||
1020 | } else if (pf->page == tail) { | ||
1021 | /* Prepend to entry */ | ||
1022 | pf->size += l; | ||
1023 | pf->page = ptr; | ||
1024 | } else if (!pf->next) { | ||
1025 | /* Append at tail of chain */ | ||
1026 | px->next = 0; | ||
1027 | px->prev = pf; | ||
1028 | pf->next = px; | ||
1029 | pf = px; | ||
1030 | px = 0; | ||
1031 | } else { | ||
1032 | wrterror("freelist is destroyed.\n"); | ||
358 | } | 1033 | } |
359 | if ((res = malloc(nbytes)) == NULL) | 1034 | } |
360 | return (NULL); | 1035 | |
361 | if (cp != res) /* common optimization if "compacting" */ | 1036 | /* Return something to OS ? */ |
362 | bcopy(cp, res, (nbytes < onb) ? nbytes : onb); | 1037 | if (!pf->next && /* If we're the last one, */ |
363 | return (res); | 1038 | pf->size > malloc_cache && /* ..and the cache is full, */ |
1039 | pf->end == malloc_brk && /* ..and none behind us, */ | ||
1040 | malloc_brk == sbrk(0)) { /* ..and it's OK to do... */ | ||
1041 | |||
1042 | /* | ||
1043 | * Keep the cache intact. Notice that the '>' above guarantees that | ||
1044 | * the pf will always have at least one page afterwards. | ||
1045 | */ | ||
1046 | pf->end = (char *)pf->page + malloc_cache; | ||
1047 | pf->size = malloc_cache; | ||
1048 | |||
1049 | brk(pf->end); | ||
1050 | malloc_brk = pf->end; | ||
1051 | |||
1052 | index = ptr2index(pf->end); | ||
1053 | last_index = index - 1; | ||
1054 | |||
1055 | for(i=index;i <= last_index;) | ||
1056 | page_dir[i++] = MALLOC_NOT_MINE; | ||
1057 | |||
1058 | /* XXX: We could realloc/shrink the pagedir here I guess. */ | ||
1059 | } | ||
1060 | if (pt) | ||
1061 | ifree(pt); | ||
364 | } | 1062 | } |
365 | 1063 | ||
366 | /* | 1064 | /* |
367 | * Search ``srchlen'' elements of each free list for a block whose | 1065 | * Free a chunk, and possibly the page it's on, if the page becomes empty. |
368 | * header starts at ``freep''. If srchlen is -1 search the whole list. | ||
369 | * Return bucket number, or -1 if not found. | ||
370 | */ | 1066 | */ |
371 | static | 1067 | |
372 | findbucket(freep, srchlen) | 1068 | /* ARGSUSED */ |
373 | union overhead *freep; | 1069 | static __inline__ void |
374 | int srchlen; | 1070 | free_bytes(ptr, index, info) |
1071 | void *ptr; | ||
1072 | int index; | ||
1073 | struct pginfo *info; | ||
375 | { | 1074 | { |
376 | register union overhead *p; | 1075 | int i; |
377 | register int i, j; | 1076 | struct pginfo **mp; |
378 | 1077 | void *vp; | |
379 | for (i = 0; i < NBUCKETS; i++) { | 1078 | |
380 | j = 0; | 1079 | /* Find the chunk number on the page */ |
381 | for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { | 1080 | i = ((u_long)ptr & malloc_pagemask) >> info->shift; |
382 | if (p == freep) | 1081 | |
383 | return (i); | 1082 | if (((u_long)ptr & (info->size-1))) { |
384 | j++; | 1083 | wrtwarning("modified (chunk-) pointer.\n"); |
385 | } | 1084 | return; |
386 | } | 1085 | } |
387 | return (-1); | 1086 | |
1087 | if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) { | ||
1088 | wrtwarning("chunk is already free.\n"); | ||
1089 | return; | ||
1090 | } | ||
1091 | |||
1092 | if (malloc_junk) | ||
1093 | memset(ptr, SOME_JUNK, info->size); | ||
1094 | |||
1095 | info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS); | ||
1096 | info->free++; | ||
1097 | |||
1098 | mp = page_dir + info->shift; | ||
1099 | |||
1100 | if (info->free == 1) { | ||
1101 | |||
1102 | /* Page became non-full */ | ||
1103 | |||
1104 | mp = page_dir + info->shift; | ||
1105 | /* Insert in address order */ | ||
1106 | while (*mp && (*mp)->next && (*mp)->next->page < info->page) | ||
1107 | mp = &(*mp)->next; | ||
1108 | info->next = *mp; | ||
1109 | *mp = info; | ||
1110 | return; | ||
1111 | } | ||
1112 | |||
1113 | if (info->free != info->total) | ||
1114 | return; | ||
1115 | |||
1116 | /* Find & remove this page in the queue */ | ||
1117 | while (*mp != info) { | ||
1118 | mp = &((*mp)->next); | ||
1119 | #ifdef MALLOC_EXTRA_SANITY | ||
1120 | if (!*mp) | ||
1121 | wrterror("(ES): Not on queue\n"); | ||
1122 | #endif /* MALLOC_EXTRA_SANITY */ | ||
1123 | } | ||
1124 | *mp = info->next; | ||
1125 | |||
1126 | /* Free the page & the info structure if need be */ | ||
1127 | page_dir[ptr2index(info->page)] = MALLOC_FIRST; | ||
1128 | vp = info->page; /* Order is important ! */ | ||
1129 | if(vp != (void*)info) | ||
1130 | ifree(info); | ||
1131 | ifree(vp); | ||
1132 | } | ||
1133 | |||
1134 | static void | ||
1135 | ifree(ptr) | ||
1136 | void *ptr; | ||
1137 | { | ||
1138 | struct pginfo *info; | ||
1139 | int index; | ||
1140 | |||
1141 | /* This is legal */ | ||
1142 | if (!ptr) | ||
1143 | return; | ||
1144 | |||
1145 | if (!malloc_started) { | ||
1146 | wrtwarning("malloc() has never been called.\n"); | ||
1147 | return; | ||
1148 | } | ||
1149 | |||
1150 | /* If we're already sinking, don't make matters any worse. */ | ||
1151 | if (suicide) | ||
1152 | return; | ||
1153 | |||
1154 | index = ptr2index(ptr); | ||
1155 | |||
1156 | if (index < malloc_pageshift) { | ||
1157 | wrtwarning("junk pointer, too low to make sense.\n"); | ||
1158 | return; | ||
1159 | } | ||
1160 | |||
1161 | if (index > last_index) { | ||
1162 | wrtwarning("junk pointer, too high to make sense.\n"); | ||
1163 | return; | ||
1164 | } | ||
1165 | |||
1166 | info = page_dir[index]; | ||
1167 | |||
1168 | if (info < MALLOC_MAGIC) | ||
1169 | free_pages(ptr, index, info); | ||
1170 | else | ||
1171 | free_bytes(ptr, index, info); | ||
1172 | return; | ||
388 | } | 1173 | } |
389 | 1174 | ||
390 | #ifdef MSTATS | ||
391 | /* | 1175 | /* |
392 | * mstats - print out statistics about malloc | 1176 | * These are the public exported interface routines. |
393 | * | ||
394 | * Prints two lines of numbers, one showing the length of the free list | ||
395 | * for each size category, the second showing the number of mallocs - | ||
396 | * frees for each size category. | ||
397 | */ | 1177 | */ |
398 | mstats(s) | 1178 | |
399 | char *s; | 1179 | static int malloc_active; |
1180 | |||
1181 | void * | ||
1182 | malloc(size_t size) | ||
1183 | { | ||
1184 | register void *r; | ||
1185 | |||
1186 | malloc_func = " in malloc():"; | ||
1187 | THREAD_LOCK(); | ||
1188 | if (malloc_active++) { | ||
1189 | wrtwarning("recursive call.\n"); | ||
1190 | malloc_active--; | ||
1191 | return (0); | ||
1192 | } | ||
1193 | r = imalloc(size); | ||
1194 | UTRACE(0, size, r); | ||
1195 | malloc_active--; | ||
1196 | THREAD_UNLOCK(); | ||
1197 | if (malloc_xmalloc && !r) | ||
1198 | wrterror("out of memory.\n"); | ||
1199 | return (r); | ||
1200 | } | ||
1201 | |||
1202 | void | ||
1203 | free(void *ptr) | ||
400 | { | 1204 | { |
401 | register int i, j; | 1205 | malloc_func = " in free():"; |
402 | register union overhead *p; | 1206 | THREAD_LOCK(); |
403 | int totfree = 0, | 1207 | if (malloc_active++) { |
404 | totused = 0; | 1208 | wrtwarning("recursive call.\n"); |
405 | 1209 | malloc_active--; | |
406 | fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); | 1210 | return; |
407 | for (i = 0; i < NBUCKETS; i++) { | 1211 | } |
408 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) | 1212 | ifree(ptr); |
409 | ; | 1213 | UTRACE(ptr, 0, 0); |
410 | fprintf(stderr, " %d", j); | 1214 | malloc_active--; |
411 | totfree += j * (1 << (i + 3)); | 1215 | THREAD_UNLOCK(); |
412 | } | 1216 | return; |
413 | fprintf(stderr, "\nused:\t"); | 1217 | } |
414 | for (i = 0; i < NBUCKETS; i++) { | 1218 | |
415 | fprintf(stderr, " %d", nmalloc[i]); | 1219 | void * |
416 | totused += nmalloc[i] * (1 << (i + 3)); | 1220 | realloc(void *ptr, size_t size) |
417 | } | 1221 | { |
418 | fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", | 1222 | register void *r; |
419 | totused, totfree); | 1223 | |
1224 | malloc_func = " in realloc():"; | ||
1225 | THREAD_LOCK(); | ||
1226 | if (malloc_active++) { | ||
1227 | wrtwarning("recursive call.\n"); | ||
1228 | malloc_active--; | ||
1229 | return (0); | ||
1230 | } | ||
1231 | if (!ptr) { | ||
1232 | r = imalloc(size); | ||
1233 | } else { | ||
1234 | r = irealloc(ptr, size); | ||
1235 | } | ||
1236 | UTRACE(ptr, size, r); | ||
1237 | malloc_active--; | ||
1238 | THREAD_UNLOCK(); | ||
1239 | if (malloc_xmalloc && !r) | ||
1240 | wrterror("out of memory.\n"); | ||
1241 | return (r); | ||
420 | } | 1242 | } |
421 | #endif | ||
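Finally, a small usage sketch of the public interface and the option hook introduced by this revision (illustrative only; it assumes the program is linked against this allocator, which exports the malloc_options global, and that nothing allocates before main() runs):

    /* Illustration only: selecting 'A' (abort on error) and 'J' (junk fill)
     * before the first allocation triggers malloc_init(). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    extern char *malloc_options;

    int main(void)
    {
        malloc_options = "AJ";            /* read by malloc_init() on first malloc */

        char *p = malloc(100);
        if (p == NULL)
            return 1;                     /* with 'A' set, failures abort instead  */
        memset(p, 0, 100);

        char *q = realloc(p, 200);        /* may move; old contents are copied     */
        if (q == NULL) {
            free(p);
            return 1;
        }
        free(q);
        free(NULL);                       /* explicitly legal, see ifree()         */
        return 0;
    }

malloc_init() reads, in order, the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable (skipped for set-id programs), and this global, so the same option letters can also be supplied at run time without recompiling.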