diff options
Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r-- | src/lib/libc/stdlib/malloc.c | 1610 |
1 files changed, 1258 insertions, 352 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c index 3c57fad024..f5704754a0 100644 --- a/src/lib/libc/stdlib/malloc.c +++ b/src/lib/libc/stdlib/malloc.c | |||
@@ -1,421 +1,1327 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 1983 Regents of the University of California. | 2 | * ---------------------------------------------------------------------------- |
3 | * All rights reserved. | 3 | * "THE BEER-WARE LICENSE" (Revision 42): |
4 | * | 4 | * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you |
5 | * Redistribution and use in source and binary forms, with or without | 5 | * can do whatever you want with this stuff. If we meet some day, and you think |
6 | * modification, are permitted provided that the following conditions | 6 | * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp |
7 | * are met: | 7 | * ---------------------------------------------------------------------------- |
8 | * 1. Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * 3. All advertising materials mentioning features or use of this software | ||
14 | * must display the following acknowledgement: | ||
15 | * This product includes software developed by the University of | ||
16 | * California, Berkeley and its contributors. | ||
17 | * 4. Neither the name of the University nor the names of its contributors | ||
18 | * may be used to endorse or promote products derived from this software | ||
19 | * without specific prior written permission. | ||
20 | * | ||
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | ||
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
31 | * SUCH DAMAGE. | ||
32 | */ | 8 | */ |
33 | 9 | ||
34 | #if defined(LIBC_SCCS) && !defined(lint) | 10 | #if defined(LIBC_SCCS) && !defined(lint) |
35 | /*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ | 11 | static char rcsid[] = "$OpenBSD: malloc.c,v 1.66 2004/02/19 23:20:53 tdeval Exp $"; |
36 | static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $"; | ||
37 | #endif /* LIBC_SCCS and not lint */ | 12 | #endif /* LIBC_SCCS and not lint */ |
38 | 13 | ||
39 | /* | 14 | /* |
40 | * malloc.c (Caltech) 2/21/82 | 15 | * Defining MALLOC_EXTRA_SANITY will enable extra checks which are |
41 | * Chris Kingsley, kingsley@cit-20. | 16 | * related to internal conditions and consistency in malloc.c. This has |
42 | * | 17 | * a noticeable runtime performance hit, and generally will not do you |
43 | * This is a very fast storage allocator. It allocates blocks of a small | 18 | * any good unless you fiddle with the internals of malloc or want |
44 | * number of different sizes, and keeps free lists of each size. Blocks that | 19 | * to catch random pointer corruption as early as possible. |
45 | * don't exactly fit are passed up to the next larger size. In this | 20 | */ |
46 | * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. | 21 | #ifndef MALLOC_EXTRA_SANITY |
47 | * This is designed for use in a virtual memory environment. | 22 | #undef MALLOC_EXTRA_SANITY |
23 | #endif | ||
24 | |||
25 | /* | ||
26 | * Defining MALLOC_STATS will enable you to call malloc_dump() and set | ||
27 | * the [dD] options in the MALLOC_OPTIONS environment variable. | ||
28 | * It has no run-time performance hit, but does pull in stdio... | ||
29 | */ | ||
30 | #ifndef MALLOC_STATS | ||
31 | #undef MALLOC_STATS | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * What to use for Junk. This is the byte value we use to fill with | ||
36 | * when the 'J' option is enabled. | ||
48 | */ | 37 | */ |
38 | #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ | ||
49 | 39 | ||
50 | #include <sys/types.h> | 40 | #include <sys/types.h> |
41 | #include <sys/param.h> | ||
42 | #include <sys/mman.h> | ||
43 | #include <sys/uio.h> | ||
44 | #include <stdio.h> | ||
51 | #include <stdlib.h> | 45 | #include <stdlib.h> |
52 | #include <string.h> | 46 | #include <string.h> |
53 | #include <unistd.h> | 47 | #include <unistd.h> |
48 | #include <fcntl.h> | ||
49 | #include <limits.h> | ||
50 | #include <errno.h> | ||
54 | 51 | ||
55 | #define NULL 0 | 52 | #include "thread_private.h" |
56 | 53 | ||
57 | static void morecore(); | 54 | /* |
58 | static int findbucket(); | 55 | * The basic parameters you can tweak. |
56 | * | ||
57 | * malloc_pageshift pagesize = 1 << malloc_pageshift | ||
58 | * It's probably best if this is the native | ||
59 | * page size, but it shouldn't have to be. | ||
60 | * | ||
61 | * malloc_minsize minimum size of an allocation in bytes. | ||
62 | * If this is too small it's too much work | ||
63 | * to manage them. This is also the smallest | ||
64 | * unit of alignment used for the storage | ||
65 | * returned by malloc/realloc. | ||
66 | * | ||
67 | */ | ||
68 | |||
69 | #if defined(__OpenBSD__) && defined(__sparc__) | ||
70 | # define malloc_pageshift 13U | ||
71 | #endif /* __OpenBSD__ */ | ||
59 | 72 | ||
60 | /* | 73 | /* |
61 | * The overhead on a block is at least 4 bytes. When free, this space | 74 | * No user serviceable parts behind this point. |
62 | * contains a pointer to the next free block, and the bottom two bits must | 75 | * |
63 | * be zero. When in use, the first byte is set to MAGIC, and the second | 76 | * This structure describes a page worth of chunks. |
64 | * byte is the size index. The remaining bytes are for alignment. | ||
65 | * If range checking is enabled then a second word holds the size of the | ||
66 | * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). | ||
67 | * The order of elements is critical: ov_magic must overlay the low order | ||
68 | * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. | ||
69 | */ | 77 | */ |
70 | union overhead { | 78 | |
71 | union overhead *ov_next; /* when free */ | 79 | struct pginfo { |
72 | struct { | 80 | struct pginfo *next; /* next on the free list */ |
73 | u_char ovu_magic; /* magic number */ | 81 | void *page; /* Pointer to the page */ |
74 | u_char ovu_index; /* bucket # */ | 82 | u_short size; /* size of this page's chunks */ |
75 | #ifdef RCHECK | 83 | u_short shift; /* How far to shift for this size chunks */ |
76 | u_short ovu_rmagic; /* range magic number */ | 84 | u_short free; /* How many free chunks */ |
77 | u_long ovu_size; /* actual block size */ | 85 | u_short total; /* How many chunk */ |
78 | #endif | 86 | u_long bits[1]; /* Which chunks are free */ |
79 | } ovu; | ||
80 | #define ov_magic ovu.ovu_magic | ||
81 | #define ov_index ovu.ovu_index | ||
82 | #define ov_rmagic ovu.ovu_rmagic | ||
83 | #define ov_size ovu.ovu_size | ||
84 | }; | 87 | }; |
85 | 88 | ||
86 | #define MAGIC 0xef /* magic # on accounting info */ | 89 | /* |
87 | #define RMAGIC 0x5555 /* magic # on range info */ | 90 | * This structure describes a number of free pages. |
91 | */ | ||
92 | |||
93 | struct pgfree { | ||
94 | struct pgfree *next; /* next run of free pages */ | ||
95 | struct pgfree *prev; /* prev run of free pages */ | ||
96 | void *page; /* pointer to free pages */ | ||
97 | void *end; /* pointer to end of free pages */ | ||
98 | u_long size; /* number of bytes free */ | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * How many bits per u_long in the bitmap. | ||
103 | * Change only if not 8 bits/byte | ||
104 | */ | ||
105 | #define MALLOC_BITS (8*sizeof(u_long)) | ||
106 | |||
107 | /* | ||
108 | * Magic values to put in the page_directory | ||
109 | */ | ||
110 | #define MALLOC_NOT_MINE ((struct pginfo*) 0) | ||
111 | #define MALLOC_FREE ((struct pginfo*) 1) | ||
112 | #define MALLOC_FIRST ((struct pginfo*) 2) | ||
113 | #define MALLOC_FOLLOW ((struct pginfo*) 3) | ||
114 | #define MALLOC_MAGIC ((struct pginfo*) 4) | ||
115 | |||
116 | #ifndef malloc_pageshift | ||
117 | #define malloc_pageshift (PGSHIFT) | ||
118 | #endif | ||
119 | |||
120 | #ifndef malloc_minsize | ||
121 | #define malloc_minsize 16U | ||
122 | #endif | ||
123 | |||
124 | #ifndef malloc_pageshift | ||
125 | #error "malloc_pageshift undefined" | ||
126 | #endif | ||
127 | |||
128 | #if !defined(malloc_pagesize) | ||
129 | #define malloc_pagesize (1UL<<malloc_pageshift) | ||
130 | #endif | ||
131 | |||
132 | #if ((1UL<<malloc_pageshift) != malloc_pagesize) | ||
133 | #error "(1UL<<malloc_pageshift) != malloc_pagesize" | ||
134 | #endif | ||
135 | |||
136 | #ifndef malloc_maxsize | ||
137 | #define malloc_maxsize ((malloc_pagesize)>>1) | ||
138 | #endif | ||
139 | |||
140 | /* A mask for the offset inside a page. */ | ||
141 | #define malloc_pagemask ((malloc_pagesize)-1) | ||
142 | |||
143 | #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) | ||
144 | #define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo) | ||
88 | 145 | ||
89 | #ifdef RCHECK | 146 | /* fd of /dev/zero */ |
90 | #define RSLOP sizeof (u_short) | 147 | #ifdef USE_DEV_ZERO |
148 | static int fdzero; | ||
149 | #define MMAP_FD fdzero | ||
150 | #define INIT_MMAP() \ | ||
151 | { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \ | ||
152 | wrterror("open of /dev/zero\n"); } | ||
91 | #else | 153 | #else |
92 | #define RSLOP 0 | 154 | #define MMAP_FD (-1) |
155 | #define INIT_MMAP() | ||
156 | #endif | ||
157 | |||
158 | /* Set when initialization has been done */ | ||
159 | static unsigned int malloc_started; | ||
160 | |||
161 | /* Number of free pages we cache */ | ||
162 | static unsigned int malloc_cache = 16; | ||
163 | |||
164 | /* The offset from pagenumber to index into the page directory */ | ||
165 | static u_long malloc_origo; | ||
166 | |||
167 | /* The last index in the page directory we care about */ | ||
168 | static u_long last_index; | ||
169 | |||
170 | /* Pointer to page directory. Allocated "as if with" malloc */ | ||
171 | static struct pginfo **page_dir; | ||
172 | |||
173 | /* How many slots in the page directory */ | ||
174 | static size_t malloc_ninfo; | ||
175 | |||
176 | /* Free pages line up here */ | ||
177 | static struct pgfree free_list; | ||
178 | |||
179 | /* Abort(), user doesn't handle problems. */ | ||
180 | static int malloc_abort = 2; | ||
181 | |||
182 | /* Are we trying to die ? */ | ||
183 | static int suicide; | ||
184 | |||
185 | #ifdef MALLOC_STATS | ||
186 | /* dump statistics */ | ||
187 | static int malloc_stats; | ||
93 | #endif | 188 | #endif |
94 | 189 | ||
190 | /* avoid outputting warnings? */ | ||
191 | static int malloc_silent; | ||
192 | |||
193 | /* always realloc ? */ | ||
194 | static int malloc_realloc; | ||
195 | |||
196 | /* mprotect free pages PROT_NONE? */ | ||
197 | static int malloc_freeprot; | ||
198 | |||
199 | /* use guard pages after allocations? */ | ||
200 | static int malloc_guard = 0; | ||
201 | |||
202 | #if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE)) | ||
203 | /* pass the kernel a hint on free pages ? */ | ||
204 | static int malloc_hint; | ||
205 | #endif | ||
206 | |||
207 | /* xmalloc behaviour ? */ | ||
208 | static int malloc_xmalloc; | ||
209 | |||
210 | /* zero fill ? */ | ||
211 | static int malloc_zero; | ||
212 | |||
213 | /* junk fill ? */ | ||
214 | static int malloc_junk; | ||
215 | |||
216 | #ifdef __FreeBSD__ | ||
217 | /* utrace ? */ | ||
218 | static int malloc_utrace; | ||
219 | |||
220 | struct ut { void *p; size_t s; void *r; }; | ||
221 | |||
222 | void utrace(struct ut *, int); | ||
223 | |||
224 | #define UTRACE(a, b, c) \ | ||
225 | if (malloc_utrace) \ | ||
226 | {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);} | ||
227 | #else /* !__FreeBSD__ */ | ||
228 | #define UTRACE(a,b,c) | ||
229 | #endif | ||
230 | |||
231 | /* my last break. */ | ||
232 | static void *malloc_brk; | ||
233 | |||
234 | /* one location cache for free-list holders */ | ||
235 | static struct pgfree *px; | ||
236 | |||
237 | /* compile-time options */ | ||
238 | char *malloc_options; | ||
239 | |||
240 | /* Name of the current public function */ | ||
241 | static char *malloc_func; | ||
242 | |||
243 | /* Macro for mmap */ | ||
244 | #define MMAP(size) \ | ||
245 | mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ | ||
246 | MMAP_FD, (off_t)0) | ||
247 | |||
95 | /* | 248 | /* |
96 | * nextf[i] is the pointer to the next free block of size 2^(i+3). The | 249 | * Necessary function declarations |
97 | * smallest allocatable block is 8 bytes. The overhead information | ||
98 | * precedes the data area returned to the user. | ||
99 | */ | 250 | */ |
100 | #define NBUCKETS 30 | 251 | static int extend_pgdir(u_long index); |
101 | static union overhead *nextf[NBUCKETS]; | 252 | static void *imalloc(size_t size); |
102 | extern char *sbrk(); | 253 | static void ifree(void *ptr); |
254 | static void *irealloc(void *ptr, size_t size); | ||
255 | static void *malloc_bytes(size_t size); | ||
256 | |||
257 | #ifdef MALLOC_STATS | ||
258 | void | ||
259 | malloc_dump(FILE *fd) | ||
260 | { | ||
261 | struct pginfo **pd; | ||
262 | struct pgfree *pf; | ||
263 | int j; | ||
264 | |||
265 | pd = page_dir; | ||
266 | |||
267 | /* print out all the pages */ | ||
268 | for(j=0;j<=last_index;j++) { | ||
269 | fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j); | ||
270 | if (pd[j] == MALLOC_NOT_MINE) { | ||
271 | for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++) | ||
272 | ; | ||
273 | j--; | ||
274 | fprintf(fd, ".. %5d not mine\n", j); | ||
275 | } else if (pd[j] == MALLOC_FREE) { | ||
276 | for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++) | ||
277 | ; | ||
278 | j--; | ||
279 | fprintf(fd, ".. %5d free\n", j); | ||
280 | } else if (pd[j] == MALLOC_FIRST) { | ||
281 | for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++) | ||
282 | ; | ||
283 | j--; | ||
284 | fprintf(fd, ".. %5d in use\n", j); | ||
285 | } else if (pd[j] < MALLOC_MAGIC) { | ||
286 | fprintf(fd, "(%p)\n", pd[j]); | ||
287 | } else { | ||
288 | fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n", | ||
289 | pd[j], pd[j]->free, pd[j]->total, | ||
290 | pd[j]->size, pd[j]->page, pd[j]->next); | ||
291 | } | ||
292 | } | ||
293 | |||
294 | for(pf=free_list.next; pf; pf=pf->next) { | ||
295 | fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n", | ||
296 | pf, pf->page, pf->end, pf->size, pf->prev, pf->next); | ||
297 | if (pf == pf->next) { | ||
298 | fprintf(fd, "Free_list loops\n"); | ||
299 | break; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | /* print out various info */ | ||
304 | fprintf(fd, "Minsize\t%d\n", malloc_minsize); | ||
305 | fprintf(fd, "Maxsize\t%d\n", malloc_maxsize); | ||
306 | fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize); | ||
307 | fprintf(fd, "Pageshift\t%d\n", malloc_pageshift); | ||
308 | fprintf(fd, "FirstPage\t%ld\n", malloc_origo); | ||
309 | fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift, | ||
310 | (last_index + malloc_pageshift) << malloc_pageshift); | ||
311 | fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift); | ||
312 | } | ||
313 | #endif /* MALLOC_STATS */ | ||
314 | |||
315 | extern char *__progname; | ||
316 | |||
317 | static void | ||
318 | wrterror(char *p) | ||
319 | { | ||
320 | char *q = " error: "; | ||
321 | struct iovec iov[4]; | ||
322 | |||
323 | iov[0].iov_base = __progname; | ||
324 | iov[0].iov_len = strlen(__progname); | ||
325 | iov[1].iov_base = malloc_func; | ||
326 | iov[1].iov_len = strlen(malloc_func); | ||
327 | iov[2].iov_base = q; | ||
328 | iov[2].iov_len = strlen(q); | ||
329 | iov[3].iov_base = p; | ||
330 | iov[3].iov_len = strlen(p); | ||
331 | writev(STDERR_FILENO, iov, 4); | ||
332 | |||
333 | suicide = 1; | ||
334 | #ifdef MALLOC_STATS | ||
335 | if (malloc_stats) | ||
336 | malloc_dump(stderr); | ||
337 | #endif /* MALLOC_STATS */ | ||
338 | abort(); | ||
339 | } | ||
340 | |||
341 | static void | ||
342 | wrtwarning(char *p) | ||
343 | { | ||
344 | char *q = " warning: "; | ||
345 | struct iovec iov[4]; | ||
346 | |||
347 | if (malloc_abort) | ||
348 | wrterror(p); | ||
349 | else if (malloc_silent) | ||
350 | return; | ||
351 | |||
352 | iov[0].iov_base = __progname; | ||
353 | iov[0].iov_len = strlen(__progname); | ||
354 | iov[1].iov_base = malloc_func; | ||
355 | iov[1].iov_len = strlen(malloc_func); | ||
356 | iov[2].iov_base = q; | ||
357 | iov[2].iov_len = strlen(q); | ||
358 | iov[3].iov_base = p; | ||
359 | iov[3].iov_len = strlen(p); | ||
360 | writev(STDERR_FILENO, iov, 4); | ||
361 | } | ||
362 | |||
363 | #ifdef MALLOC_STATS | ||
364 | static void | ||
365 | malloc_exit(void) | ||
366 | { | ||
367 | FILE *fd = fopen("malloc.out", "a"); | ||
368 | char *q = "malloc() warning: Couldn't dump stats\n"; | ||
369 | if (fd != NULL) { | ||
370 | malloc_dump(fd); | ||
371 | fclose(fd); | ||
372 | } else | ||
373 | write(STDERR_FILENO, q, strlen(q)); | ||
374 | } | ||
375 | #endif /* MALLOC_STATS */ | ||
103 | 376 | ||
104 | static int pagesz; /* page size */ | ||
105 | static int pagebucket; /* page size bucket */ | ||
106 | 377 | ||
107 | #ifdef MSTATS | ||
108 | /* | 378 | /* |
109 | * nmalloc[i] is the difference between the number of mallocs and frees | 379 | * Allocate a number of pages from the OS |
110 | * for a given block size. | ||
111 | */ | 380 | */ |
112 | static u_int nmalloc[NBUCKETS]; | 381 | static void * |
113 | #include <stdio.h> | 382 | map_pages(size_t pages) |
114 | #endif | 383 | { |
384 | caddr_t result, tail; | ||
115 | 385 | ||
116 | #if defined(DEBUG) || defined(RCHECK) | 386 | result = (caddr_t)pageround((u_long)sbrk(0)); |
117 | #define ASSERT(p) if (!(p)) botch("p") | 387 | pages <<= malloc_pageshift; |
118 | #include <stdio.h> | 388 | if (pages > SIZE_T_MAX - (size_t)result) { |
119 | static | 389 | #ifdef MALLOC_EXTRA_SANITY |
120 | botch(s) | 390 | wrtwarning("(ES): overflow in map_pages fails\n"); |
121 | char *s; | 391 | #endif /* MALLOC_EXTRA_SANITY */ |
392 | errno = ENOMEM; | ||
393 | return (NULL); | ||
394 | } | ||
395 | tail = result + pages + malloc_guard; | ||
396 | |||
397 | if (brk(tail) == (char *)-1) { | ||
398 | #ifdef MALLOC_EXTRA_SANITY | ||
399 | wrtwarning("(ES): map_pages fails\n"); | ||
400 | #endif /* MALLOC_EXTRA_SANITY */ | ||
401 | return (NULL); | ||
402 | } | ||
403 | if (malloc_guard) | ||
404 | mprotect(result + pages, malloc_pagesize, PROT_NONE); | ||
405 | |||
406 | last_index = ptr2index(tail) - 1; | ||
407 | malloc_brk = tail; | ||
408 | |||
409 | if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index)) | ||
410 | return (NULL); | ||
411 | |||
412 | return (result); | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * Extend page directory | ||
417 | */ | ||
418 | static int | ||
419 | extend_pgdir(u_long index) | ||
420 | { | ||
421 | struct pginfo **new, **old; | ||
422 | size_t i, oldlen; | ||
423 | |||
424 | /* Make it this many pages */ | ||
425 | i = index * sizeof *page_dir; | ||
426 | i /= malloc_pagesize; | ||
427 | i += 2; | ||
428 | |||
429 | /* remember the old mapping size */ | ||
430 | oldlen = malloc_ninfo * sizeof *page_dir; | ||
431 | |||
432 | /* | ||
433 | * NOTE: we allocate new pages and copy the directory rather than tempt | ||
434 | * fate by trying to "grow" the region.. There is nothing to prevent | ||
435 | * us from accidently re-mapping space that's been allocated by our caller | ||
436 | * via dlopen() or other mmap(). | ||
437 | * | ||
438 | * The copy problem is not too bad, as there is 4K of page index per | ||
439 | * 4MB of malloc arena. | ||
440 | * | ||
441 | * We can totally avoid the copy if we open a file descriptor to associate | ||
442 | * the anon mappings with. Then, when we remap the pages at the new | ||
443 | * address, the old pages will be "magically" remapped.. But this means | ||
444 | * keeping open a "secret" file descriptor..... | ||
445 | */ | ||
446 | |||
447 | /* Get new pages */ | ||
448 | new = (struct pginfo**) MMAP(i * malloc_pagesize); | ||
449 | if (new == MAP_FAILED) | ||
450 | return (0); | ||
451 | |||
452 | /* Copy the old stuff */ | ||
453 | memcpy(new, page_dir, | ||
454 | malloc_ninfo * sizeof *page_dir); | ||
455 | |||
456 | /* register the new size */ | ||
457 | malloc_ninfo = i * malloc_pagesize / sizeof *page_dir; | ||
458 | |||
459 | /* swap the pointers */ | ||
460 | old = page_dir; | ||
461 | page_dir = new; | ||
462 | |||
463 | /* Now free the old stuff */ | ||
464 | munmap(old, oldlen); | ||
465 | return (1); | ||
466 | } | ||
467 | |||
468 | /* | ||
469 | * Initialize the world | ||
470 | */ | ||
471 | static void | ||
472 | malloc_init(void) | ||
122 | { | 473 | { |
123 | fprintf(stderr, "\r\nassertion botched: %s\r\n", s); | 474 | char *p, b[64]; |
124 | (void) fflush(stderr); /* just in case user buffered it */ | 475 | int i, j; |
476 | int save_errno = errno; | ||
477 | |||
478 | _MALLOC_LOCK_INIT(); | ||
479 | |||
480 | INIT_MMAP(); | ||
481 | |||
482 | #ifdef MALLOC_EXTRA_SANITY | ||
483 | malloc_junk = 1; | ||
484 | #endif /* MALLOC_EXTRA_SANITY */ | ||
485 | |||
486 | for (i = 0; i < 3; i++) { | ||
487 | if (i == 0) { | ||
488 | j = readlink("/etc/malloc.conf", b, sizeof b - 1); | ||
489 | if (j <= 0) | ||
490 | continue; | ||
491 | b[j] = '\0'; | ||
492 | p = b; | ||
493 | } else if (i == 1) { | ||
494 | if (issetugid() == 0) | ||
495 | p = getenv("MALLOC_OPTIONS"); | ||
496 | else | ||
497 | continue; | ||
498 | } else if (i == 2) { | ||
499 | p = malloc_options; | ||
500 | } | ||
501 | for (; p != NULL && *p != '\0'; p++) { | ||
502 | switch (*p) { | ||
503 | case '>': malloc_cache <<= 1; break; | ||
504 | case '<': malloc_cache >>= 1; break; | ||
505 | case 'a': malloc_abort = 0; break; | ||
506 | case 'A': malloc_abort = 1; break; | ||
507 | #ifdef MALLOC_STATS | ||
508 | case 'd': malloc_stats = 0; break; | ||
509 | case 'D': malloc_stats = 1; break; | ||
510 | #endif /* MALLOC_STATS */ | ||
511 | case 'f': malloc_freeprot = 0; break; | ||
512 | case 'F': malloc_freeprot = 1; break; | ||
513 | case 'g': malloc_guard = 0; break; | ||
514 | case 'G': malloc_guard = malloc_pagesize; break; | ||
515 | #if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE)) | ||
516 | case 'h': malloc_hint = 0; break; | ||
517 | case 'H': malloc_hint = 1; break; | ||
518 | #endif /* __FreeBSD__ */ | ||
519 | case 'r': malloc_realloc = 0; break; | ||
520 | case 'R': malloc_realloc = 1; break; | ||
521 | case 'j': malloc_junk = 0; break; | ||
522 | case 'J': malloc_junk = 1; break; | ||
523 | case 'n': malloc_silent = 0; break; | ||
524 | case 'N': malloc_silent = 1; break; | ||
525 | #ifdef __FreeBSD__ | ||
526 | case 'u': malloc_utrace = 0; break; | ||
527 | case 'U': malloc_utrace = 1; break; | ||
528 | #endif /* __FreeBSD__ */ | ||
529 | case 'x': malloc_xmalloc = 0; break; | ||
530 | case 'X': malloc_xmalloc = 1; break; | ||
531 | case 'z': malloc_zero = 0; break; | ||
532 | case 'Z': malloc_zero = 1; break; | ||
533 | default: | ||
534 | j = malloc_abort; | ||
535 | malloc_abort = 0; | ||
536 | wrtwarning("unknown char in MALLOC_OPTIONS\n"); | ||
537 | malloc_abort = j; | ||
538 | break; | ||
539 | } | ||
540 | } | ||
541 | } | ||
542 | |||
543 | UTRACE(0, 0, 0); | ||
544 | |||
545 | /* | ||
546 | * We want junk in the entire allocation, and zero only in the part | ||
547 | * the user asked for. | ||
548 | */ | ||
549 | if (malloc_zero) | ||
550 | malloc_junk=1; | ||
551 | |||
552 | #ifdef MALLOC_STATS | ||
553 | if (malloc_stats && (atexit(malloc_exit) == -1)) | ||
554 | wrtwarning("atexit(2) failed. Will not be able to dump malloc stats on exit\n"); | ||
555 | #endif /* MALLOC_STATS */ | ||
556 | |||
557 | /* Allocate one page for the page directory */ | ||
558 | page_dir = (struct pginfo **) MMAP(malloc_pagesize); | ||
559 | |||
560 | if (page_dir == MAP_FAILED) | ||
561 | wrterror("mmap(2) failed, check limits\n"); | ||
562 | |||
563 | /* | ||
564 | * We need a maximum of malloc_pageshift buckets, steal these from the | ||
565 | * front of the page_directory; | ||
566 | */ | ||
567 | malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift; | ||
568 | malloc_origo -= malloc_pageshift; | ||
569 | |||
570 | malloc_ninfo = malloc_pagesize / sizeof *page_dir; | ||
571 | |||
572 | /* Been here, done that */ | ||
573 | malloc_started++; | ||
574 | |||
575 | /* Recalculate the cache size in bytes, and make sure it's nonzero */ | ||
576 | |||
577 | if (!malloc_cache) | ||
578 | malloc_cache++; | ||
579 | |||
580 | malloc_cache <<= malloc_pageshift; | ||
581 | |||
582 | /* | ||
583 | * This is a nice hack from Kaleb Keithly (kaleb@x.org). | ||
584 | * We can sbrk(2) further back when we keep this on a low address. | ||
585 | */ | ||
586 | px = (struct pgfree *) imalloc (sizeof *px); | ||
587 | errno = save_errno; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * Allocate a number of complete pages | ||
592 | */ | ||
593 | static void * | ||
594 | malloc_pages(size_t size) | ||
595 | { | ||
596 | void *p, *delay_free = NULL; | ||
597 | int i; | ||
598 | struct pgfree *pf; | ||
599 | u_long index; | ||
600 | |||
601 | size = pageround(size) + malloc_guard; | ||
602 | |||
603 | p = NULL; | ||
604 | /* Look for free pages before asking for more */ | ||
605 | for(pf = free_list.next; pf; pf = pf->next) { | ||
606 | |||
607 | #ifdef MALLOC_EXTRA_SANITY | ||
608 | if (pf->size & malloc_pagemask) | ||
609 | wrterror("(ES): junk length entry on free_list\n"); | ||
610 | if (!pf->size) | ||
611 | wrterror("(ES): zero length entry on free_list\n"); | ||
612 | if (pf->page == pf->end) | ||
613 | wrterror("(ES): zero entry on free_list\n"); | ||
614 | if (pf->page > pf->end) | ||
615 | wrterror("(ES): sick entry on free_list\n"); | ||
616 | if ((void*)pf->page >= (void*)sbrk(0)) | ||
617 | wrterror("(ES): entry on free_list past brk\n"); | ||
618 | if (page_dir[ptr2index(pf->page)] != MALLOC_FREE) | ||
619 | wrterror("(ES): non-free first page on free-list\n"); | ||
620 | if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE) | ||
621 | wrterror("(ES): non-free last page on free-list\n"); | ||
622 | #endif /* MALLOC_EXTRA_SANITY */ | ||
623 | |||
624 | if (pf->size < size) | ||
625 | continue; | ||
626 | |||
627 | if (pf->size == size) { | ||
628 | p = pf->page; | ||
629 | if (pf->next != NULL) | ||
630 | pf->next->prev = pf->prev; | ||
631 | pf->prev->next = pf->next; | ||
632 | delay_free = pf; | ||
633 | break; | ||
634 | } | ||
635 | |||
636 | p = pf->page; | ||
637 | pf->page = (char *)pf->page + size; | ||
638 | pf->size -= size; | ||
639 | break; | ||
640 | } | ||
641 | |||
642 | size -= malloc_guard; | ||
643 | |||
644 | #ifdef MALLOC_EXTRA_SANITY | ||
645 | if (p != NULL && page_dir[ptr2index(p)] != MALLOC_FREE) | ||
646 | wrterror("(ES): allocated non-free page on free-list\n"); | ||
647 | #endif /* MALLOC_EXTRA_SANITY */ | ||
648 | |||
649 | if ((malloc_guard || malloc_freeprot) && p != NULL) | ||
650 | mprotect(p, size, PROT_READ|PROT_WRITE); | ||
651 | |||
652 | size >>= malloc_pageshift; | ||
653 | |||
654 | /* Map new pages */ | ||
655 | if (p == NULL) | ||
656 | p = map_pages(size); | ||
657 | |||
658 | if (p != NULL) { | ||
659 | |||
660 | index = ptr2index(p); | ||
661 | page_dir[index] = MALLOC_FIRST; | ||
662 | for (i=1;i<size;i++) | ||
663 | page_dir[index+i] = MALLOC_FOLLOW; | ||
664 | |||
665 | if (malloc_junk) | ||
666 | memset(p, SOME_JUNK, size << malloc_pageshift); | ||
667 | } | ||
668 | |||
669 | if (delay_free) { | ||
670 | if (px == NULL) | ||
671 | px = delay_free; | ||
672 | else | ||
673 | ifree(delay_free); | ||
674 | } | ||
675 | |||
676 | return (p); | ||
677 | } | ||
678 | |||
679 | /* | ||
680 | * Allocate a page of fragments | ||
681 | */ | ||
682 | |||
683 | static __inline__ int | ||
684 | malloc_make_chunks(int bits) | ||
685 | { | ||
686 | struct pginfo *bp; | ||
687 | void *pp; | ||
688 | int i, k, l; | ||
689 | |||
690 | /* Allocate a new bucket */ | ||
691 | pp = malloc_pages((size_t)malloc_pagesize); | ||
692 | if (pp == NULL) | ||
693 | return (0); | ||
694 | |||
695 | /* Find length of admin structure */ | ||
696 | l = sizeof *bp - sizeof(u_long); | ||
697 | l += sizeof(u_long) * | ||
698 | (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); | ||
699 | |||
700 | /* Don't waste more than two chunks on this */ | ||
701 | /* | ||
702 | * If we are to allocate a memory protected page for the malloc(0) | ||
703 | * case (when bits=0), it must be from a different page than the | ||
704 | * pginfo page. | ||
705 | * --> Treat it like the big chunk alloc, get a second data page. | ||
706 | */ | ||
707 | if (bits != 0 && (1UL<<(bits)) <= l+l) { | ||
708 | bp = (struct pginfo *)pp; | ||
709 | } else { | ||
710 | bp = (struct pginfo *)imalloc(l); | ||
711 | if (bp == NULL) { | ||
712 | ifree(pp); | ||
713 | return (0); | ||
714 | } | ||
715 | } | ||
716 | |||
717 | /* memory protect the page allocated in the malloc(0) case */ | ||
718 | if (bits == 0) { | ||
719 | |||
720 | bp->size = 0; | ||
721 | bp->shift = 1; | ||
722 | i = malloc_minsize-1; | ||
723 | while (i >>= 1) | ||
724 | bp->shift++; | ||
725 | bp->total = bp->free = malloc_pagesize >> bp->shift; | ||
726 | bp->page = pp; | ||
727 | |||
728 | k = mprotect(pp, malloc_pagesize, PROT_NONE); | ||
729 | if (k < 0) { | ||
730 | ifree(pp); | ||
731 | ifree(bp); | ||
732 | return (0); | ||
733 | } | ||
734 | } else { | ||
735 | bp->size = (1UL<<bits); | ||
736 | bp->shift = bits; | ||
737 | bp->total = bp->free = malloc_pagesize >> bits; | ||
738 | bp->page = pp; | ||
739 | } | ||
740 | |||
741 | /* set all valid bits in the bitmap */ | ||
742 | k = bp->total; | ||
743 | i = 0; | ||
744 | |||
745 | /* Do a bunch at a time */ | ||
746 | for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) | ||
747 | bp->bits[i / MALLOC_BITS] = ~0UL; | ||
748 | |||
749 | for(; i < k; i++) | ||
750 | bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS); | ||
751 | |||
752 | if (bp == bp->page) { | ||
753 | /* Mark the ones we stole for ourselves */ | ||
754 | for(i=0;l > 0;i++) { | ||
755 | bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS)); | ||
756 | bp->free--; | ||
757 | bp->total--; | ||
758 | l -= (1 << bits); | ||
759 | } | ||
760 | } | ||
761 | |||
762 | /* MALLOC_LOCK */ | ||
763 | |||
764 | page_dir[ptr2index(pp)] = bp; | ||
765 | |||
766 | bp->next = page_dir[bits]; | ||
767 | page_dir[bits] = bp; | ||
768 | |||
769 | /* MALLOC_UNLOCK */ | ||
770 | |||
771 | return (1); | ||
772 | } | ||
773 | |||
/*
 * Allocate a fragment: a power-of-two sized chunk carved out of a shared
 * page.  Bucket j holds chunks of 2^j bytes; a set bit in the page's
 * bitmap means "chunk is free".  Returns NULL if no page for the bucket
 * could be obtained.
 */
static void *
malloc_bytes(size_t size)
{
    int i,j;
    u_long u;
    struct pginfo *bp;
    int k;
    u_long *lp;

    /* Don't bother with anything less than this */
    /* unless we have a malloc(0) requests */
    if (size != 0 && size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket: j = ceil(log2(size)), 0 for malloc(0) */
    if (size == 0)
        j=0;
    else {
        j = 1;
        i = size-1;
        while (i >>= 1)
            j++;
    }

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
        return (NULL);

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit (mask u, in-word position k), and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;        /* u <<= 1: advance the single-bit mask */
        k++;
    }

    if (malloc_guard) {
        /*
         * Walk to a random position: skip a random number of set
         * (free) bits so the chosen chunk is unpredictable.
         */
        i = arc4random() % bp->free;
        while (i > 0) {
            u += u;
            k++;
            if (k >= MALLOC_BITS) {
                /* Wrapped past this word; move to the next one. */
                lp++;
                u = 1;
                k = 0;
            }
#ifdef MALLOC_EXTRA_SANITY
            if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS)
                wrterror("chunk overflow\n");
#endif /* MALLOC_EXTRA_SANITY */
            if (*lp & u)
                i--;    /* only free chunks count toward the walk */
        }
    }
    /* Clear the bit: this chunk is now allocated. */
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    /* Poison the chunk so stale data is never handed back looking valid. */
    if (malloc_junk && bp->size != 0)
        memset((char *)bp->page + k, SOME_JUNK, bp->size);

    return ((u_char *)bp->page + k);
}
855 | |||
856 | /* | ||
857 | * Allocate a piece of memory | ||
858 | */ | ||
859 | static void * | ||
860 | imalloc(size_t size) | ||
861 | { | ||
862 | void *result; | ||
863 | |||
864 | if (!malloc_started) | ||
865 | malloc_init(); | ||
866 | |||
867 | if (suicide) | ||
125 | abort(); | 868 | abort(); |
869 | |||
870 | if ((size + malloc_pagesize) < size) { /* Check for overflow */ | ||
871 | result = NULL; | ||
872 | errno = ENOMEM; | ||
873 | } | ||
874 | else if (size <= malloc_maxsize) | ||
875 | result = malloc_bytes(size); | ||
876 | else | ||
877 | result = malloc_pages(size); | ||
878 | |||
879 | if (malloc_abort == 1 && result == NULL) | ||
880 | wrterror("allocation failed\n"); | ||
881 | |||
882 | if (malloc_zero && result != NULL) | ||
883 | memset(result, 0, size); | ||
884 | |||
885 | return (result); | ||
126 | } | 886 | } |
127 | #else | ||
128 | #define ASSERT(p) | ||
129 | #endif | ||
130 | 887 | ||
/*
 * Change the size of an allocation.
 *
 * Validates 'ptr' against the page directory, tries to satisfy the new
 * size in place, and otherwise allocates fresh memory, copies the
 * smaller of the two sizes, and frees the old block.  Returns NULL on
 * bogus pointers or allocation failure.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    u_long osize, index;
    struct pginfo **mp;
    int i;

    if (suicide)
        abort();

    if (!malloc_started) {
        wrtwarning("malloc() has never been called\n");
        return (NULL);
    }

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense\n");
        return (NULL);
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense\n");
        return (NULL);
    }

    mp = &page_dir[index];

    if (*mp == MALLOC_FIRST) {          /* Page allocation */

        /* Check the pointer */
        if ((u_long)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer\n");
            return (NULL);
        }

        /* Find the size in bytes by walking the MALLOC_FOLLOW run */
        for (osize = malloc_pagesize; *(++mp) == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&          /* Unless we have to, */
            size <= osize &&            /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            if (malloc_junk)
                memset((char *)ptr + size, SOME_JUNK, osize-size);
            return (ptr);               /* ..don't do anything else. */
        }

    } else if (*mp >= MALLOC_MAGIC) {   /* Chunk allocation */

        /* Check the pointer for sane values (chunk alignment) */
        if ((u_long)ptr & ((1UL<<((*mp)->shift))-1)) {
            wrtwarning("modified (chunk-) pointer\n");
            return (NULL);
        }

        /* Find the chunk index in the page */
        i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already (set bit == free) */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free\n");
            return (NULL);
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&          /* Unless we have to, */
            size <= osize &&            /* ..or are too small, */
            (size > osize/2 ||          /* ..or could use a smaller size, */
             osize == malloc_minsize)) { /* ..(if there is one) */
            if (malloc_junk)
                memset((char *)ptr + size, SOME_JUNK, osize-size);
            return (ptr);               /* ..don't do anything else. */
        }

    } else {
        wrtwarning("pointer to wrong page\n");
        return (NULL);
    }

    p = imalloc(size);

    if (p != NULL) {
        /* copy the lesser of the two sizes, and free the old one */
        /* Don't move from/to 0 sized region !!! */
        if (osize != 0 && size != 0) {
            if (osize < size)
                memcpy(p, ptr, osize);
            else
                memcpy(p, ptr, size);
        }
        ifree(ptr);
    }
    return (p);
}
212 | 989 | ||
/*
 * Free a sequence of pages
 *
 * Marks the run free in the page directory, optionally junk-fills and
 * PROT_NONEs it, inserts/coalesces it into the address-ordered free
 * list, and gives memory back to the OS with brk() when the cache is
 * over-full and the run ends at the break.
 */

static __inline__ void
free_pages(void *ptr, u_long index, struct pginfo *info)
{
    u_long i, l;
    struct pgfree *pf, *pt=NULL;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page\n");
        return;
    }

    if ((u_long)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[index] = MALLOC_FREE;
    for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
        page_dir[index + i] = MALLOC_FREE;

    l = i << malloc_pageshift;   /* run length in bytes */

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);
#endif

    if (malloc_guard) {
        /* Reclaim the guard page that followed this allocation too. */
        page_dir[index + i] = MALLOC_FREE;
        l += malloc_guard;
    }
    tail = (char *)ptr+l;

    if (malloc_freeprot)
        mprotect(ptr, tail - ptr, PROT_NONE);

    /* add to free-list; px is a pre-allocated scratch entry */
    if (px == NULL)
        px = imalloc(sizeof *px);   /* This cannot fail... */
        /* NOTE(review): imalloc() can return NULL under exhaustion;
         * the stores below would then fault — confirm upstream intent. */
    px->page = ptr;
    px->end = tail;
    px->size = l;

    if (free_list.next == NULL) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = NULL;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */

        for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
            pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = NULL;
        } else if (pf->end == ptr ) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next != NULL && pf->end == pf->next->page ) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next != NULL)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (pf->next == NULL) {
            /* Append at tail of chain */
            px->next = NULL;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = NULL;
        } else {
            wrterror("freelist is destroyed\n");
        }
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&             /* If we're the last one, */
        pf->size > malloc_cache &&      /* ..and the cache is full, */
        pf->end == malloc_brk &&        /* ..and none behind us, */
        malloc_brk == sbrk(0)) {        /* ..and it's OK to do... */

        /*
         * Keep the cache intact.  Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        index = ptr2index(pf->end);

        /* Everything from the new break upward is no longer ours. */
        for(i=index;i <= last_index;)
            page_dir[i++] = MALLOC_NOT_MINE;

        last_index = index - 1;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    /* pt, if set, is a pgfree entry absorbed by coalescing above. */
    if (pt != NULL)
        ifree(pt);
}
286 | 1129 | ||
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

/* ARGSUSED */
static __inline__ void
free_bytes(void *ptr, int index, struct pginfo *info)
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    /* Chunk pointers must be aligned to the chunk size. */
    if ((u_long)ptr & ((1UL<<(info->shift))-1)) {
        wrtwarning("modified (chunk-) pointer\n");
        return;
    }

    /* A set bit means the chunk is already free. */
    if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free\n");
        return;
    }

    if (malloc_junk && info->size != 0)
        memset(ptr, SOME_JUNK, info->size);

    /* Mark the chunk free. */
    info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
    info->free++;

    /* Bucket head: shift doubles as the bucket index (size == 1<<shift);
     * the 0-sized bucket lives at page_dir[0]. */
    if (info->size != 0)
        mp = page_dir + info->shift;
    else
        mp = page_dir;

    if (info->free == 1) {

        /* Page became non-full */

        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    /* Page not yet completely free: nothing more to do. */
    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[ptr2index(info->page)] = MALLOC_FIRST;

    /* If the page was mprotected, unprotect it before releasing it */
    if (info->size == 0) {
        mprotect(info->page, malloc_pagesize, PROT_READ|PROT_WRITE);
        /* Do we have to care if mprotect succeeds here ? */
    }

    vp = info->page;            /* Order is important ! */
    if(vp != (void*)info)       /* info may live inside its own page */
        ifree(info);
    ifree(vp);
}
1205 | |||
1206 | static void | ||
1207 | ifree(void *ptr) | ||
1208 | { | ||
1209 | struct pginfo *info; | ||
1210 | u_long index; | ||
1211 | |||
1212 | /* This is legal */ | ||
1213 | if (ptr == NULL) | ||
1214 | return; | ||
1215 | |||
1216 | if (!malloc_started) { | ||
1217 | wrtwarning("malloc() has never been called\n"); | ||
1218 | return; | ||
1219 | } | ||
1220 | |||
1221 | /* If we're already sinking, don't make matters any worse. */ | ||
1222 | if (suicide) | ||
1223 | return; | ||
1224 | |||
1225 | index = ptr2index(ptr); | ||
1226 | |||
1227 | if (index < malloc_pageshift) { | ||
1228 | wrtwarning("junk pointer, too low to make sense\n"); | ||
1229 | return; | ||
1230 | } | ||
1231 | |||
1232 | if (index > last_index) { | ||
1233 | wrtwarning("junk pointer, too high to make sense\n"); | ||
1234 | return; | ||
1235 | } | ||
1236 | |||
1237 | info = page_dir[index]; | ||
1238 | |||
1239 | if (info < MALLOC_MAGIC) | ||
1240 | free_pages(ptr, index, info); | ||
1241 | else | ||
1242 | free_bytes(ptr, index, info); | ||
1243 | return; | ||
364 | } | 1244 | } |
365 | 1245 | ||
/* Depth counter for detecting re-entry into the allocator. */
static int malloc_active;

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
    static int warned;

    if (!warned) {
        warned = 1;
        wrtwarning("recursive call\n");
    }
    malloc_active--;
    _MALLOC_UNLOCK();
    errno = EDEADLK;
}
389 | 1265 | ||
390 | #ifdef MSTATS | ||
391 | /* | 1266 | /* |
392 | * mstats - print out statistics about malloc | 1267 | * These are the public exported interface routines. |
393 | * | ||
394 | * Prints two lines of numbers, one showing the length of the free list | ||
395 | * for each size category, the second showing the number of mallocs - | ||
396 | * frees for each size category. | ||
397 | */ | 1268 | */ |
398 | mstats(s) | 1269 | void * |
399 | char *s; | 1270 | malloc(size_t size) |
400 | { | 1271 | { |
401 | register int i, j; | 1272 | void *r; |
402 | register union overhead *p; | 1273 | |
403 | int totfree = 0, | 1274 | _MALLOC_LOCK(); |
404 | totused = 0; | 1275 | malloc_func = " in malloc():"; |
405 | 1276 | if (malloc_active++) { | |
406 | fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); | 1277 | malloc_recurse(); |
407 | for (i = 0; i < NBUCKETS; i++) { | 1278 | return (NULL); |
408 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) | 1279 | } |
409 | ; | 1280 | r = imalloc(size); |
410 | fprintf(stderr, " %d", j); | 1281 | UTRACE(0, size, r); |
411 | totfree += j * (1 << (i + 3)); | 1282 | malloc_active--; |
412 | } | 1283 | _MALLOC_UNLOCK(); |
413 | fprintf(stderr, "\nused:\t"); | 1284 | if (malloc_xmalloc && r == NULL) |
414 | for (i = 0; i < NBUCKETS; i++) { | 1285 | wrterror("out of memory\n"); |
415 | fprintf(stderr, " %d", nmalloc[i]); | 1286 | return (r); |
416 | totused += nmalloc[i] * (1 << (i + 3)); | 1287 | } |
417 | } | 1288 | |
418 | fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", | 1289 | void |
419 | totused, totfree); | 1290 | free(void *ptr) |
1291 | { | ||
1292 | _MALLOC_LOCK(); | ||
1293 | malloc_func = " in free():"; | ||
1294 | if (malloc_active++) { | ||
1295 | malloc_recurse(); | ||
1296 | return; | ||
1297 | } | ||
1298 | ifree(ptr); | ||
1299 | UTRACE(ptr, 0, 0); | ||
1300 | malloc_active--; | ||
1301 | _MALLOC_UNLOCK(); | ||
1302 | return; | ||
1303 | } | ||
1304 | |||
1305 | void * | ||
1306 | realloc(void *ptr, size_t size) | ||
1307 | { | ||
1308 | void *r; | ||
1309 | |||
1310 | _MALLOC_LOCK(); | ||
1311 | malloc_func = " in realloc():"; | ||
1312 | if (malloc_active++) { | ||
1313 | malloc_recurse(); | ||
1314 | return (NULL); | ||
1315 | } | ||
1316 | if (ptr == NULL) { | ||
1317 | r = imalloc(size); | ||
1318 | } else { | ||
1319 | r = irealloc(ptr, size); | ||
1320 | } | ||
1321 | UTRACE(ptr, size, r); | ||
1322 | malloc_active--; | ||
1323 | _MALLOC_UNLOCK(); | ||
1324 | if (malloc_xmalloc && r == NULL) | ||
1325 | wrterror("out of memory\n"); | ||
1326 | return (r); | ||
420 | } | 1327 | } |
421 | #endif | ||