author      tholo <>    1996-08-02 18:08:09 +0000
committer   tholo <>    1996-08-02 18:08:09 +0000
commit      ad6b9b4b5b85ac52952082177cd5495d10409382 (patch)
tree        8afe95aa32a291a80aeb9f2e37cefd2ff2b8f327 /src
parent      f16e645fccbb65fe48490c8afd7515b0ad5f31ee (diff)
download    openbsd-ad6b9b4b5b85ac52952082177cd5495d10409382.tar.gz
            openbsd-ad6b9b4b5b85ac52952082177cd5495d10409382.tar.bz2
            openbsd-ad6b9b4b5b85ac52952082177cd5495d10409382.zip
malloc(3) implementation from FreeBSD; uses mmap(2) to get memory
Diffstat (limited to 'src')
-rw-r--r--    src/lib/libc/stdlib/malloc.c    1523
1 file changed, 1158 insertions, 365 deletions
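
The new allocator obtains its memory from the kernel rather than relying purely on sbrk(3) bookkeeping: its page directory is created with an anonymous, private mmap(2), and the arena itself is grown page-at-a-time. As a minimal sketch (not part of the commit), the following standalone program shows the same mmap(2) idiom the diff below uses when allocating the page directory; the error check mirrors the diff's comparison against -1 (MAP_FAILED):

    /*
     * Sketch only: grab one page of anonymous, private memory from the
     * kernel, the way the imported malloc does for its page directory.
     */
    #include <sys/types.h>
    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            size_t len = getpagesize();     /* one page, like malloc_pagesize */
            void *p = mmap(0, len, PROT_READ|PROT_WRITE,
                MAP_ANON|MAP_PRIVATE, -1, 0);

            if (p == MAP_FAILED) {          /* the diff tests against (void *)-1 */
                    perror("mmap");
                    return 1;
            }
            printf("got %zu bytes at %p\n", len, p);
            munmap(p, len);
            return 0;
    }
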
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 612759d9b2..38938f96cf 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,430 +1,1223 @@
1 | /* $NetBSD: malloc.c,v 1.6 1996/01/17 02:45:25 jtc Exp $ */ | ||
2 | |||
3 | /* | 1 | /* |
4 | * Copyright (c) 1983 Regents of the University of California. | 2 | * ---------------------------------------------------------------------------- |
5 | * All rights reserved. | 3 | * "THE BEER-WARE LICENSE" (Revision 42): |
4 | * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you | ||
5 | * can do whatever you want with this stuff. If we meet some day, and you think | ||
6 | * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp | ||
7 | * ---------------------------------------------------------------------------- | ||
6 | * | 8 | * |
7 | * Redistribution and use in source and binary forms, with or without | 9 | * $Id: malloc.c,v 1.4 1996/08/02 18:08:09 tholo Exp $ |
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. All advertising materials mentioning features or use of this software | ||
16 | * must display the following acknowledgement: | ||
17 | * This product includes software developed by the University of | ||
18 | * California, Berkeley and its contributors. | ||
19 | * 4. Neither the name of the University nor the names of its contributors | ||
20 | * may be used to endorse or promote products derived from this software | ||
21 | * without specific prior written permission. | ||
22 | * | 10 | * |
23 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | 11 | */ |
24 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 12 | |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 13 | /* |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 14 | * Defining EXTRA_SANITY will enable some checks which are related |
27 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 15 | * to internal conditions and consistency in malloc.c |
28 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 16 | */ |
29 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 17 | #undef EXTRA_SANITY |
30 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 18 | |
31 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 19 | /* |
32 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 20 | * Defining MALLOC_STATS will enable you to call malloc_dump() and set |
33 | * SUCH DAMAGE. | 21 | * the [dD] options in the MALLOC_OPTIONS environment variable. |
34 | */ | 22 | * It has no run-time performance hit. |
35 | 23 | */ | |
36 | #if defined(LIBC_SCCS) && !defined(lint) | 24 | #define MALLOC_STATS |
37 | #if 0 | 25 | |
38 | static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91"; | 26 | #if defined(EXTRA_SANITY) && !defined(MALLOC_STATS) |
39 | #else | 27 | # define MALLOC_STATS /* required for EXTRA_SANITY */ |
40 | static char *rcsid = "$NetBSD: malloc.c,v 1.6 1996/01/17 02:45:25 jtc Exp $"; | ||
41 | #endif | 28 | #endif |
42 | #endif /* LIBC_SCCS and not lint */ | ||
43 | 29 | ||
44 | /* | 30 | /* |
45 | * malloc.c (Caltech) 2/21/82 | 31 | * What to use for Junk |
46 | * Chris Kingsley, kingsley@cit-20. | ||
47 | * | ||
48 | * This is a very fast storage allocator. It allocates blocks of a small | ||
49 | * number of different sizes, and keeps free lists of each size. Blocks that | ||
50 | * don't exactly fit are passed up to the next larger size. In this | ||
51 | * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. | ||
52 | * This is designed for use in a virtual memory environment. | ||
53 | */ | 32 | */ |
33 | #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ | ||
54 | 34 | ||
55 | #include <sys/types.h> | 35 | #include <stdio.h> |
56 | #include <stdlib.h> | 36 | #include <stdlib.h> |
57 | #include <string.h> | ||
58 | #include <unistd.h> | 37 | #include <unistd.h> |
59 | 38 | #include <memory.h> | |
60 | #define NULL 0 | 39 | #include <errno.h> |
61 | 40 | #include <err.h> | |
62 | /* | 41 | #include <sys/types.h> |
63 | * The overhead on a block is at least 4 bytes. When free, this space | 42 | #include <sys/param.h> |
64 | * contains a pointer to the next free block, and the bottom two bits must | 43 | #include <sys/mman.h> |
65 | * be zero. When in use, the first byte is set to MAGIC, and the second | 44 | #ifdef _THREAD_SAFE |
66 | * byte is the size index. The remaining bytes are for alignment. | 45 | #include <pthread.h> |
67 | * If range checking is enabled then a second word holds the size of the | 46 | #include "pthread_private.h" |
68 | * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). | ||
69 | * The order of elements is critical: ov_magic must overlay the low order | ||
70 | * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. | ||
71 | */ | ||
72 | union overhead { | ||
73 | union overhead *ov_next; /* when free */ | ||
74 | struct { | ||
75 | u_char ovu_magic; /* magic number */ | ||
76 | u_char ovu_index; /* bucket # */ | ||
77 | #ifdef RCHECK | ||
78 | u_short ovu_rmagic; /* range magic number */ | ||
79 | u_long ovu_size; /* actual block size */ | ||
80 | #endif | 47 | #endif |
81 | } ovu; | ||
82 | #define ov_magic ovu.ovu_magic | ||
83 | #define ov_index ovu.ovu_index | ||
84 | #define ov_rmagic ovu.ovu_rmagic | ||
85 | #define ov_size ovu.ovu_size | ||
86 | }; | ||
87 | 48 | ||
88 | static void morecore __P((int)); | 49 | /* |
89 | static int findbucket __P((union overhead *, int)); | 50 | * If these weren't defined here, they would be calculated on the fly, |
51 | * at a considerable cost in performance. | ||
52 | */ | ||
53 | #ifdef __OpenBSD__ | ||
54 | # if defined(__alpha__) || defined(__m68k__) || defined(__mips__) || \ | ||
55 | defined(__i386__) || defined(__m88k__) || defined(__ns32k__) || \ | ||
56 | defined(__vax__) | ||
57 | # define malloc_pagesize (NBPG) | ||
58 | # define malloc_pageshift (PGSHIFT) | ||
59 | # define malloc_maxsize (malloc_pagesize >> 1) | ||
60 | # define malloc_minsize 16U | ||
61 | # endif /* __i386__ */ | ||
62 | #endif /* __OpenBSD__ */ | ||
90 | 63 | ||
91 | #define MAGIC 0xef /* magic # on accounting info */ | 64 | /* |
92 | #define RMAGIC 0x5555 /* magic # on range info */ | 65 | * This structure describes a page worth of chunks. |
66 | */ | ||
93 | 67 | ||
94 | #ifdef RCHECK | 68 | struct pginfo { |
95 | #define RSLOP sizeof (u_short) | 69 | struct pginfo *next; /* next on the free list */ |
96 | #else | 70 | void *page; /* Pointer to the page */ |
97 | #define RSLOP 0 | 71 | u_short size; /* size of this page's chunks */ |
98 | #endif | 72 | u_short shift; /* How far to shift for this size chunks */ |
73 | u_short free; /* How many free chunks */ | ||
74 | u_short total; /* How many chunk */ | ||
75 | u_long bits[1]; /* Which chunks are free */ | ||
76 | }; | ||
99 | 77 | ||
100 | /* | 78 | /* |
101 | * nextf[i] is the pointer to the next free block of size 2^(i+3). The | 79 | * This structure describes a number of free pages. |
102 | * smallest allocatable block is 8 bytes. The overhead information | ||
103 | * precedes the data area returned to the user. | ||
104 | */ | 80 | */ |
105 | #define NBUCKETS 30 | ||
106 | static union overhead *nextf[NBUCKETS]; | ||
107 | extern char *sbrk(); | ||
108 | 81 | ||
109 | static int pagesz; /* page size */ | 82 | struct pgfree { |
110 | static int pagebucket; /* page size bucket */ | 83 | struct pgfree *next; /* next run of free pages */ |
84 | struct pgfree *prev; /* prev run of free pages */ | ||
85 | void *page; /* pointer to free pages */ | ||
86 | void *end; /* pointer to end of free pages */ | ||
87 | u_long size; /* number of bytes free */ | ||
88 | }; | ||
111 | 89 | ||
112 | #ifdef MSTATS | ||
113 | /* | 90 | /* |
114 | * nmalloc[i] is the difference between the number of mallocs and frees | 91 | * How many bits per u_long in the bitmap. |
115 | * for a given block size. | 92 | * Change only if not 8 bits/byte |
116 | */ | 93 | */ |
117 | static u_int nmalloc[NBUCKETS]; | 94 | #define MALLOC_BITS (8*sizeof(u_long)) |
118 | #include <stdio.h> | ||
119 | #endif | ||
120 | 95 | ||
121 | #if defined(DEBUG) || defined(RCHECK) | 96 | /* |
122 | #define ASSERT(p) if (!(p)) botch("p") | 97 | * Magic values to put in the page_directory |
123 | #include <stdio.h> | 98 | */ |
124 | static | 99 | #define MALLOC_NOT_MINE ((struct pginfo*) 0) |
125 | botch(s) | 100 | #define MALLOC_FREE ((struct pginfo*) 1) |
126 | char *s; | 101 | #define MALLOC_FIRST ((struct pginfo*) 2) |
102 | #define MALLOC_FOLLOW ((struct pginfo*) 3) | ||
103 | #define MALLOC_MAGIC ((struct pginfo*) 4) | ||
104 | |||
105 | /* | ||
106 | * The i386 architecture has some very convenient instructions. | ||
107 | * We might as well use them. There are C-language backups, but | ||
108 | * they are considerably slower. | ||
109 | */ | ||
110 | #ifdef __i386__ | ||
111 | #define ffs _ffs | ||
112 | static __inline int | ||
113 | _ffs(unsigned input) | ||
127 | { | 114 | { |
128 | fprintf(stderr, "\r\nassertion botched: %s\r\n", s); | 115 | int result; |
129 | (void) fflush(stderr); /* just in case user buffered it */ | 116 | asm("bsfl %1,%0" : "=r" (result) : "r" (input)); |
130 | abort(); | 117 | return result+1; |
131 | } | 118 | } |
132 | #else | ||
133 | #define ASSERT(p) | ||
134 | #endif | ||
135 | 119 | ||
136 | void * | 120 | #define fls _fls |
137 | malloc(nbytes) | 121 | static __inline int |
138 | size_t nbytes; | 122 | _fls(unsigned input) |
123 | { | ||
124 | int result; | ||
125 | asm("bsrl %1,%0" : "=r" (result) : "r" (input)); | ||
126 | return result+1; | ||
127 | } | ||
128 | |||
129 | #define set_bit _set_bit | ||
130 | static __inline void | ||
131 | _set_bit(struct pginfo *pi, int bit) | ||
139 | { | 132 | { |
140 | register union overhead *op; | 133 | asm("btsl %0,(%1)" : |
141 | register long bucket, n; | 134 | : "r" (bit & (MALLOC_BITS-1)), "r" (pi->bits+(bit/MALLOC_BITS))); |
142 | register unsigned amt; | 135 | } |
143 | 136 | ||
144 | /* | 137 | #define clr_bit _clr_bit |
145 | * First time malloc is called, setup page size and | 138 | static __inline void |
146 | * align break pointer so all data will be page aligned. | 139 | _clr_bit(struct pginfo *pi, int bit) |
147 | */ | 140 | { |
148 | if (pagesz == 0) { | 141 | asm("btcl %0,(%1)" : |
149 | pagesz = n = getpagesize(); | 142 | : "r" (bit & (MALLOC_BITS-1)), "r" (pi->bits+(bit/MALLOC_BITS))); |
150 | op = (union overhead *)sbrk(0); | 143 | } |
151 | n = n - sizeof (*op) - ((long)op & (n - 1)); | 144 | |
152 | if (n < 0) | 145 | #endif /* __i386__ */ |
153 | n += pagesz; | 146 | |
154 | if (n) { | 147 | /* |
155 | if (sbrk(n) == (char *)-1) | 148 | * Set to one when malloc_init has been called |
156 | return (NULL); | 149 | */ |
157 | } | 150 | static unsigned initialized; |
158 | bucket = 0; | 151 | |
159 | amt = 8; | 152 | /* |
160 | while (pagesz > amt) { | 153 | * The size of a page. |
161 | amt <<= 1; | 154 | * Must be a integral multiplum of the granularity of mmap(2). |
162 | bucket++; | 155 | * Your toes will curl if it isn't a power of two |
163 | } | 156 | */ |
164 | pagebucket = bucket; | 157 | #ifndef malloc_pagesize |
165 | } | 158 | static unsigned malloc_pagesize; |
166 | /* | 159 | #endif /* malloc_pagesize */ |
167 | * Convert amount of memory requested into closest block size | 160 | |
168 | * stored in hash buckets which satisfies request. | 161 | /* |
169 | * Account for space used per block for accounting. | 162 | * A mask for the offset inside a page. |
170 | */ | 163 | */ |
171 | if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { | 164 | #define malloc_pagemask ((malloc_pagesize)-1) |
172 | #ifndef RCHECK | 165 | |
173 | amt = 8; /* size of first bucket */ | 166 | #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) |
174 | bucket = 0; | 167 | #define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo) |
175 | #else | 168 | |
176 | amt = 16; /* size of first bucket */ | 169 | /* |
177 | bucket = 1; | 170 | * malloc_pagesize == 1 << malloc_pageshift |
178 | #endif | 171 | */ |
179 | n = -((long)sizeof (*op) + RSLOP); | 172 | #ifndef malloc_pageshift |
173 | static unsigned malloc_pageshift; | ||
174 | #endif /* malloc_pageshift */ | ||
175 | |||
176 | /* | ||
177 | * The smallest allocation we bother about. | ||
178 | * Must be power of two | ||
179 | */ | ||
180 | #ifndef malloc_minsize | ||
181 | static unsigned malloc_minsize; | ||
182 | #endif /* malloc_minsize */ | ||
183 | |||
184 | /* | ||
185 | * The largest chunk we care about. | ||
186 | * Must be smaller than pagesize | ||
187 | * Must be power of two | ||
188 | */ | ||
189 | #ifndef malloc_maxsize | ||
190 | static unsigned malloc_maxsize; | ||
191 | #endif /* malloc_maxsize */ | ||
192 | |||
193 | /* | ||
194 | * The minimum size (in bytes) of the free page cache. | ||
195 | */ | ||
196 | #ifndef malloc_cache | ||
197 | static unsigned malloc_cache; | ||
198 | #endif /* malloc_cache */ | ||
199 | |||
200 | /* | ||
201 | * The offset from pagenumber to index into the page directory | ||
202 | */ | ||
203 | static u_long malloc_origo; | ||
204 | |||
205 | /* | ||
206 | * The last index in the page directory we care about | ||
207 | */ | ||
208 | static u_long last_index; | ||
209 | |||
210 | /* | ||
211 | * Pointer to page directory. | ||
212 | * Allocated "as if with" malloc | ||
213 | */ | ||
214 | static struct pginfo **page_dir; | ||
215 | |||
216 | /* | ||
217 | * How many slots in the page directory | ||
218 | */ | ||
219 | static unsigned malloc_ninfo; | ||
220 | |||
221 | /* | ||
222 | * Free pages line up here | ||
223 | */ | ||
224 | static struct pgfree free_list; | ||
225 | |||
226 | /* | ||
227 | * Abort() if we fail to get VM ? | ||
228 | */ | ||
229 | static int malloc_abort; | ||
230 | |||
231 | /* | ||
232 | * Are we trying to die ? | ||
233 | */ | ||
234 | static int suicide; | ||
235 | |||
236 | #ifdef MALLOC_STATS | ||
237 | /* | ||
238 | * dump statistics | ||
239 | */ | ||
240 | static int malloc_stats; | ||
241 | #endif /* MALLOC_STATS */ | ||
242 | |||
243 | /* | ||
244 | * always realloc ? | ||
245 | */ | ||
246 | static int malloc_realloc; | ||
247 | |||
248 | /* | ||
249 | * zero fill ? | ||
250 | */ | ||
251 | static int malloc_zero; | ||
252 | |||
253 | /* | ||
254 | * junk fill ? | ||
255 | */ | ||
256 | static int malloc_junk; | ||
257 | |||
258 | /* | ||
259 | * my last break. | ||
260 | */ | ||
261 | static void *malloc_brk; | ||
262 | |||
263 | /* | ||
264 | * one location cache for free-list holders | ||
265 | */ | ||
266 | static struct pgfree *px; | ||
267 | |||
268 | /* | ||
269 | * Necessary function declarations | ||
270 | */ | ||
271 | static int extend_pgdir(u_long index); | ||
272 | |||
273 | #ifdef MALLOC_STATS | ||
274 | void | ||
275 | malloc_dump(FILE *fd) | ||
276 | { | ||
277 | struct pginfo **pd; | ||
278 | struct pgfree *pf; | ||
279 | int j; | ||
280 | |||
281 | pd = page_dir; | ||
282 | |||
283 | /* print out all the pages */ | ||
284 | for(j=0;j<=last_index;j++) { | ||
285 | fprintf(fd,"%08lx %5d ",(j+malloc_origo) << malloc_pageshift,j); | ||
286 | if (pd[j] == MALLOC_NOT_MINE) { | ||
287 | for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++) | ||
288 | ; | ||
289 | j--; | ||
290 | fprintf(fd,".. %5d not mine\n", j); | ||
291 | } else if (pd[j] == MALLOC_FREE) { | ||
292 | for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++) | ||
293 | ; | ||
294 | j--; | ||
295 | fprintf(fd,".. %5d free\n", j); | ||
296 | } else if (pd[j] == MALLOC_FIRST) { | ||
297 | for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++) | ||
298 | ; | ||
299 | j--; | ||
300 | fprintf(fd,".. %5d in use\n", j); | ||
301 | } else if (pd[j] < MALLOC_MAGIC) { | ||
302 | fprintf(fd,"(%p)\n", pd[j]); | ||
180 | } else { | 303 | } else { |
181 | amt = pagesz; | 304 | fprintf(fd,"%p %d (of %d) x %d @ %p --> %p\n", |
182 | bucket = pagebucket; | 305 | pd[j],pd[j]->free, pd[j]->total, |
183 | } | 306 | pd[j]->size, pd[j]->page, pd[j]->next); |
184 | while (nbytes > amt + n) { | ||
185 | amt <<= 1; | ||
186 | if (amt == 0) | ||
187 | return (NULL); | ||
188 | bucket++; | ||
189 | } | 307 | } |
190 | /* | 308 | } |
191 | * If nothing in hash bucket right now, | 309 | |
192 | * request more memory from the system. | 310 | for(pf=free_list.next; pf; pf=pf->next) { |
193 | */ | 311 | fprintf(fd,"Free: @%p [%p...%p[ %ld ->%p <-%p\n", |
194 | if ((op = nextf[bucket]) == NULL) { | 312 | pf,pf->page,pf->end,pf->size,pf->prev,pf->next); |
195 | morecore(bucket); | 313 | if (pf == pf->next) { |
196 | if ((op = nextf[bucket]) == NULL) | 314 | fprintf(fd,"Free_list loops.\n"); |
197 | return (NULL); | 315 | break; |
198 | } | 316 | } |
199 | /* remove from linked list */ | 317 | } |
200 | nextf[bucket] = op->ov_next; | 318 | |
201 | op->ov_magic = MAGIC; | 319 | /* print out various info */ |
202 | op->ov_index = bucket; | 320 | fprintf(fd,"Minsize\t%d\n",malloc_minsize); |
203 | #ifdef MSTATS | 321 | fprintf(fd,"Maxsize\t%d\n",malloc_maxsize); |
204 | nmalloc[bucket]++; | 322 | fprintf(fd,"Pagesize\t%d\n",malloc_pagesize); |
205 | #endif | 323 | fprintf(fd,"Pageshift\t%d\n",malloc_pageshift); |
206 | #ifdef RCHECK | 324 | fprintf(fd,"FirstPage\t%ld\n",malloc_origo); |
207 | /* | 325 | fprintf(fd,"LastPage\t%ld %lx\n",last_index+malloc_pageshift, |
208 | * Record allocated size of block and | 326 | (last_index + malloc_pageshift) << malloc_pageshift); |
209 | * bound space with magic numbers. | 327 | fprintf(fd,"Break\t%ld\n",(u_long)sbrk(0) >> malloc_pageshift); |
210 | */ | 328 | } |
211 | op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); | 329 | #endif /* MALLOC_STATS */ |
212 | op->ov_rmagic = RMAGIC; | 330 | |
213 | *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; | 331 | static void |
214 | #endif | 332 | wrterror(char *p) |
215 | return ((char *)(op + 1)); | 333 | { |
334 | char *q = "Malloc error: "; | ||
335 | suicide = 1; | ||
336 | write(2,q,strlen(q)); | ||
337 | write(2,p,strlen(p)); | ||
338 | #ifdef MALLOC_STATS | ||
339 | if (malloc_stats) | ||
340 | malloc_dump(stderr); | ||
341 | #endif /* MALLOC_STATS */ | ||
342 | abort(); | ||
343 | } | ||
344 | |||
345 | static void | ||
346 | wrtwarning(char *p) | ||
347 | { | ||
348 | char *q = "Malloc warning: "; | ||
349 | if (malloc_abort) | ||
350 | wrterror(p); | ||
351 | write(2,q,strlen(q)); | ||
352 | write(2,p,strlen(p)); | ||
353 | } | ||
354 | |||
355 | #ifdef EXTRA_SANITY | ||
356 | static void | ||
357 | malloc_exit() | ||
358 | { | ||
359 | FILE *fd = fopen("malloc.out","a"); | ||
360 | char *q = "malloc() warning: Couldn't dump stats.\n"; | ||
361 | if (fd) { | ||
362 | malloc_dump(fd); | ||
363 | fclose(fd); | ||
364 | } else | ||
365 | write(2,q,strlen(q)); | ||
366 | } | ||
367 | #endif /* EXTRA_SANITY */ | ||
368 | |||
369 | |||
370 | /* | ||
371 | * Allocate a number of pages from the OS | ||
372 | */ | ||
373 | static caddr_t | ||
374 | map_pages(int pages) | ||
375 | { | ||
376 | caddr_t result,tail; | ||
377 | |||
378 | result = (caddr_t)pageround((u_long)sbrk(0)); | ||
379 | tail = result + (pages << malloc_pageshift); | ||
380 | |||
381 | if (brk(tail)) { | ||
382 | #ifdef EXTRA_SANITY | ||
383 | wrterror("(internal): map_pages fails\n"); | ||
384 | #endif /* EXTRA_SANITY */ | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | last_index = ptr2index(tail) - 1; | ||
389 | malloc_brk = tail; | ||
390 | |||
391 | if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index)) | ||
392 | return 0;; | ||
393 | |||
394 | return result; | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * Set a bit in the bitmap | ||
399 | */ | ||
400 | #ifndef set_bit | ||
401 | static __inline void | ||
402 | set_bit(struct pginfo *pi, int bit) | ||
403 | { | ||
404 | pi->bits[bit/MALLOC_BITS] |= 1<<(bit%MALLOC_BITS); | ||
405 | } | ||
406 | #endif /* set_bit */ | ||
407 | |||
408 | /* | ||
409 | * Clear a bit in the bitmap | ||
410 | */ | ||
411 | #ifndef clr_bit | ||
412 | static __inline void | ||
413 | clr_bit(struct pginfo *pi, int bit) | ||
414 | { | ||
415 | pi->bits[bit/MALLOC_BITS] &= ~(1<<(bit%MALLOC_BITS)); | ||
416 | } | ||
417 | #endif /* clr_bit */ | ||
418 | |||
419 | #ifndef tst_bit | ||
420 | /* | ||
421 | * Test a bit in the bitmap | ||
422 | */ | ||
423 | static __inline int | ||
424 | tst_bit(struct pginfo *pi, int bit) | ||
425 | { | ||
426 | return pi->bits[bit/MALLOC_BITS] & (1<<(bit%MALLOC_BITS)); | ||
427 | } | ||
428 | #endif /* tst_bit */ | ||
429 | |||
430 | /* | ||
431 | * Find last bit | ||
432 | */ | ||
433 | #ifndef fls | ||
434 | static __inline int | ||
435 | fls(int size) | ||
436 | { | ||
437 | int i = 1; | ||
438 | while (size >>= 1) | ||
439 | i++; | ||
440 | return i; | ||
216 | } | 441 | } |
442 | #endif /* fls */ | ||
217 | 443 | ||
218 | /* | 444 | /* |
219 | * Allocate more memory to the indicated bucket. | 445 | * Extend page directory |
446 | */ | ||
447 | static int | ||
448 | extend_pgdir(u_long index) | ||
449 | { | ||
450 | struct pginfo **new,**old; | ||
451 | int i, oldlen; | ||
452 | |||
453 | /* Make it this many pages */ | ||
454 | i = index * sizeof *page_dir; | ||
455 | i /= malloc_pagesize; | ||
456 | i += 2; | ||
457 | |||
458 | /* remember the old mapping size */ | ||
459 | oldlen = malloc_ninfo * sizeof *page_dir; | ||
460 | |||
461 | /* | ||
462 | * NOTE: we allocate new pages and copy the directory rather than tempt | ||
463 | * fate by trying to "grow" the region.. There is nothing to prevent | ||
464 | * us from accidently re-mapping space that's been allocated by our caller | ||
465 | * via dlopen() or other mmap(). | ||
466 | * | ||
467 | * The copy problem is not too bad, as there is 4K of page index per | ||
468 | * 4MB of malloc arena. | ||
469 | * | ||
470 | * We can totally avoid the copy if we open a file descriptor to associate | ||
471 | * the anon mappings with. Then, when we remap the pages at the new | ||
472 | * address, the old pages will be "magically" remapped.. But this means | ||
473 | * keeping open a "secret" file descriptor..... | ||
474 | */ | ||
475 | |||
476 | /* Get new pages */ | ||
477 | new = (struct pginfo**) mmap(0, i * malloc_pagesize, PROT_READ|PROT_WRITE, | ||
478 | MAP_ANON|MAP_PRIVATE, -1, 0); | ||
479 | if (new == (struct pginfo **)-1) | ||
480 | return 0; | ||
481 | |||
482 | /* Copy the old stuff */ | ||
483 | memcpy(new, page_dir, | ||
484 | malloc_ninfo * sizeof *page_dir); | ||
485 | |||
486 | /* register the new size */ | ||
487 | malloc_ninfo = i * malloc_pagesize / sizeof *page_dir; | ||
488 | |||
489 | /* swap the pointers */ | ||
490 | old = page_dir; | ||
491 | page_dir = new; | ||
492 | |||
493 | /* Now free the old stuff */ | ||
494 | munmap((caddr_t)old, oldlen); | ||
495 | return 1; | ||
496 | } | ||
497 | |||
498 | /* | ||
499 | * Initialize the world | ||
220 | */ | 500 | */ |
221 | static void | 501 | static void |
222 | morecore(bucket) | 502 | malloc_init () |
223 | int bucket; | ||
224 | { | 503 | { |
225 | register union overhead *op; | 504 | char *p; |
226 | register long sz; /* size of desired block */ | ||
227 | long amt; /* amount to allocate */ | ||
228 | int nblks; /* how many blocks we get */ | ||
229 | 505 | ||
230 | /* | 506 | #ifdef EXTRA_SANITY |
231 | * sbrk_size <= 0 only for big, FLUFFY, requests (about | 507 | malloc_junk = 1; |
232 | * 2^30 bytes on a VAX, I think) or for a negative arg. | 508 | #endif /* EXTRA_SANITY */ |
233 | */ | 509 | |
234 | sz = 1 << (bucket + 3); | 510 | for (p=getenv("MALLOC_OPTIONS"); p && *p; p++) { |
235 | #ifdef DEBUG | 511 | switch (*p) { |
236 | ASSERT(sz > 0); | 512 | case 'a': malloc_abort = 0; break; |
237 | #else | 513 | case 'A': malloc_abort = 1; break; |
238 | if (sz <= 0) | 514 | #ifdef MALLOC_STATS |
239 | return; | 515 | case 'd': malloc_stats = 0; break; |
240 | #endif | 516 | case 'D': malloc_stats = 1; break; |
241 | if (sz < pagesz) { | 517 | #endif /* MALLOC_STATS */ |
242 | amt = pagesz; | 518 | case 'r': malloc_realloc = 0; break; |
243 | nblks = amt / sz; | 519 | case 'R': malloc_realloc = 1; break; |
244 | } else { | 520 | case 'j': malloc_junk = 0; break; |
245 | amt = sz + pagesz; | 521 | case 'J': malloc_junk = 1; break; |
246 | nblks = 1; | 522 | case 'z': malloc_zero = 0; break; |
523 | case 'Z': malloc_zero = 1; break; | ||
524 | default: | ||
525 | wrtwarning("(Init): Unknown char in MALLOC_OPTIONS\n"); | ||
526 | p = 0; | ||
527 | break; | ||
247 | } | 528 | } |
248 | op = (union overhead *)sbrk(amt); | 529 | } |
249 | /* no more room! */ | 530 | |
250 | if ((long)op == -1) | 531 | /* |
251 | return; | 532 | * We want junk in the entire allocation, and zero only in the part |
252 | /* | 533 | * the user asked for. |
253 | * Add new memory allocated to that on | 534 | */ |
254 | * free list for this hash bucket. | 535 | if (malloc_zero) |
255 | */ | 536 | malloc_junk=1; |
256 | nextf[bucket] = op; | 537 | |
257 | while (--nblks > 0) { | 538 | #ifdef EXTRA_SANITY |
258 | op->ov_next = (union overhead *)((caddr_t)op + sz); | 539 | if (malloc_stats) |
259 | op = (union overhead *)((caddr_t)op + sz); | 540 | atexit(malloc_exit); |
260 | } | 541 | #endif /* EXTRA_SANITY */ |
542 | |||
543 | #ifndef malloc_pagesize | ||
544 | /* determine our pagesize */ | ||
545 | malloc_pagesize = getpagesize(); | ||
546 | #endif /* malloc_pagesize */ | ||
547 | |||
548 | #ifndef malloc_maxsize | ||
549 | malloc_maxsize = malloc_pagesize >> 1; | ||
550 | #endif /* malloc_maxsize */ | ||
551 | |||
552 | #ifndef malloc_pageshift | ||
553 | { | ||
554 | int i; | ||
555 | /* determine how much we shift by to get there */ | ||
556 | for (i = malloc_pagesize; i > 1; i >>= 1) | ||
557 | malloc_pageshift++; | ||
558 | } | ||
559 | #endif /* malloc_pageshift */ | ||
560 | |||
561 | #ifndef malloc_cache | ||
562 | malloc_cache = 100 << malloc_pageshift; | ||
563 | #endif /* malloc_cache */ | ||
564 | |||
565 | #ifndef malloc_minsize | ||
566 | { | ||
567 | int i; | ||
568 | /* | ||
569 | * find the smallest size allocation we will bother about. | ||
570 | * this is determined as the smallest allocation that can hold | ||
571 | * it's own pginfo; | ||
572 | */ | ||
573 | i = 2; | ||
574 | for(;;) { | ||
575 | int j; | ||
576 | |||
577 | /* Figure out the size of the bits */ | ||
578 | j = malloc_pagesize/i; | ||
579 | j /= 8; | ||
580 | if (j < sizeof(u_long)) | ||
581 | j = sizeof (u_long); | ||
582 | if (sizeof(struct pginfo) + j - sizeof (u_long) <= i) | ||
583 | break; | ||
584 | i += i; | ||
585 | } | ||
586 | malloc_minsize = i; | ||
587 | } | ||
588 | #endif /* malloc_minsize */ | ||
589 | |||
590 | /* Allocate one page for the page directory */ | ||
591 | page_dir = (struct pginfo **) mmap(0, malloc_pagesize, PROT_READ|PROT_WRITE, | ||
592 | MAP_ANON|MAP_PRIVATE, -1, 0); | ||
593 | if (page_dir == (struct pginfo **) -1) | ||
594 | wrterror("(Init) my first mmap failed. (check limits ?)\n"); | ||
595 | |||
596 | /* | ||
597 | * We need a maximum of malloc_pageshift buckets, steal these from the | ||
598 | * front of the page_directory; | ||
599 | */ | ||
600 | malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift; | ||
601 | malloc_origo -= malloc_pageshift; | ||
602 | |||
603 | malloc_ninfo = malloc_pagesize / sizeof *page_dir; | ||
604 | |||
605 | /* Been here, done that */ | ||
606 | initialized++; | ||
607 | |||
608 | /* | ||
609 | * This is a nice hack from Kaleb Keithly (kaleb@x.org). | ||
610 | * We can sbrk(2) further back when we keep this on a low address. | ||
611 | */ | ||
612 | px = (struct pgfree *) malloc (sizeof *px); | ||
261 | } | 613 | } |
262 | 614 | ||
263 | void | 615 | /* |
264 | free(cp) | 616 | * Allocate a number of complete pages |
265 | void *cp; | 617 | */ |
266 | { | 618 | void * |
267 | register long size; | 619 | malloc_pages(size_t size) |
268 | register union overhead *op; | 620 | { |
269 | 621 | void *p,*delay_free = 0; | |
270 | if (cp == NULL) | 622 | int i; |
271 | return; | 623 | struct pgfree *pf; |
272 | op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); | 624 | u_long index; |
273 | #ifdef DEBUG | 625 | |
274 | ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ | 626 | size = pageround(size); |
275 | #else | 627 | |
276 | if (op->ov_magic != MAGIC) | 628 | p = 0; |
277 | return; /* sanity */ | 629 | /* Look for free pages before asking for more */ |
630 | for(pf = free_list.next; pf; pf = pf->next) { | ||
631 | |||
632 | #ifdef EXTRA_SANITY | ||
633 | if (pf->size & malloc_pagemask) | ||
634 | wrterror("(ES): junk length entry on free_list\n"); | ||
635 | if (!pf->size) | ||
636 | wrterror("(ES): zero length entry on free_list\n"); | ||
637 | if (pf->page == pf->end) | ||
638 | wrterror("(ES): zero entry on free_list\n"); | ||
639 | if (pf->page > pf->end) | ||
640 | wrterror("(ES): sick entry on free_list\n"); | ||
641 | if ((void*)pf->page >= (void*)sbrk(0)) | ||
642 | wrterror("(ES): entry on free_list past brk\n"); | ||
643 | if (page_dir[ptr2index(pf->page)] != MALLOC_FREE) | ||
644 | wrterror("(ES): non-free first page on free-list\n"); | ||
645 | if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE) | ||
646 | wrterror("(ES): non-free last page on free-list\n"); | ||
647 | #endif /* EXTRA_SANITY */ | ||
648 | |||
649 | if (pf->size < size) | ||
650 | continue; | ||
651 | |||
652 | if (pf->size == size) { | ||
653 | p = pf->page; | ||
654 | if (pf->next) | ||
655 | pf->next->prev = pf->prev; | ||
656 | pf->prev->next = pf->next; | ||
657 | delay_free = pf; | ||
658 | break; | ||
659 | } | ||
660 | |||
661 | p = pf->page; | ||
662 | pf->page += size; | ||
663 | pf->size -= size; | ||
664 | break; | ||
665 | } | ||
666 | |||
667 | #ifdef EXTRA_SANITY | ||
668 | if (p && page_dir[ptr2index(p)] != MALLOC_FREE) | ||
669 | wrterror("(ES): allocated non-free page on free-list\n"); | ||
670 | #endif /* EXTRA_SANITY */ | ||
671 | |||
672 | size >>= malloc_pageshift; | ||
673 | |||
674 | /* Map new pages */ | ||
675 | if (!p) | ||
676 | p = map_pages(size); | ||
677 | |||
678 | if (p) { | ||
679 | |||
680 | index = ptr2index(p); | ||
681 | page_dir[index] = MALLOC_FIRST; | ||
682 | for (i=1;i<size;i++) | ||
683 | page_dir[index+i] = MALLOC_FOLLOW; | ||
684 | |||
685 | if (malloc_junk) | ||
686 | memset(p, SOME_JUNK,size << malloc_pageshift); | ||
687 | } | ||
688 | |||
689 | if (delay_free) { | ||
690 | if (!px) | ||
691 | px = delay_free; | ||
692 | else | ||
693 | free(delay_free); | ||
694 | } | ||
695 | |||
696 | return p; | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Allocate a page of fragments | ||
701 | */ | ||
702 | |||
703 | static __inline int | ||
704 | malloc_make_chunks(int bits) | ||
705 | { | ||
706 | struct pginfo *bp; | ||
707 | void *pp; | ||
708 | int i,k,l; | ||
709 | |||
710 | /* Allocate a new bucket */ | ||
711 | pp = malloc_pages(malloc_pagesize); | ||
712 | if (!pp) | ||
713 | return 0; | ||
714 | |||
715 | /* Find length of admin structure */ | ||
716 | l = sizeof *bp - sizeof(u_long); | ||
717 | l += sizeof(u_long) * | ||
718 | (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); | ||
719 | |||
720 | /* Don't waste more than two chunks on this */ | ||
721 | if ((1<<(bits)) <= l+l) { | ||
722 | bp = (struct pginfo *)pp; | ||
723 | } else { | ||
724 | bp = (struct pginfo *)malloc(l); | ||
725 | if (!bp) | ||
726 | return 0; | ||
727 | } | ||
728 | |||
729 | bp->size = (1<<bits); | ||
730 | bp->shift = bits; | ||
731 | bp->total = bp->free = malloc_pagesize >> bits; | ||
732 | bp->page = pp; | ||
733 | |||
734 | page_dir[ptr2index(pp)] = bp; | ||
735 | |||
736 | bp->next = page_dir[bits]; | ||
737 | page_dir[bits] = bp; | ||
738 | |||
739 | /* set all valid bits in the bits */ | ||
740 | k = bp->total; | ||
741 | i = 0; | ||
742 | |||
743 | /* Do a bunch at a time */ | ||
744 | for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) | ||
745 | bp->bits[i / MALLOC_BITS] = ~0; | ||
746 | |||
747 | for(; i < k; i++) | ||
748 | set_bit(bp,i); | ||
749 | |||
750 | if (bp == bp->page) { | ||
751 | /* Mark the ones we stole for ourselves */ | ||
752 | for(i=0;l > 0;i++) { | ||
753 | clr_bit(bp,i); | ||
754 | bp->free--; | ||
755 | bp->total--; | ||
756 | l -= (1 << bits); | ||
757 | } | ||
758 | } | ||
759 | |||
760 | return 1; | ||
761 | } | ||
762 | |||
763 | /* | ||
764 | * Allocate a fragment | ||
765 | */ | ||
766 | static void * | ||
767 | malloc_bytes(size_t size) | ||
768 | { | ||
769 | int j; | ||
770 | struct pginfo *bp; | ||
771 | int k; | ||
772 | u_long *lp; | ||
773 | |||
774 | /* Don't bother with anything less than this */ | ||
775 | if (size < malloc_minsize) | ||
776 | size = malloc_minsize; | ||
777 | |||
778 | /* Find the right bucket */ | ||
779 | j = fls((size)-1); | ||
780 | |||
781 | /* If it's empty, make a page more of that size chunks */ | ||
782 | if (!page_dir[j] && !malloc_make_chunks(j)) | ||
783 | return 0; | ||
784 | |||
785 | bp = page_dir[j]; | ||
786 | |||
787 | /* Find first word of bitmap which isn't empty */ | ||
788 | for (lp = bp->bits; !*lp; lp++) | ||
789 | ; | ||
790 | |||
791 | /* Find that bit, and tweak it */ | ||
792 | k = ffs(*lp) - 1; | ||
793 | *lp ^= 1<<k; | ||
794 | |||
795 | /* If there are no more free, remove from free-list */ | ||
796 | if (!--bp->free) { | ||
797 | page_dir[j] = bp->next; | ||
798 | bp->next = 0; | ||
799 | } | ||
800 | |||
801 | /* Adjust to the real offset of that chunk */ | ||
802 | k += (lp-bp->bits)*MALLOC_BITS; | ||
803 | k <<= bp->shift; | ||
804 | |||
805 | if (malloc_junk) | ||
806 | memset(bp->page + k, SOME_JUNK, bp->size); | ||
807 | |||
808 | return bp->page + k; | ||
809 | } | ||
810 | |||
811 | /* | ||
812 | * Allocate a piece of memory | ||
813 | */ | ||
814 | void * | ||
815 | malloc(size_t size) | ||
816 | { | ||
817 | void *result; | ||
818 | #ifdef _THREAD_SAFE | ||
819 | int status; | ||
278 | #endif | 820 | #endif |
279 | #ifdef RCHECK | 821 | |
280 | ASSERT(op->ov_rmagic == RMAGIC); | 822 | if (!initialized) |
281 | ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); | 823 | malloc_init(); |
824 | |||
825 | if (suicide) | ||
826 | abort(); | ||
827 | |||
828 | #ifdef _THREAD_SAFE | ||
829 | _thread_kern_sig_block(&status); | ||
282 | #endif | 830 | #endif |
283 | size = op->ov_index; | 831 | if (size <= malloc_maxsize) |
284 | ASSERT(size < NBUCKETS); | 832 | result = malloc_bytes(size); |
285 | op->ov_next = nextf[size]; /* also clobbers ov_magic */ | 833 | else |
286 | nextf[size] = op; | 834 | result = malloc_pages(size); |
287 | #ifdef MSTATS | 835 | |
288 | nmalloc[size]--; | 836 | if (malloc_abort && !result) |
837 | wrterror("malloc(): returns NULL\n"); | ||
838 | |||
839 | if (malloc_zero) | ||
840 | memset(result,0,size); | ||
841 | |||
842 | #ifdef _THREAD_SAFE | ||
843 | _thread_kern_sig_unblock(status); | ||
289 | #endif | 844 | #endif |
845 | return result; | ||
290 | } | 846 | } |
291 | 847 | ||
292 | /* | 848 | /* |
293 | * When a program attempts "storage compaction" as mentioned in the | 849 | * Change the size of an allocation. |
294 | * old malloc man page, it realloc's an already freed block. Usually | ||
295 | * this is the last block it freed; occasionally it might be farther | ||
296 | * back. We have to search all the free lists for the block in order | ||
297 | * to determine its bucket: 1st we make one pass thru the lists | ||
298 | * checking only the first block in each; if that fails we search | ||
299 | * ``realloc_srchlen'' blocks in each list for a match (the variable | ||
300 | * is extern so the caller can modify it). If that fails we just copy | ||
301 | * however many bytes was given to realloc() and hope it's not huge. | ||
302 | */ | 850 | */ |
303 | int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ | ||
304 | |||
305 | void * | 851 | void * |
306 | realloc(cp, nbytes) | 852 | realloc(void *ptr, size_t size) |
307 | void *cp; | 853 | { |
308 | size_t nbytes; | 854 | void *p; |
309 | { | 855 | u_long osize,index; |
310 | register u_long onb; | 856 | struct pginfo **mp; |
311 | register long i; | 857 | int i; |
312 | union overhead *op; | 858 | #ifdef _THREAD_SAFE |
313 | char *res; | 859 | int status; |
314 | int was_alloced = 0; | 860 | #endif |
315 | 861 | ||
316 | if (cp == NULL) | 862 | if (suicide) |
317 | return (malloc(nbytes)); | 863 | return 0; |
318 | if (nbytes == 0) { | 864 | |
319 | free (cp); | 865 | if (!ptr) /* Bounce to malloc() */ |
320 | return NULL; | 866 | return malloc(size); |
867 | |||
868 | if (!initialized) { | ||
869 | wrtwarning("realloc(): malloc() never got called.\n"); | ||
870 | return 0; | ||
871 | } | ||
872 | |||
873 | if (ptr && !size) { /* Bounce to free() */ | ||
874 | free(ptr); | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | #ifdef _THREAD_SAFE | ||
879 | _thread_kern_sig_block(&status); | ||
880 | #endif | ||
881 | index = ptr2index(ptr); | ||
882 | |||
883 | if (index < malloc_pageshift) { | ||
884 | wrtwarning("realloc(): junk pointer (too low)\n"); | ||
885 | #ifdef _THREAD_SAFE | ||
886 | _thread_kern_sig_unblock(status); | ||
887 | #endif | ||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | if (index > last_index) { | ||
892 | wrtwarning("realloc(): junk pointer (too high)\n"); | ||
893 | #ifdef _THREAD_SAFE | ||
894 | _thread_kern_sig_unblock(status); | ||
895 | #endif | ||
896 | return 0; | ||
897 | } | ||
898 | |||
899 | mp = &page_dir[index]; | ||
900 | |||
901 | if (*mp == MALLOC_FIRST) { /* Page allocation */ | ||
902 | |||
903 | /* Check the pointer */ | ||
904 | if ((u_long)ptr & malloc_pagemask) { | ||
905 | wrtwarning("realloc(): modified page pointer.\n"); | ||
906 | #ifdef _THREAD_SAFE | ||
907 | _thread_kern_sig_unblock(status); | ||
908 | #endif | ||
909 | return 0; | ||
321 | } | 910 | } |
322 | op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); | 911 | |
323 | if (op->ov_magic == MAGIC) { | 912 | /* Find the size in bytes */ |
324 | was_alloced++; | 913 | for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;) |
325 | i = op->ov_index; | 914 | osize += malloc_pagesize; |
326 | } else { | 915 | |
327 | /* | 916 | if (!malloc_realloc && /* unless we have to, */ |
328 | * Already free, doing "compaction". | 917 | size <= osize && /* .. or are too small, */ |
329 | * | 918 | size > (osize - malloc_pagesize)) { /* .. or can free a page, */ |
330 | * Search for the old block of memory on the | 919 | #ifdef _THREAD_SAFE |
331 | * free list. First, check the most common | 920 | _thread_kern_sig_unblock(status); |
332 | * case (last element free'd), then (this failing) | 921 | #endif |
333 | * the last ``realloc_srchlen'' items free'd. | 922 | return ptr; /* don't do anything. */ |
334 | * If all lookups fail, then assume the size of | ||
335 | * the memory block being realloc'd is the | ||
336 | * largest possible (so that all "nbytes" of new | ||
337 | * memory are copied into). Note that this could cause | ||
338 | * a memory fault if the old area was tiny, and the moon | ||
339 | * is gibbous. However, that is very unlikely. | ||
340 | */ | ||
341 | if ((i = findbucket(op, 1)) < 0 && | ||
342 | (i = findbucket(op, realloc_srchlen)) < 0) | ||
343 | i = NBUCKETS; | ||
344 | } | 923 | } |
345 | onb = 1 << (i + 3); | 924 | |
346 | if (onb < pagesz) | 925 | } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */ |
347 | onb -= sizeof (*op) + RSLOP; | 926 | |
348 | else | 927 | /* Check the pointer for sane values */ |
349 | onb += pagesz - sizeof (*op) - RSLOP; | 928 | if (((u_long)ptr & ((*mp)->size-1))) { |
350 | /* avoid the copy if same size block */ | 929 | wrtwarning("realloc(): modified chunk pointer.\n"); |
351 | if (was_alloced) { | 930 | #ifdef _THREAD_SAFE |
352 | if (i) { | 931 | _thread_kern_sig_unblock(status); |
353 | i = 1 << (i + 2); | 932 | #endif |
354 | if (i < pagesz) | 933 | return 0; |
355 | i -= sizeof (*op) + RSLOP; | 934 | } |
356 | else | 935 | |
357 | i += pagesz - sizeof (*op) - RSLOP; | 936 | /* Find the chunk index in the page */ |
358 | } | 937 | i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift; |
359 | if (nbytes <= onb && nbytes > i) { | 938 | |
360 | #ifdef RCHECK | 939 | /* Verify that it isn't a free chunk already */ |
361 | op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); | 940 | if (tst_bit(*mp,i)) { |
362 | *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; | 941 | wrtwarning("realloc(): already free chunk.\n"); |
942 | #ifdef _THREAD_SAFE | ||
943 | _thread_kern_sig_unblock(status); | ||
944 | #endif | ||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | osize = (*mp)->size; | ||
949 | |||
950 | if (!malloc_realloc && /* Unless we have to, */ | ||
951 | size < osize && /* ..or are too small, */ | ||
952 | (size > osize/2 || /* ..or could use a smaller size, */ | ||
953 | osize == malloc_minsize)) { /* ..(if there is one) */ | ||
954 | #ifdef _THREAD_SAFE | ||
955 | _thread_kern_sig_unblock(status); | ||
363 | #endif | 956 | #endif |
364 | return(cp); | 957 | return ptr; /* ..Don't do anything */ |
365 | } else | ||
366 | free(cp); | ||
367 | } | 958 | } |
368 | if ((res = malloc(nbytes)) == NULL) | 959 | |
369 | return (NULL); | 960 | } else { |
370 | if (cp != res) /* common optimization if "compacting" */ | 961 | wrtwarning("realloc(): wrong page pointer.\n"); |
371 | bcopy(cp, res, (nbytes < onb) ? nbytes : onb); | 962 | #ifdef _THREAD_SAFE |
372 | return (res); | 963 | _thread_kern_sig_unblock(status); |
964 | #endif | ||
965 | return 0; | ||
966 | } | ||
967 | |||
968 | p = malloc(size); | ||
969 | |||
970 | if (p) { | ||
971 | /* copy the lesser of the two sizes, and free the old one */ | ||
972 | if (osize < size) | ||
973 | memcpy(p,ptr,osize); | ||
974 | else | ||
975 | memcpy(p,ptr,size); | ||
976 | free(ptr); | ||
977 | } | ||
978 | #ifdef _THREAD_SAFE | ||
979 | _thread_kern_sig_unblock(status); | ||
980 | #endif | ||
981 | return p; | ||
373 | } | 982 | } |
374 | 983 | ||
375 | /* | 984 | /* |
376 | * Search ``srchlen'' elements of each free list for a block whose | 985 | * Free a sequence of pages |
377 | * header starts at ``freep''. If srchlen is -1 search the whole list. | ||
378 | * Return bucket number, or -1 if not found. | ||
379 | */ | 986 | */ |
380 | static | 987 | |
381 | findbucket(freep, srchlen) | 988 | static __inline void |
382 | union overhead *freep; | 989 | free_pages(void *ptr, int index, struct pginfo *info) |
383 | int srchlen; | ||
384 | { | 990 | { |
385 | register union overhead *p; | 991 | int i; |
386 | register int i, j; | 992 | struct pgfree *pf,*pt; |
387 | 993 | u_long l; | |
388 | for (i = 0; i < NBUCKETS; i++) { | 994 | void *tail; |
389 | j = 0; | 995 | |
390 | for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { | 996 | if (info == MALLOC_FREE) { |
391 | if (p == freep) | 997 | wrtwarning("free(): already free page.\n"); |
392 | return (i); | 998 | return; |
393 | j++; | 999 | } |
394 | } | 1000 | |
1001 | if (info != MALLOC_FIRST) { | ||
1002 | wrtwarning("free(): freeing wrong page.\n"); | ||
1003 | return; | ||
1004 | } | ||
1005 | |||
1006 | if ((u_long)ptr & malloc_pagemask) { | ||
1007 | wrtwarning("free(): modified page pointer.\n"); | ||
1008 | return; | ||
1009 | } | ||
1010 | |||
1011 | /* Count how many pages and mark them free at the same time */ | ||
1012 | page_dir[index] = MALLOC_FREE; | ||
1013 | for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++) | ||
1014 | page_dir[index + i] = MALLOC_FREE; | ||
1015 | |||
1016 | l = i << malloc_pageshift; | ||
1017 | |||
1018 | tail = ptr+l; | ||
1019 | |||
1020 | /* add to free-list */ | ||
1021 | if (!px) | ||
1022 | px = malloc(sizeof *pt); /* This cannot fail... */ | ||
1023 | px->page = ptr; | ||
1024 | px->end = tail; | ||
1025 | px->size = l; | ||
1026 | if (!free_list.next) { | ||
1027 | |||
1028 | /* Nothing on free list, put this at head */ | ||
1029 | px->next = free_list.next; | ||
1030 | px->prev = &free_list; | ||
1031 | free_list.next = px; | ||
1032 | pf = px; | ||
1033 | px = 0; | ||
1034 | |||
1035 | } else { | ||
1036 | |||
1037 | /* Find the right spot, leave pf pointing to the modified entry. */ | ||
1038 | tail = ptr+l; | ||
1039 | |||
1040 | for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next) | ||
1041 | ; /* Race ahead here */ | ||
1042 | |||
1043 | if (pf->page > tail) { | ||
1044 | /* Insert before entry */ | ||
1045 | px->next = pf; | ||
1046 | px->prev = pf->prev; | ||
1047 | pf->prev = px; | ||
1048 | px->prev->next = px; | ||
1049 | pf = px; | ||
1050 | px = 0; | ||
1051 | } else if (pf->end == ptr ) { | ||
1052 | /* Append to the previous entry */ | ||
1053 | pf->end += l; | ||
1054 | pf->size += l; | ||
1055 | if (pf->next && pf->end == pf->next->page ) { | ||
1056 | /* And collapse the next too. */ | ||
1057 | pt = pf->next; | ||
1058 | pf->end = pt->end; | ||
1059 | pf->size += pt->size; | ||
1060 | pf->next = pt->next; | ||
1061 | if (pf->next) | ||
1062 | pf->next->prev = pf; | ||
1063 | free(pt); | ||
1064 | } | ||
1065 | } else if (pf->page == tail) { | ||
1066 | /* Prepend to entry */ | ||
1067 | pf->size += l; | ||
1068 | pf->page = ptr; | ||
1069 | } else if (!pf->next) { | ||
1070 | /* Append at tail of chain */ | ||
1071 | px->next = 0; | ||
1072 | px->prev = pf; | ||
1073 | pf->next = px; | ||
1074 | pf = px; | ||
1075 | px = 0; | ||
1076 | } else { | ||
1077 | wrterror("messed up free list"); | ||
395 | } | 1078 | } |
396 | return (-1); | 1079 | } |
1080 | |||
1081 | /* Return something to OS ? */ | ||
1082 | if (!pf->next && /* If we're the last one, */ | ||
1083 | pf->size > malloc_cache && /* ..and the cache is full, */ | ||
1084 | pf->end == malloc_brk && /* ..and none behind us, */ | ||
1085 | malloc_brk == sbrk(0)) { /* ..and it's OK to do... */ | ||
1086 | |||
1087 | /* | ||
1088 | * Keep the cache intact. Notice that the '>' above guarantees that | ||
1089 | * the pf will always have at least one page afterwards. | ||
1090 | */ | ||
1091 | pf->end = pf->page + malloc_cache; | ||
1092 | pf->size = malloc_cache; | ||
1093 | |||
1094 | brk(pf->end); | ||
1095 | malloc_brk = pf->end; | ||
1096 | |||
1097 | index = ptr2index(pf->end); | ||
1098 | last_index = index - 1; | ||
1099 | |||
1100 | for(i=index;i <= last_index;) | ||
1101 | page_dir[i++] = MALLOC_NOT_MINE; | ||
1102 | |||
1103 | /* XXX: We could realloc/shrink the pagedir here I guess. */ | ||
1104 | } | ||
397 | } | 1105 | } |
398 | 1106 | ||
399 | #ifdef MSTATS | ||
400 | /* | 1107 | /* |
401 | * mstats - print out statistics about malloc | 1108 | * Free a chunk, and possibly the page it's on, if the page becomes empty. |
402 | * | ||
403 | * Prints two lines of numbers, one showing the length of the free list | ||
404 | * for each size category, the second showing the number of mallocs - | ||
405 | * frees for each size category. | ||
406 | */ | 1109 | */ |
407 | mstats(s) | 1110 | |
408 | char *s; | 1111 | static __inline void |
1112 | free_bytes(void *ptr, int index, struct pginfo *info) | ||
409 | { | 1113 | { |
410 | register int i, j; | 1114 | int i; |
411 | register union overhead *p; | 1115 | struct pginfo **mp; |
412 | int totfree = 0, | 1116 | void *vp; |
413 | totused = 0; | 1117 | |
414 | 1118 | /* Find the chunk number on the page */ | |
415 | fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); | 1119 | i = ((u_long)ptr & malloc_pagemask) >> info->shift; |
416 | for (i = 0; i < NBUCKETS; i++) { | 1120 | |
417 | for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) | 1121 | if (((u_long)ptr & (info->size-1))) { |
418 | ; | 1122 | wrtwarning("free(): modified pointer.\n"); |
419 | fprintf(stderr, " %d", j); | 1123 | return; |
420 | totfree += j * (1 << (i + 3)); | 1124 | } |
421 | } | 1125 | |
422 | fprintf(stderr, "\nused:\t"); | 1126 | if (tst_bit(info,i)) { |
423 | for (i = 0; i < NBUCKETS; i++) { | 1127 | wrtwarning("free(): already free chunk.\n"); |
424 | fprintf(stderr, " %d", nmalloc[i]); | 1128 | return; |
425 | totused += nmalloc[i] * (1 << (i + 3)); | 1129 | } |
426 | } | 1130 | |
427 | fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", | 1131 | set_bit(info,i); |
428 | totused, totfree); | 1132 | info->free++; |
1133 | |||
1134 | mp = page_dir + info->shift; | ||
1135 | |||
1136 | if (info->free == 1) { | ||
1137 | |||
1138 | /* Page became non-full */ | ||
1139 | |||
1140 | mp = page_dir + info->shift; | ||
1141 | /* Insert in address order */ | ||
1142 | while (*mp && (*mp)->next && (*mp)->next->page < info->page) | ||
1143 | mp = &(*mp)->next; | ||
1144 | info->next = *mp; | ||
1145 | *mp = info; | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | if (info->free != info->total) | ||
1150 | return; | ||
1151 | |||
1152 | /* Find & remove this page in the queue */ | ||
1153 | while (*mp != info) { | ||
1154 | mp = &((*mp)->next); | ||
1155 | #ifdef EXTRA_SANITY | ||
1156 | if (!*mp) | ||
1157 | wrterror("(ES): Not on queue\n"); | ||
1158 | #endif /* EXTRA_SANITY */ | ||
1159 | } | ||
1160 | *mp = info->next; | ||
1161 | |||
1162 | /* Free the page & the info structure if need be */ | ||
1163 | page_dir[ptr2index(info->page)] = MALLOC_FIRST; | ||
1164 | vp = info->page; /* Order is important ! */ | ||
1165 | if(vp != (void*)info) | ||
1166 | free(info); | ||
1167 | free(vp); | ||
429 | } | 1168 | } |
1169 | |||
1170 | void | ||
1171 | free(void *ptr) | ||
1172 | { | ||
1173 | struct pginfo *info; | ||
1174 | int index; | ||
1175 | #ifdef _THREAD_SAFE | ||
1176 | int status; | ||
1177 | #endif | ||
1178 | |||
1179 | /* This is legal */ | ||
1180 | if (!ptr) | ||
1181 | return; | ||
1182 | |||
1183 | if (!initialized) { | ||
1184 | wrtwarning("free(): malloc() never got called.\n"); | ||
1185 | return; | ||
1186 | } | ||
1187 | |||
1188 | /* If we're already sinking, don't make matters any worse. */ | ||
1189 | if (suicide) | ||
1190 | return; | ||
1191 | |||
1192 | #ifdef _THREAD_SAFE | ||
1193 | _thread_kern_sig_block(&status); | ||
430 | #endif | 1194 | #endif |
1195 | index = ptr2index(ptr); | ||
1196 | |||
1197 | if (index < malloc_pageshift) { | ||
1198 | wrtwarning("free(): junk pointer (too low)\n"); | ||
1199 | #ifdef _THREAD_SAFE | ||
1200 | _thread_kern_sig_unblock(status); | ||
1201 | #endif | ||
1202 | return; | ||
1203 | } | ||
1204 | |||
1205 | if (index > last_index) { | ||
1206 | wrtwarning("free(): junk pointer (too high)\n"); | ||
1207 | #ifdef _THREAD_SAFE | ||
1208 | _thread_kern_sig_unblock(status); | ||
1209 | #endif | ||
1210 | return; | ||
1211 | } | ||
1212 | |||
1213 | info = page_dir[index]; | ||
1214 | |||
1215 | if (info < MALLOC_MAGIC) | ||
1216 | free_pages(ptr,index,info); | ||
1217 | else | ||
1218 | free_bytes(ptr,index,info); | ||
1219 | #ifdef _THREAD_SAFE | ||
1220 | _thread_kern_sig_unblock(status); | ||
1221 | #endif | ||
1222 | return; | ||
1223 | } | ||
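
For sub-page requests, the imported code (malloc_bytes/malloc_make_chunks above) rounds each request up to a power of two, selecting the size class with fls(size-1), and tracks free chunks of that size with a per-page bitmap. The sketch below (not part of the commit) uses the portable fls() fallback shown in the diff; the trailing underscore on fls_() and the plain bits[] array are illustrative simplifications of the real pginfo bookkeeping:

    /*
     * Sketch only: size-class selection and bitmap bookkeeping as done by
     * the chunk allocator.  A set bit means "chunk is free".
     */
    #include <stdio.h>

    #define MALLOC_BITS (8 * sizeof(unsigned long))

    /* Portable find-last-set, as in the diff's fallback fls(). */
    static int
    fls_(unsigned size)
    {
            int i = 1;
            while (size >>= 1)
                    i++;
            return i;
    }

    int
    main(void)
    {
            unsigned long bits[4] = { 0 };  /* stand-in for pginfo->bits[] */
            size_t sizes[] = { 16, 17, 100, 2047 };
            size_t n;

            /*
             * A request of `size' bytes lands in bucket fls(size - 1), i.e.
             * the smallest power of two >= size (and >= malloc_minsize in
             * the real code).
             */
            for (n = 0; n < sizeof(sizes)/sizeof(sizes[0]); n++)
                    printf("request %4zu -> chunk size %u\n",
                        sizes[n], 1U << fls_((unsigned)(sizes[n] - 1)));

            /* Mark chunk 5 free, then in use again, as set_bit()/clr_bit() do. */
            bits[5 / MALLOC_BITS] |= 1UL << (5 % MALLOC_BITS);      /* set_bit */
            bits[5 / MALLOC_BITS] &= ~(1UL << (5 % MALLOC_BITS));   /* clr_bit */
            printf("word 0 after set+clear: %#lx\n", bits[0]);
            return 0;
    }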