path: root/src/lib/libc/stdlib/malloc.c
Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--  src/lib/libc/stdlib/malloc.c  1958
1 file changed, 1615 insertions(+), 343 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3c57fad024..5fc75c2c75 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,421 +1,1693 @@
1/* $OpenBSD: malloc.c,v 1.140 2011/10/06 14:37:04 otto Exp $ */
1/* 2/*
2 * Copyright (c) 1983 Regents of the University of California. 3 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
3 * All rights reserved.
4 * 4 *
5 * Redistribution and use in source and binary forms, with or without 5 * Permission to use, copy, modify, and distribute this software for any
6 * modification, are permitted provided that the following conditions 6 * purpose with or without fee is hereby granted, provided that the above
7 * are met: 7 * copyright notice and this permission notice appear in all copies.
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 * 8 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */ 16 */
33 17
34#if defined(LIBC_SCCS) && !defined(lint)
35/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/
36static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $";
37#endif /* LIBC_SCCS and not lint */
38
39/* 18/*
40 * malloc.c (Caltech) 2/21/82 19 * Parts of this code, mainly the sub page sized chunk management code is
41 * Chris Kingsley, kingsley@cit-20. 20 * derived from the malloc implementation with the following license:
42 * 21 */
43 * This is a very fast storage allocator. It allocates blocks of a small 22/*
44 * number of different sizes, and keeps free lists of each size. Blocks that 23 * ----------------------------------------------------------------------------
45 * don't exactly fit are passed up to the next larger size. In this 24 * "THE BEER-WARE LICENSE" (Revision 42):
46 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. 25 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
47 * This is designed for use in a virtual memory environment. 26 * can do whatever you want with this stuff. If we meet some day, and you think
27 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
28 * ----------------------------------------------------------------------------
48 */ 29 */
49 30
31/* #define MALLOC_STATS */
32
50#include <sys/types.h> 33#include <sys/types.h>
34#include <sys/param.h>
35#include <sys/queue.h>
36#include <sys/mman.h>
37#include <sys/uio.h>
38#include <errno.h>
39#include <stdint.h>
51#include <stdlib.h> 40#include <stdlib.h>
52#include <string.h> 41#include <string.h>
42#include <stdio.h>
53#include <unistd.h> 43#include <unistd.h>
54 44
55#define NULL 0 45#ifdef MALLOC_STATS
46#include <sys/tree.h>
47#include <fcntl.h>
48#endif
49
50#include "thread_private.h"
51
52#if defined(__sparc__) && !defined(__sparcv9__)
53#define MALLOC_PAGESHIFT (13U)
54#elif defined(__mips64__)
55#define MALLOC_PAGESHIFT (14U)
56#else
57#define MALLOC_PAGESHIFT (PGSHIFT)
58#endif
59
60#define MALLOC_MINSHIFT 4
61#define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1)
62#define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT)
63#define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT)
64#define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
65#define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
56 66
57static void morecore(); 67#define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT)
58static int findbucket(); 68#define MALLOC_MAXCACHE 256
69#define MALLOC_DELAYED_CHUNKS 15 /* max of getrnibble() */
70#define MALLOC_INITIAL_REGIONS 512
71#define MALLOC_DEFAULT_CACHE 64
59 72
60/* 73/*
61 * The overhead on a block is at least 4 bytes. When free, this space 74 * When the P option is active, we move allocations between half a page
62 * contains a pointer to the next free block, and the bottom two bits must 75 * and a whole page towards the end, subject to alignment constraints.
63 * be zero. When in use, the first byte is set to MAGIC, and the second 76 * This is the extra headroom we allow. Set to zero to be the most
64 * byte is the size index. The remaining bytes are for alignment. 77 * strict.
65 * If range checking is enabled then a second word holds the size of the
66 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
67 * The order of elements is critical: ov_magic must overlay the low order
68 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
69 */ 78 */
70union overhead { 79#define MALLOC_LEEWAY 0
71 union overhead *ov_next; /* when free */ 80
72 struct { 81#define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
73 u_char ovu_magic; /* magic number */ 82
74 u_char ovu_index; /* bucket # */ 83/*
75#ifdef RCHECK 84 * What to use for Junk. This is the byte value we use to fill with
76 u_short ovu_rmagic; /* range magic number */ 85 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
77 u_long ovu_size; /* actual block size */ 86 * and SOME_FREEJUNK right before free.
87 */
88#define SOME_JUNK 0xd0 /* as in "Duh" :-) */
89#define SOME_FREEJUNK 0xdf
90
91#define MMAP(sz) mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
92 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
93
94#define MMAPA(a,sz) mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
95 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
96
97struct region_info {
98 void *p; /* page; low bits used to mark chunks */
99 uintptr_t size; /* size for pages, or chunk_info pointer */
100#ifdef MALLOC_STATS
101 void *f; /* where allocated from */
78#endif 102#endif
79 } ovu;
80#define ov_magic ovu.ovu_magic
81#define ov_index ovu.ovu_index
82#define ov_rmagic ovu.ovu_rmagic
83#define ov_size ovu.ovu_size
84}; 103};
85 104
86#define MAGIC 0xef /* magic # on accounting info */ 105LIST_HEAD(chunk_head, chunk_info);
87#define RMAGIC 0x5555 /* magic # on range info */
88 106
89#ifdef RCHECK 107struct dir_info {
90#define RSLOP sizeof (u_short) 108 u_int32_t canary1;
109 struct region_info *r; /* region slots */
110 size_t regions_total; /* number of region slots */
111 size_t regions_free; /* number of free slots */
112 /* lists of free chunk info structs */
113 struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
114 /* lists of chunks with free slots */
115 struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1];
116 size_t free_regions_size; /* free pages cached */
117 /* free pages cache */
118 struct region_info free_regions[MALLOC_MAXCACHE];
119 /* delayed free chunk slots */
120 void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
121 u_short chunk_start;
122#ifdef MALLOC_STATS
123 size_t inserts;
124 size_t insert_collisions;
125 size_t finds;
126 size_t find_collisions;
127 size_t deletes;
128 size_t delete_moves;
129 size_t cheap_realloc_tries;
130 size_t cheap_reallocs;
131#define STATS_INC(x) ((x)++)
132#define STATS_ZERO(x) ((x) = 0)
133#define STATS_SETF(x,y) ((x)->f = (y))
91#else 134#else
92#define RSLOP 0 135#define STATS_INC(x) /* nothing */
93#endif 136#define STATS_ZERO(x) /* nothing */
137#define STATS_SETF(x,y) /* nothing */
138#endif /* MALLOC_STATS */
139 u_int32_t canary2;
140};
141#define DIR_INFO_RSZ ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
142 ~MALLOC_PAGEMASK)
94 143
95/* 144/*
96 * nextf[i] is the pointer to the next free block of size 2^(i+3). The 145 * This structure describes a page worth of chunks.
97 * smallest allocatable block is 8 bytes. The overhead information 146 *
98 * precedes the data area returned to the user. 147 * How many bits per u_short in the bitmap
99 */ 148 */
100#define NBUCKETS 30 149#define MALLOC_BITS (NBBY * sizeof(u_short))
101static union overhead *nextf[NBUCKETS]; 150struct chunk_info {
102extern char *sbrk(); 151 LIST_ENTRY(chunk_info) entries;
152 void *page; /* pointer to the page */
153 u_int32_t canary;
154 u_short size; /* size of this page's chunks */
155 u_short shift; /* how far to shift for this size */
156 u_short free; /* how many free chunks */
157 u_short total; /* how many chunk */
158 /* which chunks are free */
159 u_short bits[1];
160};
161
162struct malloc_readonly {
163 struct dir_info *g_pool; /* Main bookkeeping information */
164 int malloc_abort; /* abort() on error */
165 int malloc_freeprot; /* mprotect free pages PROT_NONE? */
166 int malloc_hint; /* call madvice on free pages? */
167 int malloc_junk; /* junk fill? */
168 int malloc_move; /* move allocations to end of page? */
169 int malloc_realloc; /* always realloc? */
170 int malloc_xmalloc; /* xmalloc behaviour? */
171 int malloc_zero; /* zero fill? */
172 size_t malloc_guard; /* use guard pages after allocations? */
173 u_int malloc_cache; /* free pages we cache */
174#ifdef MALLOC_STATS
175 int malloc_stats; /* dump statistics at end */
176#endif
177 u_int32_t malloc_canary; /* Matched against ones in g_pool */
178};
103 179
104static int pagesz; /* page size */ 180/* This object is mapped PROT_READ after initialisation to prevent tampering */
105static int pagebucket; /* page size bucket */ 181static union {
182 struct malloc_readonly mopts;
183 u_char _pad[MALLOC_PAGESIZE];
184} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
185#define mopts malloc_readonly.mopts
186#define g_pool mopts.g_pool
106 187
107#ifdef MSTATS 188char *malloc_options; /* compile-time options */
108/* 189
109 * nmalloc[i] is the difference between the number of mallocs and frees 190static char *malloc_func; /* current function */
110 * for a given block size. 191static int malloc_active; /* status of malloc */
192
193static size_t malloc_guarded; /* bytes used for guards */
194static size_t malloc_used; /* bytes allocated */
195
196static size_t rnibblesused; /* random nibbles used */
197static u_char rbytes[512]; /* random bytes */
198static u_char getrnibble(void);
199
200extern char *__progname;
201
202#ifdef MALLOC_STATS
203void malloc_dump(int);
204static void malloc_exit(void);
205#define CALLER __builtin_return_address(0)
206#else
207#define CALLER NULL
208#endif
209
210/* low bits of r->p determine size: 0 means >= page size and p->size holding
211 * real size, otherwise r->size is a shift count, or 1 for malloc(0)
111 */ 212 */
112static u_int nmalloc[NBUCKETS]; 213#define REALSIZE(sz, r) \
113#include <stdio.h> 214 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \
215 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
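The REALSIZE() macro above decodes the packed region encoding. A hypothetical helper with the same logic, spelled out for readability (the name region_real_size is not part of the patch):

static size_t
region_real_size(struct region_info *r)
{
	size_t low = (uintptr_t)r->p & MALLOC_PAGEMASK;

	if (low == 0)
		return r->size;		/* page-sized or larger: byte count stored directly */
	if (low == 1)
		return 0;		/* marker used for malloc(0) */
	return (size_t)1 << (low - 1);	/* chunk: low bits hold shift count + 1 */
}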
216
217static inline size_t
218hash(void *p)
219{
220 size_t sum;
221 union {
222 uintptr_t p;
223 unsigned short a[sizeof(void *) / sizeof(short)];
224 } u;
225 u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
226 sum = u.a[0];
227 sum = (sum << 7) - sum + u.a[1];
228#ifdef __LP64__
229 sum = (sum << 7) - sum + u.a[2];
230 sum = (sum << 7) - sum + u.a[3];
114#endif 231#endif
232 return sum;
233}
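Since (sum << 7) - sum equals sum * 127, the function above is a small multiplicative hash over the 16-bit pieces of the page number. A hypothetical rewrite with the same arithmetic (byte order as on a little-endian machine):

static size_t
hash_equivalent(void *p)
{
	uintptr_t v = (uintptr_t)p >> MALLOC_PAGESHIFT;
	size_t sum = (u_short)v;

	sum = sum * 127 + (u_short)(v >> 16);
#ifdef __LP64__
	sum = sum * 127 + (u_short)(v >> 32);
	sum = sum * 127 + (u_short)(v >> 48);
#endif
	return sum;
}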
115 234
116#if defined(DEBUG) || defined(RCHECK) 235static void
117#define ASSERT(p) if (!(p)) botch("p") 236wrterror(char *msg, void *p)
118#include <stdio.h>
119static
120botch(s)
121 char *s;
122{ 237{
123 fprintf(stderr, "\r\nassertion botched: %s\r\n", s); 238 char *q = " error: ";
124 (void) fflush(stderr); /* just in case user buffered it */ 239 struct iovec iov[6];
125 abort(); 240 char buf[20];
241 int saved_errno = errno;
242
243 iov[0].iov_base = __progname;
244 iov[0].iov_len = strlen(__progname);
245 iov[1].iov_base = malloc_func;
246 iov[1].iov_len = strlen(malloc_func);
247 iov[2].iov_base = q;
248 iov[2].iov_len = strlen(q);
249 iov[3].iov_base = msg;
250 iov[3].iov_len = strlen(msg);
251 iov[4].iov_base = buf;
252 if (p == NULL)
253 iov[4].iov_len = 0;
254 else {
255 snprintf(buf, sizeof(buf), " %p", p);
256 iov[4].iov_len = strlen(buf);
257 }
258 iov[5].iov_base = "\n";
259 iov[5].iov_len = 1;
260 writev(STDERR_FILENO, iov, 6);
261
262#ifdef MALLOC_STATS
263 if (mopts.malloc_stats)
264 malloc_dump(STDERR_FILENO);
265#endif /* MALLOC_STATS */
266
267 errno = saved_errno;
268 if (mopts.malloc_abort)
269 abort();
126} 270}
127#else
128#define ASSERT(p)
129#endif
130 271
131void * 272static void
132malloc(nbytes) 273rbytes_init(void)
133 size_t nbytes; 274{
275 arc4random_buf(rbytes, sizeof(rbytes));
276 rnibblesused = 0;
277}
278
279static inline u_char
280getrnibble(void)
281{
282 u_char x;
283
284 if (rnibblesused >= 2 * sizeof(rbytes))
285 rbytes_init();
286 x = rbytes[rnibblesused++ / 2];
287 return (rnibblesused & 1 ? x & 0xf : x >> 4);
288}
289
290/*
291 * Cache maintenance. We keep at most malloc_cache pages cached.
292 * If the cache is becoming full, unmap pages in the cache for real,
293 * and then add the region to the cache
294 * Opposed to the regular region data structure, the sizes in the
295 * cache are in MALLOC_PAGESIZE units.
296 */
297static void
298unmap(struct dir_info *d, void *p, size_t sz)
134{ 299{
135 register union overhead *op; 300 size_t psz = sz >> MALLOC_PAGESHIFT;
136 register long bucket, n; 301 size_t rsz, tounmap;
137 register unsigned amt; 302 struct region_info *r;
303 u_int i, offset;
304
305 if (sz != PAGEROUND(sz)) {
306 wrterror("munmap round", NULL);
307 return;
308 }
309
310 if (psz > mopts.malloc_cache) {
311 if (munmap(p, sz))
312 wrterror("munmap", p);
313 malloc_used -= sz;
314 return;
315 }
316 tounmap = 0;
317 rsz = mopts.malloc_cache - d->free_regions_size;
318 if (psz > rsz)
319 tounmap = psz - rsz;
320 offset = getrnibble();
321 for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
322 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
323 if (r->p != NULL) {
324 rsz = r->size << MALLOC_PAGESHIFT;
325 if (munmap(r->p, rsz))
326 wrterror("munmap", r->p);
327 r->p = NULL;
328 if (tounmap > r->size)
329 tounmap -= r->size;
330 else
331 tounmap = 0;
332 d->free_regions_size -= r->size;
333 r->size = 0;
334 malloc_used -= rsz;
335 }
336 }
337 if (tounmap > 0)
338 wrterror("malloc cache underflow", NULL);
339 for (i = 0; i < mopts.malloc_cache; i++) {
340 r = &d->free_regions[i];
341 if (r->p == NULL) {
342 if (mopts.malloc_hint)
343 madvise(p, sz, MADV_FREE);
344 if (mopts.malloc_freeprot)
345 mprotect(p, sz, PROT_NONE);
346 r->p = p;
347 r->size = psz;
348 d->free_regions_size += psz;
349 break;
350 }
351 }
352 if (i == mopts.malloc_cache)
353 wrterror("malloc free slot lost", NULL);
354 if (d->free_regions_size > mopts.malloc_cache)
355 wrterror("malloc cache overflow", NULL);
356}
357
358static void
359zapcacheregion(struct dir_info *d, void *p)
360{
361 u_int i;
362 struct region_info *r;
363 size_t rsz;
364
365 for (i = 0; i < mopts.malloc_cache; i++) {
366 r = &d->free_regions[i];
367 if (r->p == p) {
368 rsz = r->size << MALLOC_PAGESHIFT;
369 if (munmap(r->p, rsz))
370 wrterror("munmap", r->p);
371 r->p = NULL;
372 d->free_regions_size -= r->size;
373 r->size = 0;
374 malloc_used -= rsz;
375 }
376 }
377}
378
379static void *
380map(struct dir_info *d, size_t sz, int zero_fill)
381{
382 size_t psz = sz >> MALLOC_PAGESHIFT;
383 struct region_info *r, *big = NULL;
384 u_int i, offset;
385 void *p;
386
387 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
388 d->canary1 != ~d->canary2)
389 wrterror("internal struct corrupt", NULL);
390 if (sz != PAGEROUND(sz)) {
391 wrterror("map round", NULL);
392 return NULL;
393 }
394 if (psz > d->free_regions_size) {
395 p = MMAP(sz);
396 if (p != MAP_FAILED)
397 malloc_used += sz;
398 /* zero fill not needed */
399 return p;
400 }
401 offset = getrnibble();
402 for (i = 0; i < mopts.malloc_cache; i++) {
403 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
404 if (r->p != NULL) {
405 if (r->size == psz) {
406 p = r->p;
407 if (mopts.malloc_freeprot)
408 mprotect(p, sz, PROT_READ | PROT_WRITE);
409 if (mopts.malloc_hint)
410 madvise(p, sz, MADV_NORMAL);
411 r->p = NULL;
412 r->size = 0;
413 d->free_regions_size -= psz;
414 if (zero_fill)
415 memset(p, 0, sz);
416 else if (mopts.malloc_junk &&
417 mopts.malloc_freeprot)
418 memset(p, SOME_FREEJUNK, sz);
419 return p;
420 } else if (r->size > psz)
421 big = r;
422 }
423 }
424 if (big != NULL) {
425 r = big;
426 p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
427 if (mopts.malloc_freeprot)
428 mprotect(p, sz, PROT_READ | PROT_WRITE);
429 if (mopts.malloc_hint)
430 madvise(p, sz, MADV_NORMAL);
431 r->size -= psz;
432 d->free_regions_size -= psz;
433 if (zero_fill)
434 memset(p, 0, sz);
435 else if (mopts.malloc_junk && mopts.malloc_freeprot)
436 memset(p, SOME_FREEJUNK, sz);
437 return p;
438 }
439 p = MMAP(sz);
440 if (p != MAP_FAILED)
441 malloc_used += sz;
442 if (d->free_regions_size > mopts.malloc_cache)
443 wrterror("malloc cache", NULL);
444 /* zero fill not needed */
445 return p;
446}
447
448/*
449 * Initialize a dir_info, which should have been cleared by caller
450 */
451static int
452omalloc_init(struct dir_info **dp)
453{
454 char *p, b[64];
455 int i, j;
456 size_t d_avail, regioninfo_size;
457 struct dir_info *d;
458
459 rbytes_init();
138 460
139 /* 461 /*
140 * First time malloc is called, setup page size and 462 * Default options
141 * align break pointer so all data will be page aligned.
142 */ 463 */
143 if (pagesz == 0) { 464 mopts.malloc_abort = 1;
144 pagesz = n = getpagesize(); 465 mopts.malloc_move = 1;
145 op = (union overhead *)sbrk(0); 466 mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
146 n = n - sizeof (*op) - ((long)op & (n - 1)); 467
147 if (n < 0) 468 for (i = 0; i < 3; i++) {
148 n += pagesz; 469 switch (i) {
149 if (n) { 470 case 0:
150 if (sbrk(n) == (char *)-1) 471 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
151 return (NULL); 472 if (j <= 0)
473 continue;
474 b[j] = '\0';
475 p = b;
476 break;
477 case 1:
478 if (issetugid() == 0)
479 p = getenv("MALLOC_OPTIONS");
480 else
481 continue;
482 break;
483 case 2:
484 p = malloc_options;
485 break;
486 default:
487 p = NULL;
152 } 488 }
153 bucket = 0; 489
154 amt = 8; 490 for (; p != NULL && *p != '\0'; p++) {
155 while (pagesz > amt) { 491 switch (*p) {
156 amt <<= 1; 492 case '>':
157 bucket++; 493 mopts.malloc_cache <<= 1;
494 if (mopts.malloc_cache > MALLOC_MAXCACHE)
495 mopts.malloc_cache = MALLOC_MAXCACHE;
496 break;
497 case '<':
498 mopts.malloc_cache >>= 1;
499 break;
500 case 'a':
501 mopts.malloc_abort = 0;
502 break;
503 case 'A':
504 mopts.malloc_abort = 1;
505 break;
506#ifdef MALLOC_STATS
507 case 'd':
508 mopts.malloc_stats = 0;
509 break;
510 case 'D':
511 mopts.malloc_stats = 1;
512 break;
513#endif /* MALLOC_STATS */
514 case 'f':
515 mopts.malloc_freeprot = 0;
516 break;
517 case 'F':
518 mopts.malloc_freeprot = 1;
519 break;
520 case 'g':
521 mopts.malloc_guard = 0;
522 break;
523 case 'G':
524 mopts.malloc_guard = MALLOC_PAGESIZE;
525 break;
526 case 'h':
527 mopts.malloc_hint = 0;
528 break;
529 case 'H':
530 mopts.malloc_hint = 1;
531 break;
532 case 'j':
533 mopts.malloc_junk = 0;
534 break;
535 case 'J':
536 mopts.malloc_junk = 1;
537 break;
538 case 'n':
539 case 'N':
540 break;
541 case 'p':
542 mopts.malloc_move = 0;
543 break;
544 case 'P':
545 mopts.malloc_move = 1;
546 break;
547 case 'r':
548 mopts.malloc_realloc = 0;
549 break;
550 case 'R':
551 mopts.malloc_realloc = 1;
552 break;
553 case 's':
554 mopts.malloc_freeprot = mopts.malloc_junk = 0;
555 mopts.malloc_guard = 0;
556 mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
557 break;
558 case 'S':
559 mopts.malloc_freeprot = mopts.malloc_junk = 1;
560 mopts.malloc_guard = MALLOC_PAGESIZE;
561 mopts.malloc_cache = 0;
562 break;
563 case 'x':
564 mopts.malloc_xmalloc = 0;
565 break;
566 case 'X':
567 mopts.malloc_xmalloc = 1;
568 break;
569 case 'z':
570 mopts.malloc_zero = 0;
571 break;
572 case 'Z':
573 mopts.malloc_zero = 1;
574 break;
575 default: {
576 static const char q[] = "malloc() warning: "
577 "unknown char in MALLOC_OPTIONS\n";
578 write(STDERR_FILENO, q, sizeof(q) - 1);
579 break;
580 }
581 }
158 } 582 }
159 pagebucket = bucket;
160 } 583 }
584
161 /* 585 /*
162 * Convert amount of memory requested into closest block size 586 * We want junk in the entire allocation, and zero only in the part
163 * stored in hash buckets which satisfies request. 587 * the user asked for.
164 * Account for space used per block for accounting.
165 */ 588 */
166 if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { 589 if (mopts.malloc_zero)
167#ifndef RCHECK 590 mopts.malloc_junk = 1;
168 amt = 8; /* size of first bucket */ 591
169 bucket = 0; 592#ifdef MALLOC_STATS
170#else 593 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
171 amt = 16; /* size of first bucket */ 594 static const char q[] = "malloc() warning: atexit(2) failed."
172 bucket = 1; 595 " Will not be able to dump stats on exit\n";
173#endif 596 write(STDERR_FILENO, q, sizeof(q) - 1);
174 n = -((long)sizeof (*op) + RSLOP);
175 } else {
176 amt = pagesz;
177 bucket = pagebucket;
178 }
179 while (nbytes > amt + n) {
180 amt <<= 1;
181 if (amt == 0)
182 return (NULL);
183 bucket++;
184 } 597 }
598#endif /* MALLOC_STATS */
599
600 while ((mopts.malloc_canary = arc4random()) == 0)
601 ;
602
185 /* 603 /*
186 * If nothing in hash bucket right now, 604 * Allocate dir_info with a guard page on either side. Also
187 * request more memory from the system. 605 * randomise offset inside the page at which the dir_info
606 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
188 */ 607 */
189 if ((op = nextf[bucket]) == NULL) { 608 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
190 morecore(bucket); 609 return -1;
191 if ((op = nextf[bucket]) == NULL) 610 mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
192 return (NULL); 611 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
193 } 612 MALLOC_PAGESIZE, PROT_NONE);
194 /* remove from linked list */ 613 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
195 nextf[bucket] = op->ov_next; 614 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
196 op->ov_magic = MAGIC; 615 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
197 op->ov_index = bucket; 616
198#ifdef MSTATS 617 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
199 nmalloc[bucket]++; 618 regioninfo_size = d->regions_total * sizeof(struct region_info);
200#endif 619 d->r = MMAP(regioninfo_size);
201#ifdef RCHECK 620 if (d->r == MAP_FAILED) {
621 wrterror("malloc init mmap failed", NULL);
622 d->regions_total = 0;
623 return 1;
624 }
625 for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
626 LIST_INIT(&d->chunk_info_list[i]);
627 LIST_INIT(&d->chunk_dir[i]);
628 }
629 malloc_used += regioninfo_size;
630 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
631 d->canary2 = ~d->canary1;
632
633 *dp = d;
634
202 /* 635 /*
203 * Record allocated size of block and 636 * Options have been set and will never be reset.
204 * bound space with magic numbers. 637 * Prevent further tampering with them.
205 */ 638 */
206 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 639 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
207 op->ov_rmagic = RMAGIC; 640 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
208 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 641
209#endif 642 return 0;
210 return ((char *)(op + 1));
211} 643}
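The loop above reads the option string from three places in order: the target of the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable (skipped for set-id programs), and the malloc_options global. A minimal usage sketch, assuming the program sets the global before anything has been allocated through this malloc:

#include <stdlib.h>

extern char *malloc_options;

int
main(void)
{
	char *p;

	malloc_options = "JG";	/* junk fill plus guard pages; parsed at the first allocation */
	p = malloc(100);
	free(p);
	return 0;
}

The same string could equally come from the environment (MALLOC_OPTIONS=JG ./prog) or from the symlink target of /etc/malloc.conf.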
212 644
213/* 645static int
214 * Allocate more memory to the indicated bucket. 646omalloc_grow(struct dir_info *d)
215 */
216static void
217morecore(bucket)
218 int bucket;
219{ 647{
220 register union overhead *op; 648 size_t newtotal;
221 register long sz; /* size of desired block */ 649 size_t newsize;
222 long amt; /* amount to allocate */ 650 size_t mask;
223 int nblks; /* how many blocks we get */ 651 size_t i;
652 struct region_info *p;
224 653
225 /* 654 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
226 * sbrk_size <= 0 only for big, FLUFFY, requests (about 655 return 1;
227 * 2^30 bytes on a VAX, I think) or for a negative arg. 656
228 */ 657 newtotal = d->regions_total * 2;
229 sz = 1 << (bucket + 3); 658 newsize = newtotal * sizeof(struct region_info);
230#ifdef DEBUG 659 mask = newtotal - 1;
231 ASSERT(sz > 0); 660
232#else 661 p = MMAP(newsize);
233 if (sz <= 0) 662 if (p == MAP_FAILED)
234 return; 663 return 1;
235#endif 664
236 if (sz < pagesz) { 665 malloc_used += newsize;
237 amt = pagesz; 666 memset(p, 0, newsize);
238 nblks = amt / sz; 667 STATS_ZERO(d->inserts);
239 } else { 668 STATS_ZERO(d->insert_collisions);
240 amt = sz + pagesz; 669 for (i = 0; i < d->regions_total; i++) {
241 nblks = 1; 670 void *q = d->r[i].p;
671 if (q != NULL) {
672 size_t index = hash(q) & mask;
673 STATS_INC(d->inserts);
674 while (p[index].p != NULL) {
675 index = (index - 1) & mask;
676 STATS_INC(d->insert_collisions);
677 }
678 p[index] = d->r[i];
679 }
242 } 680 }
243 op = (union overhead *)sbrk(amt); 681 /* avoid pages containing meta info to end up in cache */
244 /* no more room! */ 682 if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
245 if ((long)op == -1) 683 wrterror("munmap", d->r);
246 return; 684 else
247 /* 685 malloc_used -= d->regions_total * sizeof(struct region_info);
248 * Add new memory allocated to that on 686 d->regions_free = d->regions_free + d->regions_total;
249 * free list for this hash bucket. 687 d->regions_total = newtotal;
250 */ 688 d->r = p;
251 nextf[bucket] = op; 689 return 0;
252 while (--nblks > 0) {
253 op->ov_next = (union overhead *)((caddr_t)op + sz);
254 op = (union overhead *)((caddr_t)op + sz);
255 }
256} 690}
257 691
258void 692static struct chunk_info *
259free(cp) 693alloc_chunk_info(struct dir_info *d, int bits)
260 void *cp; 694{
261{ 695 struct chunk_info *p;
262 register long size; 696 size_t size, count;
263 register union overhead *op; 697
264 698 if (bits == 0)
265 if (cp == NULL) 699 count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
266 return; 700 else
267 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 701 count = MALLOC_PAGESIZE >> bits;
268#ifdef DEBUG 702
269 ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ 703 size = howmany(count, MALLOC_BITS);
270#else 704 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
271 if (op->ov_magic != MAGIC) 705 size = ALIGN(size);
272 return; /* sanity */ 706
273#endif 707 if (LIST_EMPTY(&d->chunk_info_list[bits])) {
274#ifdef RCHECK 708 void *q;
275 ASSERT(op->ov_rmagic == RMAGIC); 709 int i;
276 ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); 710
277#endif 711 q = MMAP(MALLOC_PAGESIZE);
278 size = op->ov_index; 712 if (q == MAP_FAILED)
279 ASSERT(size < NBUCKETS); 713 return NULL;
280 op->ov_next = nextf[size]; /* also clobbers ov_magic */ 714 malloc_used += MALLOC_PAGESIZE;
281 nextf[size] = op; 715 count = MALLOC_PAGESIZE / size;
282#ifdef MSTATS 716 for (i = 0; i < count; i++, q += size)
283 nmalloc[size]--; 717 LIST_INSERT_HEAD(&d->chunk_info_list[bits],
718 (struct chunk_info *)q, entries);
719 }
720 p = LIST_FIRST(&d->chunk_info_list[bits]);
721 LIST_REMOVE(p, entries);
722 memset(p, 0, size);
723 p->canary = d->canary1;
724 return p;
725}
726
727static int
728insert(struct dir_info *d, void *p, size_t sz, void *f)
729{
730 size_t index;
731 size_t mask;
732 void *q;
733
734 if (d->regions_free * 4 < d->regions_total) {
735 if (omalloc_grow(d))
736 return 1;
737 }
738 mask = d->regions_total - 1;
739 index = hash(p) & mask;
740 q = d->r[index].p;
741 STATS_INC(d->inserts);
742 while (q != NULL) {
743 index = (index - 1) & mask;
744 q = d->r[index].p;
745 STATS_INC(d->insert_collisions);
746 }
747 d->r[index].p = p;
748 d->r[index].size = sz;
749#ifdef MALLOC_STATS
750 d->r[index].f = f;
284#endif 751#endif
752 d->regions_free--;
753 return 0;
285} 754}
286 755
756static struct region_info *
757find(struct dir_info *d, void *p)
758{
759 size_t index;
760 size_t mask = d->regions_total - 1;
761 void *q, *r;
762
763 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
764 d->canary1 != ~d->canary2)
765 wrterror("internal struct corrupt", NULL);
766 p = MASK_POINTER(p);
767 index = hash(p) & mask;
768 r = d->r[index].p;
769 q = MASK_POINTER(r);
770 STATS_INC(d->finds);
771 while (q != p && r != NULL) {
772 index = (index - 1) & mask;
773 r = d->r[index].p;
774 q = MASK_POINTER(r);
775 STATS_INC(d->find_collisions);
776 }
777 return q == p ? &d->r[index] : NULL;
778}
779
780static void
781delete(struct dir_info *d, struct region_info *ri)
782{
783 /* algorithm R, Knuth Vol III section 6.4 */
784 size_t mask = d->regions_total - 1;
785 size_t i, j, r;
786
787 if (d->regions_total & (d->regions_total - 1))
788 wrterror("regions_total not 2^x", NULL);
789 d->regions_free++;
790 STATS_INC(g_pool->deletes);
791
792 i = ri - d->r;
793 for (;;) {
794 d->r[i].p = NULL;
795 d->r[i].size = 0;
796 j = i;
797 for (;;) {
798 i = (i - 1) & mask;
799 if (d->r[i].p == NULL)
800 return;
801 r = hash(d->r[i].p) & mask;
802 if ((i <= r && r < j) || (r < j && j < i) ||
803 (j < i && i <= r))
804 continue;
805 d->r[j] = d->r[i];
806 STATS_INC(g_pool->delete_moves);
807 break;
808 }
809
810 }
811}
812
287/* 813/*
288 * When a program attempts "storage compaction" as mentioned in the 814 * Allocate a page of chunks
289 * old malloc man page, it realloc's an already freed block. Usually
290 * this is the last block it freed; occasionally it might be farther
291 * back. We have to search all the free lists for the block in order
292 * to determine its bucket: 1st we make one pass thru the lists
293 * checking only the first block in each; if that fails we search
294 * ``realloc_srchlen'' blocks in each list for a match (the variable
295 * is extern so the caller can modify it). If that fails we just copy
296 * however many bytes was given to realloc() and hope it's not huge.
297 */ 815 */
298int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ 816static struct chunk_info *
817omalloc_make_chunks(struct dir_info *d, int bits)
818{
819 struct chunk_info *bp;
820 void *pp;
821 int i, k;
299 822
300void * 823 /* Allocate a new bucket */
301realloc(cp, nbytes) 824 pp = map(d, MALLOC_PAGESIZE, 0);
302 void *cp; 825 if (pp == MAP_FAILED)
303 size_t nbytes; 826 return NULL;
304{ 827
305 register u_long onb; 828 bp = alloc_chunk_info(d, bits);
306 register long i; 829 if (bp == NULL) {
307 union overhead *op; 830 unmap(d, pp, MALLOC_PAGESIZE);
308 char *res; 831 return NULL;
309 int was_alloced = 0; 832 }
310 833
311 if (cp == NULL) 834 /* memory protect the page allocated in the malloc(0) case */
312 return (malloc(nbytes)); 835 if (bits == 0) {
313 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 836 bp->size = 0;
314 if (op->ov_magic == MAGIC) { 837 bp->shift = 1;
315 was_alloced++; 838 i = MALLOC_MINSIZE - 1;
316 i = op->ov_index; 839 while (i >>= 1)
317 } else { 840 bp->shift++;
318 /* 841 bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
319 * Already free, doing "compaction". 842 bp->page = pp;
320 * 843
321 * Search for the old block of memory on the 844 k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
322 * free list. First, check the most common 845 if (k < 0) {
323 * case (last element free'd), then (this failing) 846 unmap(d, pp, MALLOC_PAGESIZE);
324 * the last ``realloc_srchlen'' items free'd. 847 LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
325 * If all lookups fail, then assume the size of 848 return NULL;
326 * the memory block being realloc'd is the
327 * largest possible (so that all "nbytes" of new
328 * memory are copied into). Note that this could cause
329 * a memory fault if the old area was tiny, and the moon
330 * is gibbous. However, that is very unlikely.
331 */
332 if ((i = findbucket(op, 1)) < 0 &&
333 (i = findbucket(op, realloc_srchlen)) < 0)
334 i = NBUCKETS;
335 }
336 onb = 1 << (i + 3);
337 if (onb < pagesz)
338 onb -= sizeof (*op) + RSLOP;
339 else
340 onb += pagesz - sizeof (*op) - RSLOP;
341 /* avoid the copy if same size block */
342 if (was_alloced) {
343 if (i) {
344 i = 1 << (i + 2);
345 if (i < pagesz)
346 i -= sizeof (*op) + RSLOP;
347 else
348 i += pagesz - sizeof (*op) - RSLOP;
349 } 849 }
350 if (nbytes <= onb && nbytes > i) { 850 } else {
351#ifdef RCHECK 851 bp->size = 1U << bits;
352 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 852 bp->shift = bits;
353 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 853 bp->total = bp->free = MALLOC_PAGESIZE >> bits;
354#endif 854 bp->page = pp;
355 return(cp);
356 } else
357 free(cp);
358 } 855 }
359 if ((res = malloc(nbytes)) == NULL) 856
360 return (NULL); 857 /* set all valid bits in the bitmap */
361 if (cp != res) /* common optimization if "compacting" */ 858 k = bp->total;
362 bcopy(cp, res, (nbytes < onb) ? nbytes : onb); 859 i = 0;
363 return (res); 860
861 /* Do a bunch at a time */
862 for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
863 bp->bits[i / MALLOC_BITS] = (u_short)~0U;
864
865 for (; i < k; i++)
866 bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
867
868 LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
869
870 bits++;
871 if ((uintptr_t)pp & bits)
872 wrterror("pp & bits", pp);
873
874 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
875 return bp;
364} 876}
365 877
878
366/* 879/*
367 * Search ``srchlen'' elements of each free list for a block whose 880 * Allocate a chunk
368 * header starts at ``freep''. If srchlen is -1 search the whole list.
369 * Return bucket number, or -1 if not found.
370 */ 881 */
371static 882static void *
372findbucket(freep, srchlen) 883malloc_bytes(struct dir_info *d, size_t size, void *f)
373 union overhead *freep;
374 int srchlen;
375{ 884{
376 register union overhead *p; 885 int i, j;
377 register int i, j; 886 size_t k;
887 u_short u, *lp;
888 struct chunk_info *bp;
889
890 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
891 d->canary1 != ~d->canary2)
892 wrterror("internal struct corrupt", NULL);
893 /* Don't bother with anything less than this */
894 /* unless we have a malloc(0) requests */
895 if (size != 0 && size < MALLOC_MINSIZE)
896 size = MALLOC_MINSIZE;
378 897
379 for (i = 0; i < NBUCKETS; i++) { 898 /* Find the right bucket */
899 if (size == 0)
380 j = 0; 900 j = 0;
381 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { 901 else {
382 if (p == freep) 902 j = MALLOC_MINSHIFT;
383 return (i); 903 i = (size - 1) >> (MALLOC_MINSHIFT - 1);
904 while (i >>= 1)
384 j++; 905 j++;
906 }
907
908 /* If it's empty, make a page more of that size chunks */
909 if (LIST_EMPTY(&d->chunk_dir[j])) {
910 bp = omalloc_make_chunks(d, j);
911 if (bp == NULL)
912 return NULL;
913 } else
914 bp = LIST_FIRST(&d->chunk_dir[j]);
915
916 if (bp->canary != d->canary1)
917 wrterror("chunk info corrupted", NULL);
918
919 i = d->chunk_start;
920 if (bp->free > 1)
921 i += getrnibble();
922 if (i >= bp->total)
923 i &= bp->total - 1;
924 for (;;) {
925 for (;;) {
926 lp = &bp->bits[i / MALLOC_BITS];
927 if (!*lp) {
928 i += MALLOC_BITS;
929 i &= ~(MALLOC_BITS - 1);
930 if (i >= bp->total)
931 i = 0;
932 } else
933 break;
934 }
935 k = i % MALLOC_BITS;
936 u = 1 << k;
937 if (*lp & u)
938 break;
939 if (++i >= bp->total)
940 i = 0;
941 }
942 d->chunk_start += i + 1;
943#ifdef MALLOC_STATS
944 if (i == 0) {
945 struct region_info *r = find(d, bp->page);
946 r->f = f;
947 }
948#endif
949
950 *lp ^= u;
951
952 /* If there are no more free, remove from free-list */
953 if (!--bp->free)
954 LIST_REMOVE(bp, entries);
955
956 /* Adjust to the real offset of that chunk */
957 k += (lp - bp->bits) * MALLOC_BITS;
958 k <<= bp->shift;
959
960 if (mopts.malloc_junk && bp->size > 0)
961 memset((char *)bp->page + k, SOME_JUNK, bp->size);
962 return ((char *)bp->page + k);
963}
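malloc_bytes() above rounds a request up to the smallest power-of-two chunk size that fits, starting at MALLOC_MINSIZE: bucket j is the shift, so 1..16 bytes land in bucket 4, 17..32 in bucket 5, 33..64 in bucket 6, and so on. A hypothetical helper with the same arithmetic, valid for size != 0:

static int
size_to_bucket(size_t size)
{
	int j = MALLOC_MINSHIFT;
	size_t i = (size - 1) >> (MALLOC_MINSHIFT - 1);

	while (i >>= 1)
		j++;
	return j;	/* chunk size is 1 << j */
}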
964
965
966/*
967 * Free a chunk, and possibly the page it's on, if the page becomes empty.
968 */
969static void
970free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
971{
972 struct chunk_head *mp;
973 struct chunk_info *info;
974 int i;
975
976 info = (struct chunk_info *)r->size;
977 if (info->canary != d->canary1)
978 wrterror("chunk info corrupted", NULL);
979
980 /* Find the chunk number on the page */
981 i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
982
983 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
984 wrterror("modified chunk-pointer", ptr);
985 return;
986 }
987 if (info->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) {
988 wrterror("chunk is already free", ptr);
989 return;
990 }
991
992 info->bits[i / MALLOC_BITS] |= 1U << (i % MALLOC_BITS);
993 info->free++;
994
995 if (info->size != 0)
996 mp = d->chunk_dir + info->shift;
997 else
998 mp = d->chunk_dir;
999
1000 if (info->free == 1) {
1001 /* Page became non-full */
1002 LIST_INSERT_HEAD(mp, info, entries);
1003 return;
1004 }
1005 if (info->free != info->total)
1006 return;
1007
1008 LIST_REMOVE(info, entries);
1009
1010 if (info->size == 0 && !mopts.malloc_freeprot)
1011 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1012 unmap(d, info->page, MALLOC_PAGESIZE);
1013
1014 delete(d, r);
1015 if (info->size != 0)
1016 mp = &d->chunk_info_list[info->shift];
1017 else
1018 mp = &d->chunk_info_list[0];
1019 LIST_INSERT_HEAD(mp, info, entries);
1020}
1021
1022
1023
1024static void *
1025omalloc(size_t sz, int zero_fill, void *f)
1026{
1027 void *p;
1028 size_t psz;
1029
1030 if (sz > MALLOC_MAXCHUNK) {
1031 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1032 errno = ENOMEM;
1033 return NULL;
1034 }
1035 sz += mopts.malloc_guard;
1036 psz = PAGEROUND(sz);
1037 p = map(g_pool, psz, zero_fill);
1038 if (p == MAP_FAILED) {
1039 errno = ENOMEM;
1040 return NULL;
1041 }
1042 if (insert(g_pool, p, sz, f)) {
1043 unmap(g_pool, p, psz);
1044 errno = ENOMEM;
1045 return NULL;
385 } 1046 }
1047 if (mopts.malloc_guard) {
1048 if (mprotect((char *)p + psz - mopts.malloc_guard,
1049 mopts.malloc_guard, PROT_NONE))
1050 wrterror("mprotect", NULL);
1051 malloc_guarded += mopts.malloc_guard;
1052 }
1053
1054 if (mopts.malloc_move &&
1055 sz - mopts.malloc_guard < MALLOC_PAGESIZE -
1056 MALLOC_LEEWAY) {
1057 /* fill whole allocation */
1058 if (mopts.malloc_junk)
1059 memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1060 /* shift towards the end */
1061 p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
1062 (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
1063 /* fill zeros if needed and overwritten above */
1064 if (zero_fill && mopts.malloc_junk)
1065 memset(p, 0, sz - mopts.malloc_guard);
1066 } else {
1067 if (mopts.malloc_junk) {
1068 if (zero_fill)
1069 memset((char *)p + sz - mopts.malloc_guard,
1070 SOME_JUNK, psz - sz);
1071 else
1072 memset(p, SOME_JUNK,
1073 psz - mopts.malloc_guard);
1074 }
1075 }
1076
1077 } else {
1078 /* takes care of SOME_JUNK */
1079 p = malloc_bytes(g_pool, sz, f);
1080 if (zero_fill && p != NULL && sz > 0)
1081 memset(p, 0, sz);
386 } 1082 }
387 return (-1); 1083
1084 return p;
388} 1085}
389 1086
390#ifdef MSTATS
391/* 1087/*
392 * mstats - print out statistics about malloc 1088 * Common function for handling recursion. Only
393 * 1089 * print the error message once, to avoid making the problem
394 * Prints two lines of numbers, one showing the length of the free list 1090 * potentially worse.
395 * for each size category, the second showing the number of mallocs -
396 * frees for each size category.
397 */ 1091 */
398mstats(s) 1092static void
399 char *s; 1093malloc_recurse(void)
400{ 1094{
401 register int i, j; 1095 static int noprint;
402 register union overhead *p; 1096
403 int totfree = 0, 1097 if (noprint == 0) {
404 totused = 0; 1098 noprint = 1;
405 1099 wrterror("recursive call", NULL);
406 fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); 1100 }
407 for (i = 0; i < NBUCKETS; i++) { 1101 malloc_active--;
408 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) 1102 _MALLOC_UNLOCK();
409 ; 1103 errno = EDEADLK;
410 fprintf(stderr, " %d", j); 1104}
411 totfree += j * (1 << (i + 3)); 1105
412 } 1106static int
413 fprintf(stderr, "\nused:\t"); 1107malloc_init(void)
414 for (i = 0; i < NBUCKETS; i++) { 1108{
415 fprintf(stderr, " %d", nmalloc[i]); 1109 if (omalloc_init(&g_pool)) {
416 totused += nmalloc[i] * (1 << (i + 3)); 1110 _MALLOC_UNLOCK();
417 } 1111 if (mopts.malloc_xmalloc)
418 fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", 1112 wrterror("out of memory", NULL);
419 totused, totfree); 1113 errno = ENOMEM;
1114 return -1;
1115 }
1116 return 0;
420} 1117}
1118
1119void *
1120malloc(size_t size)
1121{
1122 void *r;
1123 int saved_errno = errno;
1124
1125 _MALLOC_LOCK();
1126 malloc_func = " in malloc():";
1127 if (g_pool == NULL) {
1128 if (malloc_init() != 0)
1129 return NULL;
1130 }
1131 if (malloc_active++) {
1132 malloc_recurse();
1133 return NULL;
1134 }
1135 r = omalloc(size, mopts.malloc_zero, CALLER);
1136 malloc_active--;
1137 _MALLOC_UNLOCK();
1138 if (r == NULL && mopts.malloc_xmalloc) {
1139 wrterror("out of memory", NULL);
1140 errno = ENOMEM;
1141 }
1142 if (r != NULL)
1143 errno = saved_errno;
1144 return r;
1145}
1146
1147static void
1148ofree(void *p)
1149{
1150 struct region_info *r;
1151 size_t sz;
1152
1153 r = find(g_pool, p);
1154 if (r == NULL) {
1155 wrterror("bogus pointer (double free?)", p);
1156 return;
1157 }
1158 REALSIZE(sz, r);
1159 if (sz > MALLOC_MAXCHUNK) {
1160 if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1161 MALLOC_LEEWAY) {
1162 if (r->p != p) {
1163 wrterror("bogus pointer", p);
1164 return;
1165 }
1166 } else {
1167#if notyetbecause_of_realloc
1168 /* shifted towards the end */
1169 if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1170 MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1171 ~(MALLOC_MINSIZE-1))) {
1172 }
421#endif 1173#endif
1174 p = r->p;
1175 }
1176 if (mopts.malloc_guard) {
1177 if (sz < mopts.malloc_guard)
1178 wrterror("guard size", NULL);
1179 if (!mopts.malloc_freeprot) {
1180 if (mprotect((char *)p + PAGEROUND(sz) -
1181 mopts.malloc_guard, mopts.malloc_guard,
1182 PROT_READ | PROT_WRITE))
1183 wrterror("mprotect", NULL);
1184 }
1185 malloc_guarded -= mopts.malloc_guard;
1186 }
1187 if (mopts.malloc_junk && !mopts.malloc_freeprot)
1188 memset(p, SOME_FREEJUNK,
1189 PAGEROUND(sz) - mopts.malloc_guard);
1190 unmap(g_pool, p, PAGEROUND(sz));
1191 delete(g_pool, r);
1192 } else {
1193 void *tmp;
1194 int i;
1195
1196 if (mopts.malloc_junk && sz > 0)
1197 memset(p, SOME_FREEJUNK, sz);
1198 if (!mopts.malloc_freeprot) {
1199 i = getrnibble();
1200 tmp = p;
1201 p = g_pool->delayed_chunks[i];
1202 g_pool->delayed_chunks[i] = tmp;
1203 }
1204 if (p != NULL) {
1205 r = find(g_pool, p);
1206 if (r == NULL) {
1207 wrterror("bogus pointer (double free?)", p);
1208 return;
1209 }
1210 free_bytes(g_pool, r, p);
1211 }
1212 }
1213}
1214
1215void
1216free(void *ptr)
1217{
1218 int saved_errno = errno;
1219
1220 /* This is legal. */
1221 if (ptr == NULL)
1222 return;
1223
1224 _MALLOC_LOCK();
1225 malloc_func = " in free():";
1226 if (g_pool == NULL) {
1227 _MALLOC_UNLOCK();
1228 wrterror("free() called before allocation", NULL);
1229 return;
1230 }
1231 if (malloc_active++) {
1232 malloc_recurse();
1233 return;
1234 }
1235 ofree(ptr);
1236 malloc_active--;
1237 _MALLOC_UNLOCK();
1238 errno = saved_errno;
1239}
1240
1241
1242static void *
1243orealloc(void *p, size_t newsz, void *f)
1244{
1245 struct region_info *r;
1246 size_t oldsz, goldsz, gnewsz;
1247 void *q;
1248
1249 if (p == NULL)
1250 return omalloc(newsz, 0, f);
1251
1252 r = find(g_pool, p);
1253 if (r == NULL) {
1254 wrterror("bogus pointer (double free?)", p);
1255 return NULL;
1256 }
1257 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1258 errno = ENOMEM;
1259 return NULL;
1260 }
1261
1262 REALSIZE(oldsz, r);
1263 goldsz = oldsz;
1264 if (oldsz > MALLOC_MAXCHUNK) {
1265 if (oldsz < mopts.malloc_guard)
1266 wrterror("guard size", NULL);
1267 oldsz -= mopts.malloc_guard;
1268 }
1269
1270 gnewsz = newsz;
1271 if (gnewsz > MALLOC_MAXCHUNK)
1272 gnewsz += mopts.malloc_guard;
1273
1274 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1275 !mopts.malloc_realloc) {
1276 size_t roldsz = PAGEROUND(goldsz);
1277 size_t rnewsz = PAGEROUND(gnewsz);
1278
1279 if (rnewsz > roldsz) {
1280 if (!mopts.malloc_guard) {
1281 STATS_INC(g_pool->cheap_realloc_tries);
1282 zapcacheregion(g_pool, (char *)p + roldsz);
1283 q = MMAPA((char *)p + roldsz, rnewsz - roldsz);
1284 if (q == (char *)p + roldsz) {
1285 malloc_used += rnewsz - roldsz;
1286 if (mopts.malloc_junk)
1287 memset(q, SOME_JUNK,
1288 rnewsz - roldsz);
1289 r->size = newsz;
1290 STATS_SETF(r, f);
1291 STATS_INC(g_pool->cheap_reallocs);
1292 return p;
1293 } else if (q != MAP_FAILED)
1294 munmap(q, rnewsz - roldsz);
1295 }
1296 } else if (rnewsz < roldsz) {
1297 if (mopts.malloc_guard) {
1298 if (mprotect((char *)p + roldsz -
1299 mopts.malloc_guard, mopts.malloc_guard,
1300 PROT_READ | PROT_WRITE))
1301 wrterror("mprotect", NULL);
1302 if (mprotect((char *)p + rnewsz -
1303 mopts.malloc_guard, mopts.malloc_guard,
1304 PROT_NONE))
1305 wrterror("mprotect", NULL);
1306 }
1307 unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1308 r->size = gnewsz;
1309 STATS_SETF(r, f);
1310 return p;
1311 } else {
1312 if (newsz > oldsz && mopts.malloc_junk)
1313 memset((char *)p + newsz, SOME_JUNK,
1314 rnewsz - mopts.malloc_guard - newsz);
1315 r->size = gnewsz;
1316 STATS_SETF(r, f);
1317 return p;
1318 }
1319 }
1320 if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
1321 if (mopts.malloc_junk && newsz > 0)
1322 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1323 STATS_SETF(r, f);
1324 return p;
1325 } else if (newsz != oldsz || mopts.malloc_realloc) {
1326 q = omalloc(newsz, 0, f);
1327 if (q == NULL)
1328 return NULL;
1329 if (newsz != 0 && oldsz != 0)
1330 memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1331 ofree(p);
1332 return q;
1333 } else {
1334 STATS_SETF(r, f);
1335 return p;
1336 }
1337}
1338
1339void *
1340realloc(void *ptr, size_t size)
1341{
1342 void *r;
1343 int saved_errno = errno;
1344
1345 _MALLOC_LOCK();
1346 malloc_func = " in realloc():";
1347 if (g_pool == NULL) {
1348 if (malloc_init() != 0)
1349 return NULL;
1350 }
1351 if (malloc_active++) {
1352 malloc_recurse();
1353 return NULL;
1354 }
1355 r = orealloc(ptr, size, CALLER);
1356
1357 malloc_active--;
1358 _MALLOC_UNLOCK();
1359 if (r == NULL && mopts.malloc_xmalloc) {
1360 wrterror("out of memory", NULL);
1361 errno = ENOMEM;
1362 }
1363 if (r != NULL)
1364 errno = saved_errno;
1365 return r;
1366}
1367
1368
1369#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
1370
1371void *
1372calloc(size_t nmemb, size_t size)
1373{
1374 void *r;
1375 int saved_errno = errno;
1376
1377 _MALLOC_LOCK();
1378 malloc_func = " in calloc():";
1379 if (g_pool == NULL) {
1380 if (malloc_init() != 0)
1381 return NULL;
1382 }
1383 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1384 nmemb > 0 && SIZE_MAX / nmemb < size) {
1385 _MALLOC_UNLOCK();
1386 if (mopts.malloc_xmalloc)
1387 wrterror("out of memory", NULL);
1388 errno = ENOMEM;
1389 return NULL;
1390 }
1391
1392 if (malloc_active++) {
1393 malloc_recurse();
1394 return NULL;
1395 }
1396
1397 size *= nmemb;
1398 r = omalloc(size, 1, CALLER);
1399
1400 malloc_active--;
1401 _MALLOC_UNLOCK();
1402 if (r == NULL && mopts.malloc_xmalloc) {
1403 wrterror("out of memory", NULL);
1404 errno = ENOMEM;
1405 }
1406 if (r != NULL)
1407 errno = saved_errno;
1408 return r;
1409}
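The guard in calloc() above avoids the division in the common case: the product nmemb * size can only overflow size_t when at least one operand is >= MUL_NO_OVERFLOW (2^32 on LP64, 2^16 on 32-bit), so SIZE_MAX / nmemb is only consulted then. The same test as a hypothetical standalone predicate:

static int
mul_overflows(size_t nmemb, size_t size)
{
	if (nmemb < MUL_NO_OVERFLOW && size < MUL_NO_OVERFLOW)
		return 0;	/* both operands small enough: product cannot wrap */
	return nmemb > 0 && SIZE_MAX / nmemb < size;
}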
1410
1411int
1412posix_memalign(void **memptr, size_t alignment, size_t size)
1413{
1414 void *result;
1415
1416 /* Make sure that alignment is a large enough power of 2. */
1417 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1418 alignment > MALLOC_PAGESIZE)
1419 return EINVAL;
1420
1421 /*
1422 * max(size, alignment) is enough to assure the requested alignment,
1423 * since the allocator always allocates power-of-two blocks.
1424 */
1425 if (size < alignment)
1426 size = alignment;
1427 result = malloc(size);
1428
1429 if (result == NULL)
1430 return ENOMEM;
1431
1432 *memptr = result;
1433 return 0;
1434}
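A usage sketch for posix_memalign() as implemented above: alignment must be a power of two, at least sizeof(void *) and at most MALLOC_PAGESIZE here, and the error comes back as the return value rather than through errno (the function and buffer names below are illustrative only):

#include <err.h>
#include <stdlib.h>

static void *
alloc_cacheline_aligned(void)
{
	void *buf;
	int rc;

	rc = posix_memalign(&buf, 64, 4096);	/* 64-byte alignment, 4 KB buffer */
	if (rc != 0)
		errx(1, "posix_memalign: error %d", rc);	/* EINVAL or ENOMEM */
	return buf;
}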
1435
1436#ifdef MALLOC_STATS
1437
1438struct malloc_leak {
1439 void (*f)();
1440 size_t total_size;
1441 int count;
1442};
1443
1444struct leaknode {
1445 RB_ENTRY(leaknode) entry;
1446 struct malloc_leak d;
1447};
1448
1449static int
1450leakcmp(struct leaknode *e1, struct leaknode *e2)
1451{
1452 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
1453}
1454
1455static RB_HEAD(leaktree, leaknode) leakhead;
1456RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)
1457
1458static void
1459putleakinfo(void *f, size_t sz, int cnt)
1460{
1461 struct leaknode key, *p;
1462 static struct leaknode *page;
1463 static int used;
1464
1465 if (cnt == 0)
1466 return;
1467
1468 key.d.f = f;
1469 p = RB_FIND(leaktree, &leakhead, &key);
1470 if (p == NULL) {
1471 if (page == NULL ||
1472 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
1473 page = MMAP(MALLOC_PAGESIZE);
1474 if (page == MAP_FAILED)
1475 return;
1476 used = 0;
1477 }
1478 p = &page[used++];
1479 p->d.f = f;
1480 p->d.total_size = sz * cnt;
1481 p->d.count = cnt;
1482 RB_INSERT(leaktree, &leakhead, p);
1483 } else {
1484 p->d.total_size += sz * cnt;
1485 p->d.count += cnt;
1486 }
1487}
1488
1489static struct malloc_leak *malloc_leaks;
1490
1491static void
1492dump_leaks(int fd)
1493{
1494 struct leaknode *p;
1495 char buf[64];
1496 int i = 0;
1497
1498 snprintf(buf, sizeof(buf), "Leak report\n");
1499 write(fd, buf, strlen(buf));
1500 snprintf(buf, sizeof(buf), " f sum # avg\n");
1501 write(fd, buf, strlen(buf));
1502 /* XXX only one page of summary */
1503 if (malloc_leaks == NULL)
1504 malloc_leaks = MMAP(MALLOC_PAGESIZE);
1505 if (malloc_leaks != MAP_FAILED)
1506 memset(malloc_leaks, 0, MALLOC_PAGESIZE);
1507 RB_FOREACH(p, leaktree, &leakhead) {
1508 snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f,
1509 p->d.total_size, p->d.count, p->d.total_size / p->d.count);
1510 write(fd, buf, strlen(buf));
1511 if (malloc_leaks == MAP_FAILED ||
1512 i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
1513 continue;
1514 malloc_leaks[i].f = p->d.f;
1515 malloc_leaks[i].total_size = p->d.total_size;
1516 malloc_leaks[i].count = p->d.count;
1517 i++;
1518 }
1519}
1520
1521static void
1522dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
1523{
1524 char buf[64];
1525
1526 while (p != NULL) {
1527 snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n",
1528 p->page, ((p->bits[0] & 1) ? NULL : f),
1529 p->size, p->free, p->total);
1530 write(fd, buf, strlen(buf));
1531 if (!fromfreelist) {
1532 if (p->bits[0] & 1)
1533 putleakinfo(NULL, p->size, p->total - p->free);
1534 else {
1535 putleakinfo(f, p->size, 1);
1536 putleakinfo(NULL, p->size,
1537 p->total - p->free - 1);
1538 }
1539 break;
1540 }
1541 p = LIST_NEXT(p, entries);
1542 if (p != NULL) {
1543 snprintf(buf, sizeof(buf), " ");
1544 write(fd, buf, strlen(buf));
1545 }
1546 }
1547}
1548
1549static void
1550dump_free_chunk_info(int fd, struct dir_info *d)
1551{
1552 char buf[64];
1553 int i, count;
1554
1555 snprintf(buf, sizeof(buf), "Free chunk structs:\n");
1556 write(fd, buf, strlen(buf));
1557 for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
1558 struct chunk_info *p;
1559
1560 count = 0;
1561 LIST_FOREACH(p, &d->chunk_info_list[i], entries)
1562 count++;
1563 p = LIST_FIRST(&d->chunk_dir[i]);
1564 if (p == NULL && count == 0)
1565 continue;
1566 snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
1567 write(fd, buf, strlen(buf));
1568 if (p != NULL)
1569 dump_chunk(fd, p, NULL, 1);
1570 else
1571 write(fd, "\n", 1);
1572 }
1573
1574}
1575
1576static void
1577dump_free_page_info(int fd, struct dir_info *d)
1578{
1579 char buf[64];
1580 int i;
1581
1582 snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
1583 d->free_regions_size);
1584 write(fd, buf, strlen(buf));
1585 for (i = 0; i < mopts.malloc_cache; i++) {
1586 if (d->free_regions[i].p != NULL) {
1587 snprintf(buf, sizeof(buf), "%2d) ", i);
1588 write(fd, buf, strlen(buf));
1589 snprintf(buf, sizeof(buf), "free at %p: %zu\n",
1590 d->free_regions[i].p, d->free_regions[i].size);
1591 write(fd, buf, strlen(buf));
1592 }
1593 }
1594}
1595
1596static void
1597malloc_dump1(int fd, struct dir_info *d)
1598{
1599 char buf[64];
1600 size_t i, realsize;
1601
1602 snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
1603 write(fd, buf, strlen(buf));
1604 if (d == NULL)
1605 return;
1606 snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
1607 d->regions_free, d->regions_total);
1608 write(fd, buf, strlen(buf));
1609 snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
1610 d->find_collisions);
1611 write(fd, buf, strlen(buf));
1612 snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
1613 d->insert_collisions);
1614 write(fd, buf, strlen(buf));
1615 snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
1616 d->delete_moves);
1617 write(fd, buf, strlen(buf));
1618 snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
1619 d->cheap_reallocs, d->cheap_realloc_tries);
1620 write(fd, buf, strlen(buf));
1621 dump_free_chunk_info(fd, d);
1622 dump_free_page_info(fd, d);
1623 snprintf(buf, sizeof(buf),
1624 "slot) hash d type page f size [free/n]\n");
1625 write(fd, buf, strlen(buf));
1626 for (i = 0; i < d->regions_total; i++) {
1627 if (d->r[i].p != NULL) {
1628 size_t h = hash(d->r[i].p) &
1629 (d->regions_total - 1);
1630 snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
1631 i, h, h - i);
1632 write(fd, buf, strlen(buf));
1633 REALSIZE(realsize, &d->r[i]);
1634 if (realsize > MALLOC_MAXCHUNK) {
1635 putleakinfo(d->r[i].f, realsize, 1);
1636 snprintf(buf, sizeof(buf),
1637 "pages %12p %12p %zu\n", d->r[i].p,
1638 d->r[i].f, realsize);
1639 write(fd, buf, strlen(buf));
1640 } else
1641 dump_chunk(fd,
1642 (struct chunk_info *)d->r[i].size,
1643 d->r[i].f, 0);
1644 }
1645 }
1646 snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
1647 write(fd, buf, strlen(buf));
1648 snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
1649 write(fd, buf, strlen(buf));
1650 dump_leaks(fd);
1651 write(fd, "\n", 1);
1652}
1653
1654void
1655malloc_dump(int fd)
1656{
1657 int i;
1658 void *p;
1659 struct region_info *r;
1660 int saved_errno = errno;
1661
1662 for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) {
1663 p = g_pool->delayed_chunks[i];
1664 if (p == NULL)
1665 continue;
1666 r = find(g_pool, p);
1667 if (r == NULL)
1668 wrterror("bogus pointer in malloc_dump", p);
1669 free_bytes(g_pool, r, p);
1670 g_pool->delayed_chunks[i] = NULL;
1671 }
1672 /* XXX leak when run multiple times */
1673 RB_INIT(&leakhead);
1674 malloc_dump1(fd, g_pool);
1675 errno = saved_errno;
1676}
1677
1678static void
1679malloc_exit(void)
1680{
1681 static const char q[] = "malloc() warning: Couldn't dump stats\n";
1682 int save_errno = errno, fd;
1683
1684 fd = open("malloc.out", O_RDWR|O_APPEND);
1685 if (fd != -1) {
1686 malloc_dump(fd);
1687 close(fd);
1688 } else
1689 write(STDERR_FILENO, q, sizeof(q) - 1);
1690 errno = save_errno;
1691}
1692
1693#endif /* MALLOC_STATS */