path: root/src/lib/libc/stdlib/malloc.c
Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--   src/lib/libc/stdlib/malloc.c | 1768
1 file changed, 1422 insertions, 346 deletions
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3c57fad024..2ce545c5fa 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,421 +1,1497 @@
+/*	$OpenBSD: malloc.c,v 1.115 2009/01/03 12:58:28 djm Exp $	*/
 /*
- * Copyright (c) 1983 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    This product includes software developed by the University of
- *    California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

-#if defined(LIBC_SCCS) && !defined(lint)
-/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/
-static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $";
-#endif /* LIBC_SCCS and not lint */
-
 /*
- * malloc.c (Caltech) 2/21/82
- * Chris Kingsley, kingsley@cit-20.
- *
- * This is a very fast storage allocator.  It allocates blocks of a small
- * number of different sizes, and keeps free lists of each size.  Blocks that
- * don't exactly fit are passed up to the next larger size.  In this
- * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
- * This is designed for use in a virtual memory environment.
+ * Parts of this code, mainly the sub page sized chunk management code is
+ * derived from the malloc implementation with the following license:
+ */
+/*
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
  */

31/* #define MALLOC_STATS */
32
 #include <sys/types.h>
+#include <sys/param.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <errno.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stdio.h>
 #include <unistd.h>

-#define	NULL 0
+#ifdef MALLOC_STATS
+#include <fcntl.h>
+#endif
+
+#include "thread_private.h"
+
50#define MALLOC_MINSHIFT 4
51#define MALLOC_MAXSHIFT 16
52
53#if defined(__sparc__) && !defined(__sparcv9__)
54#define MALLOC_PAGESHIFT (13U)
55#else
56#define MALLOC_PAGESHIFT (PGSHIFT)
57#endif
58
59#define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT)
60#define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT)
61#define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
62#define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
63
64#define MALLOC_MAXCHUNK (1 << (MALLOC_PAGESHIFT-1))
65#define MALLOC_MAXCACHE 256
66#define MALLOC_DELAYED_CHUNKS 16 /* should be power of 2 */
67/*
68 * When the P option is active, we move allocations between half a page
69 * and a whole page towards the end, subject to alignment constraints.
70 * This is the extra headroom we allow. Set to zero to be the most
71 * strict.
72 */
73#define MALLOC_LEEWAY 0
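A minimal sketch of that end-of-page placement rule (the helper name is illustrative, not part of the source), assuming the same MALLOC_PAGESIZE/MALLOC_MINSIZE/MALLOC_LEEWAY constants defined above:

/*
 * Sketch (illustration only): with option 'P' a page-or-smaller
 * allocation of sz bytes is pushed towards the end of its page,
 * aligned down to MALLOC_MINSIZE, keeping MALLOC_LEEWAY bytes of
 * headroom at the very end.
 */
static void *
move_towards_end(void *page, size_t sz)
{
	size_t off = (MALLOC_PAGESIZE - MALLOC_LEEWAY - sz) &
	    ~(MALLOC_MINSIZE - 1);

	return (char *)page + off;
}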

-static	void morecore();
-static	int findbucket();
+#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

 /*
- * The overhead on a block is at least 4 bytes.  When free, this space
- * contains a pointer to the next free block, and the bottom two bits must
- * be zero.  When in use, the first byte is set to MAGIC, and the second
- * byte is the size index.  The remaining bytes are for alignment.
- * If range checking is enabled then a second word holds the size of the
- * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
- * The order of elements is critical: ov_magic must overlay the low order
- * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ * What to use for Junk.  This is the byte value we use to fill with
+ * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
+ * and SOME_FREEJUNK right before free.
  */
-union	overhead {
-	union	overhead *ov_next;	/* when free */
-	struct {
-		u_char	ovu_magic;	/* magic number */
-		u_char	ovu_index;	/* bucket # */
-#ifdef RCHECK
-		u_short	ovu_rmagic;	/* range magic number */
-		u_long	ovu_size;	/* actual block size */
-#endif
-	} ovu;
-#define	ov_magic	ovu.ovu_magic
-#define	ov_index	ovu.ovu_index
-#define	ov_rmagic	ovu.ovu_rmagic
-#define	ov_size		ovu.ovu_size
-};
-
-#define	MAGIC		0xef		/* magic # on accounting info */
-#define	RMAGIC		0x5555		/* magic # on range info */
-
-#ifdef RCHECK
-#define	RSLOP		sizeof (u_short)
+#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
+#define SOME_FREEJUNK		0xdf
+
+#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+struct region_info {
+	void *p;		/* page; low bits used to mark chunks */
+	uintptr_t size;		/* size for pages, or chunk_info pointer */
+};
95
96struct dir_info {
97 u_int32_t canary1;
98 struct region_info *r; /* region slots */
99 size_t regions_total; /* number of region slots */
100 size_t regions_bits; /* log2 of total */
101 size_t regions_free; /* number of free slots */
102 /* list of free chunk info structs */
103 struct chunk_info *chunk_info_list;
104 /* lists of chunks with free slots */
105 struct chunk_info *chunk_dir[MALLOC_MAXSHIFT];
106 size_t free_regions_size; /* free pages cached */
107 /* free pages cache */
108 struct region_info free_regions[MALLOC_MAXCACHE];
109 /* delayed free chunk slots */
110 void *delayed_chunks[MALLOC_DELAYED_CHUNKS];
111#ifdef MALLOC_STATS
112 size_t inserts;
113 size_t insert_collisions;
114 size_t finds;
115 size_t find_collisions;
116 size_t deletes;
117 size_t delete_moves;
118 size_t cheap_realloc_tries;
119 size_t cheap_reallocs;
120#define STATS_INC(x) ((x)++)
121#define STATS_ZERO(x) ((x) = 0)
 #else
-#define	RSLOP		0
-#endif
+#define STATS_INC(x)	/* nothing */
+#define STATS_ZERO(x)	/* nothing */
+#endif /* MALLOC_STATS */
+	u_int32_t canary2;
+};
+#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
+			~MALLOC_PAGEMASK)

 /*
- * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
- * smallest allocatable block is 8 bytes.  The overhead information
- * precedes the data area returned to the user.
+ * This structure describes a page worth of chunks.
+ *
+ * How many bits per u_long in the bitmap
  */
-#define	NBUCKETS 30
-static	union overhead *nextf[NBUCKETS];
-extern	char *sbrk();
+#define MALLOC_BITS		(NBBY * sizeof(u_long))
+struct chunk_info {
+	struct chunk_info *next;	/* next on the free list */
+	void *page;			/* pointer to the page */
+	u_int32_t canary;
+	u_short size;			/* size of this page's chunks */
+	u_short shift;			/* how far to shift for this size */
+	u_short free;			/* how many free chunks */
+	u_short total;			/* how many chunk */
+	/* which chunks are free */
+	u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
+};

-static	int pagesz;			/* page size */
-static	int pagebucket;			/* page size bucket */
+struct malloc_readonly {
+	struct dir_info *g_pool;	/* Main bookkeeping information */
+	int	malloc_abort;		/* abort() on error */
+	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
+	int	malloc_hint;		/* call madvice on free pages? */
+	int	malloc_junk;		/* junk fill? */
+	int	malloc_move;		/* move allocations to end of page? */
+	int	malloc_realloc;		/* always realloc? */
+	int	malloc_xmalloc;		/* xmalloc behaviour? */
+	int	malloc_zero;		/* zero fill? */
+	size_t	malloc_guard;		/* use guard pages after allocations? */
+	u_int	malloc_cache;		/* free pages we cache */
+#ifdef MALLOC_STATS
+	int	malloc_stats;		/* dump statistics at end */
+#endif
+	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
+};

-#ifdef MSTATS
-/*
- * nmalloc[i] is the difference between the number of mallocs and frees
- * for a given block size.
+/* This object is mapped PROT_READ after initialisation to prevent tampering */
+static union {
+	struct malloc_readonly mopts;
+	u_char _pad[MALLOC_PAGESIZE];
+} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
172#define mopts malloc_readonly.mopts
173#define g_pool mopts.g_pool
174
175char *malloc_options; /* compile-time options */
176
177static char *malloc_func; /* current function */
178static int malloc_active; /* status of malloc */
179
180static size_t malloc_guarded; /* bytes used for guards */
181static size_t malloc_used; /* bytes allocated */
182
183static size_t rbytesused; /* random bytes used */
184static u_char rbytes[512]; /* random bytes */
185static u_char getrbyte(void);
186
187extern char *__progname;
188
189/* low bits of r->p determine size: 0 means >= page size and p->size holding
190 * real size, otherwise r->size is a shift count, or 1 for malloc(0)
  */
-static	u_int nmalloc[NBUCKETS];
-#include <stdio.h>
+#define REALSIZE(sz, r) 					\
+	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
+	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
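In other words, the low bits of the stored page pointer select one of three cases. A decoding sketch equivalent to REALSIZE() (the helper name is illustrative, not part of the source):

/*
 * Sketch (illustration only): what REALSIZE() computes for a
 * region_info entry r.
 */
static size_t
real_size_of(struct region_info *r)
{
	size_t bits = (uintptr_t)r->p & MALLOC_PAGEMASK;

	if (bits == 0)
		return r->size;			/* page-sized or larger */
	if (bits == 1)
		return 0;			/* malloc(0) */
	return (size_t)1 << (bits - 1);		/* chunk size class */
}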
195
196static inline size_t
197hash(void *p)
198{
199 size_t sum;
200 union {
201 uintptr_t p;
202 unsigned short a[sizeof(void *) / sizeof(short)];
203 } u;
204 u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
205 sum = u.a[0];
206 sum = (sum << 7) - sum + u.a[1];
207#ifdef __LP64__
208 sum = (sum << 7) - sum + u.a[2];
209 sum = (sum << 7) - sum + u.a[3];
114#endif 210#endif
211 return sum;
212}
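The hash itself is a small multiplicative hash: each (sum << 7) - sum step is a multiply by 127 over the 16-bit halves of the page-aligned address. An equivalent formulation as a sketch (the helper name is illustrative; the half ordering shown takes the little-endian view of the union):

/*
 * Sketch (illustration only): hash() written with an explicit
 * multiply; u.a[i] corresponds to (v >> (16 * i)) on little-endian.
 */
static size_t
hash_equivalent(void *p)
{
	uintptr_t v = (uintptr_t)p >> MALLOC_PAGESHIFT;
	size_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(uintptr_t) / sizeof(u_short); i++)
		sum = sum * 127 + (u_short)(v >> (16 * i));
	return sum;
}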
115 213
-#if defined(DEBUG) || defined(RCHECK)
-#define	ASSERT(p)   if (!(p)) botch("p")
-#include <stdio.h>
-static
-botch(s)
-	char *s;
+#ifdef MALLOC_STATS
+static void
+dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
+{
+	char buf[64];
+
220 while (p) {
221 snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
222 p->free, p->total, p->page);
223 write(fd, buf, strlen(buf));
224 if (!fromfreelist)
225 break;
226 p = p->next;
227 if (p != NULL) {
228 snprintf(buf, sizeof(buf), " ");
229 write(fd, buf, strlen(buf));
230 }
231 }
232}
233
234static void
235dump_free_chunk_info(int fd, struct dir_info *d)
 {
-	fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
-	(void) fflush(stderr);		/* just in case user buffered it */
-	abort();
+	char buf[64];
+	int i;
+
240 snprintf(buf, sizeof(buf), "Free chunk structs:\n");
241 write(fd, buf, strlen(buf));
242 for (i = 0; i < MALLOC_MAXSHIFT; i++) {
243 struct chunk_info *p = d->chunk_dir[i];
244 if (p != NULL) {
245 snprintf(buf, sizeof(buf), "%2d) ", i);
246 write(fd, buf, strlen(buf));
247 dump_chunk(fd, p, 1);
248 }
249 }
250
 }
-#else
-#define	ASSERT(p)
-#endif

-void *
-malloc(nbytes)
-	size_t nbytes;
+static void
+dump_free_page_info(int fd, struct dir_info *d)
+{
256 char buf[64];
257 int i;
258
259 snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
260 d->free_regions_size);
261 write(fd, buf, strlen(buf));
262 for (i = 0; i < mopts.malloc_cache; i++) {
263 if (d->free_regions[i].p != NULL) {
264 snprintf(buf, sizeof(buf), "%2d) ", i);
265 write(fd, buf, strlen(buf));
266 snprintf(buf, sizeof(buf), "free at %p: %zu\n",
267 d->free_regions[i].p, d->free_regions[i].size);
268 write(fd, buf, strlen(buf));
269 }
270 }
271}
272
273static void
274malloc_dump1(int fd, struct dir_info *d)
275{
276 char buf[64];
277 size_t i, realsize;
278
279 snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
280 write(fd, buf, strlen(buf));
281 if (d == NULL)
282 return;
283 snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
284 write(fd, buf, strlen(buf));
285 snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
286 d->find_collisions,
287 1.0 + (double)d->find_collisions / d->finds);
288 write(fd, buf, strlen(buf));
289 snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
290 d->insert_collisions,
291 1.0 + (double)d->insert_collisions / d->inserts);
292 write(fd, buf, strlen(buf));
293 snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
294 d->delete_moves);
295 write(fd, buf, strlen(buf));
296 snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
297 d->cheap_reallocs, d->cheap_realloc_tries);
298 write(fd, buf, strlen(buf));
299 snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
300 write(fd, buf, strlen(buf));
301 for (i = 0; i < d->regions_total; i++) {
302 if (d->r[i].p != NULL) {
303 size_t h = hash(d->r[i].p) &
304 (d->regions_total - 1);
305 snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
306 i, h, h - i);
307 write(fd, buf, strlen(buf));
308 REALSIZE(realsize, &d->r[i]);
309 if (realsize > MALLOC_MAXCHUNK) {
310 snprintf(buf, sizeof(buf),
311 "%p: %zu\n", d->r[i].p, realsize);
312 write(fd, buf, strlen(buf));
313 } else
314 dump_chunk(fd,
315 (struct chunk_info *)d->r[i].size, 0);
316 }
317 }
318 dump_free_chunk_info(fd, d);
319 dump_free_page_info(fd, d);
320 snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
321 write(fd, buf, strlen(buf));
322 snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
323 write(fd, buf, strlen(buf));
324}
325
326
327void
328malloc_dump(int fd)
329{
330 malloc_dump1(fd, g_pool);
331}
332
333static void
334malloc_exit(void)
335{
336 static const char q[] = "malloc() warning: Couldn't dump stats\n";
337 int save_errno = errno, fd;
338
339 fd = open("malloc.out", O_RDWR|O_APPEND);
340 if (fd != -1) {
341 malloc_dump(fd);
342 close(fd);
343 } else
344 write(STDERR_FILENO, q, sizeof(q) - 1);
345 errno = save_errno;
346}
347#endif /* MALLOC_STATS */
348
349
350
351static void
352wrterror(char *p)
353{
354 char *q = " error: ";
355 struct iovec iov[5];
356
357 iov[0].iov_base = __progname;
358 iov[0].iov_len = strlen(__progname);
359 iov[1].iov_base = malloc_func;
360 iov[1].iov_len = strlen(malloc_func);
361 iov[2].iov_base = q;
362 iov[2].iov_len = strlen(q);
363 iov[3].iov_base = p;
364 iov[3].iov_len = strlen(p);
365 iov[4].iov_base = "\n";
366 iov[4].iov_len = 1;
367 writev(STDERR_FILENO, iov, 5);
368
369#ifdef MALLOC_STATS
370 if (mopts.malloc_stats)
371 malloc_dump(STDERR_FILENO);
372#endif /* MALLOC_STATS */
373 //malloc_active--;
374 if (mopts.malloc_abort)
375 abort();
376}
377
378/*
379 * Cache maintenance. We keep at most malloc_cache pages cached.
380 * If the cache is becoming full, unmap pages in the cache for real,
381 * and then add the region to the cache
382 * Opposed to the regular region data structure, the sizes in the
383 * cache are in MALLOC_PAGESIZE units.
384 */
385static void
386unmap(struct dir_info *d, void *p, size_t sz)
387{
388 size_t psz = sz >> MALLOC_PAGESHIFT;
389 size_t rsz, tounmap;
390 struct region_info *r;
391 u_int i, offset;
392
393 if (sz != PAGEROUND(sz)) {
394 wrterror("munmap round");
395 return;
396 }
397
398 if (psz > mopts.malloc_cache) {
399 if (munmap(p, sz))
400 wrterror("munmap");
401 malloc_used -= sz;
402 return;
403 }
404 tounmap = 0;
405 rsz = mopts.malloc_cache - d->free_regions_size;
406 if (psz > rsz)
407 tounmap = psz - rsz;
408 offset = getrbyte();
409 for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
410 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
411 if (r->p != NULL) {
412 rsz = r->size << MALLOC_PAGESHIFT;
413 if (munmap(r->p, rsz))
414 wrterror("munmap");
415 r->p = NULL;
416 if (tounmap > r->size)
417 tounmap -= r->size;
418 else
419 tounmap = 0;
420 d->free_regions_size -= r->size;
421 r->size = 0;
422 malloc_used -= rsz;
423 }
424 }
425 if (tounmap > 0)
426 wrterror("malloc cache underflow");
427 for (i = 0; i < mopts.malloc_cache; i++) {
428 r = &d->free_regions[i];
429 if (r->p == NULL) {
430 if (mopts.malloc_hint)
431 madvise(p, sz, MADV_FREE);
432 if (mopts.malloc_freeprot)
433 mprotect(p, sz, PROT_NONE);
434 r->p = p;
435 r->size = psz;
436 d->free_regions_size += psz;
437 break;
438 }
439 }
440 if (i == mopts.malloc_cache)
441 wrterror("malloc free slot lost");
442 if (d->free_regions_size > mopts.malloc_cache)
443 wrterror("malloc cache overflow");
444}
445
446static void
447zapcacheregion(struct dir_info *d, void *p)
448{
449 u_int i;
450 struct region_info *r;
451 size_t rsz;
452
453 for (i = 0; i < mopts.malloc_cache; i++) {
454 r = &d->free_regions[i];
455 if (r->p == p) {
456 rsz = r->size << MALLOC_PAGESHIFT;
457 if (munmap(r->p, rsz))
458 wrterror("munmap");
459 r->p = NULL;
460 d->free_regions_size -= r->size;
461 r->size = 0;
462 malloc_used -= rsz;
463 }
464 }
465}
466
467static void *
468map(struct dir_info *d, size_t sz, int zero_fill)
469{
470 size_t psz = sz >> MALLOC_PAGESHIFT;
471 struct region_info *r, *big = NULL;
472 u_int i, offset;
473 void *p;
474
475 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
476 d->canary1 != ~d->canary2)
477 wrterror("internal struct corrupt");
478 if (sz != PAGEROUND(sz)) {
479 wrterror("map round");
480 return NULL;
481 }
482 if (psz > d->free_regions_size) {
483 p = MMAP(sz);
484 if (p != MAP_FAILED)
485 malloc_used += sz;
486 /* zero fill not needed */
487 return p;
488 }
489 offset = getrbyte();
490 for (i = 0; i < mopts.malloc_cache; i++) {
491 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
492 if (r->p != NULL) {
493 if (r->size == psz) {
494 p = r->p;
495 if (mopts.malloc_freeprot)
496 mprotect(p, sz, PROT_READ | PROT_WRITE);
497 if (mopts.malloc_hint)
498 madvise(p, sz, MADV_NORMAL);
499 r->p = NULL;
500 r->size = 0;
501 d->free_regions_size -= psz;
502 if (zero_fill)
503 memset(p, 0, sz);
504 else if (mopts.malloc_junk &&
505 mopts.malloc_freeprot)
506 memset(p, SOME_FREEJUNK, sz);
507 return p;
508 } else if (r->size > psz)
509 big = r;
510 }
511 }
512 if (big != NULL) {
513 r = big;
514 p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
515 if (mopts.malloc_freeprot)
516 mprotect(p, sz, PROT_READ | PROT_WRITE);
517 if (mopts.malloc_hint)
518 madvise(p, sz, MADV_NORMAL);
519 r->size -= psz;
520 d->free_regions_size -= psz;
521 if (zero_fill)
522 memset(p, 0, sz);
523 return p;
524 }
525 p = MMAP(sz);
526 if (p != MAP_FAILED)
527 malloc_used += sz;
528 if (d->free_regions_size > mopts.malloc_cache)
529 wrterror("malloc cache");
530 /* zero fill not needed */
531 return p;
532}
533
534static void
535rbytes_init(void)
536{
537 arc4random_buf(rbytes, sizeof(rbytes));
538 rbytesused = 0;
539}
540
541static u_char
542getrbyte(void)
543{
544 if (rbytesused >= sizeof(rbytes))
545 rbytes_init();
546 return rbytes[rbytesused++];
547}
548
549/*
550 * Initialize a dir_info, which should have been cleared by caller
551 */
552static int
553omalloc_init(struct dir_info **dp)
134{ 554{
135 register union overhead *op; 555 char *p, b[64];
136 register long bucket, n; 556 int i, j;
137 register unsigned amt; 557 size_t d_avail, regioninfo_size;
558 struct dir_info *d;
559
560 rbytes_init();
138 561
139 /* 562 /*
140 * First time malloc is called, setup page size and 563 * Default options
141 * align break pointer so all data will be page aligned.
142 */ 564 */
143 if (pagesz == 0) { 565 mopts.malloc_abort = 1;
144 pagesz = n = getpagesize(); 566 mopts.malloc_move = 1;
145 op = (union overhead *)sbrk(0); 567 mopts.malloc_cache = 64;
146 n = n - sizeof (*op) - ((long)op & (n - 1)); 568
147 if (n < 0) 569 for (i = 0; i < 3; i++) {
148 n += pagesz; 570 switch (i) {
149 if (n) { 571 case 0:
150 if (sbrk(n) == (char *)-1) 572 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
151 return (NULL); 573 if (j <= 0)
574 continue;
575 b[j] = '\0';
576 p = b;
577 break;
578 case 1:
579 if (issetugid() == 0)
580 p = getenv("MALLOC_OPTIONS");
581 else
582 continue;
583 break;
584 case 2:
585 p = malloc_options;
586 break;
587 default:
588 p = NULL;
152 } 589 }
153 bucket = 0; 590
154 amt = 8; 591 for (; p != NULL && *p != '\0'; p++) {
155 while (pagesz > amt) { 592 switch (*p) {
156 amt <<= 1; 593 case '>':
157 bucket++; 594 mopts.malloc_cache <<= 1;
595 if (mopts.malloc_cache > MALLOC_MAXCACHE)
596 mopts.malloc_cache = MALLOC_MAXCACHE;
597 break;
598 case '<':
599 mopts.malloc_cache >>= 1;
600 break;
601 case 'a':
602 mopts.malloc_abort = 0;
603 break;
604 case 'A':
605 mopts.malloc_abort = 1;
606 break;
607#ifdef MALLOC_STATS
608 case 'd':
609 mopts.malloc_stats = 0;
610 break;
611 case 'D':
612 mopts.malloc_stats = 1;
613 break;
614#endif /* MALLOC_STATS */
615 case 'f':
616 mopts.malloc_freeprot = 0;
617 break;
618 case 'F':
619 mopts.malloc_freeprot = 1;
620 break;
621 case 'g':
622 mopts.malloc_guard = 0;
623 break;
624 case 'G':
625 mopts.malloc_guard = MALLOC_PAGESIZE;
626 break;
627 case 'h':
628 mopts.malloc_hint = 0;
629 break;
630 case 'H':
631 mopts.malloc_hint = 1;
632 break;
633 case 'j':
634 mopts.malloc_junk = 0;
635 break;
636 case 'J':
637 mopts.malloc_junk = 1;
638 break;
639 case 'n':
640 case 'N':
641 break;
642 case 'p':
643 mopts.malloc_move = 0;
644 break;
645 case 'P':
646 mopts.malloc_move = 1;
647 break;
648 case 'r':
649 mopts.malloc_realloc = 0;
650 break;
651 case 'R':
652 mopts.malloc_realloc = 1;
653 break;
654 case 'x':
655 mopts.malloc_xmalloc = 0;
656 break;
657 case 'X':
658 mopts.malloc_xmalloc = 1;
659 break;
660 case 'z':
661 mopts.malloc_zero = 0;
662 break;
663 case 'Z':
664 mopts.malloc_zero = 1;
665 break;
666 default: {
667 static const char q[] = "malloc() warning: "
668 "unknown char in MALLOC_OPTIONS\n";
669 write(STDERR_FILENO, q, sizeof(q) - 1);
670 break;
671 }
672 }
158 } 673 }
159 pagebucket = bucket;
160 } 674 }
675
161 /* 676 /*
162 * Convert amount of memory requested into closest block size 677 * We want junk in the entire allocation, and zero only in the part
163 * stored in hash buckets which satisfies request. 678 * the user asked for.
164 * Account for space used per block for accounting.
165 */ 679 */
166 if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { 680 if (mopts.malloc_zero)
167#ifndef RCHECK 681 mopts.malloc_junk = 1;
168 amt = 8; /* size of first bucket */ 682
169 bucket = 0; 683#ifdef MALLOC_STATS
170#else 684 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
171 amt = 16; /* size of first bucket */ 685 static const char q[] = "malloc() warning: atexit(2) failed."
172 bucket = 1; 686 " Will not be able to dump stats on exit\n";
173#endif 687 write(STDERR_FILENO, q, sizeof(q) - 1);
174 n = -((long)sizeof (*op) + RSLOP);
175 } else {
176 amt = pagesz;
177 bucket = pagebucket;
178 }
179 while (nbytes > amt + n) {
180 amt <<= 1;
181 if (amt == 0)
182 return (NULL);
183 bucket++;
184 } 688 }
689#endif /* MALLOC_STATS */
690
691 while ((mopts.malloc_canary = arc4random()) == 0)
692 ;
693
185 /* 694 /*
186 * If nothing in hash bucket right now, 695 * Allocate dir_info with a guard page on either side. Also
187 * request more memory from the system. 696 * randomise offset inside the page at which the dir_info
697 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
188 */ 698 */
189 if ((op = nextf[bucket]) == NULL) { 699 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == NULL)
190 morecore(bucket); 700 return -1;
191 if ((op = nextf[bucket]) == NULL) 701 mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
192 return (NULL); 702 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
193 } 703 MALLOC_PAGESIZE, PROT_NONE);
194 /* remove from linked list */ 704 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
195 nextf[bucket] = op->ov_next; 705 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
196 op->ov_magic = MAGIC; 706 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
197 op->ov_index = bucket; 707
198#ifdef MSTATS 708 d->regions_bits = 9;
199 nmalloc[bucket]++; 709 d->regions_free = d->regions_total = 1 << d->regions_bits;
200#endif 710 regioninfo_size = d->regions_total * sizeof(struct region_info);
201#ifdef RCHECK 711 d->r = MMAP(regioninfo_size);
712 if (d->r == MAP_FAILED) {
713 wrterror("malloc init mmap failed");
714 d->regions_total = 0;
715 return 1;
716 }
717 malloc_used += regioninfo_size;
718 memset(d->r, 0, regioninfo_size);
719 d->canary1 = mopts.malloc_canary ^ (u_int32_t)d;
720 d->canary2 = ~d->canary1;
721
722 *dp = d;
723
202 /* 724 /*
203 * Record allocated size of block and 725 * Options have been set and will never be reset.
204 * bound space with magic numbers. 726 * Prevent further tampering with them.
205 */ 727 */
206 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 728 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
207 op->ov_rmagic = RMAGIC; 729 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
208 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 730
209#endif 731 return 0;
210 return ((char *)(op + 1));
211} 732}
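Besides the /etc/malloc.conf symlink and the MALLOC_OPTIONS environment variable, the same flag characters can be supplied by the program itself through the malloc_options global scanned above, as long as it is set before the first allocation; for example:

/*
 * Example: turn on junk filling ('J') and guard pages ('G') from
 * within a program; the string is parsed on the first allocation.
 */
extern char *malloc_options;

int
main(void)
{
	malloc_options = "JG";
	/* ... rest of the program ... */
	return 0;
}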
212 733
213/* 734static int
214 * Allocate more memory to the indicated bucket. 735omalloc_grow(struct dir_info *d)
215 */ 736{
737 size_t newbits;
738 size_t newtotal;
739 size_t newsize;
740 size_t mask;
741 size_t i;
742 struct region_info *p;
743
744 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
745 return 1;
746
747 newbits = d->regions_bits + 1;
748 newtotal = d->regions_total * 2;
749 newsize = newtotal * sizeof(struct region_info);
750 mask = newtotal - 1;
751
752 p = MMAP(newsize);
753 if (p == MAP_FAILED)
754 return 1;
755
756 malloc_used += newsize;
757 memset(p, 0, newsize);
758 STATS_ZERO(d->inserts);
759 STATS_ZERO(d->insert_collisions);
760 for (i = 0; i < d->regions_total; i++) {
761 void *q = d->r[i].p;
762 if (q != NULL) {
763 size_t index = hash(q) & mask;
764 STATS_INC(d->inserts);
765 while (p[index].p != NULL) {
766 index = (index - 1) & mask;
767 STATS_INC(d->insert_collisions);
768 }
769 p[index] = d->r[i];
770 }
771 }
772 /* avoid pages containing meta info to end up in cache */
773 if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
774 wrterror("munmap");
775 else
776 malloc_used -= d->regions_total * sizeof(struct region_info);
777 d->regions_free = d->regions_free + d->regions_total;
778 d->regions_total = newtotal;
779 d->regions_bits = newbits;
780 d->r = p;
781 return 0;
782}
783
784static struct chunk_info *
785alloc_chunk_info(struct dir_info *d)
786{
787 struct chunk_info *p;
788 int i;
789
790 if (d->chunk_info_list == NULL) {
791 p = MMAP(MALLOC_PAGESIZE);
792 if (p == MAP_FAILED)
793 return NULL;
794 malloc_used += MALLOC_PAGESIZE;
795 for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++) {
796 p[i].next = d->chunk_info_list;
797 d->chunk_info_list = &p[i];
798 }
799 }
800 p = d->chunk_info_list;
801 d->chunk_info_list = p->next;
802 memset(p, 0, sizeof *p);
803 p->canary = d->canary1;
804 return p;
805}
806
807
216static void 808static void
217morecore(bucket) 809put_chunk_info(struct dir_info *d, struct chunk_info *p)
218 int bucket;
219{ 810{
220 register union overhead *op; 811 p->next = d->chunk_info_list;
221 register long sz; /* size of desired block */ 812 d->chunk_info_list = p;
222 long amt; /* amount to allocate */ 813}
223 int nblks; /* how many blocks we get */
224 814
225 /* 815static int
226 * sbrk_size <= 0 only for big, FLUFFY, requests (about 816insert(struct dir_info *d, void *p, size_t sz)
227 * 2^30 bytes on a VAX, I think) or for a negative arg. 817{
228 */ 818 size_t index;
229 sz = 1 << (bucket + 3); 819 size_t mask;
230#ifdef DEBUG 820 void *q;
231 ASSERT(sz > 0); 821
232#else 822 if (d->regions_free * 4 < d->regions_total) {
233 if (sz <= 0) 823 if (omalloc_grow(d))
234 return; 824 return 1;
235#endif
236 if (sz < pagesz) {
237 amt = pagesz;
238 nblks = amt / sz;
239 } else {
240 amt = sz + pagesz;
241 nblks = 1;
242 } 825 }
243 op = (union overhead *)sbrk(amt); 826 mask = d->regions_total - 1;
244 /* no more room! */ 827 index = hash(p) & mask;
245 if ((long)op == -1) 828 q = d->r[index].p;
246 return; 829 STATS_INC(d->inserts);
247 /* 830 while (q != NULL) {
248 * Add new memory allocated to that on 831 index = (index - 1) & mask;
249 * free list for this hash bucket. 832 q = d->r[index].p;
250 */ 833 STATS_INC(d->insert_collisions);
251 nextf[bucket] = op; 834 }
252 while (--nblks > 0) { 835 d->r[index].p = p;
253 op->ov_next = (union overhead *)((caddr_t)op + sz); 836 d->r[index].size = sz;
254 op = (union overhead *)((caddr_t)op + sz); 837 d->regions_free--;
255 } 838 return 0;
256} 839}
257 840
258void 841static struct region_info *
259free(cp) 842find(struct dir_info *d, void *p)
260 void *cp; 843{
261{ 844 size_t index;
262 register long size; 845 size_t mask = d->regions_total - 1;
263 register union overhead *op; 846 void *q, *r;
264 847
265 if (cp == NULL) 848 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
266 return; 849 d->canary1 != ~d->canary2)
267 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 850 wrterror("internal struct corrupt");
268#ifdef DEBUG 851 p = MASK_POINTER(p);
269 ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ 852 index = hash(p) & mask;
270#else 853 r = d->r[index].p;
271 if (op->ov_magic != MAGIC) 854 q = MASK_POINTER(r);
272 return; /* sanity */ 855 STATS_INC(d->finds);
273#endif 856 while (q != p && r != NULL) {
274#ifdef RCHECK 857 index = (index - 1) & mask;
275 ASSERT(op->ov_rmagic == RMAGIC); 858 r = d->r[index].p;
276 ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); 859 q = MASK_POINTER(r);
277#endif 860 STATS_INC(d->find_collisions);
278 size = op->ov_index; 861 }
279 ASSERT(size < NBUCKETS); 862 return q == p ? &d->r[index] : NULL;
280 op->ov_next = nextf[size]; /* also clobbers ov_magic */
281 nextf[size] = op;
282#ifdef MSTATS
283 nmalloc[size]--;
284#endif
285} 863}
286 864
865static void
866delete(struct dir_info *d, struct region_info *ri)
867{
868 /* algorithm R, Knuth Vol III section 6.4 */
869 size_t mask = d->regions_total - 1;
870 size_t i, j, r;
871
872 if (d->regions_total & (d->regions_total - 1))
873 wrterror("regions_total not 2^x");
874 d->regions_free++;
875 STATS_INC(g_pool->deletes);
876
877 i = ri - d->r;
878 for (;;) {
879 d->r[i].p = NULL;
880 d->r[i].size = 0;
881 j = i;
882 for (;;) {
883 i = (i - 1) & mask;
884 if (d->r[i].p == NULL)
885 return;
886 r = hash(d->r[i].p) & mask;
887 if ((i <= r && r < j) || (r < j && j < i) ||
888 (j < i && i <= r))
889 continue;
890 d->r[j] = d->r[i];
891 STATS_INC(g_pool->delete_moves);
892 break;
893 }
894
895 }
896}
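Deletion from this open-addressed table cannot simply clear a slot, or entries that were bumped past it by collisions would become unreachable; algorithm R re-homes them. The three-way test above asks whether the displaced entry's home slot r still lies cyclically in [i, j), i.e. whether its probe path down to i avoids the new hole at j. A sketch of that predicate (the helper name is illustrative, not part of the source):

/*
 * Sketch (illustration only): the reachability test used by delete().
 * Probing moves downwards (index - 1).  An entry found at slot i with
 * home slot r stays put if r is in the cyclic half-open interval
 * [i, j); otherwise its probe path from r to i crosses the hole at j
 * and it must be moved into j.
 */
static int
still_reachable(size_t i, size_t j, size_t r)
{
	return (i <= r && r < j) || (r < j && j < i) || (j < i && i <= r);
}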
897
287/* 898/*
288 * When a program attempts "storage compaction" as mentioned in the 899 * Allocate a page of chunks
289 * old malloc man page, it realloc's an already freed block. Usually
290 * this is the last block it freed; occasionally it might be farther
291 * back. We have to search all the free lists for the block in order
292 * to determine its bucket: 1st we make one pass thru the lists
293 * checking only the first block in each; if that fails we search
294 * ``realloc_srchlen'' blocks in each list for a match (the variable
295 * is extern so the caller can modify it). If that fails we just copy
296 * however many bytes was given to realloc() and hope it's not huge.
297 */ 900 */
298int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ 901static struct chunk_info *
902omalloc_make_chunks(struct dir_info *d, int bits)
903{
904 struct chunk_info *bp;
905 void *pp;
906 long i, k;
299 907
300void * 908 /* Allocate a new bucket */
301realloc(cp, nbytes) 909 pp = map(d, MALLOC_PAGESIZE, 0);
302 void *cp; 910 if (pp == MAP_FAILED)
303 size_t nbytes; 911 return NULL;
304{ 912
305 register u_long onb; 913 bp = alloc_chunk_info(d);
306 register long i; 914 if (bp == NULL) {
307 union overhead *op; 915 unmap(d, pp, MALLOC_PAGESIZE);
308 char *res; 916 return NULL;
309 int was_alloced = 0; 917 }
310 918
311 if (cp == NULL) 919 /* memory protect the page allocated in the malloc(0) case */
312 return (malloc(nbytes)); 920 if (bits == 0) {
313 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 921 bp->size = 0;
314 if (op->ov_magic == MAGIC) { 922 bp->shift = 1;
315 was_alloced++; 923 i = MALLOC_MINSIZE - 1;
316 i = op->ov_index; 924 while (i >>= 1)
317 } else { 925 bp->shift++;
318 /* 926 bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
319 * Already free, doing "compaction". 927 bp->page = pp;
320 * 928
321 * Search for the old block of memory on the 929 k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
322 * free list. First, check the most common 930 if (k < 0) {
323 * case (last element free'd), then (this failing) 931 unmap(d, pp, MALLOC_PAGESIZE);
324 * the last ``realloc_srchlen'' items free'd. 932 put_chunk_info(d, bp);
325 * If all lookups fail, then assume the size of 933 return NULL;
326 * the memory block being realloc'd is the
327 * largest possible (so that all "nbytes" of new
328 * memory are copied into). Note that this could cause
329 * a memory fault if the old area was tiny, and the moon
330 * is gibbous. However, that is very unlikely.
331 */
332 if ((i = findbucket(op, 1)) < 0 &&
333 (i = findbucket(op, realloc_srchlen)) < 0)
334 i = NBUCKETS;
335 }
336 onb = 1 << (i + 3);
337 if (onb < pagesz)
338 onb -= sizeof (*op) + RSLOP;
339 else
340 onb += pagesz - sizeof (*op) - RSLOP;
341 /* avoid the copy if same size block */
342 if (was_alloced) {
343 if (i) {
344 i = 1 << (i + 2);
345 if (i < pagesz)
346 i -= sizeof (*op) + RSLOP;
347 else
348 i += pagesz - sizeof (*op) - RSLOP;
349 } 934 }
350 if (nbytes <= onb && nbytes > i) { 935 } else {
351#ifdef RCHECK 936 bp->size = (1UL << bits);
352 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 937 bp->shift = bits;
353 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 938 bp->total = bp->free = MALLOC_PAGESIZE >> bits;
354#endif 939 bp->page = pp;
355 return(cp); 940 }
356 } else 941
357 free(cp); 942 /* set all valid bits in the bitmap */
358 } 943 k = bp->total;
359 if ((res = malloc(nbytes)) == NULL) 944 i = 0;
360 return (NULL); 945
361 if (cp != res) /* common optimization if "compacting" */ 946 /* Do a bunch at a time */
362 bcopy(cp, res, (nbytes < onb) ? nbytes : onb); 947 for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
363 return (res); 948 bp->bits[i / MALLOC_BITS] = ~0UL;
949
950 for (; i < k; i++)
951 bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
952
953 bp->next = d->chunk_dir[bits];
954 d->chunk_dir[bits] = bp;
955
956 bits++;
957 if ((uintptr_t)pp & bits)
958 wrterror("pp & bits");
959
960 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
961 return bp;
364} 962}
365 963
964
366/* 965/*
367 * Search ``srchlen'' elements of each free list for a block whose 966 * Allocate a chunk
368 * header starts at ``freep''. If srchlen is -1 search the whole list.
369 * Return bucket number, or -1 if not found.
370 */ 967 */
371static 968static void *
372findbucket(freep, srchlen) 969malloc_bytes(struct dir_info *d, size_t size)
373 union overhead *freep;
374 int srchlen;
375{ 970{
376 register union overhead *p; 971 int i, j;
377 register int i, j; 972 size_t k;
973 u_long u, *lp;
974 struct chunk_info *bp;
975
976 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) ||
977 d->canary1 != ~d->canary2)
978 wrterror("internal struct corrupt");
979 /* Don't bother with anything less than this */
980 /* unless we have a malloc(0) requests */
981 if (size != 0 && size < MALLOC_MINSIZE)
982 size = MALLOC_MINSIZE;
378 983
379 for (i = 0; i < NBUCKETS; i++) { 984 /* Find the right bucket */
985 if (size == 0)
380 j = 0; 986 j = 0;
381 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { 987 else {
382 if (p == freep) 988 j = MALLOC_MINSHIFT;
383 return (i); 989 i = (size - 1) >> (MALLOC_MINSHIFT - 1);
990 while (i >>= 1)
384 j++; 991 j++;
992 }
993
994 /* If it's empty, make a page more of that size chunks */
995 bp = d->chunk_dir[j];
996 if (bp == NULL && (bp = omalloc_make_chunks(d, j)) == NULL)
997 return NULL;
998
999 if (bp->canary != d->canary1)
1000 wrterror("chunk info corrupted");
1001 /* Find first word of bitmap which isn't empty */
1002 for (lp = bp->bits; !*lp; lp++)
1003 /* EMPTY */;
1004
1005 /* Find that bit, and tweak it */
1006 u = 1;
1007 k = 0;
1008 while (!(*lp & u)) {
1009 u += u;
1010 k++;
1011 }
1012
1013 /* advance a random # of positions */
1014 i = (getrbyte() & (MALLOC_DELAYED_CHUNKS - 1)) % bp->free;
1015 while (i > 0) {
1016 u += u;
1017 k++;
1018 if (k >= MALLOC_BITS) {
1019 lp++;
1020 u = 1;
1021 k = 0;
1022 }
1023 if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) {
1024 wrterror("chunk overflow");
1025 errno = EFAULT;
1026 return (NULL);
1027 }
1028 if (*lp & u)
1029 i--;
1030 }
1031
1032 *lp ^= u;
1033
1034 /* If there are no more free, remove from free-list */
1035 if (!--bp->free) {
1036 d->chunk_dir[j] = bp->next;
1037 bp->next = NULL;
1038 }
1039 /* Adjust to the real offset of that chunk */
1040 k += (lp - bp->bits) * MALLOC_BITS;
1041 k <<= bp->shift;
1042
1043 if (mopts.malloc_junk && bp->size > 0)
1044 memset((char *)bp->page + k, SOME_JUNK, bp->size);
1045 return ((char *)bp->page + k);
1046}
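The shift loop above picks the chunk size class: the smallest power of two that covers the request, never below MALLOC_MINSHIFT, with class 0 reserved for malloc(0). A sketch of the same mapping written directly (the helper name is illustrative, not part of the source):

/*
 * Sketch (illustration only): the size class selected by
 * malloc_bytes() for a sub-page request.
 */
static int
size_class(size_t size)
{
	int j;

	if (size == 0)
		return 0;			/* malloc(0) bucket */
	for (j = MALLOC_MINSHIFT; ((size_t)1 << j) < size; j++)
		;
	return j;				/* chunks of (1 << j) bytes */
}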
1047
1048
1049/*
1050 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1051 */
1052static void
1053free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1054{
1055 struct chunk_info *info, **mp;
1056 long i;
1057
1058 info = (struct chunk_info *)r->size;
1059 if (info->canary != d->canary1)
1060 wrterror("chunk info corrupted");
1061
1062 /* Find the chunk number on the page */
1063 i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1064
1065 if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) {
1066 wrterror("modified chunk-pointer");
1067 return;
1068 }
1069 if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1070 wrterror("chunk is already free");
1071 return;
1072 }
1073
1074 info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
1075 info->free++;
1076
1077 if (info->size != 0)
1078 mp = d->chunk_dir + info->shift;
1079 else
1080 mp = d->chunk_dir;
1081
1082 if (info->free == 1) {
1083 /* Page became non-full */
1084
1085 /* Insert in address order */
1086 while (*mp != NULL && (*mp)->next != NULL &&
1087 (*mp)->next->page < info->page)
1088 mp = &(*mp)->next;
1089 info->next = *mp;
1090 *mp = info;
1091 return;
1092 }
1093 if (info->free != info->total)
1094 return;
1095
1096 /* Find & remove this page in the queue */
1097 while (*mp != info) {
1098 mp = &((*mp)->next);
1099 if (!*mp) {
1100 wrterror("not on queue");
1101 errno = EFAULT;
1102 return;
1103 }
1104 }
1105 *mp = info->next;
1106
1107 if (info->size == 0 && !mopts.malloc_freeprot)
1108 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1109 unmap(d, info->page, MALLOC_PAGESIZE);
1110
1111 delete(d, r);
1112 put_chunk_info(d, info);
1113}
1114
1115
1116
1117static void *
1118omalloc(size_t sz, int zero_fill)
1119{
1120 void *p;
1121 size_t psz;
1122
1123 if (sz > MALLOC_MAXCHUNK) {
1124 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1125 errno = ENOMEM;
1126 return NULL;
1127 }
1128 sz += mopts.malloc_guard;
1129 psz = PAGEROUND(sz);
1130 p = map(g_pool, psz, zero_fill);
1131 if (p == MAP_FAILED) {
1132 errno = ENOMEM;
1133 return NULL;
385 } 1134 }
1135 if (insert(g_pool, p, sz)) {
1136 unmap(g_pool, p, psz);
1137 errno = ENOMEM;
1138 return NULL;
1139 }
1140 if (mopts.malloc_guard) {
1141 if (mprotect((char *)p + psz - mopts.malloc_guard,
1142 mopts.malloc_guard, PROT_NONE))
1143 wrterror("mprotect");
1144 malloc_guarded += mopts.malloc_guard;
1145 }
1146
1147 if (mopts.malloc_move &&
1148 sz - mopts.malloc_guard < MALLOC_PAGESIZE -
1149 MALLOC_LEEWAY) {
1150 /* fill whole allocation */
1151 if (mopts.malloc_junk)
1152 memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1153 /* shift towards the end */
1154 p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
1155 (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
1156 /* fill zeros if needed and overwritten above */
1157 if (zero_fill && mopts.malloc_junk)
1158 memset(p, 0, sz - mopts.malloc_guard);
1159 } else {
1160 if (mopts.malloc_junk) {
1161 if (zero_fill)
1162 memset(p + sz - mopts.malloc_guard,
1163 SOME_JUNK, psz - sz);
1164 else
1165 memset(p, SOME_JUNK,
1166 psz - mopts.malloc_guard);
1167 }
1168 }
1169
1170 } else {
1171 /* takes care of SOME_JUNK */
1172 p = malloc_bytes(g_pool, sz);
1173 if (zero_fill && p != NULL && sz > 0)
1174 memset(p, 0, sz);
386 } 1175 }
387 return (-1); 1176
1177 return p;
388} 1178}
389 1179
-#ifdef MSTATS
-/*
- * mstats - print out statistics about malloc
- *
- * Prints two lines of numbers, one showing the length of the free list
- * for each size category, the second showing the number of mallocs -
- * frees for each size category.
- */
-mstats(s)
-	char *s;
-{
-	register int i, j;
-	register union overhead *p;
-	int totfree = 0,
-	totused = 0;
-
-	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
-	for (i = 0; i < NBUCKETS; i++) {
-		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
-			;
-		fprintf(stderr, " %d", j);
-		totfree += j * (1 << (i + 3));
-	}
-	fprintf(stderr, "\nused:\t");
-	for (i = 0; i < NBUCKETS; i++) {
-		fprintf(stderr, " %d", nmalloc[i]);
-		totused += nmalloc[i] * (1 << (i + 3));
-	}
-	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
-		totused, totfree);
+/*
+ * Common function for handling recursion.  Only
+ * print the error message once, to avoid making the problem
+ * potentially worse.
+ */
+static void
+malloc_recurse(void)
+{
+	static int noprint;
+
+	if (noprint == 0) {
+		noprint = 1;
+		wrterror("recursive call");
+	}
+	malloc_active--;
+	_MALLOC_UNLOCK();
+	errno = EDEADLK;
+}
+
+static int
+malloc_init(void)
+{
+	if (omalloc_init(&g_pool)) {
+		_MALLOC_UNLOCK();
+		if (mopts.malloc_xmalloc)
+			wrterror("out of memory");
+		errno = ENOMEM;
+		return -1;
+	}
+	return 0;
 }
+
1212void *
1213malloc(size_t size)
1214{
1215 void *r;
1216 int saved_errno = errno;
1217
1218 _MALLOC_LOCK();
1219 malloc_func = " in malloc():";
1220 if (g_pool == NULL) {
1221 if (malloc_init() != 0)
1222 return NULL;
1223 }
1224 if (malloc_active++) {
1225 malloc_recurse();
1226 return NULL;
1227 }
1228 r = omalloc(size, mopts.malloc_zero);
1229 malloc_active--;
1230 _MALLOC_UNLOCK();
1231 if (r == NULL && mopts.malloc_xmalloc) {
1232 wrterror("out of memory");
1233 errno = ENOMEM;
1234 }
1235 if (r != NULL)
1236 errno = saved_errno;
1237 return r;
1238}
1239
1240static void
1241ofree(void *p)
1242{
1243 struct region_info *r;
1244 size_t sz;
1245
1246 r = find(g_pool, p);
1247 if (r == NULL) {
1248 wrterror("bogus pointer (double free?)");
1249 return;
1250 }
1251 REALSIZE(sz, r);
1252 if (sz > MALLOC_MAXCHUNK) {
1253 if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1254 MALLOC_LEEWAY) {
1255 if (r->p != p) {
1256 wrterror("bogus pointer");
1257 return;
1258 }
1259 } else {
1260#if notyetbecause_of_realloc
1261 /* shifted towards the end */
1262 if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1263 MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1264 ~(MALLOC_MINSIZE-1))) {
1265 }
421#endif 1266#endif
1267 p = r->p;
1268 }
1269 if (mopts.malloc_guard) {
1270 if (sz < mopts.malloc_guard)
1271 wrterror("guard size");
1272 if (!mopts.malloc_freeprot) {
1273 if (mprotect((char *)p + PAGEROUND(sz) -
1274 mopts.malloc_guard, mopts.malloc_guard,
1275 PROT_READ | PROT_WRITE))
1276 wrterror("mprotect");
1277 }
1278 malloc_guarded -= mopts.malloc_guard;
1279 }
1280 if (mopts.malloc_junk && !mopts.malloc_freeprot)
1281 memset(p, SOME_FREEJUNK,
1282 PAGEROUND(sz) - mopts.malloc_guard);
1283 unmap(g_pool, p, PAGEROUND(sz));
1284 delete(g_pool, r);
1285 } else {
1286 void *tmp;
1287 int i;
1288
1289 if (mopts.malloc_junk && sz > 0)
1290 memset(p, SOME_FREEJUNK, sz);
1291 if (!mopts.malloc_freeprot) {
1292 i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1);
1293 tmp = p;
1294 p = g_pool->delayed_chunks[i];
1295 g_pool->delayed_chunks[i] = tmp;
1296 }
1297 if (p != NULL) {
1298 r = find(g_pool, p);
1299 if (r == NULL) {
1300 wrterror("bogus pointer (double free?)");
1301 return;
1302 }
1303 free_bytes(g_pool, r, p);
1304 }
1305 }
1306}
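For chunk-sized frees the pointer is not released right away: it trades places with a random entry of the 16-slot delayed_chunks ring, and only the evicted occupant is actually freed, which delays reuse and randomises the order in which chunks return to the free bitmap. A sketch of that swap (the helper name is illustrative), assuming the same MALLOC_DELAYED_CHUNKS ring:

/*
 * Sketch (illustration only): the delayed-free swap performed for
 * small chunks when the 'F' (freeprot) option is off.
 */
static void *
delayed_swap(void *delayed[], void *p, u_char rnd)
{
	int i = rnd & (MALLOC_DELAYED_CHUNKS - 1);
	void *evicted = delayed[i];

	delayed[i] = p;
	return evicted;		/* this one, if non-NULL, is really freed */
}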
1307
1308void
1309free(void *ptr)
1310{
1311 int saved_errno = errno;
1312
1313 /* This is legal. */
1314 if (ptr == NULL)
1315 return;
1316
1317 _MALLOC_LOCK();
1318 malloc_func = " in free():";
1319 if (g_pool == NULL) {
1320 _MALLOC_UNLOCK();
1321 wrterror("free() called before allocation");
1322 return;
1323 }
1324 if (malloc_active++) {
1325 malloc_recurse();
1326 return;
1327 }
1328 ofree(ptr);
1329 malloc_active--;
1330 _MALLOC_UNLOCK();
1331 errno = saved_errno;
1332}
1333
1334
1335static void *
1336orealloc(void *p, size_t newsz)
1337{
1338 struct region_info *r;
1339 size_t oldsz, goldsz, gnewsz;
1340 void *q;
1341
1342 if (p == NULL)
1343 return omalloc(newsz, 0);
1344
1345 r = find(g_pool, p);
1346 if (r == NULL) {
1347 wrterror("bogus pointer (double free?)");
1348 return NULL;
1349 }
1350 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1351 errno = ENOMEM;
1352 return NULL;
1353 }
1354
1355 REALSIZE(oldsz, r);
1356 goldsz = oldsz;
1357 if (oldsz > MALLOC_MAXCHUNK) {
1358 if (oldsz < mopts.malloc_guard)
1359 wrterror("guard size");
1360 oldsz -= mopts.malloc_guard;
1361 }
1362
1363 gnewsz = newsz;
1364 if (gnewsz > MALLOC_MAXCHUNK)
1365 gnewsz += mopts.malloc_guard;
1366
1367 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1368 !mopts.malloc_realloc) {
1369 size_t roldsz = PAGEROUND(goldsz);
1370 size_t rnewsz = PAGEROUND(gnewsz);
1371
1372 if (rnewsz > roldsz) {
1373 if (!mopts.malloc_guard) {
1374 STATS_INC(g_pool.cheap_realloc_tries);
1375 zapcacheregion(g_pool, p + roldsz);
1376 q = MMAPA(p + roldsz, rnewsz - roldsz);
1377 if (q == p + roldsz) {
1378 malloc_used += rnewsz - roldsz;
1379 if (mopts.malloc_junk)
1380 memset(q, SOME_JUNK,
1381 rnewsz - roldsz);
1382 r->size = newsz;
1383 STATS_INC(g_pool->cheap_reallocs);
1384 return p;
1385 } else if (q != MAP_FAILED)
1386 munmap(q, rnewsz - roldsz);
1387 }
1388 } else if (rnewsz < roldsz) {
1389 if (mopts.malloc_guard) {
1390 if (mprotect((char *)p + roldsz -
1391 mopts.malloc_guard, mopts.malloc_guard,
1392 PROT_READ | PROT_WRITE))
1393 wrterror("mprotect");
1394 if (mprotect((char *)p + rnewsz -
1395 mopts.malloc_guard, mopts.malloc_guard,
1396 PROT_NONE))
1397 wrterror("mprotect");
1398 }
1399 unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1400 r->size = gnewsz;
1401 return p;
1402 } else {
1403 if (newsz > oldsz && mopts.malloc_junk)
1404 memset((char *)p + newsz, SOME_JUNK,
1405 rnewsz - mopts.malloc_guard - newsz);
1406 r->size = gnewsz;
1407 return p;
1408 }
1409 }
1410 if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
1411 if (mopts.malloc_junk && newsz > 0)
1412 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1413 return p;
1414 } else if (newsz != oldsz || mopts.malloc_realloc) {
1415 q = omalloc(newsz, 0);
1416 if (q == NULL)
1417 return NULL;
1418 if (newsz != 0 && oldsz != 0)
1419 memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1420 ofree(p);
1421 return q;
1422 } else
1423 return p;
1424}
1425
1426void *
1427realloc(void *ptr, size_t size)
1428{
1429 void *r;
1430 int saved_errno = errno;
1431
1432 _MALLOC_LOCK();
1433 malloc_func = " in realloc():";
1434 if (g_pool == NULL) {
1435 if (malloc_init() != 0)
1436 return NULL;
1437 }
1438 if (malloc_active++) {
1439 malloc_recurse();
1440 return NULL;
1441 }
1442 r = orealloc(ptr, size);
1443
1444 malloc_active--;
1445 _MALLOC_UNLOCK();
1446 if (r == NULL && mopts.malloc_xmalloc) {
1447 wrterror("out of memory");
1448 errno = ENOMEM;
1449 }
1450 if (r != NULL)
1451 errno = saved_errno;
1452 return r;
1453}
1454
1455
1456#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
1457
1458void *
1459calloc(size_t nmemb, size_t size)
1460{
1461 void *r;
1462 int saved_errno = errno;
1463
1464 _MALLOC_LOCK();
1465 malloc_func = " in calloc():";
1466 if (g_pool == NULL) {
1467 if (malloc_init() != 0)
1468 return NULL;
1469 }
1470 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1471 nmemb > 0 && SIZE_MAX / nmemb < size) {
1472 _MALLOC_UNLOCK();
1473 if (mopts.malloc_xmalloc)
1474 wrterror("out of memory");
1475 errno = ENOMEM;
1476 return NULL;
1477 }
1478
1479 if (malloc_active++) {
1480 malloc_recurse();
1481 return NULL;
1482 }
1483
1484 size *= nmemb;
1485 r = omalloc(size, 1);
1486
1487 malloc_active--;
1488 _MALLOC_UNLOCK();
1489 if (r == NULL && mopts.malloc_xmalloc) {
1490 wrterror("out of memory");
1491 errno = ENOMEM;
1492 }
1493 if (r != NULL)
1494 errno = saved_errno;
1495 return r;
1496}
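The MUL_NO_OVERFLOW test in calloc() above is the usual cheap overflow check: if both nmemb and size are below 2 raised to half the width of size_t, their product cannot wrap, so the division is only paid for unusually large arguments. As a standalone sketch (the helper name is illustrative, not part of the source):

/*
 * Sketch (illustration only): the nmemb * size overflow test used by
 * calloc() above, written as a predicate.
 */
static int
mul_would_overflow(size_t nmemb, size_t size)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size)
		return 1;
	return 0;
}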
1497