path: root/src/lib/libc/stdlib/malloc.c
Diffstat (limited to 'src/lib/libc/stdlib/malloc.c')
-rw-r--r--   src/lib/libc/stdlib/malloc.c   1784
1 file changed, 1439 insertions(+), 345 deletions(-)
diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c
index 3c57fad024..902b69c216 100644
--- a/src/lib/libc/stdlib/malloc.c
+++ b/src/lib/libc/stdlib/malloc.c
@@ -1,421 +1,1515 @@
+/*	$OpenBSD: malloc.c,v 1.125 2010/05/18 22:24:55 tedu Exp $	*/
 /*
- * Copyright (c) 1983 Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    This product includes software developed by the University of
- *    California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#if defined(LIBC_SCCS) && !defined(lint)
-/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
-static char *rcsid = "$Id: malloc.c,v 1.1.1.1 1995/10/18 08:42:18 deraadt Exp $";
-#endif /* LIBC_SCCS and not lint */
-
 /*
- * malloc.c (Caltech) 2/21/82
- * Chris Kingsley, kingsley@cit-20.
- *
- * This is a very fast storage allocator.  It allocates blocks of a small
- * number of different sizes, and keeps free lists of each size.  Blocks that
- * don't exactly fit are passed up to the next larger size.  In this
- * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
- * This is designed for use in a virtual memory environment.
+ * Parts of this code, mainly the sub page sized chunk management code is
+ * derived from the malloc implementation with the following license:
+ */
+/*
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
  */
 
+/* #define MALLOC_STATS */
+
 #include <sys/types.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <errno.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stdio.h>
 #include <unistd.h>
 
-#define NULL 0
+#ifdef MALLOC_STATS
+#include <fcntl.h>
+#endif
+
+#include "thread_private.h"
+
+#define MALLOC_MINSHIFT		4
+#define MALLOC_MAXSHIFT		16
+
+#if defined(__sparc__) && !defined(__sparcv9__)
+#define MALLOC_PAGESHIFT	(13U)
+#elif defined(__mips64__)
+#define MALLOC_PAGESHIFT	(14U)
+#else
+#define MALLOC_PAGESHIFT	(PGSHIFT)
+#endif
+
+#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
+#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
+#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
+#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
+
+#define MALLOC_MAXCHUNK		(1 << (MALLOC_PAGESHIFT-1))
+#define MALLOC_MAXCACHE		256
+#define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
+/*
+ * When the P option is active, we move allocations between half a page
+ * and a whole page towards the end, subject to alignment constraints.
+ * This is the extra headroom we allow. Set to zero to be the most
+ * strict.
+ */
+#define MALLOC_LEEWAY		0
 
-static void morecore();
-static int findbucket();
+#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
 
 /*
- * The overhead on a block is at least 4 bytes.  When free, this space
- * contains a pointer to the next free block, and the bottom two bits must
- * be zero.  When in use, the first byte is set to MAGIC, and the second
- * byte is the size index.  The remaining bytes are for alignment.
- * If range checking is enabled then a second word holds the size of the
- * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
- * The order of elements is critical: ov_magic must overlay the low order
- * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ * What to use for Junk.  This is the byte value we use to fill with
+ * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
+ * and SOME_FREEJUNK right before free.
  */
-union overhead {
-    union overhead *ov_next;	/* when free */
-    struct {
-        u_char ovu_magic;	/* magic number */
-        u_char ovu_index;	/* bucket # */
-#ifdef RCHECK
-        u_short ovu_rmagic;	/* range magic number */
-        u_long ovu_size;	/* actual block size */
-#endif
-    } ovu;
-#define ov_magic	ovu.ovu_magic
-#define ov_index	ovu.ovu_index
-#define ov_rmagic	ovu.ovu_rmagic
-#define ov_size		ovu.ovu_size
+#define SOME_JUNK	0xd0	/* as in "Duh" :-) */
+#define SOME_FREEJUNK	0xdf
+
+#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
+    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
+
+struct region_info {
+    void *p;		/* page; low bits used to mark chunks */
+    uintptr_t size;	/* size for pages, or chunk_info pointer */
 };
 
-#define MAGIC		0xef	/* magic # on accounting info */
-#define RMAGIC		0x5555	/* magic # on range info */
+LIST_HEAD(chunk_head, chunk_info);
 
-#ifdef RCHECK
-#define RSLOP		sizeof (u_short)
+struct dir_info {
+    u_int32_t canary1;
+    struct region_info *r;	/* region slots */
+    size_t regions_total;	/* number of region slots */
+    size_t regions_bits;	/* log2 of total */
+    size_t regions_free;	/* number of free slots */
+    /* list of free chunk info structs */
+    struct chunk_head chunk_info_list;
+    /* lists of chunks with free slots */
+    struct chunk_head chunk_dir[MALLOC_MAXSHIFT];
+    size_t free_regions_size;	/* free pages cached */
+    /* free pages cache */
+    struct region_info free_regions[MALLOC_MAXCACHE];
+    /* delayed free chunk slots */
+    void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
+#ifdef MALLOC_STATS
+    size_t inserts;
+    size_t insert_collisions;
+    size_t finds;
+    size_t find_collisions;
+    size_t deletes;
+    size_t delete_moves;
+    size_t cheap_realloc_tries;
+    size_t cheap_reallocs;
+#define STATS_INC(x) ((x)++)
+#define STATS_ZERO(x) ((x) = 0)
 #else
-#define RSLOP		0
-#endif
+#define STATS_INC(x)	/* nothing */
+#define STATS_ZERO(x)	/* nothing */
+#endif /* MALLOC_STATS */
+    u_int32_t canary2;
+};
+#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
+    ~MALLOC_PAGEMASK)
 
 /*
- * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
- * smallest allocatable block is 8 bytes.  The overhead information
- * precedes the data area returned to the user.
+ * This structure describes a page worth of chunks.
+ *
+ * How many bits per u_long in the bitmap
  */
-#define NBUCKETS 30
-static union overhead *nextf[NBUCKETS];
-extern char *sbrk();
+#define MALLOC_BITS	(NBBY * sizeof(u_long))
+struct chunk_info {
+    LIST_ENTRY(chunk_info) entries;
+    void *page;		/* pointer to the page */
+    u_int32_t canary;
+    u_short size;	/* size of this page's chunks */
+    u_short shift;	/* how far to shift for this size */
+    u_short free;	/* how many free chunks */
+    u_short total;	/* how many chunks */
+    /* which chunks are free */
+    u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
+};
+
+struct malloc_readonly {
+    struct dir_info *g_pool;	/* Main bookkeeping information */
+    int malloc_abort;		/* abort() on error */
+    int malloc_freeprot;	/* mprotect free pages PROT_NONE? */
+    int malloc_hint;		/* call madvise on free pages? */
+    int malloc_junk;		/* junk fill? */
+    int malloc_move;		/* move allocations to end of page? */
+    int malloc_realloc;		/* always realloc? */
+    int malloc_xmalloc;		/* xmalloc behaviour? */
+    int malloc_zero;		/* zero fill? */
+    size_t malloc_guard;	/* use guard pages after allocations? */
+    u_int malloc_cache;		/* free pages we cache */
+#ifdef MALLOC_STATS
+    int malloc_stats;		/* dump statistics at end */
+#endif
+    u_int32_t malloc_canary;	/* Matched against ones in g_pool */
+};
 
-static int pagesz;			/* page size */
-static int pagebucket;			/* page size bucket */
+/* This object is mapped PROT_READ after initialisation to prevent tampering */
+static union {
+    struct malloc_readonly mopts;
+    u_char _pad[MALLOC_PAGESIZE];
+} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
+#define mopts	malloc_readonly.mopts
+#define g_pool	mopts.g_pool
 
-#ifdef MSTATS
-/*
- * nmalloc[i] is the difference between the number of mallocs and frees
- * for a given block size.
+char *malloc_options;	/* compile-time options */
+
+static char *malloc_func;	/* current function */
+static int malloc_active;	/* status of malloc */
+
+static size_t malloc_guarded;	/* bytes used for guards */
+static size_t malloc_used;	/* bytes allocated */
+
+static size_t rnibblesused;	/* random nibbles used */
+static u_char rbytes[512];	/* random bytes */
+static u_char getrnibble(void);
+
+extern char *__progname;
+
+/* low bits of r->p determine size: 0 means >= page size and p->size holding
+ * real size, otherwise r->size is a shift count, or 1 for malloc(0)
  */
-static u_int nmalloc[NBUCKETS];
-#include <stdio.h>
+#define REALSIZE(sz, r)						\
+    (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,			\
+    (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
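
The REALSIZE() macro decodes the low-bit tag described in the comment above: 0 in the low bits means a page-sized or larger region whose byte count sits in r->size, 1 means a malloc(0) chunk, and any other value v means chunks of size 1 << (v - 1). A minimal standalone sketch of that decode, with hypothetical values and an assumed 4096-byte page (not part of the diff):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* a chunk page at 0x2000 whose low bits carry the tag 5 */
    uintptr_t p = 0x2000 | 5;
    size_t sz = p & 0xfff;      /* assumed MALLOC_PAGEMASK for 4 KB pages */

    sz = (sz == 0) ? 0 /* would be r->size */
                   : (sz == 1 ? 0 : (1UL << (sz - 1)));
    printf("%zu\n", sz);        /* prints 16: tag 5 encodes 1 << 4 bytes */
    return 0;
}
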
+
+static inline size_t
+hash(void *p)
+{
+    size_t sum;
+    union {
+        uintptr_t p;
+        unsigned short a[sizeof(void *) / sizeof(short)];
+    } u;
+    u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
+    sum = u.a[0];
+    sum = (sum << 7) - sum + u.a[1];
+#ifdef __LP64__
+    sum = (sum << 7) - sum + u.a[2];
+    sum = (sum << 7) - sum + u.a[3];
 #endif
+    return sum;
+}
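
Since (sum << 7) - sum is just sum * 127, hash() amounts to a multiplicative hash over the 16-bit halves of the page number. An equivalent formulation, assuming a little-endian LP64 machine and a 12-bit page shift (illustration only, not the committed code):

#include <stdint.h>
#include <stddef.h>

static size_t
hash_equiv(void *p)
{
    uintptr_t v = (uintptr_t)p >> 12;   /* assumed MALLOC_PAGESHIFT */
    size_t sum = (unsigned short)v;

    sum = sum * 127 + (unsigned short)(v >> 16);
    sum = sum * 127 + (unsigned short)(v >> 32);
    sum = sum * 127 + (unsigned short)(v >> 48);
    return sum;
}
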
 
-#if defined(DEBUG) || defined(RCHECK)
-#define ASSERT(p)   if (!(p)) botch("p")
-#include <stdio.h>
-static
-botch(s)
-	char *s;
+#ifdef MALLOC_STATS
+static void
+dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
 {
-	fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
-	(void) fflush(stderr);		/* just in case user buffered it */
-	abort();
+    char buf[64];
+
+    while (p != NULL) {
+        snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
+            p->free, p->total, p->page);
+        write(fd, buf, strlen(buf));
+        if (!fromfreelist)
+            break;
+        p = LIST_NEXT(p, entries);
+        if (p != NULL) {
+            snprintf(buf, sizeof(buf), "    ");
+            write(fd, buf, strlen(buf));
+        }
+    }
 }
-#else
-#define ASSERT(p)
-#endif
 
-void *
-malloc(nbytes)
-	size_t nbytes;
+static void
+dump_free_chunk_info(int fd, struct dir_info *d)
+{
+    char buf[64];
+    int i;
+
+    snprintf(buf, sizeof(buf), "Free chunk structs:\n");
+    write(fd, buf, strlen(buf));
+    for (i = 0; i < MALLOC_MAXSHIFT; i++) {
+        struct chunk_info *p = LIST_FIRST(&d->chunk_dir[i]);
+        if (p != NULL) {
+            snprintf(buf, sizeof(buf), "%2d) ", i);
+            write(fd, buf, strlen(buf));
+            dump_chunk(fd, p, 1);
+        }
+    }
+
+}
+
+static void
+dump_free_page_info(int fd, struct dir_info *d)
+{
+    char buf[64];
+    int i;
+
+    snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
+        d->free_regions_size);
+    write(fd, buf, strlen(buf));
+    for (i = 0; i < mopts.malloc_cache; i++) {
+        if (d->free_regions[i].p != NULL) {
+            snprintf(buf, sizeof(buf), "%2d) ", i);
+            write(fd, buf, strlen(buf));
+            snprintf(buf, sizeof(buf), "free at %p: %zu\n",
+                d->free_regions[i].p, d->free_regions[i].size);
+            write(fd, buf, strlen(buf));
+        }
+    }
+}
+
+static void
+malloc_dump1(int fd, struct dir_info *d)
 {
-	register union overhead *op;
-	register long bucket, n;
-	register unsigned amt;
+    char buf[64];
+    size_t i, realsize;
+
+    snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
+    write(fd, buf, strlen(buf));
+    if (d == NULL)
+        return;
+    snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
+        d->find_collisions,
+        1.0 + (double)d->find_collisions / d->finds);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
+        d->insert_collisions,
+        1.0 + (double)d->insert_collisions / d->inserts);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
+        d->delete_moves);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
+        d->cheap_reallocs, d->cheap_realloc_tries);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
+    write(fd, buf, strlen(buf));
+    for (i = 0; i < d->regions_total; i++) {
+        if (d->r[i].p != NULL) {
+            size_t h = hash(d->r[i].p) &
+                (d->regions_total - 1);
+            snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
+                i, h, h - i);
+            write(fd, buf, strlen(buf));
+            REALSIZE(realsize, &d->r[i]);
+            if (realsize > MALLOC_MAXCHUNK) {
+                snprintf(buf, sizeof(buf),
+                    "%p: %zu\n", d->r[i].p, realsize);
+                write(fd, buf, strlen(buf));
+            } else
+                dump_chunk(fd,
+                    (struct chunk_info *)d->r[i].size, 0);
+        }
+    }
+    dump_free_chunk_info(fd, d);
+    dump_free_page_info(fd, d);
+    snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
+    write(fd, buf, strlen(buf));
+    snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
+    write(fd, buf, strlen(buf));
+}
+
+
+void
+malloc_dump(int fd)
+{
+    malloc_dump1(fd, g_pool);
+}
+
+static void
+malloc_exit(void)
+{
+    static const char q[] = "malloc() warning: Couldn't dump stats\n";
+    int save_errno = errno, fd;
+
+    fd = open("malloc.out", O_RDWR|O_APPEND);
+    if (fd != -1) {
+        malloc_dump(fd);
+        close(fd);
+    } else
+        write(STDERR_FILENO, q, sizeof(q) - 1);
+    errno = save_errno;
+}
+#endif /* MALLOC_STATS */
+
+
+
+static void
+wrterror(char *p)
+{
+    char *q = " error: ";
+    struct iovec iov[5];
+
+    iov[0].iov_base = __progname;
+    iov[0].iov_len = strlen(__progname);
+    iov[1].iov_base = malloc_func;
+    iov[1].iov_len = strlen(malloc_func);
+    iov[2].iov_base = q;
+    iov[2].iov_len = strlen(q);
+    iov[3].iov_base = p;
+    iov[3].iov_len = strlen(p);
+    iov[4].iov_base = "\n";
+    iov[4].iov_len = 1;
+    writev(STDERR_FILENO, iov, 5);
+
+#ifdef MALLOC_STATS
+    if (mopts.malloc_stats)
+        malloc_dump(STDERR_FILENO);
+#endif /* MALLOC_STATS */
+    //malloc_active--;
+    if (mopts.malloc_abort)
+        abort();
+}
+
+static void
+rbytes_init(void)
+{
+    arc4random_buf(rbytes, sizeof(rbytes));
+    rnibblesused = 0;
+}
+
+static inline u_char
+getrnibble(void)
+{
+    u_char x;
+
+    if (rnibblesused >= 2 * sizeof(rbytes))
+        rbytes_init();
+    x = rbytes[rnibblesused++ / 2];
+    return (rnibblesused & 1 ? x & 0xf : x >> 4);
+}
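
Each 512-byte arc4random_buf() refill yields 1024 nibbles, and the counter hands out the low nibble of each byte first, then the high one. A standalone model of the stream (the names and fixed bytes are illustrative):

#include <stdio.h>

static unsigned char rb[2] = { 0xab, 0xcd };
static unsigned used;

static unsigned char
nib(void)
{
    unsigned char x = rb[used++ / 2];

    return used & 1 ? x & 0xf : x >> 4;
}

int
main(void)
{
    for (int i = 0; i < 4; i++)
        printf("%x ", nib());   /* prints "b a d c" */
    printf("\n");
    return 0;
}
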
+
+/*
+ * Cache maintenance. We keep at most malloc_cache pages cached.
+ * If the cache is becoming full, unmap pages in the cache for real,
+ * and then add the region to the cache.
+ * As opposed to the regular region data structure, the sizes in the
+ * cache are in MALLOC_PAGESIZE units.
+ */
+static void
+unmap(struct dir_info *d, void *p, size_t sz)
+{
+    size_t psz = sz >> MALLOC_PAGESHIFT;
+    size_t rsz, tounmap;
+    struct region_info *r;
+    u_int i, offset;
+
+    if (sz != PAGEROUND(sz)) {
+        wrterror("munmap round");
+        return;
+    }
+
+    if (psz > mopts.malloc_cache) {
+        if (munmap(p, sz))
+            wrterror("munmap");
+        malloc_used -= sz;
+        return;
+    }
+    tounmap = 0;
+    rsz = mopts.malloc_cache - d->free_regions_size;
+    if (psz > rsz)
+        tounmap = psz - rsz;
+    offset = getrnibble();
+    for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
+        r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+        if (r->p != NULL) {
+            rsz = r->size << MALLOC_PAGESHIFT;
+            if (munmap(r->p, rsz))
+                wrterror("munmap");
+            r->p = NULL;
+            if (tounmap > r->size)
+                tounmap -= r->size;
+            else
+                tounmap = 0;
+            d->free_regions_size -= r->size;
+            r->size = 0;
+            malloc_used -= rsz;
+        }
+    }
+    if (tounmap > 0)
+        wrterror("malloc cache underflow");
+    for (i = 0; i < mopts.malloc_cache; i++) {
+        r = &d->free_regions[i];
+        if (r->p == NULL) {
+            if (mopts.malloc_hint)
+                madvise(p, sz, MADV_FREE);
+            if (mopts.malloc_freeprot)
+                mprotect(p, sz, PROT_NONE);
+            r->p = p;
+            r->size = psz;
+            d->free_regions_size += psz;
+            break;
+        }
+    }
+    if (i == mopts.malloc_cache)
+        wrterror("malloc free slot lost");
+    if (d->free_regions_size > mopts.malloc_cache)
+        wrterror("malloc cache overflow");
+}
+
+static void
+zapcacheregion(struct dir_info *d, void *p)
+{
+    u_int i;
+    struct region_info *r;
+    size_t rsz;
+
+    for (i = 0; i < mopts.malloc_cache; i++) {
+        r = &d->free_regions[i];
+        if (r->p == p) {
+            rsz = r->size << MALLOC_PAGESHIFT;
+            if (munmap(r->p, rsz))
+                wrterror("munmap");
+            r->p = NULL;
+            d->free_regions_size -= r->size;
+            r->size = 0;
+            malloc_used -= rsz;
+        }
+    }
+}
+
+static void *
+map(struct dir_info *d, size_t sz, int zero_fill)
+{
+    size_t psz = sz >> MALLOC_PAGESHIFT;
+    struct region_info *r, *big = NULL;
+    u_int i, offset;
+    void *p;
+
+    if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+        d->canary1 != ~d->canary2)
+        wrterror("internal struct corrupt");
+    if (sz != PAGEROUND(sz)) {
+        wrterror("map round");
+        return NULL;
+    }
+    if (psz > d->free_regions_size) {
+        p = MMAP(sz);
+        if (p != MAP_FAILED)
+            malloc_used += sz;
+        /* zero fill not needed */
+        return p;
+    }
+    offset = getrnibble();
+    for (i = 0; i < mopts.malloc_cache; i++) {
+        r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
+        if (r->p != NULL) {
+            if (r->size == psz) {
+                p = r->p;
+                if (mopts.malloc_freeprot)
+                    mprotect(p, sz, PROT_READ | PROT_WRITE);
+                if (mopts.malloc_hint)
+                    madvise(p, sz, MADV_NORMAL);
+                r->p = NULL;
+                r->size = 0;
+                d->free_regions_size -= psz;
+                if (zero_fill)
+                    memset(p, 0, sz);
+                else if (mopts.malloc_junk &&
+                    mopts.malloc_freeprot)
+                    memset(p, SOME_FREEJUNK, sz);
+                return p;
+            } else if (r->size > psz)
+                big = r;
+        }
+    }
+    if (big != NULL) {
+        r = big;
+        p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
+        if (mopts.malloc_freeprot)
+            mprotect(p, sz, PROT_READ | PROT_WRITE);
+        if (mopts.malloc_hint)
+            madvise(p, sz, MADV_NORMAL);
+        r->size -= psz;
+        d->free_regions_size -= psz;
+        if (zero_fill)
+            memset(p, 0, sz);
+        else if (mopts.malloc_junk && mopts.malloc_freeprot)
+            memset(p, SOME_FREEJUNK, sz);
+        return p;
+    }
+    p = MMAP(sz);
+    if (p != MAP_FAILED)
+        malloc_used += sz;
+    if (d->free_regions_size > mopts.malloc_cache)
+        wrterror("malloc cache");
+    /* zero fill not needed */
+    return p;
+}
+
+/*
+ * Initialize a dir_info, which should have been cleared by caller
+ */
+static int
+omalloc_init(struct dir_info **dp)
+{
+    char *p, b[64];
+    int i, j;
+    size_t d_avail, regioninfo_size;
+    struct dir_info *d;
+
+    rbytes_init();
 
     /*
-     * First time malloc is called, setup page size and
-     * align break pointer so all data will be page aligned.
+     * Default options
      */
-    if (pagesz == 0) {
-        pagesz = n = getpagesize();
-        op = (union overhead *)sbrk(0);
-        n = n - sizeof (*op) - ((long)op & (n - 1));
-        if (n < 0)
-            n += pagesz;
-        if (n) {
-            if (sbrk(n) == (char *)-1)
-                return (NULL);
+    mopts.malloc_abort = 1;
+    mopts.malloc_move = 1;
+    mopts.malloc_cache = 64;
+
+    for (i = 0; i < 3; i++) {
+        switch (i) {
+        case 0:
+            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
+            if (j <= 0)
+                continue;
+            b[j] = '\0';
+            p = b;
+            break;
+        case 1:
+            if (issetugid() == 0)
+                p = getenv("MALLOC_OPTIONS");
+            else
+                continue;
+            break;
+        case 2:
+            p = malloc_options;
+            break;
+        default:
+            p = NULL;
         }
-        bucket = 0;
-        amt = 8;
-        while (pagesz > amt) {
-            amt <<= 1;
-            bucket++;
+
+        for (; p != NULL && *p != '\0'; p++) {
+            switch (*p) {
+            case '>':
+                mopts.malloc_cache <<= 1;
+                if (mopts.malloc_cache > MALLOC_MAXCACHE)
+                    mopts.malloc_cache = MALLOC_MAXCACHE;
+                break;
+            case '<':
+                mopts.malloc_cache >>= 1;
+                break;
+            case 'a':
+                mopts.malloc_abort = 0;
+                break;
+            case 'A':
+                mopts.malloc_abort = 1;
+                break;
+#ifdef MALLOC_STATS
+            case 'd':
+                mopts.malloc_stats = 0;
+                break;
+            case 'D':
+                mopts.malloc_stats = 1;
+                break;
+#endif /* MALLOC_STATS */
+            case 'f':
+                mopts.malloc_freeprot = 0;
+                break;
+            case 'F':
+                mopts.malloc_freeprot = 1;
+                break;
+            case 'g':
+                mopts.malloc_guard = 0;
+                break;
+            case 'G':
+                mopts.malloc_guard = MALLOC_PAGESIZE;
+                break;
+            case 'h':
+                mopts.malloc_hint = 0;
+                break;
+            case 'H':
+                mopts.malloc_hint = 1;
+                break;
+            case 'j':
+                mopts.malloc_junk = 0;
+                break;
+            case 'J':
+                mopts.malloc_junk = 1;
+                break;
+            case 'n':
+            case 'N':
+                break;
+            case 'p':
+                mopts.malloc_move = 0;
+                break;
+            case 'P':
+                mopts.malloc_move = 1;
+                break;
+            case 'r':
+                mopts.malloc_realloc = 0;
+                break;
+            case 'R':
+                mopts.malloc_realloc = 1;
+                break;
+            case 'S':
+                mopts.malloc_freeprot = mopts.malloc_junk = 1;
+                mopts.malloc_guard = MALLOC_PAGESIZE;
+                break;
+            case 'x':
+                mopts.malloc_xmalloc = 0;
+                break;
+            case 'X':
+                mopts.malloc_xmalloc = 1;
+                break;
+            case 'z':
+                mopts.malloc_zero = 0;
+                break;
+            case 'Z':
+                mopts.malloc_zero = 1;
+                break;
+            default: {
+                static const char q[] = "malloc() warning: "
+                    "unknown char in MALLOC_OPTIONS\n";
+                write(STDERR_FILENO, q, sizeof(q) - 1);
+                break;
+            }
+            }
         }
-        pagebucket = bucket;
     }
+
     /*
-     * Convert amount of memory requested into closest block size
-     * stored in hash buckets which satisfies request.
-     * Account for space used per block for accounting.
+     * We want junk in the entire allocation, and zero only in the part
+     * the user asked for.
      */
-    if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
-#ifndef RCHECK
-        amt = 8;	/* size of first bucket */
-        bucket = 0;
-#else
-        amt = 16;	/* size of first bucket */
-        bucket = 1;
-#endif
-        n = -((long)sizeof (*op) + RSLOP);
-    } else {
-        amt = pagesz;
-        bucket = pagebucket;
-    }
-    while (nbytes > amt + n) {
-        amt <<= 1;
-        if (amt == 0)
-            return (NULL);
-        bucket++;
+    if (mopts.malloc_zero)
+        mopts.malloc_junk = 1;
+
+#ifdef MALLOC_STATS
+    if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
+        static const char q[] = "malloc() warning: atexit(2) failed."
+            " Will not be able to dump stats on exit\n";
+        write(STDERR_FILENO, q, sizeof(q) - 1);
     }
+#endif /* MALLOC_STATS */
+
+    while ((mopts.malloc_canary = arc4random()) == 0)
+        ;
+
     /*
-     * If nothing in hash bucket right now,
-     * request more memory from the system.
+     * Allocate dir_info with a guard page on either side. Also
+     * randomise offset inside the page at which the dir_info
+     * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
      */
-    if ((op = nextf[bucket]) == NULL) {
-        morecore(bucket);
-        if ((op = nextf[bucket]) == NULL)
-            return (NULL);
-    }
-    /* remove from linked list */
-    nextf[bucket] = op->ov_next;
-    op->ov_magic = MAGIC;
-    op->ov_index = bucket;
-#ifdef MSTATS
-    nmalloc[bucket]++;
-#endif
-#ifdef RCHECK
+    if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
+        return -1;
+    mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
+    mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
+        MALLOC_PAGESIZE, PROT_NONE);
+    d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
+    d = (struct dir_info *)(p + MALLOC_PAGESIZE +
+        (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
+
+    d->regions_bits = 9;
+    d->regions_free = d->regions_total = 1 << d->regions_bits;
+    regioninfo_size = d->regions_total * sizeof(struct region_info);
+    d->r = MMAP(regioninfo_size);
+    if (d->r == MAP_FAILED) {
+        wrterror("malloc init mmap failed");
+        d->regions_total = 0;
+        return 1;
+    }
+    LIST_INIT(&d->chunk_info_list);
+    for (i = 0; i < MALLOC_MAXSHIFT; i++)
+        LIST_INIT(&d->chunk_dir[i]);
+    malloc_used += regioninfo_size;
+    d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
+    d->canary2 = ~d->canary1;
+
+    *dp = d;
+
     /*
-     * Record allocated size of block and
-     * bound space with magic numbers.
+     * Options have been set and will never be reset.
+     * Prevent further tampering with them.
      */
-    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
-    op->ov_rmagic = RMAGIC;
-    *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
-#endif
-    return ((char *)(op + 1));
+    if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
+        mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
+
+    return 0;
 }
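
omalloc_init() reads its option string from three places in order: the /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable (skipped for setugid programs), and the malloc_options global, with later sources overriding earlier ones. A usage sketch relying only on the documented malloc_options global, which must be assigned before the first allocation:

#include <stdio.h>
#include <stdlib.h>

extern char *malloc_options;

int
main(void)
{
    malloc_options = "JG";      /* 'J' junk fill, 'G' guard pages */
    char *p = malloc(100);

    printf("%p\n", (void *)p);
    free(p);
    return 0;
}

For non-setugid programs, MALLOC_OPTIONS=JG in the environment has the same effect without a recompile.
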
 
-/*
- * Allocate more memory to the indicated bucket.
- */
-static void
-morecore(bucket)
-	int bucket;
+static int
+omalloc_grow(struct dir_info *d)
 {
-	register union overhead *op;
-	register long sz;		/* size of desired block */
-	long amt;			/* amount to allocate */
-	int nblks;			/* how many blocks we get */
+    size_t newbits;
+    size_t newtotal;
+    size_t newsize;
+    size_t mask;
+    size_t i;
+    struct region_info *p;
 
-	/*
-	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
-	 * 2^30 bytes on a VAX, I think) or for a negative arg.
-	 */
-	sz = 1 << (bucket + 3);
-#ifdef DEBUG
-	ASSERT(sz > 0);
-#else
-	if (sz <= 0)
-		return;
-#endif
-	if (sz < pagesz) {
-		amt = pagesz;
-		nblks = amt / sz;
-	} else {
-		amt = sz + pagesz;
-		nblks = 1;
+    if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
+        return 1;
+
+    newbits = d->regions_bits + 1;
+    newtotal = d->regions_total * 2;
+    newsize = newtotal * sizeof(struct region_info);
+    mask = newtotal - 1;
+
+    p = MMAP(newsize);
+    if (p == MAP_FAILED)
+        return 1;
+
+    malloc_used += newsize;
+    memset(p, 0, newsize);
+    STATS_ZERO(d->inserts);
+    STATS_ZERO(d->insert_collisions);
+    for (i = 0; i < d->regions_total; i++) {
+        void *q = d->r[i].p;
+        if (q != NULL) {
+            size_t index = hash(q) & mask;
+            STATS_INC(d->inserts);
+            while (p[index].p != NULL) {
+                index = (index - 1) & mask;
+                STATS_INC(d->insert_collisions);
+            }
+            p[index] = d->r[i];
+        }
     }
-	op = (union overhead *)sbrk(amt);
-	/* no more room! */
-	if ((long)op == -1)
-		return;
-	/*
-	 * Add new memory allocated to that on
-	 * free list for this hash bucket.
-	 */
-	nextf[bucket] = op;
-	while (--nblks > 0) {
-		op->ov_next = (union overhead *)((caddr_t)op + sz);
-		op = (union overhead *)((caddr_t)op + sz);
-	}
+    /* avoid pages containing meta info to end up in cache */
+    if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
+        wrterror("munmap");
+    else
+        malloc_used -= d->regions_total * sizeof(struct region_info);
+    d->regions_free = d->regions_free + d->regions_total;
+    d->regions_total = newtotal;
+    d->regions_bits = newbits;
+    d->r = p;
+    return 0;
 }
 
-void
-free(cp)
-	void *cp;
-{
-	register long size;
-	register union overhead *op;
-
-	if (cp == NULL)
-		return;
-	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
-#ifdef DEBUG
-	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
-#else
-	if (op->ov_magic != MAGIC)
-		return;				/* sanity */
-#endif
-#ifdef RCHECK
-	ASSERT(op->ov_rmagic == RMAGIC);
-	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
-#endif
-	size = op->ov_index;
-	ASSERT(size < NBUCKETS);
-	op->ov_next = nextf[size];	/* also clobbers ov_magic */
-	nextf[size] = op;
-#ifdef MSTATS
-	nmalloc[size]--;
-#endif
+static struct chunk_info *
+alloc_chunk_info(struct dir_info *d)
+{
+    struct chunk_info *p;
+    int i;
+
+    if (LIST_EMPTY(&d->chunk_info_list)) {
+        p = MMAP(MALLOC_PAGESIZE);
+        if (p == MAP_FAILED)
+            return NULL;
+        malloc_used += MALLOC_PAGESIZE;
+        for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++)
+            LIST_INSERT_HEAD(&d->chunk_info_list, &p[i], entries);
+    }
+    p = LIST_FIRST(&d->chunk_info_list);
+    LIST_REMOVE(p, entries);
+    memset(p, 0, sizeof *p);
+    p->canary = d->canary1;
+    return p;
 }
 
+static int
+insert(struct dir_info *d, void *p, size_t sz)
+{
+    size_t index;
+    size_t mask;
+    void *q;
+
+    if (d->regions_free * 4 < d->regions_total) {
+        if (omalloc_grow(d))
+            return 1;
+    }
+    mask = d->regions_total - 1;
+    index = hash(p) & mask;
+    q = d->r[index].p;
+    STATS_INC(d->inserts);
+    while (q != NULL) {
+        index = (index - 1) & mask;
+        q = d->r[index].p;
+        STATS_INC(d->insert_collisions);
+    }
+    d->r[index].p = p;
+    d->r[index].size = sz;
+    d->regions_free--;
+    return 0;
+}
+
+static struct region_info *
+find(struct dir_info *d, void *p)
+{
+    size_t index;
+    size_t mask = d->regions_total - 1;
+    void *q, *r;
+
+    if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+        d->canary1 != ~d->canary2)
+        wrterror("internal struct corrupt");
+    p = MASK_POINTER(p);
+    index = hash(p) & mask;
+    r = d->r[index].p;
+    q = MASK_POINTER(r);
+    STATS_INC(d->finds);
+    while (q != p && r != NULL) {
+        index = (index - 1) & mask;
+        r = d->r[index].p;
+        q = MASK_POINTER(r);
+        STATS_INC(d->find_collisions);
+    }
+    return q == p ? &d->r[index] : NULL;
+}
+
+static void
+delete(struct dir_info *d, struct region_info *ri)
+{
+    /* algorithm R, Knuth Vol III section 6.4 */
+    size_t mask = d->regions_total - 1;
+    size_t i, j, r;
+
+    if (d->regions_total & (d->regions_total - 1))
+        wrterror("regions_total not 2^x");
+    d->regions_free++;
+    STATS_INC(g_pool->deletes);
+
+    i = ri - d->r;
+    for (;;) {
+        d->r[i].p = NULL;
+        d->r[i].size = 0;
+        j = i;
+        for (;;) {
+            i = (i - 1) & mask;
+            if (d->r[i].p == NULL)
+                return;
+            r = hash(d->r[i].p) & mask;
+            if ((i <= r && r < j) || (r < j && j < i) ||
+                (j < i && i <= r))
+                continue;
+            d->r[j] = d->r[i];
+            STATS_INC(g_pool->delete_moves);
+            break;
+        }
+
+    }
+}
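
delete() needs the backward-shift step because find() probes downward: simply emptying a slot would cut the probe chain of any entry that hashed above it. The wraparound test checks whether a scanned entry's home slot r lies cyclically between the hole j and the scan position i; if not, the entry is moved into the hole and the scan restarts from its old slot. A small self-contained model with integer keys (illustrative, not the committed code):

#include <stdio.h>

#define N 8                     /* table size, power of two */
static int t[N];                /* 0 means empty */

static unsigned h(int k) { return (unsigned)k & (N - 1); }

static void
ins(int k)
{
    unsigned i = h(k);

    while (t[i] != 0)
        i = (i - 1) & (N - 1);  /* probe downward, like insert() */
    t[i] = k;
}

static void
del(unsigned i)                 /* free the entry occupying slot i */
{
    unsigned j, r;

    for (;;) {
        t[i] = 0;
        j = i;
        for (;;) {
            i = (i - 1) & (N - 1);
            if (t[i] == 0)
                return;
            r = h(t[i]);        /* home slot of the scanned entry */
            if ((i <= r && r < j) || (r < j && j < i) ||
                (j < i && i <= r))
                continue;       /* still reachable; leave it */
            t[j] = t[i];        /* refill the hole and restart there */
            break;
        }
    }
}

int
main(void)
{
    ins(8); ins(16); ins(24);   /* all hash to slot 0: fill 0, 7, 6 */
    del(0);                     /* 16 shifts to slot 0, 24 to slot 7 */
    for (unsigned i = 0; i < N; i++)
        printf("%u:%d ", i, t[i]);
    printf("\n");
    return 0;
}
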
+
 /*
- * When a program attempts "storage compaction" as mentioned in the
- * old malloc man page, it realloc's an already freed block.  Usually
- * this is the last block it freed; occasionally it might be farther
- * back.  We have to search all the free lists for the block in order
- * to determine its bucket: 1st we make one pass thru the lists
- * checking only the first block in each; if that fails we search
- * ``realloc_srchlen'' blocks in each list for a match (the variable
- * is extern so the caller can modify it).  If that fails we just copy
- * however many bytes was given to realloc() and hope it's not huge.
+ * Allocate a page of chunks
  */
-int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
+static struct chunk_info *
+omalloc_make_chunks(struct dir_info *d, int bits)
+{
+    struct chunk_info *bp;
+    void *pp;
+    long i, k;
 
-void *
-realloc(cp, nbytes)
-	void *cp;
-	size_t nbytes;
-{
-	register u_long onb;
-	register long i;
-	union overhead *op;
-	char *res;
-	int was_alloced = 0;
-
-	if (cp == NULL)
-		return (malloc(nbytes));
-	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
-	if (op->ov_magic == MAGIC) {
-		was_alloced++;
-		i = op->ov_index;
-	} else {
-		/*
-		 * Already free, doing "compaction".
-		 *
-		 * Search for the old block of memory on the
-		 * free list.  First, check the most common
-		 * case (last element free'd), then (this failing)
-		 * the last ``realloc_srchlen'' items free'd.
-		 * If all lookups fail, then assume the size of
-		 * the memory block being realloc'd is the
-		 * largest possible (so that all "nbytes" of new
-		 * memory are copied into).  Note that this could cause
-		 * a memory fault if the old area was tiny, and the moon
-		 * is gibbous.  However, that is very unlikely.
-		 */
-		if ((i = findbucket(op, 1)) < 0 &&
-		    (i = findbucket(op, realloc_srchlen)) < 0)
-			i = NBUCKETS;
-	}
-	onb = 1 << (i + 3);
-	if (onb < pagesz)
-		onb -= sizeof (*op) + RSLOP;
-	else
-		onb += pagesz - sizeof (*op) - RSLOP;
-	/* avoid the copy if same size block */
-	if (was_alloced) {
-		if (i) {
-			i = 1 << (i + 2);
-			if (i < pagesz)
-				i -= sizeof (*op) + RSLOP;
-			else
-				i += pagesz - sizeof (*op) - RSLOP;
+    /* Allocate a new bucket */
+    pp = map(d, MALLOC_PAGESIZE, 0);
+    if (pp == MAP_FAILED)
+        return NULL;
+
+    bp = alloc_chunk_info(d);
+    if (bp == NULL) {
+        unmap(d, pp, MALLOC_PAGESIZE);
+        return NULL;
+    }
+
+    /* memory protect the page allocated in the malloc(0) case */
+    if (bits == 0) {
+        bp->size = 0;
+        bp->shift = 1;
+        i = MALLOC_MINSIZE - 1;
+        while (i >>= 1)
+            bp->shift++;
+        bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
+        bp->page = pp;
+
+        k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
+        if (k < 0) {
+            unmap(d, pp, MALLOC_PAGESIZE);
+            LIST_INSERT_HEAD(&d->chunk_info_list, bp, entries);
+            return NULL;
         }
-		if (nbytes <= onb && nbytes > i) {
-#ifdef RCHECK
-			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
-			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
-#endif
-			return(cp);
-		} else
-			free(cp);
+    } else {
+        bp->size = (1UL << bits);
+        bp->shift = bits;
+        bp->total = bp->free = MALLOC_PAGESIZE >> bits;
+        bp->page = pp;
     }
-	if ((res = malloc(nbytes)) == NULL)
-		return (NULL);
-	if (cp != res)		/* common optimization if "compacting" */
-		bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
-	return (res);
+
+    /* set all valid bits in the bitmap */
+    k = bp->total;
+    i = 0;
+
+    /* Do a bunch at a time */
+    for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
+        bp->bits[i / MALLOC_BITS] = ~0UL;
+
+    for (; i < k; i++)
+        bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
+
+    LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
+
+    bits++;
+    if ((uintptr_t)pp & bits)
+        wrterror("pp & bits");
+
+    insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
+    return bp;
 }
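
The bitmap initialisation above marks every chunk on the new page free, whole words at a time first. With a 4096-byte page and 16-byte chunks there are 256 chunks, so on a 64-bit machine all four words end up all-ones; a standalone sketch under those assumptions (illustration only):

#include <stdio.h>

int
main(void)
{
    unsigned long bits[256 / 64] = { 0 };
    int total = 4096 >> 4;      /* 256 chunks of 1 << 4 bytes */
    int i = 0;

    for (; total - i >= 64; i += 64)
        bits[i / 64] = ~0UL;    /* a bunch at a time */
    for (; i < total; i++)      /* leftover bits; none in this case */
        bits[i / 64] |= 1UL << (i % 64);

    printf("%lx %lx %lx %lx\n", bits[0], bits[1], bits[2], bits[3]);
    return 0;
}

Note also how the page is keyed into the region table as pp | (bits + 1), so REALSIZE() can later recover the chunk size from the pointer's low bits while r->size carries the chunk_info pointer.
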
 
+
 /*
- * Search ``srchlen'' elements of each free list for a block whose
- * header starts at ``freep''.  If srchlen is -1 search the whole list.
- * Return bucket number, or -1 if not found.
+ * Allocate a chunk
  */
-static
-findbucket(freep, srchlen)
-	union overhead *freep;
-	int srchlen;
+static void *
+malloc_bytes(struct dir_info *d, size_t size)
 {
-	register union overhead *p;
-	register int i, j;
+    int i, j;
+    size_t k;
+    u_long u, *lp;
+    struct chunk_info *bp;
+
+    if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+        d->canary1 != ~d->canary2)
+        wrterror("internal struct corrupt");
+    /* Don't bother with anything less than this */
+    /* unless we have a malloc(0) request */
+    if (size != 0 && size < MALLOC_MINSIZE)
+        size = MALLOC_MINSIZE;
 
-	for (i = 0; i < NBUCKETS; i++) {
+    /* Find the right bucket */
+    if (size == 0)
         j = 0;
-		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
-			if (p == freep)
-				return (i);
+    else {
+        j = MALLOC_MINSHIFT;
+        i = (size - 1) >> (MALLOC_MINSHIFT - 1);
+        while (i >>= 1)
             j++;
+    }
+
+    /* If it's empty, make a page more of that size chunks */
+    if (LIST_EMPTY(&d->chunk_dir[j])) {
+        bp = omalloc_make_chunks(d, j);
+        if (bp == NULL)
+            return NULL;
+    } else
+        bp = LIST_FIRST(&d->chunk_dir[j]);
+
+    if (bp->canary != d->canary1)
+        wrterror("chunk info corrupted");
+    /* Find first word of bitmap which isn't empty */
+    for (lp = bp->bits; !*lp; lp++)
+        /* EMPTY */;
+
+    /* Find that bit, and tweak it */
+    u = 1;
+    k = 0;
+    while (!(*lp & u)) {
+        u += u;
+        k++;
+    }
+
+    /* advance a random # of positions */
+    i = getrnibble() % bp->free;
+    while (i > 0) {
+        u += u;
+        k++;
+        if (k >= MALLOC_BITS) {
+            lp++;
+            u = 1;
+            k = 0;
+        }
+        if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) {
+            wrterror("chunk overflow");
+            errno = EFAULT;
+            return (NULL);
         }
+        if (*lp & u)
+            i--;
+    }
+
+    *lp ^= u;
+
+    /* If there are no more free, remove from free-list */
+    if (!--bp->free)
+        LIST_REMOVE(bp, entries);
+
+    /* Adjust to the real offset of that chunk */
+    k += (lp - bp->bits) * MALLOC_BITS;
+    k <<= bp->shift;
+
+    if (mopts.malloc_junk && bp->size > 0)
+        memset((char *)bp->page + k, SOME_JUNK, bp->size);
+    return ((char *)bp->page + k);
+}
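
The bucket computation at the top of malloc_bytes() finds the smallest j with 1 << j >= size for nonzero sizes, starting from MALLOC_MINSHIFT. A standalone sketch of just that mapping, assuming MALLOC_MINSHIFT is 4 (illustration only):

#include <stdio.h>
#include <stddef.h>

static int
bucket(size_t size)
{
    int j = 4;                          /* MALLOC_MINSHIFT */
    size_t i = (size - 1) >> 3;         /* MALLOC_MINSHIFT - 1 */

    while (i >>= 1)
        j++;
    return j;
}

int
main(void)
{
    /* prints "4 4 7 11": 16-, 16-, 128- and 2048-byte chunks */
    printf("%d %d %d %d\n", bucket(1), bucket(16), bucket(100),
        bucket(2048));
    return 0;
}
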
+
+
+/*
+ * Free a chunk, and possibly the page it's on, if the page becomes empty.
+ */
+static void
+free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
+{
+    struct chunk_head *mp;
+    struct chunk_info *info;
+    long i;
+
+    info = (struct chunk_info *)r->size;
+    if (info->canary != d->canary1)
+        wrterror("chunk info corrupted");
+
+    /* Find the chunk number on the page */
+    i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
+
+    if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) {
+        wrterror("modified chunk-pointer");
+        return;
+    }
+    if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
+        wrterror("chunk is already free");
+        return;
+    }
+
+    info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS);
+    info->free++;
+
+    if (info->size != 0)
+        mp = d->chunk_dir + info->shift;
+    else
+        mp = d->chunk_dir;
+
+    if (info->free == 1) {
+        /* Page became non-full */
+        LIST_INSERT_HEAD(mp, info, entries);
+        return;
+    }
+    if (info->free != info->total)
+        return;
+
+    LIST_REMOVE(info, entries);
+
+    if (info->size == 0 && !mopts.malloc_freeprot)
+        mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
+    unmap(d, info->page, MALLOC_PAGESIZE);
+
+    delete(d, r);
+    LIST_INSERT_HEAD(&d->chunk_info_list, info, entries);
+}
+
+
+
+static void *
+omalloc(size_t sz, int zero_fill)
+{
+    void *p;
+    size_t psz;
+
+    if (sz > MALLOC_MAXCHUNK) {
+        if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
+            errno = ENOMEM;
+            return NULL;
+        }
+        sz += mopts.malloc_guard;
+        psz = PAGEROUND(sz);
+        p = map(g_pool, psz, zero_fill);
+        if (p == MAP_FAILED) {
+            errno = ENOMEM;
+            return NULL;
+        }
+        if (insert(g_pool, p, sz)) {
+            unmap(g_pool, p, psz);
+            errno = ENOMEM;
+            return NULL;
+        }
+        if (mopts.malloc_guard) {
+            if (mprotect((char *)p + psz - mopts.malloc_guard,
+                mopts.malloc_guard, PROT_NONE))
+                wrterror("mprotect");
+            malloc_guarded += mopts.malloc_guard;
+        }
+
+        if (mopts.malloc_move &&
+            sz - mopts.malloc_guard < MALLOC_PAGESIZE -
+            MALLOC_LEEWAY) {
+            /* fill whole allocation */
+            if (mopts.malloc_junk)
+                memset(p, SOME_JUNK, psz - mopts.malloc_guard);
+            /* shift towards the end */
+            p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
+                (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
+            /* fill zeros if needed and overwritten above */
+            if (zero_fill && mopts.malloc_junk)
+                memset(p, 0, sz - mopts.malloc_guard);
+        } else {
+            if (mopts.malloc_junk) {
+                if (zero_fill)
+                    memset(p + sz - mopts.malloc_guard,
+                        SOME_JUNK, psz - sz);
+                else
+                    memset(p, SOME_JUNK,
+                        psz - mopts.malloc_guard);
+            }
+        }
+
+    } else {
+        /* takes care of SOME_JUNK */
+        p = malloc_bytes(g_pool, sz);
+        if (zero_fill && p != NULL && sz > 0)
+            memset(p, 0, sz);
     }
-	return (-1);
+
+    return p;
 }
 
-#ifdef MSTATS
 /*
- * mstats - print out statistics about malloc
- *
- * Prints two lines of numbers, one showing the length of the free list
- * for each size category, the second showing the number of mallocs -
- * frees for each size category.
+ * Common function for handling recursion.  Only
+ * print the error message once, to avoid making the problem
+ * potentially worse.
  */
-mstats(s)
-	char *s;
+static void
+malloc_recurse(void)
 {
-	register int i, j;
-	register union overhead *p;
-	int totfree = 0,
-	totused = 0;
-
-	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
-	for (i = 0; i < NBUCKETS; i++) {
-		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
-			;
-		fprintf(stderr, " %d", j);
-		totfree += j * (1 << (i + 3));
-	}
-	fprintf(stderr, "\nused:\t");
-	for (i = 0; i < NBUCKETS; i++) {
-		fprintf(stderr, " %d", nmalloc[i]);
-		totused += nmalloc[i] * (1 << (i + 3));
-	}
-	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
-	    totused, totfree);
+    static int noprint;
+
+    if (noprint == 0) {
+        noprint = 1;
+        wrterror("recursive call");
+    }
+    malloc_active--;
+    _MALLOC_UNLOCK();
+    errno = EDEADLK;
+}
+
+static int
+malloc_init(void)
+{
+    if (omalloc_init(&g_pool)) {
+        _MALLOC_UNLOCK();
+        if (mopts.malloc_xmalloc)
+            wrterror("out of memory");
+        errno = ENOMEM;
+        return -1;
+    }
+    return 0;
+}
+
+void *
+malloc(size_t size)
+{
+    void *r;
+    int saved_errno = errno;
+
+    _MALLOC_LOCK();
+    malloc_func = " in malloc():";
+    if (g_pool == NULL) {
+        if (malloc_init() != 0)
+            return NULL;
+    }
+    if (malloc_active++) {
+        malloc_recurse();
+        return NULL;
+    }
+    r = omalloc(size, mopts.malloc_zero);
+    malloc_active--;
+    _MALLOC_UNLOCK();
+    if (r == NULL && mopts.malloc_xmalloc) {
+        wrterror("out of memory");
+        errno = ENOMEM;
+    }
+    if (r != NULL)
+        errno = saved_errno;
+    return r;
 }
+
+static void
+ofree(void *p)
+{
+    struct region_info *r;
+    size_t sz;
+
+    r = find(g_pool, p);
+    if (r == NULL) {
+        wrterror("bogus pointer (double free?)");
+        return;
+    }
+    REALSIZE(sz, r);
+    if (sz > MALLOC_MAXCHUNK) {
+        if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
+            MALLOC_LEEWAY) {
+            if (r->p != p) {
+                wrterror("bogus pointer");
+                return;
+            }
+        } else {
+#if notyetbecause_of_realloc
+            /* shifted towards the end */
+            if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
+                MALLOC_MINSIZE - sz - mopts.malloc_guard) &
+                ~(MALLOC_MINSIZE-1))) {
+            }
 #endif
+            p = r->p;
+        }
+        if (mopts.malloc_guard) {
+            if (sz < mopts.malloc_guard)
+                wrterror("guard size");
+            if (!mopts.malloc_freeprot) {
+                if (mprotect((char *)p + PAGEROUND(sz) -
+                    mopts.malloc_guard, mopts.malloc_guard,
+                    PROT_READ | PROT_WRITE))
+                    wrterror("mprotect");
+            }
+            malloc_guarded -= mopts.malloc_guard;
+        }
+        if (mopts.malloc_junk && !mopts.malloc_freeprot)
+            memset(p, SOME_FREEJUNK,
+                PAGEROUND(sz) - mopts.malloc_guard);
+        unmap(g_pool, p, PAGEROUND(sz));
+        delete(g_pool, r);
+    } else {
+        void *tmp;
+        int i;
+
+        if (mopts.malloc_junk && sz > 0)
+            memset(p, SOME_FREEJUNK, sz);
+        if (!mopts.malloc_freeprot) {
+            i = getrnibble();
+            tmp = p;
+            p = g_pool->delayed_chunks[i];
+            g_pool->delayed_chunks[i] = tmp;
+        }
+        if (p != NULL) {
+            r = find(g_pool, p);
+            if (r == NULL) {
+                wrterror("bogus pointer (double free?)");
+                return;
+            }
+            free_bytes(g_pool, r, p);
+        }
+    }
+}
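
For chunk-sized frees, ofree() does not release the pointer immediately unless the 'F' option is set: it swaps it into a random slot of delayed_chunks and frees whatever that slot held before, so the moment a chunk truly dies is unpredictable. A fragment modelling just the swap (hypothetical names, not the committed code):

#define SLOTS 16                /* MALLOC_DELAYED_CHUNKS + 1 */
static void *delayed[SLOTS];

/* returns the pointer that should really be freed now; may be NULL */
static void *
delay_free(void *p, unsigned rnibble)
{
    void *old = delayed[rnibble & (SLOTS - 1)];

    delayed[rnibble & (SLOTS - 1)] = p;
    return old;
}
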
+
+void
+free(void *ptr)
+{
+    int saved_errno = errno;
+
+    /* This is legal. */
+    if (ptr == NULL)
+        return;
+
+    _MALLOC_LOCK();
+    malloc_func = " in free():";
+    if (g_pool == NULL) {
+        _MALLOC_UNLOCK();
+        wrterror("free() called before allocation");
+        return;
+    }
+    if (malloc_active++) {
+        malloc_recurse();
+        return;
+    }
+    ofree(ptr);
+    malloc_active--;
+    _MALLOC_UNLOCK();
+    errno = saved_errno;
+}
+
+
+static void *
+orealloc(void *p, size_t newsz)
+{
+    struct region_info *r;
+    size_t oldsz, goldsz, gnewsz;
+    void *q;
+
+    if (p == NULL)
+        return omalloc(newsz, 0);
+
+    r = find(g_pool, p);
+    if (r == NULL) {
+        wrterror("bogus pointer (double free?)");
+        return NULL;
+    }
+    if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
+        errno = ENOMEM;
+        return NULL;
+    }
+
+    REALSIZE(oldsz, r);
+    goldsz = oldsz;
+    if (oldsz > MALLOC_MAXCHUNK) {
+        if (oldsz < mopts.malloc_guard)
+            wrterror("guard size");
+        oldsz -= mopts.malloc_guard;
+    }
+
+    gnewsz = newsz;
+    if (gnewsz > MALLOC_MAXCHUNK)
+        gnewsz += mopts.malloc_guard;
+
+    if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
+        !mopts.malloc_realloc) {
+        size_t roldsz = PAGEROUND(goldsz);
+        size_t rnewsz = PAGEROUND(gnewsz);
+
+        if (rnewsz > roldsz) {
+            if (!mopts.malloc_guard) {
+                STATS_INC(g_pool->cheap_realloc_tries);
+                zapcacheregion(g_pool, p + roldsz);
+                q = MMAPA(p + roldsz, rnewsz - roldsz);
+                if (q == p + roldsz) {
+                    malloc_used += rnewsz - roldsz;
+                    if (mopts.malloc_junk)
+                        memset(q, SOME_JUNK,
+                            rnewsz - roldsz);
+                    r->size = newsz;
+                    STATS_INC(g_pool->cheap_reallocs);
+                    return p;
+                } else if (q != MAP_FAILED)
+                    munmap(q, rnewsz - roldsz);
+            }
+        } else if (rnewsz < roldsz) {
+            if (mopts.malloc_guard) {
+                if (mprotect((char *)p + roldsz -
+                    mopts.malloc_guard, mopts.malloc_guard,
+                    PROT_READ | PROT_WRITE))
+                    wrterror("mprotect");
+                if (mprotect((char *)p + rnewsz -
+                    mopts.malloc_guard, mopts.malloc_guard,
+                    PROT_NONE))
+                    wrterror("mprotect");
+            }
+            unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
+            r->size = gnewsz;
+            return p;
+        } else {
+            if (newsz > oldsz && mopts.malloc_junk)
+                memset((char *)p + newsz, SOME_JUNK,
+                    rnewsz - mopts.malloc_guard - newsz);
+            r->size = gnewsz;
+            return p;
+        }
+    }
+    if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
+        if (mopts.malloc_junk && newsz > 0)
+            memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+        return p;
+    } else if (newsz != oldsz || mopts.malloc_realloc) {
+        q = omalloc(newsz, 0);
+        if (q == NULL)
+            return NULL;
+        if (newsz != 0 && oldsz != 0)
+            memcpy(q, p, oldsz < newsz ? oldsz : newsz);
+        ofree(p);
+        return q;
+    } else
+        return p;
+}
+
+void *
+realloc(void *ptr, size_t size)
+{
+    void *r;
+    int saved_errno = errno;
+
+    _MALLOC_LOCK();
+    malloc_func = " in realloc():";
+    if (g_pool == NULL) {
+        if (malloc_init() != 0)
+            return NULL;
+    }
+    if (malloc_active++) {
+        malloc_recurse();
+        return NULL;
+    }
+    r = orealloc(ptr, size);
+
+    malloc_active--;
+    _MALLOC_UNLOCK();
+    if (r == NULL && mopts.malloc_xmalloc) {
+        wrterror("out of memory");
+        errno = ENOMEM;
+    }
+    if (r != NULL)
+        errno = saved_errno;
+    return r;
+}
+
+
+#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
+
+void *
+calloc(size_t nmemb, size_t size)
+{
+    void *r;
+    int saved_errno = errno;
+
+    _MALLOC_LOCK();
+    malloc_func = " in calloc():";
+    if (g_pool == NULL) {
+        if (malloc_init() != 0)
+            return NULL;
+    }
+    if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
+        nmemb > 0 && SIZE_MAX / nmemb < size) {
+        _MALLOC_UNLOCK();
+        if (mopts.malloc_xmalloc)
+            wrterror("out of memory");
+        errno = ENOMEM;
+        return NULL;
+    }
+
+    if (malloc_active++) {
+        malloc_recurse();
+        return NULL;
+    }
+
+    size *= nmemb;
+    r = omalloc(size, 1);
+
+    malloc_active--;
+    _MALLOC_UNLOCK();
+    if (r == NULL && mopts.malloc_xmalloc) {
+        wrterror("out of memory");
+        errno = ENOMEM;
+    }
+    if (r != NULL)
+        errno = saved_errno;
+    return r;
+}
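
MUL_NO_OVERFLOW is 2^(4 * sizeof(size_t)), the square root of SIZE_MAX + 1, so when both operands are below it the product cannot overflow and the division is skipped entirely. The test in isolation:

#include <stdint.h>
#include <stdio.h>

#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))

static int
mul_overflows(size_t nmemb, size_t size)
{
    return (nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
        nmemb > 0 && SIZE_MAX / nmemb < size;
}

int
main(void)
{
    /* prints "0 1" on LP64: 65536 * 65536 fits, (SIZE_MAX / 2) * 3 cannot */
    printf("%d %d\n", mul_overflows(65536, 65536),
        mul_overflows(SIZE_MAX / 2, 3));
    return 0;
}
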
+
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+    void *result;
+
+    /* Make sure that alignment is a large enough power of 2. */
+    if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
+        alignment > MALLOC_PAGESIZE)
+        return EINVAL;
+
+    /*
+     * max(size, alignment) is enough to assure the requested alignment,
+     * since the allocator always allocates power-of-two blocks.
+     */
+    if (size < alignment)
+        size = alignment;
+    result = malloc(size);
+
+    if (result == NULL)
+        return ENOMEM;
+
+    *memptr = result;
+    return 0;
+}
+
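
Rounding the request up to the alignment works for sub-page alignments because chunks are powers of two and naturally aligned: a chunk of size 1 << j sits at a multiple of 1 << j within its page, and page-sized regions are page-aligned (which is why alignment is capped at one page). A usage sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    void *p;

    if (posix_memalign(&p, 64, 40) != 0)    /* size is rounded up to 64 */
        return 1;
    printf("%p low bits %lx\n", p, (unsigned long)((uintptr_t)p & 63));
    free(p);
    return 0;
}
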