Diffstat:
 src/lj_alloc.c | 1232
 1 file changed, 1232 insertions(+), 0 deletions(-)
diff --git a/src/lj_alloc.c b/src/lj_alloc.c
new file mode 100644
index 00000000..8ad4f8fb
--- /dev/null
+++ b/src/lj_alloc.c
@@ -0,0 +1,1232 @@
/*
** Bundled memory allocator.
**
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
** The original bears the following remark:
**
** This is a version (aka dlmalloc) of malloc/free/realloc written by
** Doug Lea and released to the public domain, as explained at
** http://creativecommons.org/licenses/publicdomain.
**
** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee)
**
** No additional copyright is claimed over the customizations.
** Please do NOT bother the original author about this version here!
**
** If you want to use dlmalloc in another project, you should get
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
** For thread-safe derivatives, take a look at:
** - ptmalloc: http://www.malloc.de/
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
*/

#define lj_alloc_c
#define LUA_CORE

/* To get the mremap prototype. Must be defined before any system includes. */
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif

#include "lj_def.h"
#include "lj_arch.h"
#include "lj_alloc.h"

#ifndef LUAJIT_USE_SYSMALLOC

#define MAX_SIZE_T (~(size_t)0)
#define MALLOC_ALIGNMENT ((size_t)8U)

#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
#define MAX_RELEASE_CHECK_RATE 255

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

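/*
  Illustrative note (added for clarity): with MALLOC_ALIGNMENT == 8,
  align_offset(0x1005) == (8 - (0x1005 & 7)) & 7 == 3, so 0x1005 + 3 ==
  0x1008 is the next 8-byte aligned address, while align_offset(0x1008)
  == 0 because that address is already aligned.
*/
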
/* -------------------------- MMAP support ------------------------------- */

#define MFAIL ((void *)(MAX_SIZE_T))
#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */

#define IS_DIRECT_BIT (SIZE_T_ONE)

#ifdef LUA_USE_WIN

#if LJ_64
#error "missing support for WIN64 to allocate in lower 2G"
#endif

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

/* Win32 MMAP via VirtualAlloc */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

/* This function supports releasing coalesced segments */
static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  MEMORY_BASIC_INFORMATION minfo;
  char *cptr = (char *)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#else

#include <sys/mman.h>

#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANON */

#if LJ_64
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS|MAP_32BIT)
#else
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#endif

#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define DIRECT_MMAP(s) CALL_MMAP(s)
#define CALL_MUNMAP(a, s) munmap((a), (s))

#ifdef __linux__
/* Need to define _GNU_SOURCE to get the mremap prototype. */
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#endif

#endif

#ifndef CALL_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
#endif

/* ----------------------- Chunk representations ------------------------ */

struct malloc_chunk {
  size_t prev_foot;  /* Size of previous chunk (if free). */
  size_t head;       /* Size and inuse bits. */
  struct malloc_chunk *fd;  /* double links -- used only if free. */
  struct malloc_chunk *bk;
};

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;  /* Described below */
typedef unsigned int binmap_t;  /* Described below */
typedef unsigned int flag_t;  /* The type of various bit flag sets */

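/*
  Layout note (added for clarity, following the dlmalloc design): a chunk
  begins with its prev_foot and head words; chunk2mem() below skips these
  TWO_SIZE_T_SIZES to yield the user pointer. The fd/bk links overlay the
  first two payload words and are meaningful only while the chunk is free;
  likewise prev_foot is valid only when the previous chunk is free.
*/
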
/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE (sizeof(mchunk))

#define CHUNK_OVERHEAD (SIZE_T_SIZE)

/* Direct chunks need a second word of overhead ... */
#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))

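/*
  Worked example (added for clarity; numbers assume a 32-bit build, i.e.
  SIZE_T_SIZE == 4, so MIN_CHUNK_SIZE == 16 and MIN_REQUEST == 11):
  request2size(1) == MIN_CHUNK_SIZE == 16, while request2size(13) ==
  pad_request(13) == (13 + 4 + 7) & ~7 == 24. On a 64-bit build
  MIN_CHUNK_SIZE is 32, so both requests round up to 32.
*/
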
/* ------------------ Operations on head and foot fields ----------------- */

#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define chunksize(p) ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot)))

/* extract next chunk's pinuse bit */
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_direct(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
  (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

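/*
  Illustrative encoding (added for clarity): a free 32-byte chunk whose
  physical predecessor is in use has head == 32|PINUSE_BIT == 0x21, and
  set_foot() mirrors the size 32 into the prev_foot of the next chunk so
  that prev_chunk() can walk backwards during coalescing. An allocated
  chunk of the same size would have head == 32|PINUSE_BIT|CINUSE_BIT.
*/
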
/* ---------------------- Overlaid data structures ----------------------- */

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t prev_foot;
  size_t head;
  struct malloc_tree_chunk *fd;
  struct malloc_tree_chunk *bk;

  struct malloc_tree_chunk *child[2];
  struct malloc_tree_chunk *parent;
  bindex_t index;
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk *tchunkptr;
typedef struct malloc_tree_chunk *tbinptr;  /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

/* ----------------------------- Segments -------------------------------- */

struct malloc_segment {
  char *base;  /* base address */
  size_t size;  /* allocated size */
  struct malloc_segment *next;  /* ptr to next segment */
};

typedef struct malloc_segment msegment;
typedef struct malloc_segment *msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/* Bin types, widths and sizes */
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

struct malloc_state {
  binmap_t smallmap;
  binmap_t treemap;
  size_t dvsize;
  size_t topsize;
  mchunkptr dv;
  mchunkptr top;
  size_t trim_check;
  size_t release_checks;
  mchunkptr smallbins[(NSMALLBINS+1)*2];
  tbinptr treebins[NTREEBINS];
  msegment seg;
};

typedef struct malloc_state *mstate;

#define is_initialized(M) ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* page-align a size */
#define page_align(S)\
  (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))

#ifdef LUA_USE_WIN
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif

/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char *addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See the note on smallbin repositioning below. */
#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))

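/*
  Smallbin repositioning (note added for clarity, per the dlmalloc design):
  smallbins[] stores only the fd/bk link words of each bin header, not
  whole chunks. Casting &smallbins[i<<1] to an mchunkptr makes bin->fd and
  bin->bk land on smallbins[(i<<1)+2] and smallbins[(i<<1)+3], overlapping
  the never-accessed prev_foot/head positions of the next bin. The +1 in
  the array dimension provides the trailing slack this overlap needs.
*/
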
/* assign tree index for size S to variable I */
#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0) {\
    I = 0;\
  } else if (X > 0xFFFF) {\
    I = NTREEBINS-1;\
  } else {\
    unsigned int K = lj_fls(X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

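/*
  Worked example (added for clarity): for S == 768, X == 768>>8 == 3 and
  K == lj_fls(3) == 1, so I == (1<<1) + ((768 >> 8) & 1) == 3. This agrees
  with minsize_for_tree_index(3) == (1<<9)|(1<<8) == 768 below: bin 3 is
  the first bin whose smallest resolvable size is 768.
*/
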
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
  ((i == NTREEBINS-1)? 0 : \
   ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i) ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

/* mask with all bits to left of least bit of x on */
#define left_bits(x) ((x<<1) | (~(x<<1)+1))

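/*
  Example (added for clarity): left_bits(idx2bit(3)) == left_bits(0x8) ==
  0x10 | (~0x10 + 1) == 0xFFFFFFF0, i.e. all bits strictly above bit 3.
  ANDing such a mask with smallmap or treemap finds the nonempty bins that
  hold chunks larger than the one requested.
*/
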
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

/* ----------------------- Operations on smallbins ----------------------- */

/* Link a free chunk into a smallbin */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else\
    F = B->fd;\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  if (F == B) {\
    clear_smallmap(M, I);\
  } else {\
    F->bk = B;\
    B->fd = F;\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  if (B == F) {\
    clear_smallmap(M, I);\
  } else {\
    B->fd = F;\
    F->bk = B;\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}

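/*
  Note on dv (added for clarity, following dlmalloc terminology): dv is
  the "designated victim", the chunk most recently split from. Servicing
  small requests from it first keeps consecutive small allocations
  adjacent in memory, which improves locality; replace_dv() retires the
  old victim into a smallbin before installing the new remainder.
*/
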
/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr *H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  } else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0) {\
          T = *C;\
        } else {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
      } else {\
        tchunkptr F = T->fd;\
        T->fd = F->bk = X;\
        X->fd = F;\
        X->bk = T;\
        X->parent = 0;\
        break;\
      }\
    }\
  }\
}

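/*
  Structure note (added for clarity, per the dlmalloc design): each treebin
  is a bitwise trie keyed on chunk size. Descent consumes one size bit per
  level, starting from the bit just below the bin's resolved range (hence
  the initial shift by leftshift_for_tree_index and the K <<= 1 per step),
  and chunks of exactly equal size hang off a tree node in a circular
  fd/bk list rather than deepening the trie.
*/
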
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    F->bk = R;\
    R->fd = F;\
  } else {\
    tchunkptr *RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr *CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      *RP = 0;\
    }\
  }\
  if (XP != 0) {\
    tbinptr *H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    } else {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    if (R != 0) {\
      tchunkptr C0, C1;\
      R->parent = XP;\
      if ((C0 = X->child[0]) != 0) {\
        R->child[0] = C0;\
        C0->parent = R;\
      }\
      if ((C1 = X->child[1]) != 0) {\
        R->child[1] = C1;\
        C1->parent = R;\
      }\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) { insert_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }

/* ----------------------- Direct-mmapping chunks ----------------------- */

static void *direct_alloc(size_t nb)
{
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (LJ_LIKELY(mmsize > nb)) {  /* Check for wrap around 0 */
    char *mm = (char *)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_DIRECT_BIT;
      p->head = psize|CINUSE_BIT;
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      return chunk2mem(p);
    }
  }
  return NULL;
}

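/*
  Layout note (added for clarity): a direct chunk stores its alignment
  offset in prev_foot, tagged with IS_DIRECT_BIT, instead of a predecessor
  size; PINUSE_BIT is never set on it, which is exactly what is_direct()
  tests. The two trailing heads written above act as fenceposts so chunk
  walks terminate, and lj_alloc_free() below recovers the original mapping
  as (char *)p - offset and unmaps the whole region at once.
*/
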
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
{
  size_t oldsize = chunksize(oldp);
  if (is_small(nb))  /* Can't shrink direct regions below small size */
    return NULL;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)) {
    return oldp;
  } else {
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
                                   oldmmsize, newmmsize, 1);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
      newp->head = psize|CINUSE_BIT;
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      return newp;
    }
  }
  return NULL;
}

/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char *)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = DEFAULT_TRIM_THRESHOLD;  /* reset on each update */
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
  bindex_t i;
  for (i = 0; i < NSMALLBINS; i++) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
  }
}

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
{
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
  } else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  } else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
  }

  return chunk2mem(p);
}

/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char *tbase, size_t tsize)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
  char *old_top = (char *)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char *old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char *asp = rawsp + offset;
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg;  /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char *)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = (size_t)(csp - old_top);
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }
}

/* -------------------------- System allocation -------------------------- */

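/*
  Strategy note (added for clarity): alloc_sys() below direct-maps requests
  at or above DEFAULT_MMAP_THRESHOLD (128K), giving them their own mapping
  that can be returned to the OS independently. Everything else grows the
  mspace by a granularity-aligned region, which is merged with an adjacent
  segment when the addresses happen to line up and otherwise added as a new
  segment, before the request is carved from the (possibly extended) top.
*/
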
static void *alloc_sys(mstate m, size_t nb)
{
  char *tbase = CMFAIL;
  size_t tsize = 0;

  /* Directly map large chunks */
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
    void *mem = direct_alloc(nb);
    if (mem != 0)
      return mem;
  }

  {
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (LJ_LIKELY(rsize > nb)) {  /* Fail if wraps around zero */
      char *mp = (char *)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
        tbase = mp;
        tsize = rsize;
      }
    }
  }

  if (tbase != CMFAIL) {
    msegmentptr sp = &m->seg;
    /* Try to merge with an existing segment */
    while (sp != 0 && tbase != sp->base + sp->size)
      sp = sp->next;
    if (sp != 0 && segment_holds(sp, m->top)) {  /* append */
      sp->size += tsize;
      init_top(m, m->top, m->topsize + tsize);
    } else {
      sp = &m->seg;
      while (sp != 0 && sp->base != tbase + tsize)
        sp = sp->next;
      if (sp != 0) {
        char *oldbase = sp->base;
        sp->base = tbase;
        sp->size += tsize;
        return prepend_alloc(m, tbase, oldbase, nb);
      } else {
        add_segment(m, tbase, tsize);
      }
    }

    if (nb < m->topsize) {  /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      return chunk2mem(p);
    }
  }

  return NULL;
}

/* ----------------------- system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m)
{
  size_t released = 0;
  size_t nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    nsegs++;
    {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        } else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        } else {  /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
                      nsegs : MAX_RELEASE_CHECK_RATE;
  return released;
}

static int alloc_trim(mstate m, size_t pad)
{
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE;  /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = DEFAULT_GRANULARITY;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char *)m->top);

      if (sp->size >= extra &&
          !has_segment_link(m, sp)) {  /* can't shrink if pinned */
        size_t newsize = sp->size - extra;
        /* Prefer mremap, fall back to munmap */
        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
          released = extra;
        }
      }

      if (released != 0) {
        sp->size -= released;
        init_top(m, m->top, m->topsize - released);
      }
    }

    /* Unmap any unused mmapped segments */
    released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}

/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void *tmalloc_large(mstate m, size_t nb)
{
  tchunkptr v = 0;
  size_t rsize = ~nb+1;  /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst;  /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) {  /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0)
      t = *treebin_at(m, lj_ffs(leftbits));
  }

  while (t != 0) {  /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return NULL so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    mchunkptr r = chunk_plus_offset(v, nb);
    unlink_large_chunk(m, v);
    if (rsize < MIN_CHUNK_SIZE) {
      set_inuse_and_pinuse(m, v, (rsize + nb));
    } else {
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
      set_size_and_pinuse_of_free_chunk(r, rsize);
      insert_chunk(m, r, rsize);
    }
    return chunk2mem(v);
  }
  return NULL;
}

/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb)
{
  tchunkptr t, v;
  mchunkptr r;
  size_t rsize;
  bindex_t i = lj_ffs(m->treemap);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  r = chunk_plus_offset(v, nb);
  unlink_large_chunk(m, v);
  if (rsize < MIN_CHUNK_SIZE) {
    set_inuse_and_pinuse(m, v, (rsize + nb));
  } else {
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    set_size_and_pinuse_of_free_chunk(r, rsize);
    replace_dv(m, r, rsize);
  }
  return chunk2mem(v);
}

/* ----------------------------------------------------------------------- */

void *lj_alloc_create(void)
{
  size_t tsize = DEFAULT_GRANULARITY;
  char *tbase = (char *)(CALL_MMAP(tsize));
  if (tbase != CMFAIL) {
    size_t msize = pad_request(sizeof(struct malloc_state));
    mchunkptr mn;
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
    memset(m, 0, msize);
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->release_checks = MAX_RELEASE_CHECK_RATE;
    init_bins(m);
    mn = next_chunk(mem2chunk(m));
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
    return m;
  }
  return NULL;
}

void lj_alloc_destroy(void *msp)
{
  mstate ms = (mstate)msp;
  msegmentptr sp = &ms->seg;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    sp = sp->next;
    CALL_MUNMAP(base, size);
  }
}

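/*
  Allocation policy (summary comment added for clarity): small requests
  (<= MAX_SMALL_REQUEST) try a remainderless fit from the two smallest
  usable smallbins; failing that, the dv chunk if it is big enough, then
  the next nonempty smallbin or the treebins. Large requests go best-fit
  through the treebins, with dv used when it fits at least as well.
  Anything still unserved is split from top, and only then is more memory
  requested from the system via alloc_sys().
*/
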
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) {  /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;  /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) {  /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if sizes are 4-byte */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T;  /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) {  /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else {  /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
    return mem;
  } else if (nb < ms->topsize) {  /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
    return mem;
  }
  return alloc_sys(ms, nb);
}

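/*
  Free path (summary comment added for clarity): direct chunks are handed
  straight back to the OS. Ordinary chunks are coalesced with free
  neighbors using the boundary tags, with special cases that grow top or
  dv in place; freeing into top may trigger alloc_trim(), and roughly
  every MAX_RELEASE_CHECK_RATE frees of large chunks a scan for fully
  free segments is made via release_unused_segments().
*/
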
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
}

static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) {  /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr remainder = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, remainder, rsize);
        lj_alloc_free(m, chunk2mem(remainder));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize | PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
}

void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}
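
/*
  Usage sketch (added for clarity; hypothetical driver code, not part of
  this file): lj_alloc_f follows the lua_Alloc contract, so a host would
  typically do:

    void *msp = lj_alloc_create();
    void *p = lj_alloc_f(msp, NULL, 0, 64);   (malloc 64 bytes)
    p = lj_alloc_f(msp, p, 64, 128);          (grow to 128 bytes)
    lj_alloc_f(msp, p, 128, 0);               (free)
    lj_alloc_destroy(msp);
*/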

#endif