path: root/src/lj_gc.c
author     Mike Pall <mike>  2009-12-08 19:46:35 +0100
committer  Mike Pall <mike>  2009-12-08 19:46:35 +0100
commit     55b16959717084884fd4a0cbae6d19e3786c20c7 (patch)
tree       c8a07a43c13679751ed25a9d06796e9e7b2134a6 /src/lj_gc.c
RELEASE LuaJIT-2.0.0-beta1 (tag: v2.0.0-beta1)
Diffstat (limited to 'src/lj_gc.c')
-rw-r--r--  src/lj_gc.c  800
1 file changed, 800 insertions, 0 deletions
diff --git a/src/lj_gc.c b/src/lj_gc.c
new file mode 100644
index 00000000..e479b567
--- /dev/null
+++ b/src/lj_gc.c
@@ -0,0 +1,800 @@
1/*
2** Garbage collector.
3** Copyright (C) 2005-2009 Mike Pall. See Copyright Notice in luajit.h
4**
5** Major portions taken verbatim or adapted from the Lua interpreter.
6** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7*/
8
9#define lj_gc_c
10#define LUA_CORE
11
12#include "lj_obj.h"
13#include "lj_gc.h"
14#include "lj_err.h"
15#include "lj_str.h"
16#include "lj_tab.h"
17#include "lj_func.h"
18#include "lj_udata.h"
19#include "lj_meta.h"
20#include "lj_state.h"
21#include "lj_frame.h"
22#include "lj_trace.h"
23#include "lj_vm.h"
24
25#define GCSTEPSIZE 1024u
26#define GCSWEEPMAX 40
27#define GCSWEEPCOST 10
28#define GCFINALIZECOST 100
29
30/* Macros to set GCobj colors and flags. */
31#define white2gray(x) ((x)->gch.marked &= cast_byte(~LJ_GC_WHITES))
32#define black2gray(x) ((x)->gch.marked &= cast_byte(~LJ_GC_BLACK))
33#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
34#define makewhite(g, x) \
35 ((x)->gch.marked = ((x)->gch.marked & cast_byte(~LJ_GC_COLORS)) | curwhite(g))
36#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
37#define markfinalized(u) ((u)->marked |= LJ_GC_FINALIZED)
38
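/*
** Illustrative, self-contained sketch (hypothetical names, not part of the
** real flag definitions) of the two-white scheme behind makewhite() and the
** isdead() checks used below: live objects carry the current white, and once
** the white is flipped at the end of the atomic phase, anything still carrying
** only the other white is dead and may be swept.
*/
#include <stdio.h>

enum { SK_WHITE0 = 0x01, SK_WHITE1 = 0x02, SK_WHITES = SK_WHITE0|SK_WHITE1 };

/* Dead means: marked with the *other* white, i.e. the white of the previous
** GC cycle. This mirrors the isdead() test, modulo the real flag values. */
static int sketch_isdead(unsigned char currentwhite, unsigned char marked)
{
  unsigned char otherwhite = currentwhite ^ SK_WHITES;
  return (marked & otherwhite & SK_WHITES) != 0;
}

int main(void)
{
  unsigned char marked = SK_WHITE0;                   /* Allocated white0. */
  printf("%d\n", sketch_isdead(SK_WHITE0, marked));   /* 0: current white. */
  printf("%d\n", sketch_isdead(SK_WHITE1, marked));   /* 1: white flipped, dead. */
  return 0;
}
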
39/* -- Mark phase ---------------------------------------------------------- */
40
41/* Mark a TValue (if needed). */
42#define gc_marktv(g, tv) \
43 { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
44 if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
45
46/* Mark a GCobj (if needed). */
47#define gc_markobj(g, o) \
48 { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
49
50/* Mark a string object. */
51#define gc_mark_str(s) ((s)->marked &= cast_byte(~LJ_GC_WHITES))
52
53/* Mark a white GCobj. */
54static void gc_mark(global_State *g, GCobj *o)
55{
56 lua_assert(iswhite(o) && !isdead(g, o));
57 white2gray(o);
58 if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUDATA)) {
59 GCtab *mt = tabref(gco2ud(o)->metatable);
60 gray2black(o); /* Userdata are never gray. */
61 if (mt) gc_markobj(g, mt);
62 gc_markobj(g, tabref(gco2ud(o)->env));
63 } else if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUPVAL)) {
64 GCupval *uv = gco2uv(o);
65 gc_marktv(g, uv->v);
66 if (uv->closed)
67 gray2black(o); /* Closed upvalues are never gray. */
68 } else if (o->gch.gct != ~LJ_TSTR) {
69 lua_assert(o->gch.gct == ~LJ_TFUNC || o->gch.gct == ~LJ_TTAB ||
70 o->gch.gct == ~LJ_TTHREAD || o->gch.gct == ~LJ_TPROTO);
71 setgcrefr(o->gch.gclist, g->gc.gray);
72 setgcref(g->gc.gray, o);
73 }
74}
75
76/* Mark the base metatables. */
77static void gc_mark_basemt(global_State *g)
78{
79 int i;
80 for (i = 0; i < BASEMT_MAX; i++)
81 if (tabref(g->basemt[i]) != NULL)
82 gc_markobj(g, tabref(g->basemt[i]));
83}
84
85/* Start a GC cycle and mark the root set. */
86static void gc_mark_start(global_State *g)
87{
88 setgcrefnull(g->gc.gray);
89 setgcrefnull(g->gc.grayagain);
90 setgcrefnull(g->gc.weak);
91 gc_markobj(g, mainthread(g));
92 gc_markobj(g, tabref(mainthread(g)->env));
93 gc_marktv(g, &g->registrytv);
94 gc_mark_basemt(g);
95 g->gc.state = GCSpropagate;
96}
97
98/* Mark open upvalues. */
99static void gc_mark_uv(global_State *g)
100{
101 GCupval *uv;
102 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
103 lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
104 if (isgray(obj2gco(uv)))
105 gc_marktv(g, uv->v);
106 }
107}
108
109/* Mark userdata in mmudata list. */
110static void gc_mark_mmudata(global_State *g)
111{
112 GCobj *root = gcref(g->gc.mmudata);
113 GCobj *u = root;
114 if (u) {
115 do {
116 u = gcnext(u);
117 makewhite(g, u); /* Could be from previous GC. */
118 gc_mark(g, u);
119 } while (u != root);
120 }
121}
122
123/* Separate userdata which needs finalization to mmudata list. */
124size_t lj_gc_separateudata(global_State *g, int all)
125{
126 size_t m = 0;
127 GCRef *p = &mainthread(g)->nextgc;
128 GCobj *o;
129 while ((o = gcref(*p)) != NULL) {
130 if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
131 p = &o->gch.nextgc; /* Nothing to do. */
132 } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
133 markfinalized(gco2ud(o)); /* Done, as there's no __gc metamethod. */
134 p = &o->gch.nextgc;
135 } else { /* Otherwise move userdata to be finalized to mmudata list. */
136 m += sizeudata(gco2ud(o));
137 markfinalized(gco2ud(o));
138 *p = o->gch.nextgc;
139 if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
140 GCobj *root = gcref(g->gc.mmudata);
141 setgcrefr(o->gch.nextgc, root->gch.nextgc);
142 setgcref(root->gch.nextgc, o);
143 setgcref(g->gc.mmudata, o);
144 } else { /* Create circular list. */
145 setgcref(o->gch.nextgc, o);
146 setgcref(g->gc.mmudata, o);
147 }
148 }
149 }
150 return m;
151}
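
/*
** Illustrative, standalone sketch (hypothetical node type) of the circular
** list insertion above: g->gc.mmudata anchors the *last* element, whose
** nextgc pointer wraps around to the first, so appending stays O(1).
*/
typedef struct sknode { struct sknode *next; } sknode;

static void sketch_circlist_append(sknode **last, sknode *n)
{
  if (*last) {                   /* Non-empty: splice behind the last node. */
    n->next = (*last)->next;     /* New node points at the first node. */
    (*last)->next = n;
    *last = n;                   /* New node becomes the last node. */
  } else {                       /* Empty: create a one-element circle. */
    n->next = n;
    *last = n;
  }
}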
152
153/* -- Propagation phase --------------------------------------------------- */
154
155/* Traverse a table. */
156static int gc_traverse_tab(global_State *g, GCtab *t)
157{
158 int weak = 0;
159 cTValue *mode;
160 GCtab *mt = tabref(t->metatable);
161 if (mt)
162 gc_markobj(g, mt);
163 mode = lj_meta_fastg(g, mt, MM_mode);
164 if (mode && tvisstr(mode)) { /* Valid __mode field? */
165 const char *modestr = strVdata(mode);
166 int c;
167 while ((c = *modestr++)) {
168 if (c == 'k') weak |= LJ_GC_WEAKKEY;
169 else if (c == 'v') weak |= LJ_GC_WEAKVAL;
170 }
171 if (weak) { /* Weak tables are cleared in the atomic phase. */
172 t->marked = cast_byte((t->marked & ~LJ_GC_WEAK) | weak);
173 setgcrefr(t->gclist, g->gc.weak);
174 setgcref(g->gc.weak, obj2gco(t));
175 }
176 }
177 if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
178 return 1;
179 if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
180 MSize i, asize = t->asize;
181 for (i = 0; i < asize; i++)
182 gc_marktv(g, arrayslot(t, i));
183 }
184 if (t->hmask > 0) { /* Mark hash part. */
185 Node *node = noderef(t->node);
186 MSize i, hmask = t->hmask;
187 for (i = 0; i <= hmask; i++) {
188 Node *n = &node[i];
189 lua_assert(itype(&n->key) != LJ_TDEADKEY || tvisnil(&n->val));
190 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
191 lua_assert(!tvisnil(&n->key));
192 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
193 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
194 } else if (tvisgcv(&n->key)) { /* Leave GC key in, but mark as dead. */
195 setitype(&n->key, LJ_TDEADKEY);
196 }
197 }
198 }
199 return weak;
200}
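
/*
** Embedding-side example using the public Lua 5.1 C API (not LuaJIT
** internals): creating a value-weak table, i.e. one whose __mode makes
** gc_traverse_tab() set LJ_GC_WEAKVAL so gc_clearweak() can prune it.
*/
#include "lua.h"

static void push_weak_valued_table(lua_State *L)
{
  lua_newtable(L);                /* The (to be) weak table. */
  lua_createtable(L, 0, 1);       /* Its metatable. */
  lua_pushliteral(L, "v");
  lua_setfield(L, -2, "__mode");  /* mt.__mode = "v": values are weak. */
  lua_setmetatable(L, -2);        /* Attach and pop the metatable. */
}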
201
202/* Traverse a function. */
203static void gc_traverse_func(global_State *g, GCfunc *fn)
204{
205 gc_markobj(g, tabref(fn->c.env));
206 if (isluafunc(fn)) {
207 uint32_t i;
208 lua_assert(fn->l.nupvalues == funcproto(fn)->sizeuv);
209 gc_markobj(g, funcproto(fn));
210 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
211 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
212 } else {
213 uint32_t i;
214 for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
215 gc_marktv(g, &fn->c.upvalue[i]);
216 }
217}
218
219#if LJ_HASJIT
220/* Traverse a trace. */
221static void gc_traverse_trace(global_State *g, Trace *T)
222{
223 IRRef ref;
224 for (ref = T->nk; ref < REF_TRUE; ref++) {
225 IRIns *ir = &T->ir[ref];
226 if (ir->o == IR_KGC)
227 gc_markobj(g, ir_kgc(ir));
228 }
229}
230
231/* The current trace is a GC root while not anchored in the prototype (yet). */
232#define gc_mark_curtrace(g) \
233 { if (G2J(g)->state != LJ_TRACE_IDLE && G2J(g)->curtrace != 0) \
234 gc_traverse_trace(g, &G2J(g)->cur); }
235#else
236#define gc_mark_curtrace(g) UNUSED(g)
237#endif
238
239/* Traverse a prototype. */
240static void gc_traverse_proto(global_State *g, GCproto *pt)
241{
242 ptrdiff_t i;
243#if LJ_HASJIT
244 jit_State *J = G2J(g);
245 TraceNo root, side;
246 /* Mark all root traces and attached side traces. */
247 for (root = pt->trace; root != 0; root = J->trace[root]->nextroot) {
248 for (side = J->trace[root]->nextside; side != 0;
249 side = J->trace[side]->nextside)
250 gc_traverse_trace(g, J->trace[side]);
251 gc_traverse_trace(g, J->trace[root]);
252 }
253#endif
254 /* GC during prototype creation could cause NULL fields. */
255 if (pt->chunkname)
256 gc_mark_str(pt->chunkname);
257 for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
258 gc_markobj(g, gcref(pt->k.gc[i]));
259 for (i = 0; i < (ptrdiff_t)pt->sizeuvname; i++) /* Mark upvalue names. */
260 if (pt->uvname[i])
261 gc_mark_str(pt->uvname[i]);
262 for (i = 0; i < (ptrdiff_t)pt->sizevarinfo; i++) /* Mark names of locals. */
263 if (pt->varinfo[i].name)
264 gc_mark_str(pt->varinfo[i].name);
265}
266
267/* Traverse the frame structure of a stack. */
268static TValue *gc_traverse_frames(global_State *g, lua_State *th)
269{
270 TValue *frame, *top = th->top-1;
271 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
272 for (frame = th->base-1; frame > th->stack; frame = frame_prev(frame)) {
273 GCfunc *fn = frame_func(frame);
274 TValue *ftop = frame;
275 if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
276 if (ftop > top) top = ftop;
277 gc_markobj(g, frame_gc(frame)); /* Need to mark hidden function (or L). */
278 }
279 top++; /* Correct bias of -1 (frame == base-1). */
280 if (top > th->maxstack) top = th->maxstack;
281 return top;
282}
283
284/* Traverse a thread object. */
285static void gc_traverse_thread(global_State *g, lua_State *th)
286{
287 TValue *o, *lim;
288 gc_markobj(g, tabref(th->env));
289 for (o = th->stack+1; o < th->top; o++)
290 gc_marktv(g, o);
291 lim = gc_traverse_frames(g, th);
292 /* Extra cleanup required to avoid this marking problem:
293 **
294 ** [aa[bb.X| X created.
295 ** [aa[cc| GC called from (small) inner frame, X destroyed.
296 ** [aa....X.| GC called again in (larger) outer frame, X resurrected (ouch).
297 **
298 ** During GC in step 2 the stack must be cleaned up to the max. frame extent:
299 **
300 ** ***| Slots cleaned
301 ** [cc| from top of last frame
302 ** [aa......| to max. frame extent.
303 */
304 for (; o <= lim; o++)
305 setnilV(o);
306 lj_state_shrinkstack(th, (MSize)(lim - th->stack));
307}
308
309/* Propagate one gray object. Traverse it and turn it black. */
310static size_t propagatemark(global_State *g)
311{
312 GCobj *o = gcref(g->gc.gray);
313 lua_assert(isgray(o));
314 gray2black(o);
315 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
316 if (LJ_LIKELY(o->gch.gct == ~LJ_TTAB)) {
317 GCtab *t = gco2tab(o);
318 if (gc_traverse_tab(g, t))
319 black2gray(o); /* Keep weak tables gray. */
320 return sizeof(GCtab) + sizeof(TValue) * t->asize +
321 sizeof(Node) * (t->hmask + 1);
322 } else if (LJ_LIKELY(o->gch.gct == ~LJ_TFUNC)) {
323 GCfunc *fn = gco2func(o);
324 gc_traverse_func(g, fn);
325 return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
326 sizeCfunc((MSize)fn->c.nupvalues);
327 } else if (LJ_LIKELY(o->gch.gct == ~LJ_TPROTO)) {
328 GCproto *pt = gco2pt(o);
329 gc_traverse_proto(g, pt);
330 return sizeof(GCproto) + sizeof(BCIns) * pt->sizebc +
331 sizeof(GCobj *) * pt->sizekgc +
332 sizeof(lua_Number) * pt->sizekn +
333 sizeof(int16_t) * pt->sizeuv +
334 sizeof(int32_t) * pt->sizelineinfo +
335 sizeof(VarInfo) * pt->sizevarinfo +
336 sizeof(GCstr *) * pt->sizeuvname;
337 } else {
338 lua_State *th = gco2th(o);
339 setgcrefr(th->gclist, g->gc.grayagain);
340 setgcref(g->gc.grayagain, o);
341 black2gray(o); /* Threads are never black. */
342 gc_traverse_thread(g, th);
343 return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
344 }
345}
346
347/* Propagate all gray objects. */
348static size_t gc_propagate_gray(global_State *g)
349{
350 size_t m = 0;
351 while (gcref(g->gc.gray) != NULL)
352 m += propagatemark(g);
353 return m;
354}
355
356/* -- Sweep phase --------------------------------------------------------- */
357
358/* Try to shrink some common data structures. */
359static void gc_shrink(global_State *g, lua_State *L)
360{
361 if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
362 lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
363 if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
364 lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
365}
366
367/* Type of GC free functions. */
368typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
369
370/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
371static const GCFreeFunc gc_freefunc[] = {
372 (GCFreeFunc)lj_str_free,
373 (GCFreeFunc)lj_func_freeuv,
374 (GCFreeFunc)lj_state_free,
375 (GCFreeFunc)lj_func_freeproto,
376 (GCFreeFunc)lj_func_free,
377 (GCFreeFunc)0,
378 (GCFreeFunc)lj_tab_free,
379 (GCFreeFunc)lj_udata_free
380};
381
382/* Full sweep of a GC list. */
383#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)
384
385/* Partial sweep of a GC list. */
386static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
387{
388 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
389 int ow = otherwhite(g);
390 GCobj *o;
391 while ((o = gcref(*p)) != NULL && lim-- > 0) {
392 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
393 gc_fullsweep(g, &gco2th(o)->openupval);
394 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
395 lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
396 makewhite(g, o); /* Value is alive, change to the current white. */
397 p = &o->gch.nextgc;
398 } else { /* Otherwise value is dead, free it. */
399 lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
400 setgcrefr(*p, o->gch.nextgc);
401 if (o == gcref(g->gc.root))
402 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
403 gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
404 }
405 }
406 return p;
407}
408
409/* Check whether we can clear a key or a value slot from a table. */
410static int gc_mayclear(cTValue *o, int val)
411{
412 if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
413 if (tvisstr(o)) { /* But strings cannot be used as weak references. */
414 gc_mark_str(strV(o)); /* And need to be marked. */
415 return 0;
416 }
417 if (iswhite(gcV(o)))
418 return 1; /* Object is about to be collected. */
419 if (tvisudata(o) && val && isfinalized(udataV(o)))
420 return 1; /* Finalized userdata is dropped only from values. */
421 }
422 return 0; /* Cannot clear. */
423}
424
425/* Clear collected entries from weak tables. */
426static void gc_clearweak(GCobj *o)
427{
428 while (o) {
429 GCtab *t = gco2tab(o);
430 lua_assert((t->marked & LJ_GC_WEAK));
431 if ((t->marked & LJ_GC_WEAKVAL)) {
432 MSize i, asize = t->asize;
433 for (i = 0; i < asize; i++) {
434 /* Clear array slot when value is about to be collected. */
435 TValue *tv = arrayslot(t, i);
436 if (gc_mayclear(tv, 1))
437 setnilV(tv);
438 }
439 }
440 if (t->hmask > 0) {
441 Node *node = noderef(t->node);
442 MSize i, hmask = t->hmask;
443 for (i = 0; i <= hmask; i++) {
444 Node *n = &node[i];
445 /* Clear hash slot when key or value is about to be collected. */
446 if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
447 gc_mayclear(&n->val, 1))) {
448 setnilV(&n->val);
449 if (tvisgcv(&n->key)) /* Leave GC key in, but mark as dead. */
450 setitype(&n->key, LJ_TDEADKEY);
451 }
452 }
453 }
454 o = gcref(t->gclist);
455 }
456}
457
458/* Finalize one userdata object from mmudata list. */
459static void gc_finalize(lua_State *L)
460{
461 global_State *g = G(L);
462 GCobj *o = gcnext(gcref(g->gc.mmudata));
463 GCudata *ud = gco2ud(o);
464 cTValue *mo;
465 /* Unchain from list of userdata to be finalized. */
466 if (o == gcref(g->gc.mmudata))
467 setgcrefnull(g->gc.mmudata);
468 else
469 setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, ud->nextgc);
470 /* Add it back to the main userdata list and make it white. */
471 setgcrefr(ud->nextgc, mainthread(g)->nextgc);
472 setgcref(mainthread(g)->nextgc, o);
473 makewhite(g, o);
474 /* Resolve the __gc metamethod. */
475 mo = lj_meta_fastg(g, tabref(ud->metatable), MM_gc);
476 if (mo) {
477 /* Save and restore lots of state around the __gc callback. */
478 uint8_t oldh = hook_save(g);
479 MSize oldt = g->gc.threshold;
480 GCobj *oldjl = gcref(g->jit_L);
481 MSize oldjs = 0;
482 ptrdiff_t oldjb = 0;
483 int errcode;
484 TValue *top;
485 if (oldjl) {
486 oldjs = gco2th(oldjl)->stacksize;
487 oldjb = savestack(gco2th(oldjl), mref(g->jit_base, TValue ));
488 setgcrefnull(g->jit_L);
489 }
490 lj_trace_abort(g);
491 top = L->top;
492 L->top = top+2;
493 hook_entergc(g); /* Disable hooks and new traces during __gc. */
494 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
495 copyTV(L, top, mo);
496 setudataV(L, top+1, ud);
497 errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|ud| -> | */
498 hook_restore(g, oldh);
499 g->gc.threshold = oldt; /* Restore GC threshold. */
500 if (oldjl) {
501 if (gco2th(oldjl)->stacksize < oldjs)
502 lj_state_growstack(gco2th(oldjl), oldjs - gco2th(oldjl)->stacksize);
503 setgcref(g->jit_L, oldjl);
504 setmref(g->jit_base, restorestack(gco2th(oldjl), oldjb));
505 }
506 if (errcode)
507 lj_err_throw(L, errcode); /* Propagate errors. */
508 }
509}
510
511/* Finalize all userdata objects from mmudata list. */
512void lj_gc_finalizeudata(lua_State *L)
513{
514 while (gcref(G(L)->gc.mmudata) != NULL)
515 gc_finalize(L);
516}
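
/*
** Embedding-side example using the public Lua 5.1 C API (not LuaJIT
** internals): giving a userdata a __gc metamethod. Such objects are moved to
** the mmudata list by lj_gc_separateudata() and resolved by gc_finalize().
*/
#include "lua.h"
#include "lauxlib.h"

static int my_gc(lua_State *L)
{
  void *p = lua_touserdata(L, 1);
  (void)p;                        /* Release external resources here. */
  return 0;
}

static void *new_finalizable_udata(lua_State *L, size_t sz)
{
  void *p = lua_newuserdata(L, sz);                  /* Pushes the userdata. */
  if (luaL_newmetatable(L, "sketch.finalizable")) {  /* Created first time. */
    lua_pushcfunction(L, my_gc);
    lua_setfield(L, -2, "__gc");                     /* mt.__gc = my_gc */
  }
  lua_setmetatable(L, -2);                           /* Attach the metatable. */
  return p;
}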
517
518/* Free all remaining GC objects. */
519void lj_gc_freeall(global_State *g)
520{
521 MSize i, strmask;
522 /* Free everything, except super-fixed objects (the main thread). */
523 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
524 gc_fullsweep(g, &g->gc.root);
525 strmask = g->strmask;
526 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
527 gc_fullsweep(g, &g->strhash[i]);
528}
529
530/* -- Collector ----------------------------------------------------------- */
531
532/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
533static void atomic(global_State *g, lua_State *L)
534{
535 size_t udsize;
536
537 gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
538 gc_propagate_gray(g); /* Propagate any left-overs. */
539
540 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
541 setgcrefnull(g->gc.weak);
542 lua_assert(!iswhite(obj2gco(mainthread(g))));
543 gc_markobj(g, L); /* Mark running thread. */
544 gc_mark_curtrace(g); /* Mark current trace. */
545 gc_mark_basemt(g); /* Mark base metatables (again). */
546 gc_propagate_gray(g); /* Propagate all of the above. */
547
548 setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
549 setgcrefnull(g->gc.grayagain);
550 gc_propagate_gray(g); /* Propagate it. */
551
552 udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
553 gc_mark_mmudata(g); /* Mark them. */
554 udsize += gc_propagate_gray(g); /* And propagate the marks. */
555
556 /* All marking done, clear weak tables. */
557 gc_clearweak(gcref(g->gc.weak));
558
559 /* Prepare for sweep phase. */
560 g->gc.currentwhite = cast_byte(otherwhite(g)); /* Flip current white. */
561 g->gc.sweepstr = 0;
562 g->gc.sweep = &g->gc.root;
563 g->gc.state = GCSsweepstring;
564 g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */
565}
566
567/* GC state machine. Returns a cost estimate for each step performed. */
568static size_t gc_onestep(lua_State *L)
569{
570 global_State *g = G(L);
571 switch (g->gc.state) {
572 case GCSpause:
573 gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
574 return 0;
575 case GCSpropagate:
576 if (gcref(g->gc.gray) != NULL)
577 return propagatemark(g); /* Propagate one gray object. */
578 atomic(g, L); /* End of mark phase. */
579 return 0;
580 case GCSsweepstring: {
581 MSize old = g->gc.total;
582 gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */
583 if (g->gc.sweepstr > g->strmask)
584 g->gc.state = GCSsweep; /* All string hash chains swept. */
585 lua_assert(old >= g->gc.total);
586 g->gc.estimate -= old - g->gc.total;
587 return GCSWEEPCOST;
588 }
589 case GCSsweep: {
590 MSize old = g->gc.total;
591 g->gc.sweep = gc_sweep(g, g->gc.sweep, GCSWEEPMAX); /* Partial sweep. */
592 if (gcref(*g->gc.sweep) == NULL) {
593 gc_shrink(g, L);
594 g->gc.state = GCSfinalize; /* End of sweep phase. */
595 }
596 lua_assert(old >= g->gc.total);
597 g->gc.estimate -= old - g->gc.total;
598 return GCSWEEPMAX*GCSWEEPCOST;
599 }
600 case GCSfinalize:
601 if (gcref(g->gc.mmudata) != NULL) {
602 gc_finalize(L); /* Finalize one userdata object. */
603 if (g->gc.estimate > GCFINALIZECOST)
604 g->gc.estimate -= GCFINALIZECOST;
605 return GCFINALIZECOST;
606 }
607 g->gc.state = GCSpause; /* End of GC cycle. */
608 g->gc.debt = 0;
609 return 0;
610 default:
611 lua_assert(0);
612 return 0;
613 }
614}
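
/*
** Overall state progression driven by gc_onestep(), for reference:
**
**   GCSpause -> GCSpropagate -> (atomic) -> GCSsweepstring -> GCSsweep
**            -> GCSfinalize -> GCSpause
*/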
615
616/* Perform a limited amount of incremental GC steps. */
617int lj_gc_step(lua_State *L)
618{
619 global_State *g = G(L);
620 MSize lim;
621 int32_t ostate = g->vmstate;
622 setvmstate(g, GC);
623 lim = (GCSTEPSIZE/100) * g->gc.stepmul;
624 if (lim == 0)
625 lim = LJ_MAX_MEM;
626 g->gc.debt += g->gc.total - g->gc.threshold;
627 do {
628 lim -= (MSize)gc_onestep(L);
629 if (g->gc.state == GCSpause) {
630 lua_assert(g->gc.total >= g->gc.estimate);
631 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
632 g->vmstate = ostate;
633 return 1; /* Finished a GC cycle. */
634 }
635 } while ((int32_t)lim > 0);
636 if (g->gc.debt < GCSTEPSIZE) {
637 g->gc.threshold = g->gc.total + GCSTEPSIZE;
638 } else {
639 g->gc.debt -= GCSTEPSIZE;
640 g->gc.threshold = g->gc.total;
641 }
642 g->vmstate = ostate;
643 return 0;
644}
645
646/* Ditto, but fix the stack top first. */
647void lj_gc_step_fixtop(lua_State *L)
648{
649 if (curr_funcisL(L)) L->top = curr_topL(L);
650 lj_gc_step(L);
651}
652
653/* Perform multiple GC steps. Called from JIT-compiled code. */
654void lj_gc_step_jit(lua_State *L, const BCIns *pc, MSize steps)
655{
656 cframe_pc(cframe_raw(L->cframe)) = pc;
657 L->top = curr_topL(L);
658 while (steps-- > 0 && lj_gc_step(L) == 0)
659 ;
660}
661
662/* Perform a full GC cycle. */
663void lj_gc_fullgc(lua_State *L)
664{
665 global_State *g = G(L);
666 int32_t ostate = g->vmstate;
667 setvmstate(g, GC);
668 if (g->gc.state <= GCSpropagate) { /* Caught somewhere in the middle. */
669 g->gc.sweepstr = 0;
670 g->gc.sweep = &g->gc.root; /* Sweep everything (preserving it). */
671 setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
672 setgcrefnull(g->gc.grayagain);
673 setgcrefnull(g->gc.weak);
674 g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
675 }
676 lua_assert(g->gc.state != GCSpause && g->gc.state != GCSpropagate);
677 while (g->gc.state != GCSfinalize) { /* Finish sweep. */
678 lua_assert(g->gc.state == GCSsweepstring || g->gc.state == GCSsweep);
679 gc_onestep(L);
680 }
681 /* Now perform a full GC. */
682 gc_mark_start(g);
683 while (g->gc.state != GCSpause)
684 gc_onestep(L);
685 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
686 g->vmstate = ostate;
687}
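
/*
** Embedding-side example using the public Lua 5.1 C API (not LuaJIT
** internals): the lua_gc() calls that end up driving lj_gc_step() and
** lj_gc_fullgc(), and that set the pause/stepmul parameters used in the
** threshold computations above.
*/
#include "lua.h"

static void gc_tuning_example(lua_State *L)
{
  lua_gc(L, LUA_GCSETPAUSE, 200);    /* threshold = 2x estimate after a cycle. */
  lua_gc(L, LUA_GCSETSTEPMUL, 200);  /* Collector runs at 2x allocation rate. */
  lua_gc(L, LUA_GCSTEP, 10);         /* Perform a bounded amount of GC work. */
  lua_gc(L, LUA_GCCOLLECT, 0);       /* Force a complete collection cycle. */
}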
688
689/* -- Write barriers ------------------------------------------------------ */
690
691/* Move the GC propagation frontier back for tables (make it gray again). */
692void lj_gc_barrierback(global_State *g, GCtab *t)
693{
694 GCobj *o = obj2gco(t);
695 lua_assert(isblack(o) && !isdead(g, o));
696 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
697 black2gray(o);
698 setgcrefr(t->gclist, g->gc.grayagain);
699 setgcref(g->gc.grayagain, o);
700}
701
702/* Move the GC propagation frontier forward. */
703void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
704{
705 lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
706 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
707 lua_assert(o->gch.gct != ~LJ_TTAB);
708 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
709 if (g->gc.state == GCSpropagate)
710 gc_mark(g, v); /* Move frontier forward. */
711 else
712 makewhite(g, o); /* Make it white to avoid the following barrier. */
713}
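
/*
** Illustrative, standalone sketch (hypothetical types) of the two barrier
** strategies: lj_gc_barrierf() moves the frontier *forward* by marking the
** white child, while lj_gc_barrierback() moves it *back* by re-graying the
** black table and parking it on the grayagain list for the atomic phase.
*/
enum sk_color { SK_CWHITE, SK_CGRAY, SK_CBLACK };

typedef struct { enum sk_color color; int on_grayagain; } skobj;

static void sketch_barrier_forward(skobj *child)
{
  if (child->color == SK_CWHITE)
    child->color = SK_CGRAY;         /* Child will be traversed later. */
}

static void sketch_barrier_backward(skobj *parent)
{
  if (parent->color == SK_CBLACK) {
    parent->color = SK_CGRAY;        /* Parent must be re-traversed. */
    parent->on_grayagain = 1;        /* Deferred to the grayagain list. */
  }
}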
714
715/* The reason for duplicating this is that it needs to be visible from ASM. */
716void lj_gc_barrieruv(global_State *g, GCobj *o, GCobj *v)
717{
718 lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
719 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
720 lua_assert(o->gch.gct == ~LJ_TUPVAL);
721 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
722 if (g->gc.state == GCSpropagate)
723 gc_mark(g, v); /* Move frontier forward. */
724 else
725 makewhite(g, o); /* Make it white to avoid the following barrier. */
726}
727
728/* Close upvalue. Also needs a write barrier. */
729void lj_gc_closeuv(global_State *g, GCupval *uv)
730{
731 GCobj *o = obj2gco(uv);
732 /* Copy stack slot to upvalue itself and point to the copy. */
733 copyTV(mainthread(g), &uv->tv, uv->v);
734 uv->v = &uv->tv;
735 uv->closed = 1;
736 setgcrefr(o->gch.nextgc, g->gc.root);
737 setgcref(g->gc.root, o);
738 if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
739 if (g->gc.state == GCSpropagate) {
740 gray2black(o); /* Make it black and preserve invariant. */
741 if (tviswhite(uv->v))
742 lj_gc_barrierf(g, o, gcV(uv->v));
743 } else {
744 makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
745 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
746 }
747 }
748}
749
750#if LJ_HASJIT
751/* Mark a trace if it's saved during the propagation phase. */
752void lj_gc_barriertrace(global_State *g, void *T)
753{
754 if (g->gc.state == GCSpropagate)
755 gc_traverse_trace(g, (Trace *)T);
756}
757#endif
758
759/* -- Allocator ----------------------------------------------------------- */
760
761/* Call pluggable memory allocator to allocate or resize a fragment. */
762void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
763{
764 global_State *g = G(L);
765 lua_assert((osz == 0) == (p == NULL));
766 p = g->allocf(g->allocd, p, osz, nsz);
767 if (p == NULL && nsz > 0)
768 lj_err_throw(L, LUA_ERRMEM);
769 lua_assert((nsz == 0) == (p == NULL));
770 g->gc.total = (g->gc.total - osz) + nsz;
771 return p;
772}
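
/*
** Embedding-side example using the public Lua 5.1 C API (not LuaJIT
** internals): a minimal lua_Alloc-conforming allocator of the kind invoked
** here through g->allocf. nsize == 0 is a free request and must return NULL.
*/
#include <stdlib.h>
#include "lua.h"

static void *simple_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
{
  (void)ud; (void)osize;
  if (nsize == 0) {
    free(ptr);                    /* Free request. */
    return NULL;
  }
  return realloc(ptr, nsize);     /* Allocation or resize; NULL on OOM. */
}

/* Usage: lua_State *L = lua_newstate(simple_alloc, NULL); */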
773
774/* Allocate new GC object and link it to the root set. */
775void *lj_mem_newgco(lua_State *L, MSize size)
776{
777 global_State *g = G(L);
778 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
779 if (o == NULL)
780 lj_err_throw(L, LUA_ERRMEM);
781 g->gc.total += size;
782 setgcrefr(o->gch.nextgc, g->gc.root);
783 setgcref(g->gc.root, o);
784 newwhite(g, o);
785 return o;
786}
787
788/* Resize growable vector. */
789void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
790{
791 MSize sz = (*szp) << 1;
792 if (sz < LJ_MIN_VECSZ)
793 sz = LJ_MIN_VECSZ;
794 if (sz > lim)
795 sz = lim;
796 p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
797 *szp = sz;
798 return p;
799}
800