Diffstat (limited to 'src/lj_gc.c')
-rw-r--r--	src/lj_gc.c	70
1 file changed, 34 insertions(+), 36 deletions(-)
diff --git a/src/lj_gc.c b/src/lj_gc.c
index aa243d13..c82af662 100644
--- a/src/lj_gc.c
+++ b/src/lj_gc.c
@@ -12,6 +12,7 @@
 #include "lj_obj.h"
 #include "lj_gc.h"
 #include "lj_err.h"
+#include "lj_buf.h"
 #include "lj_str.h"
 #include "lj_tab.h"
 #include "lj_func.h"
@@ -68,7 +69,7 @@ static void gc_mark(global_State *g, GCobj *o)
      gray2black(o);  /* Closed upvalues are never gray. */
   } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
     lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
-               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
+               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE);
     setgcrefr(o->gch.gclist, g->gc.gray);
     setgcref(g->gc.gray, o);
   }
@@ -237,6 +238,8 @@ static void gc_traverse_trace(global_State *g, GCtrace *T)
     IRIns *ir = &T->ir[ref];
     if (ir->o == IR_KGC)
       gc_markobj(g, ir_kgc(ir));
+    if (irt_is64(ir->t) && ir->o != IR_KNULL)
+      ref++;
   }
   if (T->link) gc_marktrace(g, T->link);
   if (T->nextroot) gc_marktrace(g, T->nextroot);
@@ -267,12 +270,12 @@ static MSize gc_traverse_frames(global_State *g, lua_State *th)
 {
   TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
   /* Note: extra vararg frame not skipped, marks function twice (harmless). */
-  for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
+  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
     GCfunc *fn = frame_func(frame);
     TValue *ftop = frame;
     if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
     if (ftop > top) top = ftop;
-    gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
+    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
   }
   top++;  /* Correct bias of -1 (frame == base-1). */
   if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
@@ -283,7 +286,7 @@ static void gc_traverse_thread(global_State *g, lua_State *th)
 static void gc_traverse_thread(global_State *g, lua_State *th)
 {
   TValue *o, *top = th->top;
-  for (o = tvref(th->stack)+1; o < top; o++)
+  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
     gc_marktv(g, o);
   if (g->gc.state == GCSatomic) {
     top = tvref(th->stack) + th->stacksize;
@@ -348,15 +351,6 @@ static size_t gc_propagate_gray(global_State *g)
 
 /* -- Sweep phase --------------------------------------------------------- */
 
-/* Try to shrink some common data structures. */
-static void gc_shrink(global_State *g, lua_State *L)
-{
-  if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
-    lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
-  if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
-    lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1);  /* Shrink temp buf. */
-}
-
 /* Type of GC free functions. */
 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
 
@@ -382,7 +376,7 @@ static const GCFreeFunc gc_freefunc[] = {
 };
 
 /* Full sweep of a GC list. */
-#define gc_fullsweep(g, p)	gc_sweep(g, (p), LJ_MAX_MEM)
+#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)
 
 /* Partial sweep of a GC list. */
 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
@@ -460,17 +454,18 @@ static void gc_call_finalizer(global_State *g, lua_State *L,
 {
   /* Save and restore lots of state around the __gc callback. */
   uint8_t oldh = hook_save(g);
-  MSize oldt = g->gc.threshold;
+  GCSize oldt = g->gc.threshold;
   int errcode;
   TValue *top;
   lj_trace_abort(g);
-  top = L->top;
-  L->top = top+2;
   hook_entergc(g);  /* Disable hooks and new traces during __gc. */
   g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
-  copyTV(L, top, mo);
-  setgcV(L, top+1, o, ~o->gch.gct);
-  errcode = lj_vm_pcall(L, top+1, 1+0, -1);  /* Stack: |mo|o| -> | */
+  top = L->top;
+  copyTV(L, top++, mo);
+  if (LJ_FR2) setnilV(top++);
+  setgcV(L, top, o, ~o->gch.gct);
+  L->top = top+1;
+  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
   hook_restore(g, oldh);
   g->gc.threshold = oldt;  /* Restore GC threshold. */
   if (errcode)
@@ -483,7 +478,7 @@ static void gc_finalize(lua_State *L)
   global_State *g = G(L);
   GCobj *o = gcnext(gcref(g->gc.mmudata));
   cTValue *mo;
-  lua_assert(gcref(g->jit_L) == NULL);  /* Must not be called on trace. */
+  lua_assert(tvref(g->jit_base) == NULL);  /* Must not be called on trace. */
   /* Unchain from list of userdata to be finalized. */
   if (o == gcref(g->gc.mmudata))
     setgcrefnull(g->gc.mmudata);
@@ -592,11 +587,13 @@ static void atomic(global_State *g, lua_State *L)
   /* All marking done, clear weak tables. */
   gc_clearweak(gcref(g->gc.weak));
 
+  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
+
   /* Prepare for sweep phase. */
   g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
   g->strempty.marked = g->gc.currentwhite;
   setmref(g->gc.sweep, &g->gc.root);
-  g->gc.estimate = g->gc.total - (MSize)udsize;  /* Initial estimate. */
+  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
 }
 
 /* GC state machine. Returns a cost estimate for each step performed. */
@@ -613,14 +610,14 @@ static size_t gc_onestep(lua_State *L)
     g->gc.state = GCSatomic;  /* End of mark phase. */
     return 0;
   case GCSatomic:
-    if (gcref(g->jit_L))  /* Don't run atomic phase on trace. */
+    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
       return LJ_MAX_MEM;
     atomic(g, L);
     g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
     g->gc.sweepstr = 0;
     return 0;
   case GCSsweepstring: {
-    MSize old = g->gc.total;
+    GCSize old = g->gc.total;
     gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
     if (g->gc.sweepstr > g->strmask)
       g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
@@ -629,12 +626,13 @@
     return GCSWEEPCOST;
   }
   case GCSsweep: {
-    MSize old = g->gc.total;
+    GCSize old = g->gc.total;
     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
     lua_assert(old >= g->gc.total);
     g->gc.estimate -= old - g->gc.total;
     if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
-      gc_shrink(g, L);
+      if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
+        lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
       if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
         g->gc.state = GCSfinalize;
 #if LJ_HASFFI
@@ -649,7 +647,7 @@
   }
   case GCSfinalize:
     if (gcref(g->gc.mmudata) != NULL) {
-      if (gcref(g->jit_L))  /* Don't call finalizers on trace. */
+      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
       gc_finalize(L);  /* Finalize one userdata object. */
       if (g->gc.estimate > GCFINALIZECOST)
@@ -672,7 +670,7 @@
 int LJ_FASTCALL lj_gc_step(lua_State *L)
 {
   global_State *g = G(L);
-  MSize lim;
+  GCSize lim;
   int32_t ostate = g->vmstate;
   setvmstate(g, GC);
   lim = (GCSTEPSIZE/100) * g->gc.stepmul;
@@ -681,13 +679,13 @@
   if (g->gc.total > g->gc.threshold)
     g->gc.debt += g->gc.total - g->gc.threshold;
   do {
-    lim -= (MSize)gc_onestep(L);
+    lim -= (GCSize)gc_onestep(L);
     if (g->gc.state == GCSpause) {
       g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
       g->vmstate = ostate;
       return 1;  /* Finished a GC cycle. */
     }
-  } while ((int32_t)lim > 0);
+  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
   if (g->gc.debt < GCSTEPSIZE) {
     g->gc.threshold = g->gc.total + GCSTEPSIZE;
     g->vmstate = ostate;
@@ -711,8 +709,8 @@ void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
 /* Perform multiple GC steps. Called from JIT-compiled code. */
 int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
 {
-  lua_State *L = gco2th(gcref(g->jit_L));
-  L->base = mref(G(L)->jit_base, TValue);
+  lua_State *L = gco2th(gcref(g->cur_L));
+  L->base = tvref(G(L)->jit_base);
   L->top = curr_topL(L);
   while (steps-- > 0 && lj_gc_step(L) == 0)
     ;
@@ -806,7 +804,7 @@ void lj_gc_barriertrace(global_State *g, uint32_t traceno)
 /* -- Allocator ----------------------------------------------------------- */
 
 /* Call pluggable memory allocator to allocate or resize a fragment. */
-void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
+void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
 {
   global_State *g = G(L);
   lua_assert((osz == 0) == (p == NULL));
@@ -814,19 +812,19 @@
   if (p == NULL && nsz > 0)
     lj_err_mem(L);
   lua_assert((nsz == 0) == (p == NULL));
-  lua_assert(checkptr32(p));
+  lua_assert(checkptrGC(p));
   g->gc.total = (g->gc.total - osz) + nsz;
   return p;
 }
 
 /* Allocate new GC object and link it to the root set. */
-void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
+void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
 {
   global_State *g = G(L);
   GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
   if (o == NULL)
     lj_err_mem(L);
-  lua_assert(checkptr32(o));
+  lua_assert(checkptrGC(o));
   g->gc.total += size;
   setgcrefr(o->gch.nextgc, g->gc.root);
   setgcref(g->gc.root, o);
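
Note on the widened step budget: the new do-while condition in lj_gc_step, sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0), works because an overspent unsigned budget wraps to a value that reads as negative when reinterpreted at the same signed width. The standalone sketch below illustrates only that idiom; BudgetSize and budget_positive are hypothetical names for illustration and are not part of lj_gc.c.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a GCSize-like counter: 64 bits, as in a GC64 build. */
typedef uint64_t BudgetSize;

/* Pick the signed comparison matching the counter width, so a wrapped
** (overspent) budget compares as negative and ends the step loop. */
static int budget_positive(BudgetSize lim)
{
  return sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0);
}

int main(void)
{
  BudgetSize lim = 1024;
  lim -= 4096;  /* Overspend: unsigned subtraction wraps to a huge value. */
  printf("positive after overspend: %d\n", budget_positive(lim));  /* Prints 0. */
  return 0;
}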