Diffstat (limited to 'src/lj_gc.c')
-rw-r--r--  src/lj_gc.c | 70
1 file changed, 34 insertions(+), 36 deletions(-)
diff --git a/src/lj_gc.c b/src/lj_gc.c
index a5d32ea3..2aaf5b2c 100644
--- a/src/lj_gc.c
+++ b/src/lj_gc.c
@@ -12,6 +12,7 @@
 #include "lj_obj.h"
 #include "lj_gc.h"
 #include "lj_err.h"
+#include "lj_buf.h"
 #include "lj_str.h"
 #include "lj_tab.h"
 #include "lj_func.h"
@@ -68,7 +69,7 @@ static void gc_mark(global_State *g, GCobj *o)
       gray2black(o);  /* Closed upvalues are never gray. */
   } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
     lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
-               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
+               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE);
     setgcrefr(o->gch.gclist, g->gc.gray);
     setgcref(g->gc.gray, o);
   }
@@ -244,6 +245,8 @@ static void gc_traverse_trace(global_State *g, GCtrace *T)
     IRIns *ir = &T->ir[ref];
     if (ir->o == IR_KGC)
       gc_markobj(g, ir_kgc(ir));
+    if (irt_is64(ir->t) && ir->o != IR_KNULL)
+      ref++;
   }
   if (T->link) gc_marktrace(g, T->link);
   if (T->nextroot) gc_marktrace(g, T->nextroot);
@@ -274,12 +277,12 @@ static MSize gc_traverse_frames(global_State *g, lua_State *th)
 {
   TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
   /* Note: extra vararg frame not skipped, marks function twice (harmless). */
-  for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
+  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
     GCfunc *fn = frame_func(frame);
     TValue *ftop = frame;
     if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
     if (ftop > top) top = ftop;
-    gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
+    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
   }
   top++;  /* Correct bias of -1 (frame == base-1). */
   if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
@@ -290,7 +293,7 @@ static void gc_traverse_thread(global_State *g, lua_State *th)
 static void gc_traverse_thread(global_State *g, lua_State *th)
 {
   TValue *o, *top = th->top;
-  for (o = tvref(th->stack)+1; o < top; o++)
+  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
     gc_marktv(g, o);
   if (g->gc.state == GCSatomic) {
     top = tvref(th->stack) + th->stacksize;
@@ -355,15 +358,6 @@ static size_t gc_propagate_gray(global_State *g)
 
 /* -- Sweep phase --------------------------------------------------------- */
 
-/* Try to shrink some common data structures. */
-static void gc_shrink(global_State *g, lua_State *L)
-{
-  if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
-    lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
-  if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
-    lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1);  /* Shrink temp buf. */
-}
-
 /* Type of GC free functions. */
 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
 
@@ -389,7 +383,7 @@ static const GCFreeFunc gc_freefunc[] = {
 };
 
 /* Full sweep of a GC list. */
-#define gc_fullsweep(g, p)	gc_sweep(g, (p), LJ_MAX_MEM)
+#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)
 
 /* Partial sweep of a GC list. */
 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
@@ -467,17 +461,18 @@ static void gc_call_finalizer(global_State *g, lua_State *L,
 {
   /* Save and restore lots of state around the __gc callback. */
   uint8_t oldh = hook_save(g);
-  MSize oldt = g->gc.threshold;
+  GCSize oldt = g->gc.threshold;
   int errcode;
   TValue *top;
   lj_trace_abort(g);
-  top = L->top;
-  L->top = top+2;
   hook_entergc(g);  /* Disable hooks and new traces during __gc. */
   g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
-  copyTV(L, top, mo);
-  setgcV(L, top+1, o, ~o->gch.gct);
-  errcode = lj_vm_pcall(L, top+1, 1+0, -1);  /* Stack: |mo|o| -> | */
+  top = L->top;
+  copyTV(L, top++, mo);
+  if (LJ_FR2) setnilV(top++);
+  setgcV(L, top, o, ~o->gch.gct);
+  L->top = top+1;
+  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
   hook_restore(g, oldh);
   g->gc.threshold = oldt;  /* Restore GC threshold. */
   if (errcode)
@@ -490,7 +485,7 @@ static void gc_finalize(lua_State *L)
   global_State *g = G(L);
   GCobj *o = gcnext(gcref(g->gc.mmudata));
   cTValue *mo;
-  lua_assert(gcref(g->jit_L) == NULL);  /* Must not be called on trace. */
+  lua_assert(tvref(g->jit_base) == NULL);  /* Must not be called on trace. */
   /* Unchain from list of userdata to be finalized. */
   if (o == gcref(g->gc.mmudata))
     setgcrefnull(g->gc.mmudata);
@@ -599,11 +594,13 @@ static void atomic(global_State *g, lua_State *L)
   /* All marking done, clear weak tables. */
   gc_clearweak(gcref(g->gc.weak));
 
+  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
+
   /* Prepare for sweep phase. */
   g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
   g->strempty.marked = g->gc.currentwhite;
   setmref(g->gc.sweep, &g->gc.root);
-  g->gc.estimate = g->gc.total - (MSize)udsize;  /* Initial estimate. */
+  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
 }
 
 /* GC state machine. Returns a cost estimate for each step performed. */
@@ -620,14 +617,14 @@ static size_t gc_onestep(lua_State *L)
     g->gc.state = GCSatomic;  /* End of mark phase. */
     return 0;
   case GCSatomic:
-    if (gcref(g->jit_L))  /* Don't run atomic phase on trace. */
+    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
       return LJ_MAX_MEM;
     atomic(g, L);
     g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
     g->gc.sweepstr = 0;
     return 0;
   case GCSsweepstring: {
-    MSize old = g->gc.total;
+    GCSize old = g->gc.total;
     gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
     if (g->gc.sweepstr > g->strmask)
       g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
@@ -636,12 +633,13 @@ static size_t gc_onestep(lua_State *L)
     return GCSWEEPCOST;
   }
   case GCSsweep: {
-    MSize old = g->gc.total;
+    GCSize old = g->gc.total;
     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
     lua_assert(old >= g->gc.total);
     g->gc.estimate -= old - g->gc.total;
     if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
-      gc_shrink(g, L);
+      if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
+        lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
       if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
         g->gc.state = GCSfinalize;
 #if LJ_HASFFI
@@ -656,7 +654,7 @@ static size_t gc_onestep(lua_State *L)
   }
   case GCSfinalize:
     if (gcref(g->gc.mmudata) != NULL) {
-      if (gcref(g->jit_L))  /* Don't call finalizers on trace. */
+      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
         return LJ_MAX_MEM;
       gc_finalize(L);  /* Finalize one userdata object. */
       if (g->gc.estimate > GCFINALIZECOST)
@@ -679,7 +677,7 @@ static size_t gc_onestep(lua_State *L)
 int LJ_FASTCALL lj_gc_step(lua_State *L)
 {
   global_State *g = G(L);
-  MSize lim;
+  GCSize lim;
   int32_t ostate = g->vmstate;
   setvmstate(g, GC);
   lim = (GCSTEPSIZE/100) * g->gc.stepmul;
@@ -688,13 +686,13 @@ int LJ_FASTCALL lj_gc_step(lua_State *L)
   if (g->gc.total > g->gc.threshold)
     g->gc.debt += g->gc.total - g->gc.threshold;
   do {
-    lim -= (MSize)gc_onestep(L);
+    lim -= (GCSize)gc_onestep(L);
     if (g->gc.state == GCSpause) {
       g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
       g->vmstate = ostate;
       return 1;  /* Finished a GC cycle. */
     }
-  } while ((int32_t)lim > 0);
+  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
   if (g->gc.debt < GCSTEPSIZE) {
     g->gc.threshold = g->gc.total + GCSTEPSIZE;
     g->vmstate = ostate;
@@ -718,8 +716,8 @@ void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
 /* Perform multiple GC steps. Called from JIT-compiled code. */
 int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
 {
-  lua_State *L = gco2th(gcref(g->jit_L));
-  L->base = mref(G(L)->jit_base, TValue);
+  lua_State *L = gco2th(gcref(g->cur_L));
+  L->base = tvref(G(L)->jit_base);
   L->top = curr_topL(L);
   while (steps-- > 0 && lj_gc_step(L) == 0)
     ;
@@ -813,7 +811,7 @@ void lj_gc_barriertrace(global_State *g, uint32_t traceno)
 /* -- Allocator ----------------------------------------------------------- */
 
 /* Call pluggable memory allocator to allocate or resize a fragment. */
-void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
+void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
 {
   global_State *g = G(L);
   lua_assert((osz == 0) == (p == NULL));
@@ -821,19 +819,19 @@ void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
   if (p == NULL && nsz > 0)
     lj_err_mem(L);
   lua_assert((nsz == 0) == (p == NULL));
-  lua_assert(checkptr32(p));
+  lua_assert(checkptrGC(p));
   g->gc.total = (g->gc.total - osz) + nsz;
   return p;
 }
 
 /* Allocate new GC object and link it to the root set. */
-void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
+void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
 {
   global_State *g = G(L);
   GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
   if (o == NULL)
     lj_err_mem(L);
-  lua_assert(checkptr32(o));
+  lua_assert(checkptrGC(o));
   g->gc.total += size;
   setgcrefr(o->gch.nextgc, g->gc.root);
   setgcref(g->gc.root, o);