author     Roberto Ierusalimschy <roberto@inf.puc-rio.br>  2023-07-13 14:55:46 -0300
committer  Roberto Ierusalimschy <roberto@inf.puc-rio.br>  2023-07-13 14:55:46 -0300
commit     6b51133a988587f34ee9581d799ea9913581afd3 (patch)
tree       beae04d76822101b0d12493f07fddbd12014bab3 /lgc.c
parent     cbae01620278f9b568805db16a96d0631ced473d (diff)
Thread stacks resized in the atomic phase
Although resizing a stack can be a little expensive, it is unusual for many
threads to need resizing during a single GC cycle, so doing it inside the
atomic phase should not add much to the pause. On the other hand, the change
allows full collections to skip the propagate phase, going straight from the
pause to the atomic phase.
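The new 'fullinc' sequence can be read as: finish whatever cycle is pending,
then jump directly from the pause to the atomic phase. Below is a minimal,
self-contained sketch of that control flow, assuming toy stand-ins for the GC
states and for luaC_runtilstate/luaC_singlestep (the names GCSsweep, GCSdone,
single_step and run_until are illustrative only and do not exist in lgc.h);
it shows the phase-skipping idea, not Lua's actual collector.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for Lua's GC phases; the real set of
   states in lgc.h is larger (several sweep sub-states, etc.). */
enum gcstate { GCSpause, GCSenteratomic, GCSatomic, GCSsweep, GCScallfin, GCSdone };

static enum gcstate state = GCSpause;

/* Toy counterpart of luaC_singlestep: do "one unit of work" and advance. */
static void single_step (void) {
  static const char *names[] =
    {"pause", "enteratomic", "atomic", "sweep", "callfin", "done"};
  printf("GC phase: %s\n", names[state]);
  state++;  /* the real collector does marking/sweeping work here */
}

/* Toy counterpart of luaC_runtilstate: step until a given state is reached. */
static void run_until (enum gcstate target) {
  while (state != target)
    single_step();
}

/* Shape of a full collection after this commit: from the pause, jump
   straight to 'enteratomic' instead of stepping through propagation. */
int main (void) {
  run_until(GCSpause);      /* finish any cycle in progress */
  state = GCSenteratomic;   /* skip propagation: go straight to atomic */
  run_until(GCSdone);       /* atomic, sweep, finalizers */
  return 0;
}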
Diffstat (limited to 'lgc.c')
-rw-r--r--  lgc.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
@@ -638,7 +638,9 @@ static int traversethread (global_State *g, lua_State *th) {
   for (uv = th->openupval; uv != NULL; uv = uv->u.open.next)
     markobject(g, uv);  /* open upvalues cannot be collected */
   if (g->gcstate == GCSatomic) {  /* final traversal? */
-    for (; o < th->stack_last.p + EXTRA_STACK; o++)
+    if (!g->gcemergency)
+      luaD_shrinkstack(th);  /* do not change stack in emergency cycle */
+    for (o = th->top.p; o < th->stack_last.p + EXTRA_STACK; o++)
       setnilvalue(s2v(o));  /* clear dead stack slice */
     /* 'remarkupvals' may have removed thread from 'twups' list */
     if (!isintwups(th) && th->openupval != NULL) {
@@ -646,8 +648,6 @@ static int traversethread (global_State *g, lua_State *th) {
       g->twups = th;
     }
   }
-  else if (!g->gcemergency)
-    luaD_shrinkstack(th);  /* do not change stack in emergency cycle */
   return 1 + stacksize(th);
 }
 
@@ -1710,6 +1710,8 @@ static void fullinc (lua_State *L, global_State *g) {
   entersweep(L);  /* sweep everything to turn them back to white */
   /* finish any pending sweep phase to start a new cycle */
   luaC_runtilstate(L, bitmask(GCSpause));
+  luaC_runtilstate(L, bitmask(GCSpropagate));  /* start new cycle */
+  g->gcstate = GCSenteratomic;  /* go straight to atomic phase ??? */
   luaC_runtilstate(L, bitmask(GCScallfin));  /* run up to finalizers */
   /* estimate must be correct after a full GC cycle */
   lua_assert(g->GCestimate == gettotalbytes(g));