about summary refs log tree commit diff
path: root/src/lj_snap.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/lj_snap.c')
-rw-r--r--  src/lj_snap.c  29
1 file changed, 25 insertions, 4 deletions
diff --git a/src/lj_snap.c b/src/lj_snap.c
index 9fae57d8..b9a82008 100644
--- a/src/lj_snap.c
+++ b/src/lj_snap.c
@@ -403,6 +403,27 @@ static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
403 return tr; 403 return tr;
404} 404}
405 405
406/* Check whether a sunk store corresponds to an allocation. Slow path. */
407static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
408{
409 if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
410 irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
411 IRIns *irk = IR(irs->op1);
412 if (irk->o == IR_AREF || irk->o == IR_HREFK)
413 irk = IR(irk->op1);
414 return (IR(irk->op1) == ira);
415 }
416 return 0;
417}
418
419/* Check whether a sunk store corresponds to an allocation. Fast path. */
420static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
421{
422 if (irs->s != 255)
423 return (ira + irs->s == irs); /* Fast check. */
424 return snap_sunk_store2(J, ira, irs);
425}
426
406/* Replay snapshot state to setup side trace. */ 427/* Replay snapshot state to setup side trace. */
407void lj_snap_replay(jit_State *J, GCtrace *T) 428void lj_snap_replay(jit_State *J, GCtrace *T)
408{ 429{
@@ -464,7 +485,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
464 } else { 485 } else {
465 IRIns *irs; 486 IRIns *irs;
466 for (irs = ir+1; irs < irlast; irs++) 487 for (irs = ir+1; irs < irlast; irs++)
467 if (irs->r == RID_SINK && ir + irs->s == irs) { 488 if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
468 if (snap_pref(J, T, map, nent, seen, irs->op2) == 0) 489 if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
469 snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1); 490 snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
470 else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && 491 else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
@@ -504,7 +525,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
504 TRef tr = emitir(ir->ot, op1, op2); 525 TRef tr = emitir(ir->ot, op1, op2);
505 J->slot[snap_slot(sn)] = tr; 526 J->slot[snap_slot(sn)] = tr;
506 for (irs = ir+1; irs < irlast; irs++) 527 for (irs = ir+1; irs < irlast; irs++)
507 if (irs->r == RID_SINK && ir + irs->s == irs) { 528 if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
508 IRIns *irr = &T->ir[irs->op1]; 529 IRIns *irr = &T->ir[irs->op1];
509 TRef val, key = irr->op2, tmp = tr; 530 TRef val, key = irr->op2, tmp = tr;
510 if (irr->o != IR_FREF) { 531 if (irr->o != IR_FREF) {
@@ -700,7 +721,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
700 } else { 721 } else {
701 IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; 722 IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
702 for (irs = ir+1; irs < irlast; irs++) 723 for (irs = ir+1; irs < irlast; irs++)
703 if (irs->r == RID_SINK && ir + irs->s == irs) { 724 if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
704 IRIns *iro = &T->ir[T->ir[irs->op1].op2]; 725 IRIns *iro = &T->ir[T->ir[irs->op1].op2];
705 uint8_t *p = (uint8_t *)cd; 726 uint8_t *p = (uint8_t *)cd;
706 CTSize szs; 727 CTSize szs;
@@ -733,7 +754,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
733 settabV(J->L, o, t); 754 settabV(J->L, o, t);
734 irlast = &T->ir[T->snap[snapno].ref]; 755 irlast = &T->ir[T->snap[snapno].ref];
735 for (irs = ir+1; irs < irlast; irs++) 756 for (irs = ir+1; irs < irlast; irs++)
736 if (irs->r == RID_SINK && ir + irs->s == irs) { 757 if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
737 IRIns *irk = &T->ir[irs->op1]; 758 IRIns *irk = &T->ir[irs->op1];
738 TValue tmp, *val; 759 TValue tmp, *val;
739 lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || 760 lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||