author     Mike Pall <mike>    2015-10-01 16:16:18 +0200
committer  Mike Pall <mike>    2015-10-01 16:16:18 +0200
commit     0b09c971c9edfdd901c9a4480423cc786bce7a94 (patch)
tree       45566b5b9b8e11d6523368a50923041e881df183
parent     52ea1a30afc204553c99126ab43c2b16f2bd0182 (diff)
parent     ef087aa6782d72af61dcf1b5801ce43818ecf003 (diff)
Merge branch 'master' into v2.1
-rw-r--r--   src/lj_snap.c | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/src/lj_snap.c b/src/lj_snap.c
index fa9abb74..df7ae634 100644
--- a/src/lj_snap.c
+++ b/src/lj_snap.c
@@ -26,9 +26,6 @@
 #include "lj_cdata.h"
 #endif
 
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref)			(&J->cur.ir[(ref)])
-
 /* Pass IR on to next optimization in chain (FOLD). */
 #define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
 
@@ -73,7 +70,7 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
     IRRef ref = tref_ref(tr);
     if (ref) {
       SnapEntry sn = SNAP_TR(s, tr);
-      IRIns *ir = IR(ref);
+      IRIns *ir = &J->cur.ir[ref];
       if (!(sn & (SNAP_CONT|SNAP_FRAME)) &&
           ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
         /* No need to snapshot unmodified non-inherited slots. */
@@ -407,24 +404,24 @@ static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
 }
 
 /* Check whether a sunk store corresponds to an allocation. Slow path. */
-static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
+static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
 {
   if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
       irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
-    IRIns *irk = IR(irs->op1);
+    IRIns *irk = &T->ir[irs->op1];
     if (irk->o == IR_AREF || irk->o == IR_HREFK)
-      irk = IR(irk->op1);
-    return (IR(irk->op1) == ira);
+      irk = &T->ir[irk->op1];
+    return (&T->ir[irk->op1] == ira);
   }
   return 0;
 }
 
 /* Check whether a sunk store corresponds to an allocation. Fast path. */
-static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
+static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
 {
   if (irs->s != 255)
     return (ira + irs->s == irs);  /* Fast check. */
-  return snap_sunk_store2(J, ira, irs);
+  return snap_sunk_store2(T, ira, irs);
 }
 
 /* Replay snapshot state to setup side trace. */
@@ -487,7 +484,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
     } else {
       IRIns *irs;
       for (irs = ir+1; irs < irlast; irs++)
-        if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+        if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
           if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
             snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
           else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
@@ -527,7 +524,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
       TRef tr = emitir(ir->ot, op1, op2);
       J->slot[snap_slot(sn)] = tr;
       for (irs = ir+1; irs < irlast; irs++)
-        if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+        if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
           IRIns *irr = &T->ir[irs->op1];
           TRef val, key = irr->op2, tmp = tr;
           if (irr->o != IR_FREF) {
@@ -729,7 +726,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
   } else {
     IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
         IRIns *iro = &T->ir[T->ir[irs->op1].op2];
         uint8_t *p = (uint8_t *)cd;
         CTSize szs;
@@ -762,7 +759,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
     settabV(J->L, o, t);
     irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
         IRIns *irk = &T->ir[irs->op1];
         TValue tmp, *val;
         lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
@@ -863,7 +860,6 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
   return pc;
 }
 
-#undef IR
 #undef emitir_raw
 #undef emitir
 
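
For reference, a minimal standalone sketch of the fast-path/slow-path sunk-store check as it reads after this merge, where the trace (GCtrace *T) rather than the jit_State supplies the IR array. The reduced IRIns/GCtrace definitions and opcode list below are simplified stand-ins for illustration only; the real declarations live in LuaJIT's lj_ir.h and lj_jit.h.

/* Illustrative sketch only: reduced stand-in types, not LuaJIT's real
** definitions. Shows the shape of the check this patch refactors:
** the IR array is now indexed through the trace T instead of the
** removed IR() macro over J->cur.ir.
*/
#include <stdint.h>

typedef struct IRIns {
  uint8_t o;            /* Opcode. */
  uint8_t r;            /* Register (RID_SINK marks sunk instructions). */
  uint8_t s;            /* Cached distance to allocation; 255 = unknown. */
  uint16_t op1, op2;    /* Operand references into the IR array. */
} IRIns;

typedef struct GCtrace {
  IRIns *ir;            /* IR instruction array, indexed by reference. */
} GCtrace;

enum { IR_ASTORE, IR_HSTORE, IR_FSTORE, IR_XSTORE, IR_AREF, IR_HREFK };

/* Slow path: follow the store's address operands back to the allocation. */
static int sunk_store_slow(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
      irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
    IRIns *irk = &T->ir[irs->op1];
    if (irk->o == IR_AREF || irk->o == IR_HREFK)
      irk = &T->ir[irk->op1];
    return (&T->ir[irk->op1] == ira);
  }
  return 0;
}

/* Fast path: irs->s caches the distance from the allocation ira. */
static inline int sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->s != 255)
    return (ira + irs->s == irs);  /* Pointer-distance check. */
  return sunk_store_slow(T, ira, irs);
}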