author     Mike Pall <mike>  2015-10-01 16:12:48 +0200
committer  Mike Pall <mike>  2015-10-01 16:12:48 +0200
commit     ef087aa6782d72af61dcf1b5801ce43818ecf003
tree       8b79fd841b5a7bec0cfecec6b19b45e8381b9e37
parent     076d625dc6046c5f05d55a95d3b9afd5637d5a5a
Fix unsinking check.
Reported by Elias Hogstvedt. Debugged and fixed by Vyacheslav Egorov.
-rw-r--r--  src/lj_snap.c | 26
1 file changed, 11 insertions, 15 deletions
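The change drops lj_snap.c's local IR() macro, which expands to &J->cur.ir[ref] and therefore indexes the trace currently being recorded, and makes the sunk-store helpers take the parent trace T and index T->ir instead. As the hunks below show, snap_sunk_store2() previously resolved the store's operand references through J->cur.ir even though ira and irs point into T->ir, so its final pointer comparison could end up comparing an entry of J->cur.ir with an entry of T->ir. A minimal stand-alone sketch of that pitfall (simplified types and names, not LuaJIT code) follows:

#include <stdio.h>

/* Simplified stand-in for an IR instruction; the real IRIns is a packed union. */
typedef struct { int op1; } FakeIRIns;

int main(void)
{
  FakeIRIns parent_ir[8] = {{0}};  /* plays the role of T->ir (parent trace) */
  FakeIRIns cur_ir[8] = {{0}};     /* plays the role of J->cur.ir (trace being recorded) */
  int ref = 3;                     /* same numeric IR reference into both arrays */
  FakeIRIns *ira = &parent_ir[ref];
  /* Resolving the reference in the wrong array yields a different object, so a
  ** pointer-identity check against a parent-trace entry can never succeed --
  ** the failure mode behind the old IR()-based check in snap_sunk_store2().
  */
  printf("resolved in parent_ir: %d\n", &parent_ir[ref] == ira);  /* prints 1 */
  printf("resolved in cur_ir:    %d\n", &cur_ir[ref] == ira);     /* prints 0 */
  return 0;
}

With the helpers parameterized on T, both the fast relative-offset check and the slow walk stay within the same IR array that ira and irs point into.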
diff --git a/src/lj_snap.c b/src/lj_snap.c
index 5c870baf..ea46b831 100644
--- a/src/lj_snap.c
+++ b/src/lj_snap.c
@@ -26,9 +26,6 @@
 #include "lj_cdata.h"
 #endif
 
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
 /* Pass IR on to next optimization in chain (FOLD). */
 #define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
 
@@ -73,7 +70,7 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
     IRRef ref = tref_ref(tr);
     if (ref) {
       SnapEntry sn = SNAP_TR(s, tr);
-      IRIns *ir = IR(ref);
+      IRIns *ir = &J->cur.ir[ref];
       if (!(sn & (SNAP_CONT|SNAP_FRAME)) &&
           ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
         /* No need to snapshot unmodified non-inherited slots. */
@@ -404,24 +401,24 @@ static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
 }
 
 /* Check whether a sunk store corresponds to an allocation. Slow path. */
-static int snap_sunk_store2(jit_State *J, IRIns *ira, IRIns *irs)
+static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
 {
   if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
       irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
-    IRIns *irk = IR(irs->op1);
+    IRIns *irk = &T->ir[irs->op1];
     if (irk->o == IR_AREF || irk->o == IR_HREFK)
-      irk = IR(irk->op1);
-    return (IR(irk->op1) == ira);
+      irk = &T->ir[irk->op1];
+    return (&T->ir[irk->op1] == ira);
   }
   return 0;
 }
 
 /* Check whether a sunk store corresponds to an allocation. Fast path. */
-static LJ_AINLINE int snap_sunk_store(jit_State *J, IRIns *ira, IRIns *irs)
+static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
 {
   if (irs->s != 255)
     return (ira + irs->s == irs);  /* Fast check. */
-  return snap_sunk_store2(J, ira, irs);
+  return snap_sunk_store2(T, ira, irs);
 }
 
 /* Replay snapshot state to setup side trace. */
@@ -484,7 +481,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
       } else {
         IRIns *irs;
         for (irs = ir+1; irs < irlast; irs++)
-          if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+          if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
             if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
               snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
             else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
@@ -524,7 +521,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
         TRef tr = emitir(ir->ot, op1, op2);
         J->slot[snap_slot(sn)] = tr;
         for (irs = ir+1; irs < irlast; irs++)
-          if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+          if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
             IRIns *irr = &T->ir[irs->op1];
             TRef val, key = irr->op2, tmp = tr;
             if (irr->o != IR_FREF) {
@@ -726,7 +723,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
   } else {
     IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
         IRIns *iro = &T->ir[T->ir[irs->op1].op2];
         uint8_t *p = (uint8_t *)cd;
         CTSize szs;
@@ -759,7 +756,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
     settabV(J->L, o, t);
     irlast = &T->ir[T->snap[snapno].ref];
     for (irs = ir+1; irs < irlast; irs++)
-      if (irs->r == RID_SINK && snap_sunk_store(J, ir, irs)) {
+      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
         IRIns *irk = &T->ir[irs->op1];
         TValue tmp, *val;
         lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
@@ -859,7 +856,6 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
   return pc;
 }
 
-#undef IR
 #undef emitir_raw
 #undef emitir
 