Diffstat (limited to 'src/lj_opt_fold.c')
-rw-r--r--  src/lj_opt_fold.c  126
1 file changed, 0 insertions, 126 deletions
diff --git a/src/lj_opt_fold.c b/src/lj_opt_fold.c
index adf88f4d..e05d6b7b 100644
--- a/src/lj_opt_fold.c
+++ b/src/lj_opt_fold.c
@@ -441,12 +441,6 @@ LJFOLDF(kfold_strcmp)
 
 /* -- Constant folding of conversions ------------------------------------- */
 
-LJFOLD(TONUM KINT)
-LJFOLDF(kfold_tonum)
-{
-  return lj_ir_knum(J, cast_num(fleft->i));
-}
-
 LJFOLD(TOBIT KNUM KNUM)
 LJFOLDF(kfold_tobit)
 {
@@ -455,40 +449,6 @@ LJFOLDF(kfold_tobit)
   return INTFOLD((int32_t)tv.u32.lo);
 }
 
-LJFOLD(TOINT KNUM any)
-LJFOLDF(kfold_toint)
-{
-  lua_Number n = knumleft;
-  int32_t k = lj_num2int(n);
-  if (irt_isguard(fins->t) && n != cast_num(k)) {
-    /* We're about to create a guard which always fails, like TOINT +1.5.
-    ** Some pathological loops cause this during LICM, e.g.:
-    **   local x,k,t = 0,1.5,{1,[1.5]=2}
-    **   for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
-    **   assert(x == 300)
-    */
-    return FAILFOLD;
-  }
-  return INTFOLD(k);
-}
-
-LJFOLD(TOI64 KINT any)
-LJFOLDF(kfold_toi64_kint)
-{
-  lua_assert(fins->op2 == IRTOINT_ZEXT64 || fins->op2 == IRTOINT_SEXT64);
-  if (fins->op2 == IRTOINT_ZEXT64)
-    return INT64FOLD((uint64_t)(uint32_t)fleft->i);
-  else
-    return INT64FOLD((uint64_t)(int32_t)fleft->i);
-}
-
-LJFOLD(TOI64 KNUM any)
-LJFOLDF(kfold_toi64_knum)
-{
-  lua_assert(fins->op2 == IRTOINT_TRUNCI64);
-  return INT64FOLD((uint64_t)(int64_t)knumleft);
-}
-
 LJFOLD(CONV KINT IRCONV_NUM_INT)
 LJFOLDF(kfold_conv_kint_num)
 {
@@ -613,9 +573,6 @@ LJFOLDF(shortcut_round)
   return NEXTFOLD;
 }
 
-LJFOLD(FPMATH TONUM IRFPM_FLOOR)
-LJFOLD(FPMATH TONUM IRFPM_CEIL)
-LJFOLD(FPMATH TONUM IRFPM_TRUNC)
 LJFOLD(ABS ABS KNUM)
 LJFOLDF(shortcut_left)
 {
@@ -640,32 +597,6 @@ LJFOLDF(shortcut_leftleft)
   return fleft->op1; /* f(g(x)) ==> x */
 }
 
-LJFOLD(TONUM TOINT)
-LJFOLDF(shortcut_leftleft_toint)
-{
-  PHIBARRIER(fleft);
-  if (irt_isguard(fleft->t)) /* Only safe with a guarded TOINT. */
-    return fleft->op1; /* f(g(x)) ==> x */
-  return NEXTFOLD;
-}
-
-LJFOLD(TOINT TONUM any)
-LJFOLD(TOBIT TONUM KNUM) /* The inverse must NOT be shortcut! */
-LJFOLDF(shortcut_leftleft_across_phi)
-{
-  /* Fold even across PHI to avoid expensive int->num->int conversions. */
-  return fleft->op1; /* f(g(x)) ==> x */
-}
-
-LJFOLD(TOI64 TONUM any)
-LJFOLDF(shortcut_leftleft_toint64)
-{
-  /* Fold even across PHI to avoid expensive int->num->int64 conversions. */
-  fins->op1 = fleft->op1; /* (int64_t)(double)(int)x ==> (int64_t)x */
-  fins->op2 = IRTOINT_SEXT64;
-  return RETRYFOLD;
-}
-
 /* -- FP algebraic simplifications ---------------------------------------- */
 
 /* FP arithmetic is tricky -- there's not much to simplify.
@@ -969,63 +900,6 @@ LJFOLDF(narrow_convert)
   return lj_opt_narrow_convert(J);
 }
 
-/* Relaxed CSE rule for TOINT allows commoning with stronger checks, too. */
-LJFOLD(TOINT any any)
-LJFOLDF(cse_toint)
-{
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
-    IRRef ref, op1 = fins->op1;
-    uint8_t guard = irt_isguard(fins->t);
-    for (ref = J->chain[IR_TOINT]; ref > op1; ref = IR(ref)->prev)
-      if (IR(ref)->op1 == op1 && irt_isguard(IR(ref)->t) >= guard)
-        return ref;
-  }
-  return EMITFOLD; /* No fallthrough to regular CSE. */
-}
-
-/* -- Strength reduction of widening -------------------------------------- */
-
-LJFOLD(TOI64 any 3) /* IRTOINT_ZEXT64 */
-LJFOLDF(simplify_zext64)
-{
-#if LJ_TARGET_X64
-  /* Eliminate widening. All 32 bit ops implicitly zero-extend the result. */
-  PHIBARRIER(fleft);
-  return LEFTFOLD;
-#else
-  UNUSED(J);
-  return NEXTFOLD;
-#endif
-}
-
-LJFOLD(TOI64 any 4) /* IRTOINT_SEXT64 */
-LJFOLDF(simplify_sext64)
-{
-  IRRef ref = fins->op1;
-  int64_t ofs = 0;
-  PHIBARRIER(fleft);
-  if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
-    ofs = (int64_t)IR(fleft->op2)->i;
-    ref = fleft->op1;
-  }
-  /* Use scalar evolution analysis results to strength-reduce sign-extension. */
-  if (ref == J->scev.idx) {
-    IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
-    lua_assert(irt_isint(J->scev.t));
-    if (lo && IR(lo)->i + ofs >= 0) {
-#if LJ_TARGET_X64
-      /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
-      return LEFTFOLD;
-#else
-      /* Reduce to a (cheaper) zero-extension. */
-      fins->op2 = IRTOINT_ZEXT64;
-      return RETRYFOLD;
-#endif
-    }
-  }
-  return NEXTFOLD;
-}
-
 /* -- Integer algebraic simplifications ----------------------------------- */
 
 LJFOLD(ADD any KINT)
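
All of the removed rules operate on the pre-CONV conversion opcodes (TONUM, TOINT, TOI64); the surviving context lines such as LJFOLD(CONV KINT IRCONV_NUM_INT) show the unified CONV-based rules that remain. As a minimal, standalone sketch (plain C, not LuaJIT code; the values and printout are illustrative only), the following mirrors the constant-folding semantics of the deleted kfold_toint and kfold_toi64_kint rules: a guarded number-to-int fold must fail when the double is not exactly representable as int32_t, and a 32-bit constant widens to 64 bits by either zero- or sign-extension depending on the conversion mode.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* kfold_toint semantics: a guarded TOINT on the constant 1.5 would always
     fail, because (double)(int32_t)1.5 != 1.5 (cf. FAILFOLD above). */
  double n = 1.5;
  int32_t k = (int32_t)n;
  printf("guarded TOINT on %.1f: %s\n", n,
         (n != (double)k) ? "FAILFOLD (guard always fails)" : "INTFOLD");

  /* kfold_toi64_kint semantics: the same 32-bit constant widens differently
     under IRTOINT_ZEXT64 (zero-extend) and IRTOINT_SEXT64 (sign-extend). */
  int32_t i = -1;
  printf("ZEXT64(-1) = %llu\n", (unsigned long long)(uint64_t)(uint32_t)i);
  printf("SEXT64(-1) = %lld\n", (long long)(int64_t)i);
  return 0;
}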