aboutsummaryrefslogtreecommitdiff
path: root/src/lj_opt_fold.c
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/lj_opt_fold.c286
1 files changed, 235 insertions, 51 deletions
diff --git a/src/lj_opt_fold.c b/src/lj_opt_fold.c
index cee9776a..e1d13691 100644
--- a/src/lj_opt_fold.c
+++ b/src/lj_opt_fold.c
@@ -14,18 +14,21 @@
14 14
15#if LJ_HASJIT 15#if LJ_HASJIT
16 16
17#include "lj_buf.h"
17#include "lj_str.h" 18#include "lj_str.h"
18#include "lj_tab.h" 19#include "lj_tab.h"
19#include "lj_ir.h" 20#include "lj_ir.h"
20#include "lj_jit.h" 21#include "lj_jit.h"
22#include "lj_ircall.h"
21#include "lj_iropt.h" 23#include "lj_iropt.h"
22#include "lj_trace.h" 24#include "lj_trace.h"
23#if LJ_HASFFI 25#if LJ_HASFFI
24#include "lj_ctype.h" 26#include "lj_ctype.h"
25#endif
26#include "lj_carith.h" 27#include "lj_carith.h"
28#endif
27#include "lj_vm.h" 29#include "lj_vm.h"
28#include "lj_strscan.h" 30#include "lj_strscan.h"
31#include "lj_strfmt.h"
29 32
30/* Here's a short description how the FOLD engine processes instructions: 33/* Here's a short description how the FOLD engine processes instructions:
31** 34**
@@ -155,13 +158,14 @@ typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
155 158
156/* Barrier to prevent folding across a GC step. 159/* Barrier to prevent folding across a GC step.
157** GC steps can only happen at the head of a trace and at LOOP. 160** GC steps can only happen at the head of a trace and at LOOP.
158** And the GC is only driven forward if there is at least one allocation. 161** And the GC is only driven forward if there's at least one allocation.
159*/ 162*/
160#define gcstep_barrier(J, ref) \ 163#define gcstep_barrier(J, ref) \
161 ((ref) < J->chain[IR_LOOP] && \ 164 ((ref) < J->chain[IR_LOOP] && \
162 (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \ 165 (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
163 J->chain[IR_TNEW] || J->chain[IR_TDUP] || \ 166 J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
164 J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR])) 167 J->chain[IR_CNEW] || J->chain[IR_CNEWI] || \
168 J->chain[IR_BUFSTR] || J->chain[IR_TOSTR] || J->chain[IR_CALLA]))
165 169
166/* -- Constant folding for FP numbers ------------------------------------- */ 170/* -- Constant folding for FP numbers ------------------------------------- */
167 171
@@ -336,11 +340,9 @@ LJFOLDF(kfold_intcomp0)
336static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) 340static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op)
337{ 341{
338 switch (op) { 342 switch (op) {
339#if LJ_64 || LJ_HASFFI 343#if LJ_HASFFI
340 case IR_ADD: k1 += k2; break; 344 case IR_ADD: k1 += k2; break;
341 case IR_SUB: k1 -= k2; break; 345 case IR_SUB: k1 -= k2; break;
342#endif
343#if LJ_HASFFI
344 case IR_MUL: k1 *= k2; break; 346 case IR_MUL: k1 *= k2; break;
345 case IR_BAND: k1 &= k2; break; 347 case IR_BAND: k1 &= k2; break;
346 case IR_BOR: k1 |= k2; break; 348 case IR_BOR: k1 |= k2; break;
@@ -392,20 +394,10 @@ LJFOLD(BROL KINT64 KINT)
392LJFOLD(BROR KINT64 KINT) 394LJFOLD(BROR KINT64 KINT)
393LJFOLDF(kfold_int64shift) 395LJFOLDF(kfold_int64shift)
394{ 396{
395#if LJ_HASFFI || LJ_64 397#if LJ_HASFFI
396 uint64_t k = ir_k64(fleft)->u64; 398 uint64_t k = ir_k64(fleft)->u64;
397 int32_t sh = (fright->i & 63); 399 int32_t sh = (fright->i & 63);
398 switch ((IROp)fins->o) { 400 return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL));
399 case IR_BSHL: k <<= sh; break;
400#if LJ_HASFFI
401 case IR_BSHR: k >>= sh; break;
402 case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break;
403 case IR_BROL: k = lj_rol(k, sh); break;
404 case IR_BROR: k = lj_ror(k, sh); break;
405#endif
406 default: lua_assert(0); break;
407 }
408 return INT64FOLD(k);
409#else 401#else
410 UNUSED(J); lua_assert(0); return FAILFOLD; 402 UNUSED(J); lua_assert(0); return FAILFOLD;
411#endif 403#endif
@@ -528,6 +520,180 @@ LJFOLDF(kfold_strcmp)
528 return NEXTFOLD; 520 return NEXTFOLD;
529} 521}
530 522
523/* -- Constant folding and forwarding for buffers ------------------------- */
524
525/*
526** Buffer ops perform stores, but their effect is limited to the buffer
527** itself. Also, buffer ops are chained: a use of an op implies a use of
528** all other ops up the chain. Conversely, if an op is unused, all ops
529** up the chain can go unsed. This largely eliminates the need to treat
530** them as stores.
531**
532** Alas, treating them as normal (IRM_N) ops doesn't work, because they
533** cannot be CSEd in isolation. CSE for IRM_N is implicitly done in LOOP
534** or if FOLD is disabled.
535**
536** The compromise is to declare them as loads, emit them like stores and
537** CSE whole chains manually when the BUFSTR is to be emitted. Any chain
538** fragments left over from CSE are eliminated by DCE.
539*/
540
541/* BUFHDR is emitted like a store, see below. */
542
LJFOLD(BUFPUT BUFHDR BUFSTR)
LJFOLDF(bufput_append)
{
  /* Appending the result (BUFSTR) of a prior chain for the SAME buffer to
  ** a fresh BUFHDR: instead of re-putting the finished string, turn the
  ** BUFHDR into an append that continues the earlier chain.
  ** New buffer, no other buffer op in between and same buffer?
  */
  if ((J->flags & JIT_F_OPT_FWD) &&
      !(fleft->op2 & IRBUFHDR_APPEND) &&
      fleft->prev == fright->op2 &&  /* No intervening buffer op. */
      fleft->op1 == IR(fright->op2)->op1) {  /* Same SBuf. */
    IRRef ref = fins->op1;
    IR(ref)->op2 = (fleft->op2 | IRBUFHDR_APPEND);  /* Modify BUFHDR. */
    IR(ref)->op1 = fright->op1;  /* Link to the tail of the old chain. */
    return ref;
  }
  return EMITFOLD;  /* Always emit, CSE later. */
}
558
LJFOLD(BUFPUT any any)
LJFOLDF(bufput_kgc)
{
  /* Constant-fold puts of string constants: drop empty strings and merge
  ** two consecutive constant puts into one concatenated constant.
  */
  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fright->o == IR_KGC) {
    GCstr *s2 = ir_kstr(fright);
    if (s2->len == 0) {  /* Empty string? Putting it is a no-op. */
      return LEFTFOLD;
    } else {
      if (fleft->o == IR_BUFPUT && irref_isk(fleft->op2) &&
	  !irt_isphi(fleft->t)) {  /* Join two constant string puts in a row. */
	GCstr *s1 = ir_kstr(IR(fleft->op2));
	IRRef kref = lj_ir_kstr(J, lj_buf_cat2str(J->L, s1, s2));
	/* lj_ir_kstr() may realloc the IR and invalidates any IRIns *.
	** So re-derive the previous BUFPUT via its reference, not fleft.
	*/
	IR(fins->op1)->op2 = kref;  /* Modify previous BUFPUT. */
	return fins->op1;
      }
    }
  }
  return EMITFOLD;  /* Always emit, CSE later. */
}
579
LJFOLD(BUFSTR any any)
LJFOLDF(bufstr_kfold_cse)
{
  /* Fold or CSE the materialization of a buffer into a string.
  ** Buffer op chains cannot be CSEd piecewise (see comment above), so the
  ** whole chain is compared manually against earlier BUFSTR chains.
  */
  lua_assert(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT ||
	     fleft->o == IR_CALLL);
  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
    if (fleft->o == IR_BUFHDR) {  /* No put operations? */
      if (!(fleft->op2 & IRBUFHDR_APPEND))  /* Empty buffer? */
	return lj_ir_kstr(J, &J2G(J)->strempty);
      /* Append-to-nothing: the result is the string the chain appended to. */
      fins->op1 = fleft->op1;
      fins->op2 = fleft->prev;  /* Relies on checks in bufput_append. */
      return CSEFOLD;
    } else if (fleft->o == IR_BUFPUT) {
      IRIns *irb = IR(fleft->op1);
      if (irb->o == IR_BUFHDR && !(irb->op2 & IRBUFHDR_APPEND))
	return fleft->op2;  /* Shortcut for a single put operation. */
    }
  }
  /* Try to CSE the whole chain. */
  if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
    IRRef ref = J->chain[IR_BUFSTR];
    while (ref) {  /* For each earlier BUFSTR ... */
      /* ... walk both chains in lock-step while ops and operands match. */
      IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1);
      while (ira->o == irb->o && ira->op2 == irb->op2) {
	lua_assert(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT ||
		   ira->o == IR_CALLL || ira->o == IR_CARG);
	if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND))
	  return ref;  /* Reached matching chain heads: CSE succeeded. */
	if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab)
	  break;  /* puttab has side-channel dependencies; don't CSE across. */
	ira = IR(ira->op1);
	irb = IR(irb->op1);
      }
      ref = irs->prev;  /* Next earlier BUFSTR in the chain. */
    }
  }
  return EMITFOLD;  /* No CSE possible. */
}
618
LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_reverse)
LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_upper)
LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_lower)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putquoted)
LJFOLDF(bufput_kfold_op)
{
  /* Constant-fold a single-string buffer op (reverse/upper/lower/quoted):
  ** run the op at record time on the constant string and replace the call
  ** with a plain BUFPUT of the resulting string constant.
  */
  if (irref_isk(fleft->op2)) {  /* String argument is a constant? */
    const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
    SBuf *sb = lj_buf_tmp_(J->L);
    /* Invoke the same C function the trace would have called. */
    sb = ((SBuf * (LJ_FASTCALL *)(SBuf *, GCstr *))ci->func)(sb,
						       ir_kstr(IR(fleft->op2)));
    fins->o = IR_BUFPUT;
    fins->op1 = fleft->op1;  /* Keep the buffer chain link from the CARG. */
    fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
    return RETRYFOLD;  /* Re-run fold rules for the new BUFPUT. */
  }
  return EMITFOLD;  /* Always emit, CSE later. */
}
637
LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_rep)
LJFOLDF(bufput_kfold_rep)
{
  /* Constant-fold string.rep: if both the string and the repeat count are
  ** constants, compute the repeated string now and emit a plain BUFPUT.
  */
  if (irref_isk(fleft->op2)) {  /* Repeat count is a constant? */
    IRIns *irc = IR(fleft->op1);  /* Inner CARG: (buffer, string). */
    if (irref_isk(irc->op2)) {  /* String is a constant too? */
      SBuf *sb = lj_buf_tmp_(J->L);
      sb = lj_buf_putstr_rep(sb, ir_kstr(IR(irc->op2)), IR(fleft->op2)->i);
      fins->o = IR_BUFPUT;
      fins->op1 = irc->op1;  /* Keep the buffer chain link. */
      fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
      return RETRYFOLD;  /* Re-run fold rules for the new BUFPUT. */
    }
  }
  return EMITFOLD;  /* Always emit, CSE later. */
}
654
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfxint)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_int)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_uint)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfstr)
LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar)
LJFOLDF(bufput_kfold_fmt)
{
  /* Constant-fold a single string.format element: if the formatted value
  ** is a constant, format it at record time and emit a plain BUFPUT of
  ** the resulting string constant.
  */
  IRIns *irc = IR(fleft->op1);  /* Inner CARG: (buffer, format). */
  lua_assert(irref_isk(irc->op2));  /* SFormat must be const. */
  if (irref_isk(fleft->op2)) {  /* Value to format is a constant? */
    SFormat sf = (SFormat)IR(irc->op2)->i;
    IRIns *ira = IR(fleft->op2);
    SBuf *sb = lj_buf_tmp_(J->L);
    /* Dispatch on the call ID to pick the matching formatter and the
    ** matching way to extract the constant operand.
    */
    switch (fins->op2) {
    case IRCALL_lj_strfmt_putfxint:
      sb = lj_strfmt_putfxint(sb, sf, ir_k64(ira)->u64);
      break;
    case IRCALL_lj_strfmt_putfstr:
      sb = lj_strfmt_putfstr(sb, sf, ir_kstr(ira));
      break;
    case IRCALL_lj_strfmt_putfchar:
      sb = lj_strfmt_putfchar(sb, sf, ira->i);
      break;
    case IRCALL_lj_strfmt_putfnum_int:
    case IRCALL_lj_strfmt_putfnum_uint:
    case IRCALL_lj_strfmt_putfnum:
    default: {
      /* All putfnum variants share the (SBuf *, SFormat, lua_Number)
      ** signature, so the call can go through a common function pointer.
      */
      const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
      sb = ((SBuf * (*)(SBuf *, SFormat, lua_Number))ci->func)(sb, sf,
							       ir_knum(ira)->n);
      break;
      }
    }
    fins->o = IR_BUFPUT;
    fins->op1 = irc->op1;  /* Keep the buffer chain link. */
    fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
    return RETRYFOLD;  /* Re-run fold rules for the new BUFPUT. */
  }
  return EMITFOLD;  /* Always emit, CSE later. */
}
696
531/* -- Constant folding of pointer arithmetic ------------------------------ */ 697/* -- Constant folding of pointer arithmetic ------------------------------ */
532 698
533LJFOLD(ADD KGC KINT) 699LJFOLD(ADD KGC KINT)
@@ -648,27 +814,22 @@ LJFOLD(CONV KNUM IRCONV_INT_NUM)
648LJFOLDF(kfold_conv_knum_int_num) 814LJFOLDF(kfold_conv_knum_int_num)
649{ 815{
650 lua_Number n = knumleft; 816 lua_Number n = knumleft;
651 if (!(fins->op2 & IRCONV_TRUNC)) { 817 int32_t k = lj_num2int(n);
652 int32_t k = lj_num2int(n); 818 if (irt_isguard(fins->t) && n != (lua_Number)k) {
653 if (irt_isguard(fins->t) && n != (lua_Number)k) { 819 /* We're about to create a guard which always fails, like CONV +1.5.
654 /* We're about to create a guard which always fails, like CONV +1.5. 820 ** Some pathological loops cause this during LICM, e.g.:
655 ** Some pathological loops cause this during LICM, e.g.: 821 ** local x,k,t = 0,1.5,{1,[1.5]=2}
656 ** local x,k,t = 0,1.5,{1,[1.5]=2} 822 ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
657 ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end 823 ** assert(x == 300)
658 ** assert(x == 300) 824 */
659 */ 825 return FAILFOLD;
660 return FAILFOLD;
661 }
662 return INTFOLD(k);
663 } else {
664 return INTFOLD((int32_t)n);
665 } 826 }
827 return INTFOLD(k);
666} 828}
667 829
668LJFOLD(CONV KNUM IRCONV_U32_NUM) 830LJFOLD(CONV KNUM IRCONV_U32_NUM)
669LJFOLDF(kfold_conv_knum_u32_num) 831LJFOLDF(kfold_conv_knum_u32_num)
670{ 832{
671 lua_assert((fins->op2 & IRCONV_TRUNC));
672#ifdef _MSC_VER 833#ifdef _MSC_VER
673 { /* Workaround for MSVC bug. */ 834 { /* Workaround for MSVC bug. */
674 volatile uint32_t u = (uint32_t)knumleft; 835 volatile uint32_t u = (uint32_t)knumleft;
@@ -682,27 +843,27 @@ LJFOLDF(kfold_conv_knum_u32_num)
682LJFOLD(CONV KNUM IRCONV_I64_NUM) 843LJFOLD(CONV KNUM IRCONV_I64_NUM)
683LJFOLDF(kfold_conv_knum_i64_num) 844LJFOLDF(kfold_conv_knum_i64_num)
684{ 845{
685 lua_assert((fins->op2 & IRCONV_TRUNC));
686 return INT64FOLD((uint64_t)(int64_t)knumleft); 846 return INT64FOLD((uint64_t)(int64_t)knumleft);
687} 847}
688 848
689LJFOLD(CONV KNUM IRCONV_U64_NUM) 849LJFOLD(CONV KNUM IRCONV_U64_NUM)
690LJFOLDF(kfold_conv_knum_u64_num) 850LJFOLDF(kfold_conv_knum_u64_num)
691{ 851{
692 lua_assert((fins->op2 & IRCONV_TRUNC));
693 return INT64FOLD(lj_num2u64(knumleft)); 852 return INT64FOLD(lj_num2u64(knumleft));
694} 853}
695 854
696LJFOLD(TOSTR KNUM) 855LJFOLD(TOSTR KNUM any)
697LJFOLDF(kfold_tostr_knum) 856LJFOLDF(kfold_tostr_knum)
698{ 857{
699 return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft)); 858 return lj_ir_kstr(J, lj_strfmt_num(J->L, ir_knum(fleft)));
700} 859}
701 860
702LJFOLD(TOSTR KINT) 861LJFOLD(TOSTR KINT any)
703LJFOLDF(kfold_tostr_kint) 862LJFOLDF(kfold_tostr_kint)
704{ 863{
705 return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i)); 864 return lj_ir_kstr(J, fins->op2 == IRTOSTR_INT ?
865 lj_strfmt_int(J->L, fleft->i) :
866 lj_strfmt_char(J->L, fleft->i));
706} 867}
707 868
708LJFOLD(STRTO KGC) 869LJFOLD(STRTO KGC)
@@ -1205,7 +1366,9 @@ static TRef simplify_intmul_k(jit_State *J, int32_t k)
1205 ** But this is mainly intended for simple address arithmetic. 1366 ** But this is mainly intended for simple address arithmetic.
1206 ** Also it's easier for the backend to optimize the original multiplies. 1367 ** Also it's easier for the backend to optimize the original multiplies.
1207 */ 1368 */
1208 if (k == 1) { /* i * 1 ==> i */ 1369 if (k == 0) { /* i * 0 ==> 0 */
1370 return RIGHTFOLD;
1371 } else if (k == 1) { /* i * 1 ==> i */
1209 return LEFTFOLD; 1372 return LEFTFOLD;
1210 } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */ 1373 } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
1211 fins->o = IR_BSHL; 1374 fins->o = IR_BSHL;
@@ -1218,9 +1381,7 @@ static TRef simplify_intmul_k(jit_State *J, int32_t k)
1218LJFOLD(MUL any KINT) 1381LJFOLD(MUL any KINT)
1219LJFOLDF(simplify_intmul_k32) 1382LJFOLDF(simplify_intmul_k32)
1220{ 1383{
1221 if (fright->i == 0) /* i * 0 ==> 0 */ 1384 if (fright->i >= 0)
1222 return INTFOLD(0);
1223 else if (fright->i > 0)
1224 return simplify_intmul_k(J, fright->i); 1385 return simplify_intmul_k(J, fright->i);
1225 return NEXTFOLD; 1386 return NEXTFOLD;
1226} 1387}
@@ -1228,14 +1389,13 @@ LJFOLDF(simplify_intmul_k32)
1228LJFOLD(MUL any KINT64) 1389LJFOLD(MUL any KINT64)
1229LJFOLDF(simplify_intmul_k64) 1390LJFOLDF(simplify_intmul_k64)
1230{ 1391{
1231 if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */ 1392#if LJ_HASFFI
1232 return INT64FOLD(0); 1393 if (ir_kint64(fright)->u64 < 0x80000000u)
1233#if LJ_64
1234 /* NYI: SPLIT for BSHL and 32 bit backend support. */
1235 else if (ir_kint64(fright)->u64 < 0x80000000u)
1236 return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); 1394 return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
1237#endif
1238 return NEXTFOLD; 1395 return NEXTFOLD;
1396#else
1397 UNUSED(J); lua_assert(0); return FAILFOLD;
1398#endif
1239} 1399}
1240 1400
1241LJFOLD(MOD any KINT) 1401LJFOLD(MOD any KINT)
@@ -1535,7 +1695,7 @@ LJFOLD(BOR BOR KINT64)
1535LJFOLD(BXOR BXOR KINT64) 1695LJFOLD(BXOR BXOR KINT64)
1536LJFOLDF(reassoc_intarith_k64) 1696LJFOLDF(reassoc_intarith_k64)
1537{ 1697{
1538#if LJ_HASFFI || LJ_64 1698#if LJ_HASFFI
1539 IRIns *irk = IR(fleft->op2); 1699 IRIns *irk = IR(fleft->op2);
1540 if (irk->o == IR_KINT64) { 1700 if (irk->o == IR_KINT64) {
1541 uint64_t k = kfold_int64arith(ir_k64(irk)->u64, 1701 uint64_t k = kfold_int64arith(ir_k64(irk)->u64,
@@ -1953,6 +2113,7 @@ LJFOLDF(fwd_href_tdup)
1953** an aliased table, as it may invalidate all of the pointers and fields. 2113** an aliased table, as it may invalidate all of the pointers and fields.
1954** Only HREF needs the NEWREF check -- AREF and HREFK already depend on 2114** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
1955** FLOADs. And NEWREF itself is treated like a store (see below). 2115** FLOADs. And NEWREF itself is treated like a store (see below).
2116** LREF is constant (per trace) since coroutine switches are not inlined.
1956*/ 2117*/
1957LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) 2118LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
1958LJFOLDF(fload_tab_tnew_asize) 2119LJFOLDF(fload_tab_tnew_asize)
@@ -2016,6 +2177,14 @@ LJFOLDF(fload_str_len_snew)
2016 return NEXTFOLD; 2177 return NEXTFOLD;
2017} 2178}
2018 2179
2180LJFOLD(FLOAD TOSTR IRFL_STR_LEN)
2181LJFOLDF(fload_str_len_tostr)
2182{
2183 if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fleft->op2 == IRTOSTR_CHAR)
2184 return INTFOLD(1);
2185 return NEXTFOLD;
2186}
2187
2019/* The C type ID of cdata objects is immutable. */ 2188/* The C type ID of cdata objects is immutable. */
2020LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID) 2189LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID)
2021LJFOLDF(fload_cdata_typeid_kgc) 2190LJFOLDF(fload_cdata_typeid_kgc)
@@ -2062,6 +2231,8 @@ LJFOLDF(fload_cdata_ptr_int64_cnew)
2062} 2231}
2063 2232
2064LJFOLD(FLOAD any IRFL_STR_LEN) 2233LJFOLD(FLOAD any IRFL_STR_LEN)
2234LJFOLD(FLOAD any IRFL_FUNC_ENV)
2235LJFOLD(FLOAD any IRFL_THREAD_ENV)
2065LJFOLD(FLOAD any IRFL_CDATA_CTYPEID) 2236LJFOLD(FLOAD any IRFL_CDATA_CTYPEID)
2066LJFOLD(FLOAD any IRFL_CDATA_PTR) 2237LJFOLD(FLOAD any IRFL_CDATA_PTR)
2067LJFOLD(FLOAD any IRFL_CDATA_INT) 2238LJFOLD(FLOAD any IRFL_CDATA_INT)
@@ -2127,6 +2298,17 @@ LJFOLDF(barrier_tnew_tdup)
2127 return DROPFOLD; 2298 return DROPFOLD;
2128} 2299}
2129 2300
2301/* -- Profiling ----------------------------------------------------------- */
2302
2303LJFOLD(PROF any any)
2304LJFOLDF(prof)
2305{
2306 IRRef ref = J->chain[IR_PROF];
2307 if (ref+1 == J->cur.nins) /* Drop neighbouring IR_PROF. */
2308 return ref;
2309 return EMITFOLD;
2310}
2311
2130/* -- Stores and allocations ---------------------------------------------- */ 2312/* -- Stores and allocations ---------------------------------------------- */
2131 2313
2132/* Stores and allocations cannot be folded or passed on to CSE in general. 2314/* Stores and allocations cannot be folded or passed on to CSE in general.
@@ -2149,8 +2331,9 @@ LJFOLD(XSTORE any any)
2149LJFOLDX(lj_opt_dse_xstore) 2331LJFOLDX(lj_opt_dse_xstore)
2150 2332
2151LJFOLD(NEWREF any any) /* Treated like a store. */ 2333LJFOLD(NEWREF any any) /* Treated like a store. */
2152LJFOLD(CALLS any any) 2334LJFOLD(CALLA any any)
2153LJFOLD(CALLL any any) /* Safeguard fallback. */ 2335LJFOLD(CALLL any any) /* Safeguard fallback. */
2336LJFOLD(CALLS any any)
2154LJFOLD(CALLXS any any) 2337LJFOLD(CALLXS any any)
2155LJFOLD(XBAR) 2338LJFOLD(XBAR)
2156LJFOLD(RETF any any) /* Modifies BASE. */ 2339LJFOLD(RETF any any) /* Modifies BASE. */
@@ -2158,6 +2341,7 @@ LJFOLD(TNEW any any)
2158LJFOLD(TDUP any) 2341LJFOLD(TDUP any)
2159LJFOLD(CNEW any any) 2342LJFOLD(CNEW any any)
2160LJFOLD(XSNEW any any) 2343LJFOLD(XSNEW any any)
2344LJFOLD(BUFHDR any any)
2161LJFOLDX(lj_ir_emit) 2345LJFOLDX(lj_ir_emit)
2162 2346
2163/* ------------------------------------------------------------------------ */ 2347/* ------------------------------------------------------------------------ */