Diffstat:
 src/lj_asm_arm64.h | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------------
 1 file changed, 54 insertions(+), 41 deletions(-)
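Note: this commit replaces the bare lua_assert() calls in the ARM64 assembler backend with the message-carrying lj_assertA()/lj_assertJ() macros, and threads the ASMState through constant helpers such as get_k64val() and ir_khash() so the assertions can reach the global_State. Below is a minimal sketch of the macro plumbing this diff relies on, assuming companion definitions along the lines of lj_obj.h/lj_asm.c; lj_assert_fail() and the exact macro shapes are assumptions reconstructed from the call sites, not quoted from this commit.

#ifdef LUA_USE_ASSERT
/* Assumed helper: prints file/line/function plus the formatted message,
** then aborts. */
LJ_FUNC_NORET void lj_assert_fail(global_State *g, const char *file, int line,
                                  const char *func, const char *fmt, ...);
#define lj_assertG_(g, c, ...) \
  ((c) ? (void)0 : \
   lj_assert_fail((g), __FILE__, __LINE__, __func__, __VA_ARGS__))
/* Assembler code has an ASMState *as in scope, so the global_State is
** reached through as->J. lj_asm_patchexit() only has a jit_State *J,
** hence the separate lj_assertJ() variant used in the last two hunks. */
#define lj_assertA(c, ...) lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
#define lj_assertJ(c, ...) lj_assertG_(J2G(J), (c), __VA_ARGS__)
#else
#define lj_assertA(c, ...) ((void)0)
#define lj_assertJ(c, ...) ((void)0)
#endif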
diff --git a/src/lj_asm_arm64.h b/src/lj_asm_arm64.h
index a3502223..0729a3a5 100644
--- a/src/lj_asm_arm64.h
+++ b/src/lj_asm_arm64.h
@@ -213,7 +213,7 @@ static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow)
     return A64F_M(ir->r);
   } else if (irref_isk(ref)) {
     uint32_t m;
-    int64_t k = get_k64val(ir);
+    int64_t k = get_k64val(as, ref);
     if ((ai & 0x1f000000) == 0x0a000000)
       m = emit_isk13(k, irt_is64(ir->t));
     else
@@ -354,9 +354,9 @@ static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air)
 static int asm_fuseandshift(ASMState *as, IRIns *ir)
 {
   IRIns *irl = IR(ir->op1);
-  lua_assert(ir->o == IR_BAND);
+  lj_assertA(ir->o == IR_BAND, "bad usage");
   if (canfuse(as, irl) && irref_isk(ir->op2)) {
-    uint64_t mask = get_k64val(IR(ir->op2));
+    uint64_t mask = get_k64val(as, ir->op2);
     if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) {
       int32_t shmask = irt_is64(irl->t) ? 63 : 31;
       int32_t shift = (IR(irl->op2)->i & shmask);
@@ -384,7 +384,7 @@ static int asm_fuseandshift(ASMState *as, IRIns *ir)
 static int asm_fuseorshift(ASMState *as, IRIns *ir)
 {
   IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
-  lua_assert(ir->o == IR_BOR);
+  lj_assertA(ir->o == IR_BOR, "bad usage");
   if (canfuse(as, irl) && canfuse(as, irr) &&
       ((irl->o == IR_BSHR && irr->o == IR_BSHL) ||
        (irl->o == IR_BSHL && irr->o == IR_BSHR))) {
@@ -428,7 +428,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
     if (ref) {
       if (irt_isfp(ir->t)) {
         if (fpr <= REGARG_LASTFPR) {
-          lua_assert(rset_test(as->freeset, fpr)); /* Must have been evicted. */
+          lj_assertA(rset_test(as->freeset, fpr),
+                     "reg %d not free", fpr); /* Must have been evicted. */
           ra_leftov(as, fpr, ref);
           fpr++;
         } else {
@@ -438,7 +439,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
         }
       } else {
         if (gpr <= REGARG_LASTGPR) {
-          lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
+          lj_assertA(rset_test(as->freeset, gpr),
+                     "reg %d not free", gpr); /* Must have been evicted. */
           ra_leftov(as, gpr, ref);
           gpr++;
         } else {
@@ -459,7 +461,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
     rset_clear(drop, ir->r); /* Dest reg handled below. */
   ra_evictset(as, drop); /* Evictions must be performed first. */
   if (ra_used(ir)) {
-    lua_assert(!irt_ispri(ir->t));
+    lj_assertA(!irt_ispri(ir->t), "PRI dest");
     if (irt_isfp(ir->t)) {
       if (ci->flags & CCI_CASTU64) {
         Reg dest = ra_dest(as, ir, RSET_FPR) & 31;
@@ -546,7 +548,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
   int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
   int stfp = (st == IRT_NUM || st == IRT_FLOAT);
   IRRef lref = ir->op1;
-  lua_assert(irt_type(ir->t) != st);
+  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
   if (irt_isfp(ir->t)) {
     Reg dest = ra_dest(as, ir, RSET_FPR);
     if (stfp) { /* FP to FP conversion. */
@@ -566,7 +568,8 @@ static void asm_conv(ASMState *as, IRIns *ir)
   } else if (stfp) { /* FP to integer conversion. */
     if (irt_isguard(ir->t)) {
       /* Checked conversions are only supported from number to int. */
-      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+                 "bad type for checked CONV");
       asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
     } else {
       Reg left = ra_alloc1(as, lref, RSET_FPR);
@@ -586,7 +589,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
       A64Ins ai = st == IRT_I8 ? A64I_SXTBw :
                   st == IRT_U8 ? A64I_UXTBw :
                   st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw;
-      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
       emit_dn(as, ai, dest, left);
     } else {
       Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -650,7 +653,8 @@ static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
 {
   RegSet allow = rset_exclude(RSET_GPR, base);
   IRIns *ir = IR(ref);
-  lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+             "store of IR type %d", irt_type(ir->t));
   if (irref_isk(ref)) {
     TValue k;
     lj_ir_kvalue(as->J->L, &k, ir);
@@ -770,7 +774,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
     }
     rset_clear(allow, scr);
   } else {
-    lua_assert(irt_ispri(kt) && !irt_isnil(kt));
+    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
     type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
     scr = ra_scratch(as, rset_clear(allow, type));
     rset_clear(allow, scr);
@@ -831,7 +835,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
     rset_clear(allow, type);
   }
   /* Load main position relative to tab->node into dest. */
-  khash = isk ? ir_khash(irkey) : 1;
+  khash = isk ? ir_khash(as, irkey) : 1;
   if (khash == 0) {
     emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node));
   } else {
@@ -886,7 +890,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
   Reg key, idx = node;
   RegSet allow = rset_exclude(RSET_GPR, node);
   uint64_t k;
-  lua_assert(ofs % sizeof(Node) == 0);
+  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
   if (bigofs) {
     idx = dest;
     rset_clear(allow, dest);
@@ -936,7 +940,7 @@ static void asm_uref(ASMState *as, IRIns *ir)
 static void asm_fref(ASMState *as, IRIns *ir)
 {
   UNUSED(as); UNUSED(ir);
-  lua_assert(!ra_used(ir));
+  lj_assertA(!ra_used(ir), "unfused FREF");
 }
 
 static void asm_strref(ASMState *as, IRIns *ir)
@@ -988,7 +992,7 @@ static void asm_fload(ASMState *as, IRIns *ir)
   Reg idx;
   A64Ins ai = asm_fxloadins(ir);
   int32_t ofs;
-  if (ir->op1 == REF_NIL) {
+  if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
     idx = RID_GL;
     ofs = (ir->op2 << 2) - GG_OFS(g);
   } else {
@@ -1019,7 +1023,7 @@ static void asm_fstore(ASMState *as, IRIns *ir)
 static void asm_xload(ASMState *as, IRIns *ir)
 {
   Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
-  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
   asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
 }
 
@@ -1037,8 +1041,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
   Reg idx, tmp, type;
   int32_t ofs = 0;
   RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
-  lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
-             irt_isint(ir->t));
+  lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+             irt_isint(ir->t),
+             "bad load type %d", irt_type(ir->t));
   if (ra_used(ir)) {
     Reg dest = ra_dest(as, ir, allow);
     tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest;
@@ -1057,7 +1062,8 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
   /* Always do the type check, even if the load result is unused. */
   asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE);
   if (irt_type(ir->t) >= IRT_NUM) {
-    lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
+    lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
+               "bad load type %d", irt_type(ir->t));
     emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
             ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp);
   } else if (irt_isaddr(ir->t)) {
@@ -1122,8 +1128,10 @@ static void asm_sload(ASMState *as, IRIns *ir)
   IRType1 t = ir->t;
   Reg dest = RID_NONE, base;
   RegSet allow = RSET_GPR;
-  lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
-  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+             "bad parent SLOAD"); /* Handled by asm_head_side(). */
+  lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+             "inconsistent SLOAD variant");
   if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
     dest = ra_scratch(as, RSET_FPR);
     asm_tointg(as, ir, dest);
@@ -1132,7 +1140,8 @@ static void asm_sload(ASMState *as, IRIns *ir)
     Reg tmp = RID_NONE;
     if ((ir->op2 & IRSLOAD_CONVERT))
       tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR);
-    lua_assert((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t));
+    lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t),
+               "bad SLOAD type %d", irt_type(t));
     dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow);
     base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest));
     if (irt_isaddr(t)) {
@@ -1172,7 +1181,8 @@ dotypecheck:
     /* Need type check, even if the load result is unused. */
     asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE);
     if (irt_type(t) >= IRT_NUM) {
-      lua_assert(irt_isinteger(t) || irt_isnum(t));
+      lj_assertA(irt_isinteger(t) || irt_isnum(t),
+                 "bad SLOAD type %d", irt_type(t));
       emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
               ra_allock(as, LJ_TISNUM << 15, allow), tmp);
     } else if (irt_isnil(t)) {
@@ -1207,7 +1217,8 @@ static void asm_cnew(ASMState *as, IRIns *ir)
   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
   IRRef args[4];
   RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
-  lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));
+  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+             "bad CNEW/CNEWI operands");
 
   as->gcsteps++;
   asm_setupresult(as, ir, ci); /* GCcdata * */
@@ -1215,7 +1226,7 @@ static void asm_cnew(ASMState *as, IRIns *ir)
   if (ir->o == IR_CNEWI) {
     int32_t ofs = sizeof(GCcdata);
     Reg r = ra_alloc1(as, ir->op2, allow);
-    lua_assert(sz == 4 || sz == 8);
+    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
     emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs);
   } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
     ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
@@ -1274,7 +1285,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
   RegSet allow = RSET_GPR;
   Reg obj, val, tmp;
   /* No need for other object barriers (yet). */
-  lua_assert(IR(ir->op1)->o == IR_UREFC);
+  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
   ra_evictset(as, RSET_SCRATCH);
   l_end = emit_label(as);
   args[0] = ASMREF_TMP1; /* global_State *g */
@@ -1544,7 +1555,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh)
 #define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR)
 #define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR)
 #define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR)
-#define asm_brol(as, ir) lua_assert(0)
+#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
 
 static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc)
 {
@@ -1625,15 +1636,16 @@ static void asm_intcomp(ASMState *as, IRIns *ir)
   Reg left;
   uint32_t m;
   int cmpprev0 = 0;
-  lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
-             irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
+  lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
+             irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
+             "bad comparison data type %d", irt_type(ir->t));
   if (asm_swapops(as, lref, rref)) {
     IRRef tmp = lref; lref = rref; rref = tmp;
     if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
     else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
   }
   oldcc = cc;
-  if (irref_isk(rref) && get_k64val(IR(rref)) == 0) {
+  if (irref_isk(rref) && get_k64val(as, rref) == 0) {
     IRIns *irl = IR(lref);
     if (cc == CC_GE) cc = CC_PL;
     else if (cc == CC_LT) cc = CC_MI;
@@ -1648,7 +1660,7 @@ static void asm_intcomp(ASMState *as, IRIns *ir)
         Reg tmp = blref; blref = brref; brref = tmp;
       }
       if (irref_isk(brref)) {
-        uint64_t k = get_k64val(IR(brref));
+        uint64_t k = get_k64val(as, brref);
         if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) {
           asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ,
                        ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k));
@@ -1697,7 +1709,8 @@ static void asm_comp(ASMState *as, IRIns *ir)
 /* Hiword op of a split 64 bit op. Previous op must be the loword op. */
 static void asm_hiop(ASMState *as, IRIns *ir)
 {
-  UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on 64 bit. */
+  UNUSED(as); UNUSED(ir);
+  lj_assertA(0, "unexpected HIOP"); /* Unused on 64 bit. */
 }
 
 /* -- Profiling ----------------------------------------------------------- */
@@ -1705,7 +1718,7 @@ static void asm_hiop(ASMState *as, IRIns *ir)
 static void asm_prof(ASMState *as, IRIns *ir)
 {
   uint32_t k = emit_isk13(HOOK_PROFILE, 0);
-  lua_assert(k != 0);
+  lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13");
   UNUSED(ir);
   asm_guardcc(as, CC_NE);
   emit_n(as, A64I_TSTw^k, RID_TMP);
@@ -1723,7 +1736,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
   if (irp) {
     if (!ra_hasspill(irp->s)) {
       pbase = irp->r;
-      lua_assert(ra_hasreg(pbase));
+      lj_assertA(ra_hasreg(pbase), "base reg lost");
     } else if (allow) {
       pbase = rset_pickbot(allow);
     } else {
@@ -1735,7 +1748,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
   }
   emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno));
   k = emit_isk12((8*topslot));
-  lua_assert(k);
+  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
   emit_n(as, A64I_CMPx^k, RID_TMP);
   emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase);
   emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP,
@@ -1776,7 +1789,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
     }
     checkmclim(as);
   }
-  lua_assert(map + nent == flinks);
+  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
 }
 
 /* -- GC handling --------------------------------------------------------- */
@@ -1864,7 +1877,7 @@ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
     rset_clear(allow, ra_dest(as, ir, allow));
   } else {
     Reg r = irp->r;
-    lua_assert(ra_hasreg(r));
+    lj_assertA(ra_hasreg(r), "base reg lost");
     rset_clear(allow, r);
     if (r != ir->r && !rset_test(as->freeset, r))
       ra_restore(as, regcost_ref(as->cost[r]));
@@ -1888,7 +1901,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
   } else {
     /* Patch stack adjustment. */
     uint32_t k = emit_isk12(spadj);
-    lua_assert(k);
+    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
     p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP);
   }
   /* Patch exit branch. */
@@ -1974,7 +1987,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
     } else if ((ins & 0xfc000000u) == 0x14000000u &&
                ((ins ^ (px-p)) & 0x03ffffffu) == 0) {
       /* Patch b. */
-      lua_assert(A64F_S_OK(delta, 26));
+      lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
       *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta));
       if (!cstart) cstart = p;
     } else if ((ins & 0x7e000000u) == 0x34000000u &&
@@ -1995,7 +2008,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
   }
   { /* Always patch long-range branch in exit stub itself. */
     ptrdiff_t delta = target - px;
-    lua_assert(A64F_S_OK(delta, 26));
+    lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
     *px = A64I_B | A64F_S26(delta);
     if (!cstart) cstart = px;
   }
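Note: the other recurring change above is the get_k64val() call convention, which now takes the ASMState plus an IRRef instead of a pre-fetched IRIns *, so the helper itself can assert on the constant's IR opcode. A sketch of what the updated helper plausibly looks like, reconstructed from the call sites in this diff; the body is an assumption, not part of this commit's text.

/* Sketch: fetch a 64 bit constant for an IR reference. The lj_assertA()
** in the fallback branch is why the ASMState must now be passed in. */
static int64_t get_k64val(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_KINT64) {
    return (int64_t)ir_kint64(ir)->u64;
  } else if (ir->o == IR_KGC) {
    return (intptr_t)ir_kgc(ir);
  } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    return (intptr_t)ir_kptr(ir);
  } else {
    lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL,
               "bad 64 bit const IR op %d", ir->o);
    return ir->i; /* Sign-extended. */
  }
}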