Diffstat (limited to 'src/lj_asm_arm.h')
-rw-r--r-- | src/lj_asm_arm.h | 422
1 file changed, 134 insertions, 288 deletions
diff --git a/src/lj_asm_arm.h b/src/lj_asm_arm.h
index a66573c0..9b661eb7 100644
--- a/src/lj_asm_arm.h
+++ b/src/lj_asm_arm.h
@@ -338,7 +338,7 @@ static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
 /* Generate a call to a C function. */
 static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
 {
-  uint32_t n, nargs = CCI_NARGS(ci);
+  uint32_t n, nargs = CCI_XNARGS(ci);
   int32_t ofs = 0;
 #if LJ_SOFTFP
   Reg gpr = REGARG_FIRSTGPR;
@@ -453,15 +453,6 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
   UNUSED(ci);
 }
 
-static void asm_call(ASMState *as, IRIns *ir)
-{
-  IRRef args[CCI_NARGS_MAX];
-  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
-  asm_collectargs(as, ir, ci, args);
-  asm_setupresult(as, ir, ci);
-  asm_gencall(as, ci, args);
-}
-
 static void asm_callx(ASMState *as, IRIns *ir)
 {
   IRRef args[CCI_NARGS_MAX*2];
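asm_call is not lost: this commit moves arch-independent helpers (asm_call here, and asm_conv64, asm_tostr, asm_newref, asm_callid and the asm_ir dispatcher further down) out of the per-architecture headers, so each backend only supplies the asm_* building blocks. Presumably the shared copy in lj_asm.c keeps exactly the deleted body:

    /* Sketch of the shared helper after the move (same logic as the
    ** lines deleted above; only its home changes, not its behavior). */
    static void asm_call(ASMState *as, IRIns *ir)
    {
      IRRef args[CCI_NARGS_MAX];
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      asm_collectargs(as, ir, ci, args);  /* Gather CARG chain into args[]. */
      asm_setupresult(as, ir, ci);        /* Allocate the result register.  */
      asm_gencall(as, ci, args);          /* Emit the actual C call.        */
    }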
@@ -528,6 +519,8 @@ static void asm_tobit(ASMState *as, IRIns *ir)
   emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
   emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
 }
+#else
+#define asm_tobit(as, ir)	lua_assert(0)
 #endif
 
 static void asm_conv(ASMState *as, IRIns *ir)
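The new #else branch is the other half of the same refactor: once dispatch is arch-independent (see the switch deleted at the end of this diff), asm_tobit must resolve to a valid name on every build, including soft-float ones where the front end never emits IR_TOBIT. A stub assert is preferable to an undefined identifier, and it costs nothing in release builds:

    /* The idiom, isolated: lua_assert() expands to a no-op unless LuaJIT
    ** is built with assertions enabled, so the stub compiles away. */
    #if LJ_SOFTFP
    #define asm_tobit(as, ir)	lua_assert(0)  /* Unreachable on soft-float. */
    #endif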
@@ -600,31 +593,6 @@ static void asm_conv(ASMState *as, IRIns *ir)
   }
 }
 
-#if !LJ_SOFTFP && LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
-  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
-  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
-  IRCallID id;
-  CCallInfo ci;
-  IRRef args[2];
-  args[0] = (ir-1)->op1;
-  args[1] = ir->op1;
-  if (st == IRT_NUM || st == IRT_FLOAT) {
-    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
-    ir--;
-  } else {
-    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
-  }
-  ci = lj_ir_callinfo[id];
-#if !LJ_ABI_SOFTFP
-  ci.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
-#endif
-  asm_setupresult(as, ir, &ci);
-  asm_gencall(as, &ci, args);
-}
-#endif
-
 static void asm_strto(ASMState *as, IRIns *ir)
 {
   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
@@ -688,6 +656,8 @@ static void asm_strto(ASMState *as, IRIns *ir)
   emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
 }
 
+/* -- Memory references --------------------------------------------------- */
+
 /* Get pointer to TValue. */
 static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
 {
@@ -713,7 +683,7 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
       Reg src = ra_alloc1(as, ref, allow);
       emit_lso(as, ARMI_STR, src, RID_SP, 0);
     }
-    if ((ir+1)->o == IR_HIOP)
+    if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
       type = ra_alloc1(as, ref+1, allow);
     else
       type = ra_allock(as, irt_toitype(ir->t), allow);
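LJ_SOFTFP is a compile-time 0/1 constant (from lj_arch.h), so on hard-float builds the compiler folds the new test away and the HIOP branch, which handles the high word of a soft-float number pair, becomes dead code. The unguarded form is equivalent to an #if version but keeps both branches type-checked on every build:

    /* What the single `if' replaces (equivalent, but noisier): */
    #if LJ_SOFTFP
        if ((ir+1)->o == IR_HIOP)
          type = ra_alloc1(as, ref+1, allow);
        else
    #endif
          type = ra_allock(as, irt_toitype(ir->t), allow);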
@@ -721,27 +691,6 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
   }
 }
 
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
-  IRRef args[2];
-  args[0] = ASMREF_L;
-  as->gcsteps++;
-  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
-    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
-    args[1] = ASMREF_TMP1;  /* const lua_Number * */
-    asm_setupresult(as, ir, ci);  /* GCstr * */
-    asm_gencall(as, ci, args);
-    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
-  } else {
-    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
-    args[1] = ir->op1;  /* int32_t k */
-    asm_setupresult(as, ir, ci);  /* GCstr * */
-    asm_gencall(as, ci, args);
-  }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
 static void asm_aref(ASMState *as, IRIns *ir)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -959,20 +908,6 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
   emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
 }
 
-static void asm_newref(ASMState *as, IRIns *ir)
-{
-  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
-  IRRef args[3];
-  if (ir->r == RID_SINK)
-    return;
-  args[0] = ASMREF_L;     /* lua_State *L */
-  args[1] = ir->op1;      /* GCtab *t     */
-  args[2] = ASMREF_TMP1;  /* cTValue *key */
-  asm_setupresult(as, ir, ci);  /* TValue * */
-  asm_gencall(as, ci, args);
-  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
-}
-
 static void asm_uref(ASMState *as, IRIns *ir)
 {
   /* NYI: Check that UREFO is still open and not aliasing a slot. */
@@ -1105,7 +1040,7 @@ static void asm_xload(ASMState *as, IRIns *ir)
   asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
 }
 
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
+static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
 {
   if (ir->r != RID_SINK) {
     Reg src = ra_alloc1(as, ir->op2,
@@ -1115,6 +1050,8 @@ static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
   }
 }
 
+#define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)
+
 static void asm_ahuvload(ASMState *as, IRIns *ir)
 {
   int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
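Renaming the worker to asm_xstore_ and wrapping it frees the asm_xstore name for the dispatcher, which invokes every op uniformly as asm_<op>(as, ir) and cannot pass an extra offset. The macro supplies the default in place of the default argument C lacks; asm_hiop (later in this diff) calls the worker directly with ofs = 4. Both call sites appear in this diff:

    asm_xstore(as, ir);      /* Dispatcher entry: asm_xstore_(as, ir, 0). */
    asm_xstore_(as, ir, 4);  /* asm_hiop: hiword of a 64-bit IR_XSTORE.   */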
@@ -1272,19 +1209,16 @@ dotypecheck:
 static void asm_cnew(ASMState *as, IRIns *ir)
 {
   CTState *cts = ctype_ctsG(J2G(as->J));
-  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
-  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
-	      lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
+  CTypeID id = (CTypeID)IR(ir->op1)->i;
+  CTSize sz;
+  CTInfo info = lj_ctype_info(cts, id, &sz);
   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
-  IRRef args[2];
+  IRRef args[4];
   RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
   RegSet drop = RSET_SCRATCH;
-  lua_assert(sz != CTSIZE_INVALID);
+  lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));
 
-  args[0] = ASMREF_L;     /* lua_State *L */
-  args[1] = ASMREF_TMP1;  /* MSize size   */
   as->gcsteps++;
-
   if (ra_hasreg(ir->r))
     rset_clear(drop, ir->r);  /* Dest reg handled below. */
   ra_evictset(as, drop);
@@ -1306,16 +1240,28 @@ static void asm_cnew(ASMState *as, IRIns *ir)
       if (ofs == sizeof(GCcdata)) break;
       ofs -= 4; ir--;
     }
+  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
+    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+    args[0] = ASMREF_L;     /* lua_State *L */
+    args[1] = ir->op1;      /* CTypeID id   */
+    args[2] = ir->op2;      /* CTSize sz    */
+    args[3] = ASMREF_TMP1;  /* CTSize align */
+    asm_gencall(as, ci, args);
+    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+    return;
   }
+
   /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
   {
-    uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
-    Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
+    uint32_t k = emit_isk12(ARMI_MOV, id);
+    Reg r = k ? RID_R1 : ra_allock(as, id, allow);
     emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
     emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
     emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
     if (k) emit_d(as, ARMI_MOV^k, RID_R1);
   }
+  args[0] = ASMREF_L;     /* lua_State *L */
+  args[1] = ASMREF_TMP1;  /* MSize size   */
   asm_gencall(as, ci, args);
   ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
 	       ra_releasetmp(as, ASMREF_TMP1));
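asm_cnew now handles three allocation shapes instead of two: IR_CNEWI with inline initialization, fixed-size IR_CNEW via lj_mem_newgco(), and the new branch for variable-length/variable-size (VLA/VLS) or over-aligned cdata via lj_cdata_newv(), which takes the runtime size from op2 and the static alignment from lj_ctype_info(). That is also why the lua_assert now tolerates sz == CTSIZE_INVALID exactly when op2 carries a real size. The call shape implied by the args[] setup above; the prototype is reconstructed from the argument comments, assuming the declaration in lj_cdata.h matches:

    GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align);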
@@ -1392,24 +1338,41 @@ static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
   emit_dm(as, ai, (dest & 15), (left & 15));
 }
 
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
-  IRIns *irp = IR(ir->op1);
-  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
-    IRIns *irpp = IR(irp->op1);
-    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
-	irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
-      const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
-      IRRef args[2];
-      args[0] = irpp->op1;
-      args[1] = irp->op2;
-      asm_setupresult(as, ir, ci);
-      asm_gencall(as, ci, args);
-      return 1;
-    }
-  }
-  return 0;
+static void asm_callround(ASMState *as, IRIns *ir, int id)
+{
+  /* The modified regs must match with the *.dasc implementation. */
+  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
+		RID2RSET(RID_R3)|RID2RSET(RID_R12);
+  RegSet of;
+  Reg dest, src;
+  ra_evictset(as, drop);
+  dest = ra_dest(as, ir, RSET_FPR);
+  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
+  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
+		id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
+				   (void *)lj_vm_trunc_sf);
+  /* Workaround to protect argument GPRs from being used for remat. */
+  of = as->freeset;
+  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
+  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
+  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
+  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
+  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+  if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+    return;
+  if (ir->op2 <= IRFPM_TRUNC)
+    asm_callround(as, ir, ir->op2);
+  else if (ir->op2 == IRFPM_SQRT)
+    asm_fpunary(as, ir, ARMI_VSQRT_D);
+  else
+    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
 }
+#else
+#define asm_fpmath(as, ir)	lua_assert(0)
 #endif
 
 static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
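asm_callround moves up to join the other FP helpers and is now reached through the new asm_fpmath instead of a switch case (asm_fpjoin_pow, which it still calls, presumably lives on in the shared lj_asm.c). It bypasses asm_gencall because lj_vm_floor_sf/lj_vm_ceil_sf/lj_vm_trunc_sf use a private calling convention: argument in R0/R1, result in RETLO/RETHI, with a clobber set that must stay in sync with vm_arm.dasc. The freeset juggling exists because ra_alloc1() may rematerialize the FPR through a GPR; temporarily hiding R0/R1 from the allocator protects the live argument pair. Keep in mind that LuaJIT's assembler emits machine code backwards, so the emission order above reads in reverse:

    /* Machine-code order of what asm_callround emits (reverse of the
    ** source order above):
    **   vmov  r0, r1, d<src>    ; marshal argument   (emitted last)
    **   bl    lj_vm_floor_sf    ; private-ABI call   (or ceil/trunc)
    **   vmov  d<dest>, r0, r1   ; fetch result       (emitted first)
    */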
@@ -1459,32 +1422,6 @@ static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
   asm_intop(as, ir, ai);
 }
 
-static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
-{
-  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
-    uint32_t cc = (as->mcp[1] >> 28);
-    as->flagmcp = NULL;
-    if (cc <= CC_NE) {
-      as->mcp++;
-      ai |= ARMI_S;
-    } else if (cc == CC_GE) {
-      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
-      ai |= ARMI_S;
-    } else if (cc == CC_LT) {
-      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
-      ai |= ARMI_S;
-    }  /* else: other conds don't work with bit ops. */
-  }
-  if (ir->op2 == 0) {
-    Reg dest = ra_dest(as, ir, RSET_GPR);
-    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
-    emit_d(as, ai^m, dest);
-  } else {
-    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
-    asm_intop(as, ir, ai);
-  }
-}
-
 static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1550,6 +1487,26 @@ static void asm_mul(ASMState *as, IRIns *ir)
   asm_intmul(as, ir);
 }
 
+#define asm_addov(as, ir)	asm_add(as, ir)
+#define asm_subov(as, ir)	asm_sub(as, ir)
+#define asm_mulov(as, ir)	asm_mul(as, ir)
+
+#if LJ_SOFTFP
+#define asm_div(as, ir)		lua_assert(0)
+#define asm_pow(as, ir)		lua_assert(0)
+#define asm_abs(as, ir)		lua_assert(0)
+#define asm_atan2(as, ir)	lua_assert(0)
+#define asm_ldexp(as, ir)	lua_assert(0)
+#else
+#define asm_div(as, ir)		asm_fparith(as, ir, ARMI_VDIV_D)
+#define asm_pow(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_powi)
+#define asm_abs(as, ir)		asm_fpunary(as, ir, ARMI_VABS_D)
+#define asm_atan2(as, ir)	asm_callid(as, ir, IRCALL_atan2)
+#define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
+#endif
+
+#define asm_mod(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_modi)
+
 static void asm_neg(ASMState *as, IRIns *ir)
 {
 #if !LJ_SOFTFP
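These twenty lines replace a block of switch cases (visible in the deletion at the end of this diff): each IR opcode now maps to an asm_<op> name the shared dispatcher can invoke uniformly, with FP-only ops stubbed out on soft-float builds. The overflow-checking variants reuse the plain handlers, which emit the flag-setting instruction when the IR carries an overflow guard. How a hard-float ARM build resolves a few of these (illustrative):

    asm_div(as, ir);    /* => asm_fparith(as, ir, ARMI_VDIV_D)           */
    asm_mod(as, ir);    /* => asm_callid(as, ir, IRCALL_lj_vm_modi)      */
    asm_addov(as, ir);  /* => asm_add(as, ir); the guard selects ADDS.   */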
@@ -1561,41 +1518,35 @@ static void asm_neg(ASMState *as, IRIns *ir)
   asm_intneg(as, ir, ARMI_RSB);
 }
 
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
-  const CCallInfo *ci = &lj_ir_callinfo[id];
-  IRRef args[2];
-  args[0] = ir->op1;
-  args[1] = ir->op2;
-  asm_setupresult(as, ir, ci);
-  asm_gencall(as, ci, args);
+static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
+    uint32_t cc = (as->mcp[1] >> 28);
+    as->flagmcp = NULL;
+    if (cc <= CC_NE) {
+      as->mcp++;
+      ai |= ARMI_S;
+    } else if (cc == CC_GE) {
+      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
+      ai |= ARMI_S;
+    } else if (cc == CC_LT) {
+      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
+      ai |= ARMI_S;
+    }  /* else: other conds don't work with bit ops. */
+  }
+  if (ir->op2 == 0) {
+    Reg dest = ra_dest(as, ir, RSET_GPR);
+    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+    emit_d(as, ai^m, dest);
+  } else {
+    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
+    asm_intop(as, ir, ai);
+  }
 }
 
-#if !LJ_SOFTFP
-static void asm_callround(ASMState *as, IRIns *ir, int id)
-{
-  /* The modified regs must match with the *.dasc implementation. */
-  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
-		RID2RSET(RID_R3)|RID2RSET(RID_R12);
-  RegSet of;
-  Reg dest, src;
-  ra_evictset(as, drop);
-  dest = ra_dest(as, ir, RSET_FPR);
-  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
-  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
-		id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
-				   (void *)lj_vm_trunc_sf);
-  /* Workaround to protect argument GPRs from being used for remat. */
-  of = as->freeset;
-  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
-  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
-  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
-  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
-  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
-}
-#endif
+#define asm_bnot(as, ir)	asm_bitop(as, ir, ARMI_MVN)
 
-static void asm_bitswap(ASMState *as, IRIns *ir)
+static void asm_bswap(ASMState *as, IRIns *ir)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
   Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
@@ -1612,6 +1563,10 @@ static void asm_bitswap(ASMState *as, IRIns *ir)
   }
 }
 
+#define asm_band(as, ir)	asm_bitop(as, ir, ARMI_AND)
+#define asm_bor(as, ir)		asm_bitop(as, ir, ARMI_ORR)
+#define asm_bxor(as, ir)	asm_bitop(as, ir, ARMI_EOR)
+
 static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
 {
   if (irref_isk(ir->op2)) {  /* Constant shifts. */
@@ -1629,6 +1584,12 @@ static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
   }
 }
 
+#define asm_bshl(as, ir)	asm_bitshift(as, ir, ARMSH_LSL)
+#define asm_bshr(as, ir)	asm_bitshift(as, ir, ARMSH_LSR)
+#define asm_bsar(as, ir)	asm_bitshift(as, ir, ARMSH_ASR)
+#define asm_bror(as, ir)	asm_bitshift(as, ir, ARMSH_ROR)
+#define asm_brol(as, ir)	lua_assert(0)
+
 static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
 {
   uint32_t kcmp = 0, kmov = 0;
@@ -1702,6 +1663,9 @@ static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
   asm_intmin_max(as, ir, cc);
 }
 
+#define asm_min(as, ir)		asm_min_max(as, ir, CC_GT, CC_HI)
+#define asm_max(as, ir)		asm_min_max(as, ir, CC_LT, CC_LO)
+
 /* -- Comparisons --------------------------------------------------------- */
 
 /* Map of comparisons to flags. ORDER IR. */
@@ -1817,6 +1781,18 @@ notst:
   as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
 }
 
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+  if (irt_isnum(ir->t))
+    asm_fpcomp(as, ir);
+  else
+#endif
+    asm_intcomp(as, ir);
+}
+
+#define asm_equal(as, ir)	asm_comp(as, ir)
+
 #if LJ_HASFFI
 /* 64 bit integer comparisons. */
 static void asm_int64comp(ASMState *as, IRIns *ir)
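asm_comp collapses the nine comparison cases of the old switch (IR_LT through IR_ABC, plus IR_EQ/IR_NE via the asm_equal alias) into one entry point: a hard-float build tests the IR type and routes numbers to the VFP compare, everything else to the integer compare. The HREF+EQ/NE fusion that the deleted switch performed inline presumably moves into the shared dispatcher along with the rest of the dispatch logic:

    asm_equal(as, ir);  /* EQ/NE: same routing, via the macro above.     */
    asm_comp(as, ir);   /* LT/GE/LE/GT, ULT/UGE/ULE/UGT, ABC: FP or int. */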
@@ -1891,7 +1867,7 @@ static void asm_hiop(ASMState *as, IRIns *ir)
 #endif
   } else if ((ir-1)->o == IR_XSTORE) {
     if ((ir-1)->r != RID_SINK)
-      asm_xstore(as, ir, 4);
+      asm_xstore_(as, ir, 4);
     return;
   }
   if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
@@ -2160,143 +2136,13 @@ static void asm_tail_prep(ASMState *as)
   *p = 0;  /* Prevent load/store merging. */
 }
 
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
-  switch ((IROp)ir->o) {
-  /* Miscellaneous ops. */
-  case IR_LOOP: asm_loop(as); break;
-  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
-  case IR_USE:
-    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
-  case IR_PHI: asm_phi(as, ir); break;
-  case IR_HIOP: asm_hiop(as, ir); break;
-  case IR_GCSTEP: asm_gcstep(as, ir); break;
-
-  /* Guarded assertions. */
-  case IR_EQ: case IR_NE:
-    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
-      as->curins--;
-      asm_href(as, ir-1, (IROp)ir->o);
-      break;
-    }
-    /* fallthrough */
-  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
-  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
-  case IR_ABC:
-#if !LJ_SOFTFP
-    if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
-#endif
-    asm_intcomp(as, ir);
-    break;
-
-  case IR_RETF: asm_retf(as, ir); break;
-
-  /* Bit ops. */
-  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
-  case IR_BSWAP: asm_bitswap(as, ir); break;
-
-  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
-  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
-  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
-
-  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
-  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
-  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
-  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
-  case IR_BROL: lua_assert(0); break;
-
-  /* Arithmetic ops. */
-  case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
-  case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
-  case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
-  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
-  case IR_NEG: asm_neg(as, ir); break;
-
-#if LJ_SOFTFP
-  case IR_DIV: case IR_POW: case IR_ABS:
-  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
-    lua_assert(0);  /* Unused for LJ_SOFTFP. */
-    break;
-#else
-  case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
-  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
-  case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
-  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
-  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
-  case IR_FPMATH:
-    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
-      break;
-    if (ir->op2 <= IRFPM_TRUNC)
-      asm_callround(as, ir, ir->op2);
-    else if (ir->op2 == IRFPM_SQRT)
-      asm_fpunary(as, ir, ARMI_VSQRT_D);
-    else
-      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
-    break;
-  case IR_TOBIT: asm_tobit(as, ir); break;
-#endif
-
-  case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
-  case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
-
-  /* Memory references. */
-  case IR_AREF: asm_aref(as, ir); break;
-  case IR_HREF: asm_href(as, ir, 0); break;
-  case IR_HREFK: asm_hrefk(as, ir); break;
-  case IR_NEWREF: asm_newref(as, ir); break;
-  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
-  case IR_FREF: asm_fref(as, ir); break;
-  case IR_STRREF: asm_strref(as, ir); break;
-
-  /* Loads and stores. */
-  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
-    asm_ahuvload(as, ir);
-    break;
-  case IR_FLOAD: asm_fload(as, ir); break;
-  case IR_XLOAD: asm_xload(as, ir); break;
-  case IR_SLOAD: asm_sload(as, ir); break;
-
-  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
-  case IR_FSTORE: asm_fstore(as, ir); break;
-  case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
-  /* Allocations. */
-  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
-  case IR_TNEW: asm_tnew(as, ir); break;
-  case IR_TDUP: asm_tdup(as, ir); break;
-  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
-  /* Write barriers. */
-  case IR_TBAR: asm_tbar(as, ir); break;
-  case IR_OBAR: asm_obar(as, ir); break;
-
-  /* Type conversions. */
-  case IR_CONV: asm_conv(as, ir); break;
-  case IR_TOSTR: asm_tostr(as, ir); break;
-  case IR_STRTO: asm_strto(as, ir); break;
-
-  /* Calls. */
-  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
-  case IR_CALLXS: asm_callx(as, ir); break;
-  case IR_CARG: break;
-
-  default:
-    setintV(&as->J->errinfo, ir->o);
-    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
-    break;
-  }
-}
-
 /* -- Trace setup --------------------------------------------------------- */
 
 /* Ensure there are enough stack slots for call arguments. */
 static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
 {
   IRRef args[CCI_NARGS_MAX*2];
-  uint32_t i, nargs = (int)CCI_NARGS(ci);
+  uint32_t i, nargs = CCI_XNARGS(ci);
   int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
   asm_collectargs(as, ir, ci, args);
   for (i = 0; i < nargs; i++) {
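The final hunk removes the last arch-specific copy of the instruction dispatcher: this ~130-line switch was structurally identical in every lj_asm_*.h, and after this commit a single dispatcher in lj_asm.c presumably routes each opcode to the asm_<op> functions and macros this file now defines. The CCI_NARGS -> CCI_XNARGS change in asm_setup_call_slots points at a shared macro as well, presumably a platform-adjusted argument count defined alongside the common call handling in lj_asm.c. A sketch of the dispatcher shape this file now serves (the asm_<op> names are real; the wiring is assumed):

    /* Hypothetical shared dispatcher in lj_asm.c; every asm_<op> it names
    ** is one of the functions or macros defined in this header. */
    static void asm_ir(ASMState *as, IRIns *ir)
    {
      switch ((IROp)ir->o) {
      case IR_BNOT: asm_bnot(as, ir); break;    /* => asm_bitop(.., ARMI_MVN) */
      case IR_MIN: asm_min(as, ir); break;      /* => asm_min_max(.., CC_GT, CC_HI) */
      case IR_TOBIT: asm_tobit(as, ir); break;  /* Stubbed on soft-float. */
      /* ... one case per IROp, identical on every architecture ... */
      default: lua_assert(0); break;
      }
    }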