author | Mike Pall <mike> | 2013-04-22 22:32:41 +0200
committer | Mike Pall <mike> | 2013-04-22 22:32:41 +0200
commit | a2c78810ca0162c06b3ae02b52d6b4c04a8d5be3 (patch)
tree | d82fe00c6ca8ff6a2bfce89176e0d97b3095be38 /src/lj_asm_arm.h
parent | 2ab5e7c5dce9e8bd19b7f4c9d7a90ef30af53d0a (diff)
download | luajit-a2c78810ca0162c06b3ae02b52d6b4c04a8d5be3.tar.gz luajit-a2c78810ca0162c06b3ae02b52d6b4c04a8d5be3.tar.bz2 luajit-a2c78810ca0162c06b3ae02b52d6b4c04a8d5be3.zip
Combine IR instruction dispatch for all assembler backends.
Diffstat (limited to 'src/lj_asm_arm.h')
-rw-r--r-- | src/lj_asm_arm.h | 298
1 file changed, 113 insertions(+), 185 deletions(-)
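The per-backend asm_ir() switch below disappears; in its place each backend defines a uniform asm_<op>() entry (function or macro) per IR opcode, and a single dispatcher shared by all backends invokes them. That shared dispatcher lives in lj_asm.c, which is outside this file's diff; the following is only a minimal sketch of the pattern, not the commit's exact lj_asm.c code:

  /* Sketch: one shared switch maps each IROp to the backend's handler. */
  static void asm_ir(ASMState *as, IRIns *ir)
  {
    switch ((IROp)ir->o) {
    case IR_BAND: asm_band(as, ir); break;  /* ARM: asm_bitop(as, ir, ARMI_AND) */
    case IR_BSHL: asm_bshl(as, ir); break;  /* ARM: asm_bitshift(as, ir, ARMSH_LSL) */
    case IR_MIN:  asm_min(as, ir); break;   /* ARM: asm_min_max(as, ir, CC_GT, CC_HI) */
    /* ... one case per opcode, identical for every backend ... */
    default: lua_assert(0); break;
    }
  }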
diff --git a/src/lj_asm_arm.h b/src/lj_asm_arm.h
index 25a28bd7..039a2a9a 100644
--- a/src/lj_asm_arm.h
+++ b/src/lj_asm_arm.h
@@ -519,6 +519,8 @@ static void asm_tobit(ASMState *as, IRIns *ir)
   emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
   emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
 }
+#else
+#define asm_tobit(as, ir)  lua_assert(0)
 #endif

 static void asm_conv(ASMState *as, IRIns *ir)
@@ -1038,7 +1040,7 @@ static void asm_xload(ASMState *as, IRIns *ir)
   asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
 }

-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
+static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
 {
   if (ir->r != RID_SINK) {
     Reg src = ra_alloc1(as, ir->op2,
@@ -1048,6 +1050,8 @@ static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
   }
 }

+#define asm_xstore(as, ir)  asm_xstore_(as, ir, 0)
+
 static void asm_ahuvload(ASMState *as, IRIns *ir)
 {
   int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
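The underscore suffix keeps both entry points alive: the shared dispatcher expects the two-argument signature asm_xstore(as, ir), which the macro satisfies with ofs = 0, while asm_hiop() further down still calls the three-argument helper directly to store the high word of a split 64 bit value:

  asm_xstore_(as, ir, 4);  /* From asm_hiop(): hiword of a 64 bit XSTORE. */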
@@ -1324,6 +1328,42 @@ static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
   Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
   emit_dm(as, ai, (dest & 15), (left & 15));
 }
+
+static void asm_callround(ASMState *as, IRIns *ir, int id)
+{
+  /* The modified regs must match with the *.dasc implementation. */
+  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
+                RID2RSET(RID_R3)|RID2RSET(RID_R12);
+  RegSet of;
+  Reg dest, src;
+  ra_evictset(as, drop);
+  dest = ra_dest(as, ir, RSET_FPR);
+  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
+  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
+                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
+                                   (void *)lj_vm_trunc_sf);
+  /* Workaround to protect argument GPRs from being used for remat. */
+  of = as->freeset;
+  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
+  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
+  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
+  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
+  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+  if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+    return;
+  if (ir->op2 <= IRFPM_TRUNC)
+    asm_callround(as, ir, ir->op2);
+  else if (ir->op2 == IRFPM_SQRT)
+    asm_fpunary(as, ir, ARMI_VSQRT_D);
+  else
+    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+}
+#else
+#define asm_fpmath(as, ir)  lua_assert(0)
 #endif

 static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
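Note that trace machine code is assembled backwards, so the runtime order of asm_callround()'s emissions is the reverse of the source order. Roughly, as a sketch of the emitted ARM sequence rather than a verbatim disassembly:

  vmov r0, r1, d<src>    @ load the FP argument into the R0/R1 pair
  bl   lj_vm_floor_sf    @ or lj_vm_ceil_sf / lj_vm_trunc_sf
  vmov d<dest>, r0, r1   @ collect the soft-float result pair from R0/R1

The freeset/cost juggling in between keeps ra_alloc1() from handing R0 or R1 to a rematerialization while those registers are reserved for the call argument.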
@@ -1373,32 +1413,6 @@ static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
   asm_intop(as, ir, ai);
 }

-static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
-{
-  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
-    uint32_t cc = (as->mcp[1] >> 28);
-    as->flagmcp = NULL;
-    if (cc <= CC_NE) {
-      as->mcp++;
-      ai |= ARMI_S;
-    } else if (cc == CC_GE) {
-      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
-      ai |= ARMI_S;
-    } else if (cc == CC_LT) {
-      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
-      ai |= ARMI_S;
-    }  /* else: other conds don't work with bit ops. */
-  }
-  if (ir->op2 == 0) {
-    Reg dest = ra_dest(as, ir, RSET_GPR);
-    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
-    emit_d(as, ai^m, dest);
-  } else {
-    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
-    asm_intop(as, ir, ai);
-  }
-}
-
 static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1464,6 +1478,26 @@ static void asm_mul(ASMState *as, IRIns *ir)
   asm_intmul(as, ir);
 }

+#define asm_addov(as, ir)  asm_add(as, ir)
+#define asm_subov(as, ir)  asm_sub(as, ir)
+#define asm_mulov(as, ir)  asm_mul(as, ir)
+
+#if LJ_SOFTFP
+#define asm_div(as, ir)    lua_assert(0)
+#define asm_pow(as, ir)    lua_assert(0)
+#define asm_abs(as, ir)    lua_assert(0)
+#define asm_atan2(as, ir)  lua_assert(0)
+#define asm_ldexp(as, ir)  lua_assert(0)
+#else
+#define asm_div(as, ir)    asm_fparith(as, ir, ARMI_VDIV_D)
+#define asm_pow(as, ir)    asm_callid(as, ir, IRCALL_lj_vm_powi)
+#define asm_abs(as, ir)    asm_fpunary(as, ir, ARMI_VABS_D)
+#define asm_atan2(as, ir)  asm_callid(as, ir, IRCALL_atan2)
+#define asm_ldexp(as, ir)  asm_callid(as, ir, IRCALL_ldexp)
+#endif
+
+#define asm_mod(as, ir)    asm_callid(as, ir, IRCALL_lj_vm_modi)
+
 static void asm_neg(ASMState *as, IRIns *ir)
 {
 #if !LJ_SOFTFP
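Under LJ_SOFTFP these FP ops never reach the backend — the removed asm_ir() switch below asserted the same with a combined case, "Unused for LJ_SOFTFP" — so each handler reduces to an assertion. For instance, in a soft-float build a dispatcher call like

  asm_div(as, ir);  /* => lua_assert(0): IR_DIV must not reach the ARM backend. */

expands to the stub, while a hard-float build gets asm_fparith(as, ir, ARMI_VDIV_D).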
@@ -1475,31 +1509,35 @@ static void asm_neg(ASMState *as, IRIns *ir)
   asm_intneg(as, ir, ARMI_RSB);
 }

-#if !LJ_SOFTFP
-static void asm_callround(ASMState *as, IRIns *ir, int id)
+static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
 {
-  /* The modified regs must match with the *.dasc implementation. */
-  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
-                RID2RSET(RID_R3)|RID2RSET(RID_R12);
-  RegSet of;
-  Reg dest, src;
-  ra_evictset(as, drop);
-  dest = ra_dest(as, ir, RSET_FPR);
-  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
-  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
-                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
-                                   (void *)lj_vm_trunc_sf);
-  /* Workaround to protect argument GPRs from being used for remat. */
-  of = as->freeset;
-  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
-  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
-  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
-  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
-  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
+  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
+    uint32_t cc = (as->mcp[1] >> 28);
+    as->flagmcp = NULL;
+    if (cc <= CC_NE) {
+      as->mcp++;
+      ai |= ARMI_S;
+    } else if (cc == CC_GE) {
+      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
+      ai |= ARMI_S;
+    } else if (cc == CC_LT) {
+      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
+      ai |= ARMI_S;
+    }  /* else: other conds don't work with bit ops. */
+  }
+  if (ir->op2 == 0) {
+    Reg dest = ra_dest(as, ir, RSET_GPR);
+    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+    emit_d(as, ai^m, dest);
+  } else {
+    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
+    asm_intop(as, ir, ai);
+  }
 }
-#endif

-static void asm_bitswap(ASMState *as, IRIns *ir)
+#define asm_bnot(as, ir)  asm_bitop(as, ir, ARMI_MVN)
+
+static void asm_bswap(ASMState *as, IRIns *ir)
 {
   Reg dest = ra_dest(as, ir, RSET_GPR);
   Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
@@ -1516,6 +1554,10 @@ static void asm_bswap(ASMState *as, IRIns *ir)
   }
 }

+#define asm_band(as, ir)  asm_bitop(as, ir, ARMI_AND)
+#define asm_bor(as, ir)   asm_bitop(as, ir, ARMI_ORR)
+#define asm_bxor(as, ir)  asm_bitop(as, ir, ARMI_EOR)
+
 static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
 {
   if (irref_isk(ir->op2)) {  /* Constant shifts. */
@@ -1533,6 +1575,12 @@ static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
   }
 }

+#define asm_bshl(as, ir)  asm_bitshift(as, ir, ARMSH_LSL)
+#define asm_bshr(as, ir)  asm_bitshift(as, ir, ARMSH_LSR)
+#define asm_bsar(as, ir)  asm_bitshift(as, ir, ARMSH_ASR)
+#define asm_bror(as, ir)  asm_bitshift(as, ir, ARMSH_ROR)
+#define asm_brol(as, ir)  lua_assert(0)
+
 static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
 {
   uint32_t kcmp = 0, kmov = 0;
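asm_brol() asserts because ARM has no rotate-left instruction; the assertion documents that IR_BROL is expected never to reach this backend, a rotate-left having been rewritten as the complementary rotate-right earlier. The underlying identity, as a plain C sketch (uint32_t from <stdint.h>):

  /* rol(x, n) == ror(x, (32 - n) & 31) for 32 bit x. */
  uint32_t rol32(uint32_t x, uint32_t n)
  {
    return (x << (n & 31)) | (x >> ((32 - n) & 31));
  }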
@@ -1606,6 +1654,9 @@ static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
   asm_intmin_max(as, ir, cc);
 }

+#define asm_min(as, ir)  asm_min_max(as, ir, CC_GT, CC_HI)
+#define asm_max(as, ir)  asm_min_max(as, ir, CC_LT, CC_LO)
+
 /* -- Comparisons --------------------------------------------------------- */

 /* Map of comparisons to flags. ORDER IR. */
@@ -1721,6 +1772,18 @@ notst:
   as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
 }

+static void asm_comp(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+  if (irt_isnum(ir->t))
+    asm_fpcomp(as, ir);
+  else
+#endif
+    asm_intcomp(as, ir);
+}
+
+#define asm_equal(as, ir)  asm_comp(as, ir)
+
 #if LJ_HASFFI
 /* 64 bit integer comparisons. */
 static void asm_int64comp(ASMState *as, IRIns *ir)
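asm_comp() absorbs the comparison case ladder that the removed asm_ir() switch below carried, and in soft-float builds the #if compiles the FP branch away so everything falls through to asm_intcomp(). A comment-style sketch of the presumed wiring in the shared dispatcher:

  /* IR_LT..IR_UGT, IR_ABC -> asm_comp(as, ir)   (FP or integer compare)  */
  /* IR_EQ, IR_NE          -> asm_equal(as, ir), an alias for asm_comp()  */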
@@ -1795,7 +1858,7 @@ static void asm_hiop(ASMState *as, IRIns *ir)
 #endif
   } else if ((ir-1)->o == IR_XSTORE) {
     if ((ir-1)->r != RID_SINK)
-      asm_xstore(as, ir, 4);
+      asm_xstore_(as, ir, 4);
     return;
   }
   if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
@@ -2064,141 +2127,6 @@ static void asm_tail_prep(ASMState *as)
   *p = 0;  /* Prevent load/store merging. */
 }

-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
-  switch ((IROp)ir->o) {
-  /* Miscellaneous ops. */
-  case IR_LOOP: asm_loop(as); break;
-  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
-  case IR_USE:
-    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
-  case IR_PHI: asm_phi(as, ir); break;
-  case IR_HIOP: asm_hiop(as, ir); break;
-  case IR_GCSTEP: asm_gcstep(as, ir); break;
-
-  /* Guarded assertions. */
-  case IR_EQ: case IR_NE:
-    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
-      as->curins--;
-      asm_href(as, ir-1, (IROp)ir->o);
-      break;
-    }
-    /* fallthrough */
-  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
-  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
-  case IR_ABC:
-#if !LJ_SOFTFP
-    if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
-#endif
-    asm_intcomp(as, ir);
-    break;
-
-  case IR_RETF: asm_retf(as, ir); break;
-
-  /* Bit ops. */
-  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
-  case IR_BSWAP: asm_bitswap(as, ir); break;
-
-  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
-  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
-  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
-
-  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
-  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
-  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
-  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
-  case IR_BROL: lua_assert(0); break;
-
-  /* Arithmetic ops. */
-  case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
-  case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
-  case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
-  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
-  case IR_NEG: asm_neg(as, ir); break;
-
-#if LJ_SOFTFP
-  case IR_DIV: case IR_POW: case IR_ABS:
-  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
-    lua_assert(0);  /* Unused for LJ_SOFTFP. */
-    break;
-#else
-  case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
-  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
-  case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
-  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
-  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
-  case IR_FPMATH:
-    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
-      break;
-    if (ir->op2 <= IRFPM_TRUNC)
-      asm_callround(as, ir, ir->op2);
-    else if (ir->op2 == IRFPM_SQRT)
-      asm_fpunary(as, ir, ARMI_VSQRT_D);
-    else
-      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
-    break;
-  case IR_TOBIT: asm_tobit(as, ir); break;
-#endif
-
-  case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
-  case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
-
-  /* Memory references. */
-  case IR_AREF: asm_aref(as, ir); break;
-  case IR_HREF: asm_href(as, ir, 0); break;
-  case IR_HREFK: asm_hrefk(as, ir); break;
-  case IR_NEWREF: asm_newref(as, ir); break;
-  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
-  case IR_FREF: asm_fref(as, ir); break;
-  case IR_STRREF: asm_strref(as, ir); break;
-
-  /* Loads and stores. */
-  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
-    asm_ahuvload(as, ir);
-    break;
-  case IR_FLOAD: asm_fload(as, ir); break;
-  case IR_XLOAD: asm_xload(as, ir); break;
-  case IR_SLOAD: asm_sload(as, ir); break;
-
-  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
-  case IR_FSTORE: asm_fstore(as, ir); break;
-  case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
-  /* Allocations. */
-  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
-  case IR_TNEW: asm_tnew(as, ir); break;
-  case IR_TDUP: asm_tdup(as, ir); break;
-  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
-  /* Buffer operations. */
-  case IR_BUFHDR: asm_bufhdr(as, ir); break;
-  case IR_BUFPUT: asm_bufput(as, ir); break;
-  case IR_BUFSTR: asm_bufstr(as, ir); break;
-
-  /* Write barriers. */
-  case IR_TBAR: asm_tbar(as, ir); break;
-  case IR_OBAR: asm_obar(as, ir); break;
-
-  /* Type conversions. */
-  case IR_CONV: asm_conv(as, ir); break;
-  case IR_TOSTR: asm_tostr(as, ir); break;
-  case IR_STRTO: asm_strto(as, ir); break;
-
-  /* Calls. */
-  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
-  case IR_CALLXS: asm_callx(as, ir); break;
-  case IR_CARG: break;
-
-  default:
-    setintV(&as->J->errinfo, ir->o);
-    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
-    break;
-  }
-}
-
 /* -- Trace setup --------------------------------------------------------- */

 /* Ensure there are enough stack slots for call arguments. */