author     Mike Pall <mike>    2011-05-12 01:35:09 +0200
committer  Mike Pall <mike>    2011-05-12 01:35:09 +0200
commit     58f38c254bcdcf1f874f2e052751f568c2866133 (patch)
tree       e3e7d951b8b77c934da407d44568299d9689b72a /src
parent     800349387a25b73e1c98f4dcf39a65b7559894fe (diff)
Move x86/x64 parts of JIT assembler backend to extra files.
Diffstat (limited to 'src')

 -rw-r--r--  src/Makefile      |    4
 -rw-r--r--  src/Makefile.dep  |    9
 -rw-r--r--  src/lj_asm.c      | 3161
 -rw-r--r--  src/lj_asm_x86.h  | 2668
 -rw-r--r--  src/lj_emit_x86.h |  456

5 files changed, 3173 insertions(+), 3125 deletions(-)
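The first lj_asm.c hunk below replaces the inline x86/x64 emitter with a target-dispatch include. A minimal sketch of the structure lj_asm.c adopts (only the lj_emit_x86.h include is visible in the hunks shown here; pulling in lj_asm_x86.h the same way is an assumption based on the new file names in the diffstat):

```c
/* Sketch of the per-target dispatch pattern introduced by this commit.
** The lj_emit_x86.h include is taken verbatim from the first hunk below;
** an analogous include of lj_asm_x86.h is assumed from the diffstat.
*/
#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"  /* Instruction emitter for x86/x64. */
#else
#error "Missing instruction emitter for target CPU"
#endif
```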
diff --git a/src/Makefile b/src/Makefile
index 6ac05c49..03e45d2d 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -482,7 +482,9 @@ depend:
 	@$(HOST_CC) $(HOST_ACFLAGS) -MM *.c | \
 	  sed -e "s| [^ ]*/dasm_\S*\.h||g" \
 	      -e "s| buildvm_\S*\.h||g" \
-	      -e "s| lj_target_\S*\.h| lj_target_*.h|g" >Makefile.dep
+	      -e "s| lj_target_\S*\.h| lj_target_*.h|g" \
+	      -e "s| lj_emit_\S*\.h| lj_emit_*.h|g" \
+	      -e "s| lj_asm_\S*\.h| lj_asm_*.h|g" >Makefile.dep
 	@for file in $(ALL_HDRGEN) $(ALL_DYNGEN); do \
 	  test -s $$file || $(HOST_RM) $$file; \
 	done
diff --git a/src/Makefile.dep b/src/Makefile.dep
index 4f5317b9..05ef5748 100644
--- a/src/Makefile.dep
+++ b/src/Makefile.dep
@@ -53,7 +53,7 @@ lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
 lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
  lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \
  lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h \
- lj_asm.h lj_vm.h lj_target.h lj_target_*.h
+ lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h lj_asm_*.h
 lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
  lj_bcdef.h
 lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
@@ -177,7 +177,8 @@ ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \
 lj_opt_loop.c lj_snap.h lj_opt_split.c lj_mcode.c lj_mcode.h lj_snap.c \
 lj_target.h lj_target_*.h lj_record.c lj_record.h lj_ffrecord.h \
 lj_crecord.c lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h \
-lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c lib_base.c \
-lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c lib_os.c \
-lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c lib_init.c
+lj_emit_*.h lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c \
+lib_aux.c lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c \
+lib_io.c lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c \
+lib_ffi.c lib_init.c
 luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h
diff --git a/src/lj_asm.c b/src/lj_asm.c
index be6c359e..87c09bb9 100644
--- a/src/lj_asm.c
+++ b/src/lj_asm.c
@@ -130,457 +130,14 @@ IRFLDEF(FLOFS)
 #define VG_INVALIDATE(p, sz)	((void)0)
 #endif
 
-/* -- Emit basic instructions --------------------------------------------- */
+/* -- Target-specific instruction emitter --------------------------------- */
 
-#define MODRM(mode, r1, r2)	((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
-
-#if LJ_64
-#define REXRB(p, rr, rb) \
-    { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
-      if (rex != 0x40) *--(p) = rex; }
-#define FORCE_REX		0x200
-#define REX_64			(FORCE_REX|0x080000)
-#else
-#define REXRB(p, rr, rb)	((void)0)
-#define FORCE_REX		0
-#define REX_64			0
-#endif
-
-#define emit_i8(as, i)		(*--as->mcp = (MCode)(i))
-#define emit_i32(as, i)		(*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
-#define emit_u32(as, u)		(*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
-
-#define emit_x87op(as, xo) \
-  (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
-
-/* op */
-static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
-				 MCode *p, int delta)
-{
-  int n = (int8_t)xo;
-#if defined(__GNUC__)
-  if (__builtin_constant_p(xo) && n == -2)
-    p[delta-2] = (MCode)(xo >> 24);
-  else if (__builtin_constant_p(xo) && n == -3)
-    *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
-  else
-#endif
-    *(uint32_t *)(p+delta-5) = (uint32_t)xo;
-  p += n + delta;
-#if LJ_64
-  {
-    uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
-    if (rex != 0x40) {
-      rex |= (rr >> 16);
-      if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
-      else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
-      *--p = (MCode)rex;
-    }
-  }
-#else
-  UNUSED(rr); UNUSED(rb); UNUSED(rx);
-#endif
-  return p;
-}
-
-/* op + modrm */
-#define emit_opm(xo, mode, rr, rb, p, delta) \
-  (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
-   emit_op((xo), (rr), (rb), 0, (p), (delta)))
-
-/* op + modrm + sib */
-#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
-  (p[-1] = MODRM((scale), (rx), (rb)), \
-   p[-2] = MODRM((mode), (rr), RID_ESP), \
-   emit_op((xo), (rr), (rb), (rx), (p), -1))
-
-/* op r1, r2 */
-static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
-{
-  MCode *p = as->mcp;
-  as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
-}
-
-#if LJ_64 && defined(LUA_USE_ASSERT)
-/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
-static int32_t ptr2addr(const void *p)
-{
-  lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
-  return i32ptr(p);
-}
-#else
-#define ptr2addr(p)	(i32ptr((p)))
-#endif
-
-/* op r, [addr] */
-static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
-{
-  MCode *p = as->mcp;
-  *(int32_t *)(p-4) = ptr2addr(addr);
-#if LJ_64
-  p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
-  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
-#else
-  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
-#endif
-}
-
-/* op r, [base+ofs] */
-static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
-{
-  MCode *p = as->mcp;
-  x86Mode mode;
-  if (ra_hasreg(rb)) {
-    if (ofs == 0 && (rb&7) != RID_EBP) {
-      mode = XM_OFS0;
-    } else if (checki8(ofs)) {
-      *--p = (MCode)ofs;
-      mode = XM_OFS8;
-    } else {
-      p -= 4;
-      *(int32_t *)p = ofs;
-      mode = XM_OFS32;
-    }
-    if ((rb&7) == RID_ESP)
-      *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
-  } else {
-    *(int32_t *)(p-4) = ofs;
-#if LJ_64
-    p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
-    p -= 5;
-    rb = RID_ESP;
-#else
-    p -= 4;
-    rb = RID_EBP;
-#endif
-    mode = XM_OFS0;
-  }
-  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op r, [base+idx*scale+ofs] */
-static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
-		       x86Mode scale, int32_t ofs)
-{
-  MCode *p = as->mcp;
-  x86Mode mode;
-  if (ofs == 0 && (rb&7) != RID_EBP) {
-    mode = XM_OFS0;
-  } else if (checki8(ofs)) {
-    mode = XM_OFS8;
-    *--p = (MCode)ofs;
-  } else {
-    mode = XM_OFS32;
-    p -= 4;
-    *(int32_t *)p = ofs;
-  }
-  as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
-}
-
-/* op r, i */
-static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
-  MCode *p = as->mcp;
-  x86Op xo;
-  if (checki8(i)) {
-    *--p = (MCode)i;
-    xo = XG_TOXOi8(xg);
-  } else {
-    p -= 4;
-    *(int32_t *)p = i;
-    xo = XG_TOXOi(xg);
-  }
-  as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
-}
-
-/* op [base+ofs], i */
-static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
-		       int32_t i)
-{
-  x86Op xo;
-  if (checki8(i)) {
-    emit_i8(as, i);
-    xo = XG_TOXOi8(xg);
-  } else {
-    emit_i32(as, i);
-    xo = XG_TOXOi(xg);
-  }
-  emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
-}
-
-#define emit_shifti(as, xg, r, i) \
-  (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
-
-/* op r, rm/mrm */
-static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
-{
-  MCode *p = as->mcp;
-  x86Mode mode = XM_REG;
-  if (rb == RID_MRM) {
-    rb = as->mrm.base;
-    if (rb == RID_NONE) {
-      rb = RID_EBP;
-      mode = XM_OFS0;
-      p -= 4;
-      *(int32_t *)p = as->mrm.ofs;
-      if (as->mrm.idx != RID_NONE)
-	goto mrmidx;
-#if LJ_64
-      *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
-      rb = RID_ESP;
-#endif
-    } else {
-      if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
-	mode = XM_OFS0;
-      } else if (checki8(as->mrm.ofs)) {
-	*--p = (MCode)as->mrm.ofs;
-	mode = XM_OFS8;
-      } else {
-	p -= 4;
-	*(int32_t *)p = as->mrm.ofs;
-	mode = XM_OFS32;
-      }
-      if (as->mrm.idx != RID_NONE) {
-      mrmidx:
-	as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
-	return;
-      }
-      if ((rb&7) == RID_ESP)
-	*--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
-    }
-  }
-  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op rm/mrm, i */
-static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
-  x86Op xo;
-  if (checki8(i)) {
-    emit_i8(as, i);
-    xo = XG_TOXOi8(xg);
-  } else {
-    emit_i32(as, i);
-    xo = XG_TOXOi(xg);
-  }
-  emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Instruction selection for XMM moves. */
-#define XMM_MOVRR(as)	((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
-#define XMM_MOVRM(as)	((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
-
-/* mov [base+ofs], i */
-static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
-{
-  emit_i32(as, i);
-  emit_rmro(as, XO_MOVmi, 0, base, ofs);
-}
-
-/* mov [base+ofs], r */
-#define emit_movtomro(as, r, base, ofs) \
-  emit_rmro(as, XO_MOVto, (r), (base), (ofs))
-
-/* Get/set global_State fields. */
-#define emit_opgl(as, xo, r, field) \
-  emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
-#define emit_getgl(as, r, field)	emit_opgl(as, XO_MOV, (r), field)
-#define emit_setgl(as, r, field)	emit_opgl(as, XO_MOVto, (r), field)
-#define emit_setgli(as, field, i) \
-  (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, field))
-
-/* mov r, i / xor r, r */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
-  /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
-  if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
-			    (as->curins+1 < as->T->nins &&
-			     IR(as->curins+1)->o == IR_HIOP)))) {
-    emit_rr(as, XO_ARITH(XOg_XOR), r, r);
-  } else {
-    MCode *p = as->mcp;
-    *(int32_t *)(p-4) = i;
-    p[-5] = (MCode)(XI_MOVri+(r&7));
-    p -= 5;
-    REXRB(p, 0, r);
-    as->mcp = p;
-  }
-}
-
-/* mov r, addr */
-#define emit_loada(as, r, addr) \
-  emit_loadi(as, (r), ptr2addr((addr)))
-
-#if LJ_64
-/* mov r, imm64 or shorter 32 bit extended load. */
-static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
-{
-  if (checku32(u64)) {  /* 32 bit load clears upper 32 bits. */
-    emit_loadi(as, r, (int32_t)u64);
-  } else if (checki32((int64_t)u64)) {  /* Sign-extended 32 bit load. */
-    MCode *p = as->mcp;
-    *(int32_t *)(p-4) = (int32_t)u64;
-    as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
-  } else {  /* Full-size 64 bit load. */
-    MCode *p = as->mcp;
-    *(uint64_t *)(p-8) = u64;
-    p[-9] = (MCode)(XI_MOVri+(r&7));
-    p[-10] = 0x48 + ((r>>3)&1);
-    p -= 10;
-    as->mcp = p;
-  }
-}
-#endif
-
-/* movsd r, [&tv->n] / xorps r, r */
-static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
-{
-  if (tvispzero(tv))  /* Use xor only for +0. */
-    emit_rr(as, XO_XORPS, r, r);
-  else
-    emit_rma(as, XMM_MOVRM(as), r, &tv->n);
-}
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for short jumps. */
-typedef MCode *MCLabel;
-
-#if LJ_32 && LJ_HASFFI
-/* jmp short target */
-static void emit_sjmp(ASMState *as, MCLabel target)
-{
-  MCode *p = as->mcp;
-  ptrdiff_t delta = target - p;
-  lua_assert(delta == (int8_t)delta);
-  p[-1] = (MCode)(int8_t)delta;
-  p[-2] = XI_JMPs;
-  as->mcp = p - 2;
-}
-#endif
-
-/* jcc short target */
-static void emit_sjcc(ASMState *as, int cc, MCLabel target)
-{
-  MCode *p = as->mcp;
-  ptrdiff_t delta = target - p;
-  lua_assert(delta == (int8_t)delta);
-  p[-1] = (MCode)(int8_t)delta;
-  p[-2] = (MCode)(XI_JCCs+(cc&15));
-  as->mcp = p - 2;
-}
-
-/* jcc short (pending target) */
-static MCLabel emit_sjcc_label(ASMState *as, int cc)
-{
-  MCode *p = as->mcp;
-  p[-1] = 0;
-  p[-2] = (MCode)(XI_JCCs+(cc&15));
-  as->mcp = p - 2;
-  return p;
-}
-
-/* Fixup jcc short target. */
-static void emit_sfixup(ASMState *as, MCLabel source)
-{
-  source[-1] = (MCode)(as->mcp-source);
-}
-
-/* Return label pointing to current PC. */
-#define emit_label(as)		((as)->mcp)
-
-/* Compute relative 32 bit offset for jump and call instructions. */
-static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
-{
-  ptrdiff_t delta = target - p;
-  lua_assert(delta == (int32_t)delta);
-  return (int32_t)delta;
-}
-
-/* jcc target */
-static void emit_jcc(ASMState *as, int cc, MCode *target)
-{
-  MCode *p = as->mcp;
-  *(int32_t *)(p-4) = jmprel(p, target);
-  p[-5] = (MCode)(XI_JCCn+(cc&15));
-  p[-6] = 0x0f;
-  as->mcp = p - 6;
-}
-
-/* call target */
-static void emit_call_(ASMState *as, MCode *target)
-{
-  MCode *p = as->mcp;
-#if LJ_64
-  if (target-p != (int32_t)(target-p)) {
-    /* Assumes RID_RET is never an argument to calls and always clobbered. */
-    emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
-    emit_loadu64(as, RID_RET, (uint64_t)target);
-    return;
-  }
-#endif
-  *(int32_t *)(p-4) = jmprel(p, target);
-  p[-5] = XI_CALL;
-  as->mcp = p - 5;
-}
-
-#define emit_call(as, f)	emit_call_(as, (MCode *)(void *)(f))
-
-/* -- Emit generic operations --------------------------------------------- */
-
-/* Use 64 bit operations to handle 64 bit IR types. */
-#if LJ_64
-#define REX_64IR(ir, r)		((r) + (irt_is64((ir)->t) ? REX_64 : 0))
+#if LJ_TARGET_X86ORX64
+#include "lj_emit_x86.h"
 #else
-#define REX_64IR(ir, r)		(r)
+#error "Missing instruction emitter for target CPU"
 #endif
 
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
-  UNUSED(ir);
-  if (dst < RID_MAX_GPR)
-    emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
-  else
-    emit_rr(as, XMM_MOVRR(as), dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-  if (r < RID_MAX_GPR)
-    emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
-  else
-    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-  if (r < RID_MAX_GPR)
-    emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
-  else
-    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
-  if (ofs) {
-    if ((as->flags & JIT_F_LEA_AGU))
-      emit_rmro(as, XO_LEA, r, r, ofs);
-    else
-      emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
-  }
-}
-
-#define emit_spsub(as, ofs)	emit_addptr(as, RID_ESP|REX_64, -(ofs))
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref)	((ref) <= REF_BASE)
-
 /* -- Register allocator debugging ---------------------------------------- */
 
 /* #define LUAJIT_DEBUG_RA */
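A note for reading the emitter code removed above: the assembler generates machine code backwards, treating as->mcp as a cursor that is pre-decremented for every byte, which is why helpers like emit_i8 write through `*--as->mcp`. A stand-alone sketch of that pattern, with illustrative names that are not LuaJIT's own:

```c
#include <stdint.h>

typedef uint8_t MCode;

/* Emit one byte, moving the cursor downward (the *--mcp idiom above). */
static MCode *emit_byte(MCode *mcp, MCode b)
{
  *--mcp = b;  /* The instruction stream grows toward lower addresses. */
  return mcp;
}

/* Emit a little-endian 32 bit immediate below the current cursor. */
static MCode *emit_imm32(MCode *mcp, uint32_t i)
{
  mcp -= 4;
  mcp[0] = (MCode)i;
  mcp[1] = (MCode)(i >> 8);
  mcp[2] = (MCode)(i >> 16);
  mcp[3] = (MCode)(i >> 24);
  return mcp;
}
```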
@@ -1026,52 +583,7 @@ static void ra_left(ASMState *as, Reg dest, IRRef lref)
   }
 }
 
-/* -- Exit stubs ---------------------------------------------------------- */
-
-/* Generate an exit stub group at the bottom of the reserved MCode memory. */
-static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
-{
-  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
-  MCode *mxp = as->mcbot;
-  MCode *mxpstart = mxp;
-  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
-    asm_mclimit(as);
-  /* Push low byte of exitno for each exit stub. */
-  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
-  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
-    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
-    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
-  }
-  /* Push the high byte of the exitno for each exit stub group. */
-  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
-  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
-  *mxp++ = XI_MOVmi;
-  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
-  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
-  *mxp++ = 2*sizeof(void *);
-  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
-  /* Jump to exit handler which fills in the ExitState. */
-  *mxp++ = XI_JMP; mxp += 4;
-  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
-  /* Commit the code for this group (even if assembly fails later on). */
-  lj_mcode_commitbot(as->J, mxp);
-  as->mcbot = mxp;
-  as->mclim = as->mcbot + MCLIM_REDZONE;
-  return mxpstart;
-}
-
-/* Setup all needed exit stubs. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
-  ExitNo i;
-  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
-    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
-  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
-    if (as->J->exitstubgroup[i] == NULL)
-      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
-}
-
-/* -- Snapshot and guard handling ----------------------------------------- */
+/* -- Snapshot handling ----------------------------------------------------- */
 
 /* Can we rematerialize a KNUM instead of forcing a spill? */
 static int asm_snap_canremat(ASMState *as)
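The removed asm_exitstub_gen() above packs each exit stub into four bytes: a two-byte PUSH imm8 of the low exit number plus a two-byte short JMP chaining to a common tail (the first stub omits the JMP, and the shared two-byte PUSH of the group's high byte makes up the difference). A rough recomputation of the size bound it checks against as->mctop; the group size of 32 here is an assumption for illustration:

```c
/* Recompute the space bound used by the removed asm_exitstub_gen():
** (2+2)*EXITSTUBS_PER_GROUP bytes of stubs, plus an 8-byte MOVmi storing
** DISPATCH to stack slot 0 and a 5-byte near JMP to the exit handler.
** EXITSTUBS_PER_GROUP = 32 is assumed, not taken from this diff.
*/
enum { EXITSTUBS_PER_GROUP = 32 };

static int exitstub_group_bytes(void)
{
  return (2+2)*EXITSTUBS_PER_GROUP + 8 + 5;
}
```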
@@ -1157,451 +669,7 @@ static void asm_snap_prep(ASMState *as)
   }
 }
 
-/* Emit conditional branch to exit for guard.
-** It's important to emit this *after* all registers have been allocated,
-** because rematerializations may invalidate the flags.
-*/
-static void asm_guardcc(ASMState *as, int cc)
-{
-  MCode *target = exitstub_addr(as->J, as->snapno);
-  MCode *p = as->mcp;
-  if (LJ_UNLIKELY(p == as->invmcp)) {
-    as->loopinv = 1;
-    *(int32_t *)(p+1) = jmprel(p+5, target);
-    target = p;
-    cc ^= 1;
-    if (as->realign) {
-      emit_sjcc(as, cc, target);
-      return;
-    }
-  }
-  emit_jcc(as, cc, target);
-}
-
-/* -- Memory operand fusion ----------------------------------------------- */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM	31
-
-/* Check if a reference is a signed 32 bit constant. */
-static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
-{
-  if (irref_isk(ref)) {
-    IRIns *ir = IR(ref);
-    if (ir->o != IR_KINT64) {
-      *k = ir->i;
-      return 1;
-    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
-      *k = (int32_t)ir_kint64(ir)->u64;
-      return 1;
-    }
-  }
-  return 0;
-}
-
-/* Check if there's no conflicting instruction between curins and ref.
-** Also avoid fusing loads if there are multiple references.
-*/
-static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
-{
-  IRIns *ir = as->ir;
-  IRRef i = as->curins;
-  if (i > ref + CONFLICT_SEARCH_LIM)
-    return 0;  /* Give up, ref is too far away. */
-  while (--i > ref) {
-    if (ir[i].o == conflict)
-      return 0;  /* Conflict found. */
-    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
-      return 0;
-  }
-  return 1;  /* Ok, no conflict. */
-}
-
-/* Fuse array base into memory operand. */
-static IRRef asm_fuseabase(ASMState *as, IRRef ref)
-{
-  IRIns *irb = IR(ref);
-  as->mrm.ofs = 0;
-  if (irb->o == IR_FLOAD) {
-    IRIns *ira = IR(irb->op1);
-    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
-    /* We can avoid the FLOAD of t->array for colocated arrays. */
-    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
-	noconflict(as, irb->op1, IR_NEWREF, 1)) {
-      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
-      return irb->op1;  /* Table obj. */
-    }
-  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
-    /* Fuse base offset (vararg load). */
-    as->mrm.ofs = IR(irb->op2)->i;
-    return irb->op1;
-  }
-  return ref;  /* Otherwise use the given array base. */
-}
-
-/* Fuse array reference into memory operand. */
-static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
-{
-  IRIns *irx;
-  lua_assert(ir->o == IR_AREF);
-  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
-  irx = IR(ir->op2);
-  if (irref_isk(ir->op2)) {
-    as->mrm.ofs += 8*irx->i;
-    as->mrm.idx = RID_NONE;
-  } else {
-    rset_clear(allow, as->mrm.base);
-    as->mrm.scale = XM_SCALE8;
-    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
-    ** Doesn't help much without ABCelim, but reduces register pressure.
-    */
-    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
-	mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
-	irx->o == IR_ADD && irref_isk(irx->op2)) {
-      as->mrm.ofs += 8*IR(irx->op2)->i;
-      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
-    } else {
-      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
-    }
-  }
-}
-
-/* Fuse array/hash/upvalue reference into memory operand.
-** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
-** pass the final allow mask, excluding any GPRs used for other inputs.
-** In particular: 2-operand GPR instructions need to call ra_dest() first!
-*/
-static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
-{
-  IRIns *ir = IR(ref);
-  if (ra_noreg(ir->r)) {
-    switch ((IROp)ir->o) {
-    case IR_AREF:
-      if (mayfuse(as, ref)) {
-	asm_fusearef(as, ir, allow);
-	return;
-      }
-      break;
-    case IR_HREFK:
-      if (mayfuse(as, ref)) {
-	as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
-	as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
-	as->mrm.idx = RID_NONE;
-	return;
-      }
-      break;
-    case IR_UREFC:
-      if (irref_isk(ir->op1)) {
-	GCfunc *fn = ir_kfunc(IR(ir->op1));
-	GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
-	as->mrm.ofs = ptr2addr(&uv->tv);
-	as->mrm.base = as->mrm.idx = RID_NONE;
-	return;
-      }
-      break;
-    default:
-      lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO);
-      break;
-    }
-  }
-  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
-  as->mrm.ofs = 0;
-  as->mrm.idx = RID_NONE;
-}
-
-/* Fuse FLOAD/FREF reference into memory operand. */
-static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
-{
-  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
-  as->mrm.ofs = field_ofs[ir->op2];
-  as->mrm.idx = RID_NONE;
-  if (irref_isk(ir->op1)) {
-    as->mrm.ofs += IR(ir->op1)->i;
-    as->mrm.base = RID_NONE;
-  } else {
-    as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
-  }
-}
-
-/* Fuse string reference into memory operand. */
-static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
-{
-  IRIns *irr;
-  lua_assert(ir->o == IR_STRREF);
-  as->mrm.base = as->mrm.idx = RID_NONE;
-  as->mrm.scale = XM_SCALE1;
-  as->mrm.ofs = sizeof(GCstr);
-  if (irref_isk(ir->op1)) {
-    as->mrm.ofs += IR(ir->op1)->i;
-  } else {
-    Reg r = ra_alloc1(as, ir->op1, allow);
-    rset_clear(allow, r);
-    as->mrm.base = (uint8_t)r;
-  }
-  irr = IR(ir->op2);
-  if (irref_isk(ir->op2)) {
-    as->mrm.ofs += irr->i;
-  } else {
-    Reg r;
-    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
-    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
-	mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
-      as->mrm.ofs += IR(irr->op2)->i;
-      r = ra_alloc1(as, irr->op1, allow);
-    } else {
-      r = ra_alloc1(as, ir->op2, allow);
-    }
-    if (as->mrm.base == RID_NONE)
-      as->mrm.base = (uint8_t)r;
-    else
-      as->mrm.idx = (uint8_t)r;
-  }
-}
-
-static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
-{
-  IRIns *ir = IR(ref);
-  as->mrm.idx = RID_NONE;
-  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
-    as->mrm.ofs = ir->i;
-    as->mrm.base = RID_NONE;
-  } else if (ir->o == IR_STRREF) {
-    asm_fusestrref(as, ir, allow);
-  } else {
-    as->mrm.ofs = 0;
-    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
-      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
-      IRIns *irx;
-      IRRef idx;
-      Reg r;
-      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
-	ref = ir->op1;
-	ir = IR(ref);
-	if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
-	  goto noadd;
-      }
-      as->mrm.scale = XM_SCALE1;
-      idx = ir->op1;
-      ref = ir->op2;
-      irx = IR(idx);
-      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
-	idx = ir->op2;
-	ref = ir->op1;
-	irx = IR(idx);
-      }
-      if (canfuse(as, irx) && ra_noreg(irx->r)) {
-	if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
-	  /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
-	  idx = irx->op1;
-	  as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
-	} else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
-	  /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
-	  idx = irx->op1;
-	  as->mrm.scale = XM_SCALE2;
-	}
-      }
-      r = ra_alloc1(as, idx, allow);
-      rset_clear(allow, r);
-      as->mrm.idx = (uint8_t)r;
-    }
-  noadd:
-    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
-  }
-}
-
-/* Fuse load into memory operand. */
-static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
-{
-  IRIns *ir = IR(ref);
-  if (ra_hasreg(ir->r)) {
-    if (allow != RSET_EMPTY) {  /* Fast path. */
-      ra_noweak(as, ir->r);
-      return ir->r;
-    }
-  fusespill:
-    /* Force a spill if only memory operands are allowed (asm_x87load). */
-    as->mrm.base = RID_ESP;
-    as->mrm.ofs = ra_spill(as, ir);
-    as->mrm.idx = RID_NONE;
-    return RID_MRM;
-  }
-  if (ir->o == IR_KNUM) {
-    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
-    lua_assert(allow != RSET_EMPTY);
-    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
-      as->mrm.ofs = ptr2addr(ir_knum(ir));
-      as->mrm.base = as->mrm.idx = RID_NONE;
-      return RID_MRM;
-    }
-  } else if (mayfuse(as, ref)) {
-    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
-    if (ir->o == IR_SLOAD) {
-      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
-	  noconflict(as, ref, IR_RETF, 0)) {
-	as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
-	as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
-	as->mrm.idx = RID_NONE;
-	return RID_MRM;
-      }
-    } else if (ir->o == IR_FLOAD) {
-      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
-      if ((irt_isint(ir->t) || irt_isaddr(ir->t)) &&
-	  noconflict(as, ref, IR_FSTORE, 0)) {
-	asm_fusefref(as, ir, xallow);
-	return RID_MRM;
-      }
-    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
-      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
-	asm_fuseahuref(as, ir->op1, xallow);
-	return RID_MRM;
-      }
-    } else if (ir->o == IR_XLOAD) {
-      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
-      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
-      */
-      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
-	  noconflict(as, ref, IR_XSTORE, 0)) {
-	asm_fusexref(as, ir->op1, xallow);
-	return RID_MRM;
-      }
-    } else if (ir->o == IR_VLOAD) {
-      asm_fuseahuref(as, ir->op1, xallow);
-      return RID_MRM;
-    }
-  }
-  if (!(as->freeset & allow) &&
-      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
-    goto fusespill;
-  return ra_allocref(as, ref, allow);
-}
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
-  uint32_t n, nargs = CCI_NARGS(ci);
-  int32_t ofs = STACKARG_OFS;
-  uint32_t gprs = REGARG_GPRS;
-#if LJ_64
-  Reg fpr = REGARG_FIRSTFPR;
-#endif
-  lua_assert(!(nargs > 2 && (ci->flags&CCI_FASTCALL)));  /* Avoid stack adj. */
-  if ((void *)ci->func)
-    emit_call(as, ci->func);
-  for (n = 0; n < nargs; n++) {  /* Setup args. */
-    IRRef ref = args[n];
-    IRIns *ir = IR(ref);
-    Reg r;
-#if LJ_64 && LJ_ABI_WIN
-    /* Windows/x64 argument registers are strictly positional. */
-    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
-    fpr++; gprs >>= 5;
-#elif LJ_64
-    /* POSIX/x64 argument registers are used in order of appearance. */
-    if (irt_isfp(ir->t)) {
-      r = fpr <= REGARG_LASTFPR ? fpr : 0; fpr++;
-    } else {
-      r = gprs & 31; gprs >>= 5;
-    }
-#else
-    if (irt_isfp(ir->t) || !(ci->flags & CCI_FASTCALL)) {
-      r = 0;
-    } else {
-      r = gprs & 31; gprs >>= 5;
-    }
-#endif
-    if (r) {  /* Argument is in a register. */
-      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
-#if LJ_64
-	if (ir->o == IR_KINT64)
-	  emit_loadu64(as, r, ir_kint64(ir)->u64);
-	else
-#endif
-	  emit_loadi(as, r, ir->i);
-      } else {
-	lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
-	if (ra_hasreg(ir->r)) {
-	  ra_noweak(as, ir->r);
-	  emit_movrr(as, ir, r, ir->r);
-	} else {
-	  ra_allocref(as, ref, RID2RSET(r));
-	}
-      }
-    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
-      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
-      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
-	/* Split stores for unaligned FP consts. */
-	emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
-	emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
-      } else {
-	r = ra_alloc1(as, ref, RSET_FPR);
-	emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
-		  r, RID_ESP, ofs);
-      }
-      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
-    } else {  /* Non-FP argument is on stack. */
-      if (LJ_32 && ref < ASMREF_TMP1) {
-	emit_movmroi(as, RID_ESP, ofs, ir->i);
-      } else {
-	r = ra_alloc1(as, ref, RSET_GPR);
-	emit_movtomro(as, REX_64IR(ir, r), RID_ESP, ofs);
-      }
-      ofs += sizeof(intptr_t);
-    }
-  }
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
-  RegSet drop = RSET_SCRATCH;
-  if ((ci->flags & CCI_NOFPRCLOBBER))
-    drop &= ~RSET_FPR;
-  if (ra_hasreg(ir->r))
-    rset_clear(drop, ir->r);  /* Dest reg handled below. */
-  ra_evictset(as, drop);  /* Evictions must be performed first. */
-  if (ra_used(ir)) {
-    if (irt_isfp(ir->t)) {
-      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
-#if LJ_64
-      if ((ci->flags & CCI_CASTU64)) {
-	Reg dest = ir->r;
-	if (ra_hasreg(dest)) {
-	  ra_free(as, dest);
-	  ra_modified(as, dest);
-	  emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
-	} else {
-	  emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
-	}
-      } else {
-	ra_destreg(as, ir, RID_FPRET);
-      }
-#else
-      /* Number result is in x87 st0 for x86 calling convention. */
-      Reg dest = ir->r;
-      if (ra_hasreg(dest)) {
-	ra_free(as, dest);
-	ra_modified(as, dest);
-	emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
-		  dest, RID_ESP, ofs);
-      }
-      if ((ci->flags & CCI_CASTU64)) {
-	emit_movtomro(as, RID_RET, RID_ESP, ofs);
-	emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
-      } else {
-	emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
-		  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
-      }
-#endif
-    } else {
-      lua_assert(!irt_ispri(ir->t));
-      ra_destreg(as, ir, RID_RET);
-    }
-  } else if (LJ_32 && irt_isfp(ir->t)) {
-    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
-  }
-}
+/* -- Miscellaneous helpers ----------------------------------------------- */
 
 /* Collect arguments from CALL* and CARG instructions. */
 static void asm_collectargs(ASMState *as, IRIns *ir,
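The bulk of the block removed above is the x86 memory-operand fusion machinery: loads are folded into the consuming instruction as a [base+idx*scale+ofs] operand, and the pseudo-register RID_MRM tells the emitter to take the operand from as->mrm instead of a real register. A schematic of that operand descriptor, with field types assumed for illustration (the real definition lives in LuaJIT's target headers):

```c
#include <stdint.h>

/* Schematic of the fused memory operand the removed code builds up in
** as->mrm. Field names mirror the removed code; the exact types are
** assumptions for illustration.
*/
typedef struct x86MRMOperand {
  uint8_t base;   /* Base register, or RID_NONE for absolute addressing. */
  uint8_t idx;    /* Index register, or RID_NONE if unused. */
  uint8_t scale;  /* XM_SCALE1/2/4/8, pre-shifted for the SIB byte. */
  int32_t ofs;    /* Signed 32 bit displacement. */
} x86MRMOperand;
```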
@@ -1619,15 +687,6 @@ static void asm_collectargs(ASMState *as, IRIns *ir,
   lua_assert(IR(ir->op1)->o != IR_CARG);
 }
 
-static void asm_call(ASMState *as, IRIns *ir)
-{
-  IRRef args[CCI_NARGS_MAX];
-  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
-  asm_collectargs(as, ir, ci, args);
-  asm_setupresult(as, ir, ci);
-  asm_gencall(as, ci, args);
-}
-
 /* Reconstruct CCallInfo flags for CALLX*. */
 static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
 {
| @@ -1641,349 +700,36 @@ static uint32_t asm_callx_flags(ASMState *as, IRIns *ir) | |||
| 1641 | return (nargs | (ir->t.irt << CCI_OTSHIFT)); | 700 | return (nargs | (ir->t.irt << CCI_OTSHIFT)); |
| 1642 | } | 701 | } |
| 1643 | 702 | ||
| 1644 | static void asm_callx(ASMState *as, IRIns *ir) | 703 | /* Get extent of the stack for a snapshot. */ |
| 1645 | { | 704 | static BCReg asm_stack_extent(ASMState *as, SnapShot *snap, BCReg *ptopslot) |
| 1646 | IRRef args[CCI_NARGS_MAX]; | ||
| 1647 | CCallInfo ci; | ||
| 1648 | IRIns *irf; | ||
| 1649 | ci.flags = asm_callx_flags(as, ir); | ||
| 1650 | asm_collectargs(as, ir, &ci, args); | ||
| 1651 | asm_setupresult(as, ir, &ci); | ||
| 1652 | irf = IR(ir->op2); | ||
| 1653 | if (LJ_32 && irref_isk(ir->op2)) { /* Call to constant address on x86. */ | ||
| 1654 | ci.func = (ASMFunction)(void *)(uintptr_t)(uint32_t)irf->i; | ||
| 1655 | } else { | ||
| 1656 | /* Prefer a non-argument register or RID_RET for indirect calls. */ | ||
| 1657 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH)|RID2RSET(RID_RET); | ||
| 1658 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 1659 | emit_rr(as, XO_GROUP5, XOg_CALL, r); | ||
| 1660 | ci.func = (ASMFunction)(void *)0; | ||
| 1661 | } | ||
| 1662 | asm_gencall(as, &ci, args); | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | /* -- Returns ------------------------------------------------------------- */ | ||
| 1666 | |||
| 1667 | /* Return to lower frame. Guard that it goes to the right spot. */ | ||
| 1668 | static void asm_retf(ASMState *as, IRIns *ir) | ||
| 1669 | { | ||
| 1670 | Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 1671 | void *pc = ir_kptr(IR(ir->op2)); | ||
| 1672 | int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); | ||
| 1673 | as->topslot -= (BCReg)delta; | ||
| 1674 | if ((int32_t)as->topslot < 0) as->topslot = 0; | ||
| 1675 | emit_setgl(as, base, jit_base); | ||
| 1676 | emit_addptr(as, base, -8*delta); | ||
| 1677 | asm_guardcc(as, CC_NE); | ||
| 1678 | emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); | ||
| 1679 | } | ||
| 1680 | |||
| 1681 | /* -- Type conversions ---------------------------------------------------- */ | ||
| 1682 | |||
| 1683 | static void asm_tointg(ASMState *as, IRIns *ir, Reg left) | ||
| 1684 | { | ||
| 1685 | Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); | ||
| 1686 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1687 | asm_guardcc(as, CC_P); | ||
| 1688 | asm_guardcc(as, CC_NE); | ||
| 1689 | emit_rr(as, XO_UCOMISD, left, tmp); | ||
| 1690 | emit_rr(as, XO_CVTSI2SD, tmp, dest); | ||
| 1691 | if (!(as->flags & JIT_F_SPLIT_XMM)) | ||
| 1692 | emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */ | ||
| 1693 | emit_rr(as, XO_CVTTSD2SI, dest, left); | ||
| 1694 | /* Can't fuse since left is needed twice. */ | ||
| 1695 | } | ||
| 1696 | |||
| 1697 | static void asm_tobit(ASMState *as, IRIns *ir) | ||
| 1698 | { | ||
| 1699 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1700 | Reg tmp = ra_noreg(IR(ir->op1)->r) ? | ||
| 1701 | ra_alloc1(as, ir->op1, RSET_FPR) : | ||
| 1702 | ra_scratch(as, RSET_FPR); | ||
| 1703 | Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); | ||
| 1704 | emit_rr(as, XO_MOVDto, tmp, dest); | ||
| 1705 | emit_mrm(as, XO_ADDSD, tmp, right); | ||
| 1706 | ra_left(as, tmp, ir->op1); | ||
| 1707 | } | ||
| 1708 | |||
| 1709 | static void asm_conv(ASMState *as, IRIns *ir) | ||
| 1710 | { | 705 | { |
| 1711 | IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); | 706 | SnapEntry *map = &as->T->snapmap[snap->mapofs]; |
| 1712 | int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); | 707 | MSize n, nent = snap->nent; |
| 1713 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); | 708 | BCReg baseslot = 0, topslot = 0; |
| 1714 | IRRef lref = ir->op1; | 709 | /* Must check all frames to find topslot (outer can be larger than inner). */ |
| 1715 | lua_assert(irt_type(ir->t) != st); | 710 | for (n = 0; n < nent; n++) { |
| 1716 | lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ | 711 | SnapEntry sn = map[n]; |
| 1717 | if (irt_isfp(ir->t)) { | 712 | if ((sn & SNAP_FRAME)) { |
| 1718 | Reg dest = ra_dest(as, ir, RSET_FPR); | 713 | IRIns *ir = IR(snap_ref(sn)); |
| 1719 | if (stfp) { /* FP to FP conversion. */ | 714 | GCfunc *fn = ir_kfunc(ir); |
| 1720 | Reg left = asm_fuseload(as, lref, RSET_FPR); | 715 | if (isluafunc(fn)) { |
| 1721 | emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left); | 716 | BCReg s = snap_slot(sn); |
| 1722 | if (left == dest) return; /* Avoid the XO_XORPS. */ | 717 | BCReg fs = s + funcproto(fn)->framesize; |
| 1723 | } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ | 718 | if (fs > topslot) topslot = fs; |
| 1724 | /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ | 719 | baseslot = s; |
| 1725 | cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); | ||
| 1726 | Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); | ||
| 1727 | if (irt_isfloat(ir->t)) | ||
| 1728 | emit_rr(as, XO_CVTSD2SS, dest, dest); | ||
| 1729 | emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ | ||
| 1730 | emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */ | ||
| 1731 | emit_loadn(as, bias, k); | ||
| 1732 | emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); | ||
| 1733 | return; | ||
| 1734 | } else { /* Integer to FP conversion. */ | ||
| 1735 | Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ? | ||
| 1736 | ra_alloc1(as, lref, RSET_GPR) : | ||
| 1737 | asm_fuseload(as, lref, RSET_GPR); | ||
| 1738 | if (LJ_64 && st == IRT_U64) { | ||
| 1739 | MCLabel l_end = emit_label(as); | ||
| 1740 | const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); | ||
| 1741 | emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ | ||
| 1742 | emit_sjcc(as, CC_NS, l_end); | ||
| 1743 | emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ | ||
| 1744 | } | ||
| 1745 | emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, | ||
| 1746 | dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); | ||
| 1747 | } | ||
| 1748 | if (!(as->flags & JIT_F_SPLIT_XMM)) | ||
| 1749 | emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */ | ||
| 1750 | } else if (stfp) { /* FP to integer conversion. */ | ||
| 1751 | if (irt_isguard(ir->t)) { | ||
| 1752 | /* Checked conversions are only supported from number to int. */ | ||
| 1753 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | ||
| 1754 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | ||
| 1755 | } else { | ||
| 1756 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1757 | x86Op op = st == IRT_NUM ? | ||
| 1758 | ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) : | ||
| 1759 | ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI); | ||
| 1760 | if (LJ_32 && irt_isu32(ir->t)) { /* FP to U32 conversion on x86. */ | ||
| 1761 | /* u32 = (int32_t)(number - 2^31) + 2^31 */ | ||
| 1762 | Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : | ||
| 1763 | ra_scratch(as, RSET_FPR); | ||
| 1764 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); | ||
| 1765 | emit_rr(as, op, dest, tmp); | ||
| 1766 | if (st == IRT_NUM) | ||
| 1767 | emit_rma(as, XO_ADDSD, tmp, | ||
| 1768 | lj_ir_k64_find(as->J, U64x(c1e00000,00000000))); | ||
| 1769 | else | ||
| 1770 | emit_rma(as, XO_ADDSS, tmp, | ||
| 1771 | lj_ir_k64_find(as->J, U64x(00000000,cf000000))); | ||
| 1772 | ra_left(as, tmp, lref); | ||
| 1773 | } else if (LJ_64 && irt_isu64(ir->t)) { | ||
| 1774 | /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ | ||
| 1775 | Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : | ||
| 1776 | ra_scratch(as, RSET_FPR); | ||
| 1777 | MCLabel l_end = emit_label(as); | ||
| 1778 | emit_rr(as, op, dest|REX_64, tmp); | ||
| 1779 | if (st == IRT_NUM) | ||
| 1780 | emit_rma(as, XO_ADDSD, tmp, | ||
| 1781 | lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); | ||
| 1782 | else | ||
| 1783 | emit_rma(as, XO_ADDSS, tmp, | ||
| 1784 | lj_ir_k64_find(as->J, U64x(00000000,df800000))); | ||
| 1785 | emit_sjcc(as, CC_NS, l_end); | ||
| 1786 | emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest < 2^63. */ | ||
| 1787 | emit_rr(as, op, dest|REX_64, tmp); | ||
| 1788 | ra_left(as, tmp, lref); | ||
| 1789 | } else { | ||
| 1790 | Reg left = asm_fuseload(as, lref, RSET_FPR); | ||
| 1791 | if (LJ_64 && irt_isu32(ir->t)) | ||
| 1792 | emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ | ||
| 1793 | emit_mrm(as, op, | ||
| 1794 | dest|((LJ_64 && | ||
| 1795 | (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), | ||
| 1796 | left); | ||
| 1797 | } | ||
| 1798 | } | ||
| 1799 | } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ | ||
| 1800 | Reg left, dest = ra_dest(as, ir, RSET_GPR); | ||
| 1801 | RegSet allow = RSET_GPR; | ||
| 1802 | x86Op op; | ||
| 1803 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | ||
| 1804 | if (st == IRT_I8) { | ||
| 1805 | op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; | ||
| 1806 | } else if (st == IRT_U8) { | ||
| 1807 | op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX; | ||
| 1808 | } else if (st == IRT_I16) { | ||
| 1809 | op = XO_MOVSXw; | ||
| 1810 | } else { | ||
| 1811 | op = XO_MOVZXw; | ||
| 1812 | } | ||
| 1813 | left = asm_fuseload(as, lref, allow); | ||
| 1814 | /* Add extra MOV if source is already in wrong register. */ | ||
| 1815 | if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) { | ||
| 1816 | Reg tmp = ra_scratch(as, allow); | ||
| 1817 | emit_rr(as, op, dest, tmp); | ||
| 1818 | emit_rr(as, XO_MOV, tmp, left); | ||
| 1819 | } else { | ||
| 1820 | emit_mrm(as, op, dest, left); | ||
| 1821 | } | ||
| 1822 | } else { /* 32/64 bit integer conversions. */ | ||
| 1823 | if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */ | ||
| 1824 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1825 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 1826 | } else if (irt_is64(ir->t)) { | ||
| 1827 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1828 | if (st64 || !(ir->op2 & IRCONV_SEXT)) { | ||
| 1829 | /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */ | ||
| 1830 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 1831 | } else { /* 32 to 64 bit sign extension. */ | ||
| 1832 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 1833 | emit_mrm(as, XO_MOVSXd, dest|REX_64, left); | ||
| 1834 | } | ||
| 1835 | } else { | ||
| 1836 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1837 | if (st64) { | ||
| 1838 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 1839 | /* This is either a 32 bit reg/reg mov which zeroes the hiword | ||
| 1840 | ** or a load of the loword from a 64 bit address. | ||
| 1841 | */ | ||
| 1842 | emit_mrm(as, XO_MOV, dest, left); | ||
| 1843 | } else { /* 32/32 bit no-op (cast). */ | ||
| 1844 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 1845 | } | 720 | } |
| 1846 | } | 721 | } |
| 1847 | } | 722 | } |
| 723 | *ptopslot = topslot; | ||
| 724 | return baseslot; | ||
| 1848 | } | 725 | } |
| 1849 | 726 | ||
| 1850 | #if LJ_32 && LJ_HASFFI | 727 | /* Calculate stack adjustment. */ |
| 1851 | /* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */ | 728 | static int32_t asm_stack_adjust(ASMState *as) |
| 1852 | |||
| 1853 | /* 64 bit integer to FP conversion in 32 bit mode. */ | ||
| 1854 | static void asm_conv_fp_int64(ASMState *as, IRIns *ir) | ||
| 1855 | { | ||
| 1856 | Reg hi = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 1857 | Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi)); | ||
| 1858 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
| 1859 | Reg dest = ir->r; | ||
| 1860 | if (ra_hasreg(dest)) { | ||
| 1861 | ra_free(as, dest); | ||
| 1862 | ra_modified(as, dest); | ||
| 1863 | emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, | ||
| 1864 | dest, RID_ESP, ofs); | ||
| 1865 | } | ||
| 1866 | emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, | ||
| 1867 | irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); | ||
| 1868 | if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { | ||
| 1869 | /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ | ||
| 1870 | MCLabel l_end = emit_label(as); | ||
| 1871 | emit_rma(as, XO_FADDq, XOg_FADDq, | ||
| 1872 | lj_ir_k64_find(as->J, U64x(43f00000,00000000))); | ||
| 1873 | emit_sjcc(as, CC_NS, l_end); | ||
| 1874 | emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ | ||
| 1875 | } else { | ||
| 1876 | lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); | ||
| 1877 | } | ||
| 1878 | emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); | ||
| 1879 | /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ | ||
| 1880 | emit_rmro(as, XO_MOVto, hi, RID_ESP, 4); | ||
| 1881 | emit_rmro(as, XO_MOVto, lo, RID_ESP, 0); | ||
| 1882 | } | ||
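This is the classic x87 trick for unsigned 64 bit inputs: FILDq always interprets the bit pattern as signed, so values with the top bit set come out off by exactly -2^64, and the FADDq of U64x(43f00000,00000000) (the double constant 2^64) corrects them. A behavioral C sketch:

```c
#include <stdint.h>

/* Sketch of the u64 -> double path: convert as signed, then add 2^64
** back when the sign bit of the input was set (u >= 2^63).
*/
static double u64_to_num(uint64_t u)
{
  double d = (double)(int64_t)u;    /* FILDq: signed interpretation. */
  if ((int64_t)u < 0)               /* Input was in [2^63, 2^64-1]. */
    d += 18446744073709551616.0;    /* Compensate with +2^64. */
  return d;
}
```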
| 1883 | |||
| 1884 | /* FP to 64 bit integer conversion in 32 bit mode. */ | ||
| 1885 | static void asm_conv_int64_fp(ASMState *as, IRIns *ir) | ||
| 1886 | { | ||
| 1887 | IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); | ||
| 1888 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); | ||
| 1889 | Reg lo, hi; | ||
| 1890 | lua_assert(st == IRT_NUM || st == IRT_FLOAT); | ||
| 1891 | lua_assert(dt == IRT_I64 || dt == IRT_U64); | ||
| 1892 | lua_assert(((ir-1)->op2 & IRCONV_TRUNC)); | ||
| 1893 | hi = ra_dest(as, ir, RSET_GPR); | ||
| 1894 | lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); | ||
| 1895 | if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); | ||
| 1896 | /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */ | ||
| 1897 | if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */ | ||
| 1898 | emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4); | ||
| 1899 | emit_rmro(as, XO_MOVto, lo, RID_ESP, 4); | ||
| 1900 | emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff); | ||
| 1901 | } | ||
| 1902 | if (dt == IRT_U64) { | ||
| 1903 | /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ | ||
| 1904 | MCLabel l_pop, l_end = emit_label(as); | ||
| 1905 | emit_x87op(as, XI_FPOP); | ||
| 1906 | l_pop = emit_label(as); | ||
| 1907 | emit_sjmp(as, l_end); | ||
| 1908 | emit_rmro(as, XO_MOV, hi, RID_ESP, 4); | ||
| 1909 | if ((as->flags & JIT_F_SSE3)) | ||
| 1910 | emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); | ||
| 1911 | else | ||
| 1912 | emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); | ||
| 1913 | emit_rma(as, XO_FADDq, XOg_FADDq, | ||
| 1914 | lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); | ||
| 1915 | emit_sjcc(as, CC_NS, l_pop); | ||
| 1916 | emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ | ||
| 1917 | } | ||
| 1918 | emit_rmro(as, XO_MOV, hi, RID_ESP, 4); | ||
| 1919 | if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */ | ||
| 1920 | emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); | ||
| 1921 | } else { /* Otherwise set FPU rounding mode to truncate before the store. */ | ||
| 1922 | emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); | ||
| 1923 | emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0); | ||
| 1924 | emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0); | ||
| 1925 | emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0); | ||
| 1926 | emit_loadi(as, lo, 0xc00); | ||
| 1927 | emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0); | ||
| 1928 | } | ||
| 1929 | if (dt == IRT_U64) | ||
| 1930 | emit_x87op(as, XI_FDUP); | ||
| 1931 | emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd, | ||
| 1932 | st == IRT_NUM ? XOg_FLDq: XOg_FLDd, | ||
| 1933 | asm_fuseload(as, ir->op1, RSET_EMPTY)); | ||
| 1934 | } | ||
| 1935 | #endif | ||
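The unsigned destination uses the mirror-image trick: U64x(c3f00000,00000000) is -2^64 as a double, so an out-of-range input is shifted into signed range before truncation and the final wrap-around restores it. A sketch of the intended semantics (the emitted code instead converts first and retries with the compensation when the sign check fails):

```c
#include <stdint.h>

/* Sketch of the num -> u64 semantics for IRT_U64 destinations. */
static uint64_t num_to_u64(double d)
{
  if (d >= 9223372036854775808.0)   /* d >= 2^63: exceeds int64 range. */
    return (uint64_t)(int64_t)(d - 18446744073709551616.0);
  return (uint64_t)(int64_t)d;      /* Truncating conversion. */
}
```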
| 1936 | |||
| 1937 | static void asm_strto(ASMState *as, IRIns *ir) | ||
| 1938 | { | ||
| 1939 | /* Force a spill slot for the destination register (if any). */ | ||
| 1940 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum]; | ||
| 1941 | IRRef args[2]; | ||
| 1942 | RegSet drop = RSET_SCRATCH; | ||
| 1943 | if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r)) | ||
| 1944 | rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */ | ||
| 1945 | ra_evictset(as, drop); | ||
| 1946 | asm_guardcc(as, CC_E); | ||
| 1947 | emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */ | ||
| 1948 | args[0] = ir->op1; /* GCstr *str */ | ||
| 1949 | args[1] = ASMREF_TMP1; /* TValue *n */ | ||
| 1950 | asm_gencall(as, ci, args); | ||
| 1951 | /* Store the result to the spill slot or temp slots. */ | ||
| 1952 | emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, | ||
| 1953 | RID_ESP, sps_scale(ir->s)); | ||
| 1954 | } | ||
| 1955 | |||
| 1956 | static void asm_tostr(ASMState *as, IRIns *ir) | ||
| 1957 | { | ||
| 1958 | IRIns *irl = IR(ir->op1); | ||
| 1959 | IRRef args[2]; | ||
| 1960 | args[0] = ASMREF_L; | ||
| 1961 | as->gcsteps++; | ||
| 1962 | if (irt_isnum(irl->t)) { | ||
| 1963 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum]; | ||
| 1964 | args[1] = ASMREF_TMP1; /* const lua_Number * */ | ||
| 1965 | asm_setupresult(as, ir, ci); /* GCstr * */ | ||
| 1966 | asm_gencall(as, ci, args); | ||
| 1967 | emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, | ||
| 1968 | RID_ESP, ra_spill(as, irl)); | ||
| 1969 | } else { | ||
| 1970 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint]; | ||
| 1971 | args[1] = ir->op1; /* int32_t k */ | ||
| 1972 | asm_setupresult(as, ir, ci); /* GCstr * */ | ||
| 1973 | asm_gencall(as, ci, args); | ||
| 1974 | } | ||
| 1975 | } | ||
| 1976 | |||
| 1977 | /* -- Memory references --------------------------------------------------- */ | ||
| 1978 | |||
| 1979 | static void asm_aref(ASMState *as, IRIns *ir) | ||
| 1980 | { | 729 | { |
| 1981 | Reg dest = ra_dest(as, ir, RSET_GPR); | 730 | if (as->evenspill <= SPS_FIXED) |
| 1982 | asm_fusearef(as, ir, RSET_GPR); | 731 | return 0; |
| 1983 | if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) | 732 | return sps_scale((as->evenspill - SPS_FIXED + 3) & ~3); |
| 1984 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 1985 | else if (as->mrm.base != dest) | ||
| 1986 | emit_rr(as, XO_MOV, dest, as->mrm.base); | ||
| 1987 | } | 733 | } |
| 1988 | 734 | ||
| 1989 | /* Must match with hash*() in lj_tab.c. */ | 735 | /* Must match with hash*() in lj_tab.c. */ |
| @@ -2006,536 +752,11 @@ static uint32_t ir_khash(IRIns *ir) | |||
| 2006 | return hashrot(lo, hi); | 752 | return hashrot(lo, hi); |
| 2007 | } | 753 | } |
| 2008 | 754 | ||
| 2009 | /* Merge NE(HREF, niltv) check. */ | ||
| 2010 | static MCode *merge_href_niltv(ASMState *as, IRIns *ir) | ||
| 2011 | { | ||
| 2012 | /* Assumes nothing else generates NE of HREF. */ | ||
| 2013 | if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins && | ||
| 2014 | ra_hasreg(ir->r)) { | ||
| 2015 | MCode *p = as->mcp; | ||
| 2016 | p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6; | ||
| 2017 | /* Ensure no loop branch inversion happened. */ | ||
| 2018 | if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) { | ||
| 2019 | as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */ | ||
| 2020 | return p + *(int32_t *)(p-4); /* Return exit address. */ | ||
| 2021 | } | ||
| 2022 | } | ||
| 2023 | return NULL; | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | /* Inlined hash lookup. Specialized for key type and for const keys. | ||
| 2027 | ** The equivalent C code is: | ||
| 2028 | ** Node *n = hashkey(t, key); | ||
| 2029 | ** do { | ||
| 2030 | ** if (lj_obj_equal(&n->key, key)) return &n->val; | ||
| 2031 | ** } while ((n = nextnode(n))); | ||
| 2032 | ** return niltv(L); | ||
| 2033 | */ | ||
| 2034 | static void asm_href(ASMState *as, IRIns *ir) | ||
| 2035 | { | ||
| 2036 | MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */ | ||
| 2037 | RegSet allow = RSET_GPR; | ||
| 2038 | Reg dest = ra_dest(as, ir, allow); | ||
| 2039 | Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); | ||
| 2040 | Reg key = RID_NONE, tmp = RID_NONE; | ||
| 2041 | IRIns *irkey = IR(ir->op2); | ||
| 2042 | int isk = irref_isk(ir->op2); | ||
| 2043 | IRType1 kt = irkey->t; | ||
| 2044 | uint32_t khash; | ||
| 2045 | MCLabel l_end, l_loop, l_next; | ||
| 2046 | |||
| 2047 | if (!isk) { | ||
| 2048 | rset_clear(allow, tab); | ||
| 2049 | key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); | ||
| 2050 | if (!irt_isstr(kt)) | ||
| 2051 | tmp = ra_scratch(as, rset_exclude(allow, key)); | ||
| 2052 | } | ||
| 2053 | |||
| 2054 | /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */ | ||
| 2055 | l_end = emit_label(as); | ||
| 2056 | if (nilexit && ir[1].o == IR_NE) { | ||
| 2057 | emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */ | ||
| 2058 | nilexit = NULL; | ||
| 2059 | } else { | ||
| 2060 | emit_loada(as, dest, niltvg(J2G(as->J))); | ||
| 2061 | } | ||
| 2062 | |||
| 2063 | /* Follow hash chain until the end. */ | ||
| 2064 | l_loop = emit_sjcc_label(as, CC_NZ); | ||
| 2065 | emit_rr(as, XO_TEST, dest, dest); | ||
| 2066 | emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); | ||
| 2067 | l_next = emit_label(as); | ||
| 2068 | |||
| 2069 | /* Type and value comparison. */ | ||
| 2070 | if (nilexit) | ||
| 2071 | emit_jcc(as, CC_E, nilexit); | ||
| 2072 | else | ||
| 2073 | emit_sjcc(as, CC_E, l_end); | ||
| 2074 | if (irt_isnum(kt)) { | ||
| 2075 | if (isk) { | ||
| 2076 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 2077 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo), | ||
| 2078 | (int32_t)ir_knum(irkey)->u32.lo); | ||
| 2079 | emit_sjcc(as, CC_NE, l_next); | ||
| 2080 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi), | ||
| 2081 | (int32_t)ir_knum(irkey)->u32.hi); | ||
| 2082 | } else { | ||
| 2083 | emit_sjcc(as, CC_P, l_next); | ||
| 2084 | emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); | ||
| 2085 | emit_sjcc(as, CC_AE, l_next); | ||
| 2086 | /* The type check avoids NaN penalties and complaints from Valgrind. */ | ||
| 2087 | #if LJ_64 | ||
| 2088 | emit_u32(as, LJ_TISNUM); | ||
| 2089 | emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 2090 | #else | ||
| 2091 | emit_i8(as, LJ_TISNUM); | ||
| 2092 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 2093 | #endif | ||
| 2094 | } | ||
| 2095 | #if LJ_64 | ||
| 2096 | } else if (irt_islightud(kt)) { | ||
| 2097 | emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); | ||
| 2098 | #endif | ||
| 2099 | } else { | ||
| 2100 | if (!irt_ispri(kt)) { | ||
| 2101 | lua_assert(irt_isaddr(kt)); | ||
| 2102 | if (isk) | ||
| 2103 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), | ||
| 2104 | ptr2addr(ir_kgc(irkey))); | ||
| 2105 | else | ||
| 2106 | emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); | ||
| 2107 | emit_sjcc(as, CC_NE, l_next); | ||
| 2108 | } | ||
| 2109 | lua_assert(!irt_isnil(kt)); | ||
| 2110 | emit_i8(as, irt_toitype(kt)); | ||
| 2111 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 2112 | } | ||
| 2113 | emit_sfixup(as, l_loop); | ||
| 2114 | checkmclim(as); | ||
| 2115 | |||
| 2116 | /* Load main position relative to tab->node into dest. */ | ||
| 2117 | khash = isk ? ir_khash(irkey) : 1; | ||
| 2118 | if (khash == 0) { | ||
| 2119 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); | ||
| 2120 | } else { | ||
| 2121 | emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); | ||
| 2122 | if ((as->flags & JIT_F_PREFER_IMUL)) { | ||
| 2123 | emit_i8(as, sizeof(Node)); | ||
| 2124 | emit_rr(as, XO_IMULi8, dest, dest); | ||
| 2125 | } else { | ||
| 2126 | emit_shifti(as, XOg_SHL, dest, 3); | ||
| 2127 | emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0); | ||
| 2128 | } | ||
| 2129 | if (isk) { | ||
| 2130 | emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); | ||
| 2131 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); | ||
| 2132 | } else if (irt_isstr(kt)) { | ||
| 2133 | emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); | ||
| 2134 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); | ||
| 2135 | } else { /* Must match with hashrot() in lj_tab.c. */ | ||
| 2136 | emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); | ||
| 2137 | emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp); | ||
| 2138 | emit_shifti(as, XOg_ROL, tmp, HASH_ROT3); | ||
| 2139 | emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp); | ||
| 2140 | emit_shifti(as, XOg_ROL, dest, HASH_ROT2); | ||
| 2141 | emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest); | ||
| 2142 | emit_shifti(as, XOg_ROL, dest, HASH_ROT1); | ||
| 2143 | emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest); | ||
| 2144 | if (irt_isnum(kt)) { | ||
| 2145 | emit_rr(as, XO_ARITH(XOg_ADD), dest, dest); | ||
| 2146 | #if LJ_64 | ||
| 2147 | emit_shifti(as, XOg_SHR|REX_64, dest, 32); | ||
| 2148 | emit_rr(as, XO_MOV, tmp, dest); | ||
| 2149 | emit_rr(as, XO_MOVDto, key|REX_64, dest); | ||
| 2150 | #else | ||
| 2151 | emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4); | ||
| 2152 | emit_rr(as, XO_MOVDto, key, tmp); | ||
| 2153 | #endif | ||
| 2154 | } else { | ||
| 2155 | emit_rr(as, XO_MOV, tmp, key); | ||
| 2156 | emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); | ||
| 2157 | } | ||
| 2158 | } | ||
| 2159 | } | ||
| 2160 | } | ||
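Since the emitter works backwards, the non-constant key path above reads bottom-up. In execution order the mix is (a sketch; dest and tmp hold the two 32 bit key halves set up by the innermost branch, and the rotate constants must match hashrot() in lj_tab.c):

```c
/* Execution-order sketch of the emitted hash mix. */
tmp  ^= dest;  dest = lj_rol(dest, HASH_ROT1);
tmp  -= dest;  dest = lj_rol(dest, HASH_ROT2);
dest ^= tmp;   tmp  = lj_rol(tmp, HASH_ROT3);
dest -= tmp;
dest &= t->hmask;            /* Main position index. */
/* Then dest is scaled by sizeof(Node) and biased by tab->node. */
```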
| 2161 | |||
| 2162 | static void asm_hrefk(ASMState *as, IRIns *ir) | ||
| 2163 | { | ||
| 2164 | IRIns *kslot = IR(ir->op2); | ||
| 2165 | IRIns *irkey = IR(kslot->op1); | ||
| 2166 | int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); | ||
| 2167 | Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; | ||
| 2168 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 2169 | #if !LJ_64 | ||
| 2170 | MCLabel l_exit; | ||
| 2171 | #endif | ||
| 2172 | lua_assert(ofs % sizeof(Node) == 0); | ||
| 2173 | if (ra_hasreg(dest)) { | ||
| 2174 | if (ofs != 0) { | ||
| 2175 | if (dest == node && !(as->flags & JIT_F_LEA_AGU)) | ||
| 2176 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); | ||
| 2177 | else | ||
| 2178 | emit_rmro(as, XO_LEA, dest, node, ofs); | ||
| 2179 | } else if (dest != node) { | ||
| 2180 | emit_rr(as, XO_MOV, dest, node); | ||
| 2181 | } | ||
| 2182 | } | ||
| 2183 | asm_guardcc(as, CC_NE); | ||
| 2184 | #if LJ_64 | ||
| 2185 | if (!irt_ispri(irkey->t)) { | ||
| 2186 | Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); | ||
| 2187 | emit_rmro(as, XO_CMP, key|REX_64, node, | ||
| 2188 | ofs + (int32_t)offsetof(Node, key.u64)); | ||
| 2189 | lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); | ||
| 2190 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 2191 | emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : | ||
| 2192 | ((uint64_t)irt_toitype(irkey->t) << 32) | | ||
| 2193 | (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); | ||
| 2194 | } else { | ||
| 2195 | lua_assert(!irt_isnil(irkey->t)); | ||
| 2196 | emit_i8(as, irt_toitype(irkey->t)); | ||
| 2197 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, | ||
| 2198 | ofs + (int32_t)offsetof(Node, key.it)); | ||
| 2199 | } | ||
| 2200 | #else | ||
| 2201 | l_exit = emit_label(as); | ||
| 2202 | if (irt_isnum(irkey->t)) { | ||
| 2203 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 2204 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 2205 | ofs + (int32_t)offsetof(Node, key.u32.lo), | ||
| 2206 | (int32_t)ir_knum(irkey)->u32.lo); | ||
| 2207 | emit_sjcc(as, CC_NE, l_exit); | ||
| 2208 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 2209 | ofs + (int32_t)offsetof(Node, key.u32.hi), | ||
| 2210 | (int32_t)ir_knum(irkey)->u32.hi); | ||
| 2211 | } else { | ||
| 2212 | if (!irt_ispri(irkey->t)) { | ||
| 2213 | lua_assert(irt_isgcv(irkey->t)); | ||
| 2214 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 2215 | ofs + (int32_t)offsetof(Node, key.gcr), | ||
| 2216 | ptr2addr(ir_kgc(irkey))); | ||
| 2217 | emit_sjcc(as, CC_NE, l_exit); | ||
| 2218 | } | ||
| 2219 | lua_assert(!irt_isnil(irkey->t)); | ||
| 2220 | emit_i8(as, irt_toitype(irkey->t)); | ||
| 2221 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, | ||
| 2222 | ofs + (int32_t)offsetof(Node, key.it)); | ||
| 2223 | } | ||
| 2224 | #endif | ||
| 2225 | } | ||
| 2226 | |||
| 2227 | static void asm_newref(ASMState *as, IRIns *ir) | ||
| 2228 | { | ||
| 2229 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; | ||
| 2230 | IRRef args[3]; | ||
| 2231 | IRIns *irkey; | ||
| 2232 | Reg tmp; | ||
| 2233 | args[0] = ASMREF_L; /* lua_State *L */ | ||
| 2234 | args[1] = ir->op1; /* GCtab *t */ | ||
| 2235 | args[2] = ASMREF_TMP1; /* cTValue *key */ | ||
| 2236 | asm_setupresult(as, ir, ci); /* TValue * */ | ||
| 2237 | asm_gencall(as, ci, args); | ||
| 2238 | tmp = ra_releasetmp(as, ASMREF_TMP1); | ||
| 2239 | irkey = IR(ir->op2); | ||
| 2240 | if (irt_isnum(irkey->t)) { | ||
| 2241 | /* For numbers use the constant itself or a spill slot as a TValue. */ | ||
| 2242 | if (irref_isk(ir->op2)) | ||
| 2243 | emit_loada(as, tmp, ir_knum(irkey)); | ||
| 2244 | else | ||
| 2245 | emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey)); | ||
| 2246 | } else { | ||
| 2247 | /* Otherwise use g->tmptv to hold the TValue. */ | ||
| 2248 | if (!irref_isk(ir->op2)) { | ||
| 2249 | Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp)); | ||
| 2250 | emit_movtomro(as, REX_64IR(irkey, src), tmp, 0); | ||
| 2251 | } else if (!irt_ispri(irkey->t)) { | ||
| 2252 | emit_movmroi(as, tmp, 0, irkey->i); | ||
| 2253 | } | ||
| 2254 | if (!(LJ_64 && irt_islightud(irkey->t))) | ||
| 2255 | emit_movmroi(as, tmp, 4, irt_toitype(irkey->t)); | ||
| 2256 | emit_loada(as, tmp, &J2G(as->J)->tmptv); | ||
| 2257 | } | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | static void asm_uref(ASMState *as, IRIns *ir) | ||
| 2261 | { | ||
| 2262 | /* NYI: Check that UREFO is still open and not aliasing a slot. */ | ||
| 2263 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 2264 | if (irref_isk(ir->op1)) { | ||
| 2265 | GCfunc *fn = ir_kfunc(IR(ir->op1)); | ||
| 2266 | MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; | ||
| 2267 | emit_rma(as, XO_MOV, dest, v); | ||
| 2268 | } else { | ||
| 2269 | Reg uv = ra_scratch(as, RSET_GPR); | ||
| 2270 | Reg func = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 2271 | if (ir->o == IR_UREFC) { | ||
| 2272 | emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); | ||
| 2273 | asm_guardcc(as, CC_NE); | ||
| 2274 | emit_i8(as, 1); | ||
| 2275 | emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); | ||
| 2276 | } else { | ||
| 2277 | emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); | ||
| 2278 | } | ||
| 2279 | emit_rmro(as, XO_MOV, uv, func, | ||
| 2280 | (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); | ||
| 2281 | } | ||
| 2282 | } | ||
| 2283 | |||
| 2284 | static void asm_fref(ASMState *as, IRIns *ir) | ||
| 2285 | { | ||
| 2286 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 2287 | asm_fusefref(as, ir, RSET_GPR); | ||
| 2288 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 2289 | } | ||
| 2290 | |||
| 2291 | static void asm_strref(ASMState *as, IRIns *ir) | ||
| 2292 | { | ||
| 2293 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 2294 | asm_fusestrref(as, ir, RSET_GPR); | ||
| 2295 | if (as->mrm.base == RID_NONE) | ||
| 2296 | emit_loadi(as, dest, as->mrm.ofs); | ||
| 2297 | else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) | ||
| 2298 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); | ||
| 2299 | else | ||
| 2300 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 2301 | } | ||
| 2302 | |||
| 2303 | /* -- Loads and stores ---------------------------------------------------- */ | ||
| 2304 | |||
| 2305 | static void asm_fxload(ASMState *as, IRIns *ir) | ||
| 2306 | { | ||
| 2307 | Reg dest = ra_dest(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); | ||
| 2308 | x86Op xo; | ||
| 2309 | if (ir->o == IR_FLOAD) | ||
| 2310 | asm_fusefref(as, ir, RSET_GPR); | ||
| 2311 | else | ||
| 2312 | asm_fusexref(as, ir->op1, RSET_GPR); | ||
| 2313 | /* ir->op2 is ignored -- unaligned loads are ok on x86. */ | ||
| 2314 | switch (irt_type(ir->t)) { | ||
| 2315 | case IRT_I8: xo = XO_MOVSXb; break; | ||
| 2316 | case IRT_U8: xo = XO_MOVZXb; break; | ||
| 2317 | case IRT_I16: xo = XO_MOVSXw; break; | ||
| 2318 | case IRT_U16: xo = XO_MOVZXw; break; | ||
| 2319 | case IRT_NUM: xo = XMM_MOVRM(as); break; | ||
| 2320 | case IRT_FLOAT: xo = XO_MOVSS; break; | ||
| 2321 | default: | ||
| 2322 | if (LJ_64 && irt_is64(ir->t)) | ||
| 2323 | dest |= REX_64; | ||
| 2324 | else | ||
| 2325 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | ||
| 2326 | xo = XO_MOV; | ||
| 2327 | break; | ||
| 2328 | } | ||
| 2329 | emit_mrm(as, xo, dest, RID_MRM); | ||
| 2330 | } | ||
| 2331 | |||
| 2332 | static void asm_fxstore(ASMState *as, IRIns *ir) | ||
| 2333 | { | ||
| 2334 | RegSet allow = RSET_GPR; | ||
| 2335 | Reg src = RID_NONE, osrc = RID_NONE; | ||
| 2336 | int32_t k = 0; | ||
| 2337 | /* The IRT_I16/IRT_U16 stores should never be simplified for constant | ||
| 2338 | ** values since mov word [mem], imm16 has a length-changing prefix. | ||
| 2339 | */ | ||
| 2340 | if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) || | ||
| 2341 | !asm_isk32(as, ir->op2, &k)) { | ||
| 2342 | RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR : | ||
| 2343 | (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR; | ||
| 2344 | src = osrc = ra_alloc1(as, ir->op2, allow8); | ||
| 2345 | if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */ | ||
| 2346 | rset_clear(allow, osrc); | ||
| 2347 | src = ra_scratch(as, allow8); | ||
| 2348 | } | ||
| 2349 | rset_clear(allow, src); | ||
| 2350 | } | ||
| 2351 | if (ir->o == IR_FSTORE) | ||
| 2352 | asm_fusefref(as, IR(ir->op1), allow); | ||
| 2353 | else | ||
| 2354 | asm_fusexref(as, ir->op1, allow); | ||
| 2355 | /* ir->op2 is ignored -- unaligned stores are ok on x86. */ | ||
| 2356 | if (ra_hasreg(src)) { | ||
| 2357 | x86Op xo; | ||
| 2358 | switch (irt_type(ir->t)) { | ||
| 2359 | case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; | ||
| 2360 | case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; | ||
| 2361 | case IRT_NUM: xo = XO_MOVSDto; break; | ||
| 2362 | case IRT_FLOAT: xo = XO_MOVSSto; break; | ||
| 2363 | #if LJ_64 | ||
| 2364 | case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */ | ||
| 2365 | #endif | ||
| 2366 | default: | ||
| 2367 | if (LJ_64 && irt_is64(ir->t)) | ||
| 2368 | src |= REX_64; | ||
| 2369 | else | ||
| 2370 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | ||
| 2371 | xo = XO_MOVto; | ||
| 2372 | break; | ||
| 2373 | } | ||
| 2374 | emit_mrm(as, xo, src, RID_MRM); | ||
| 2375 | if (!LJ_64 && src != osrc) { | ||
| 2376 | ra_noweak(as, osrc); | ||
| 2377 | emit_rr(as, XO_MOV, src, osrc); | ||
| 2378 | } | ||
| 2379 | } else { | ||
| 2380 | if (irt_isi8(ir->t) || irt_isu8(ir->t)) { | ||
| 2381 | emit_i8(as, k); | ||
| 2382 | emit_mrm(as, XO_MOVmib, 0, RID_MRM); | ||
| 2383 | } else { | ||
| 2384 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || | ||
| 2385 | irt_isaddr(ir->t)); | ||
| 2386 | emit_i32(as, k); | ||
| 2387 | emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); | ||
| 2388 | } | ||
| 2389 | } | ||
| 2390 | } | ||
| 2391 | |||
| 2392 | #if LJ_64 | ||
| 2393 | static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) | ||
| 2394 | { | ||
| 2395 | if (ra_used(ir) || typecheck) { | ||
| 2396 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 2397 | if (typecheck) { | ||
| 2398 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); | ||
| 2399 | asm_guardcc(as, CC_NE); | ||
| 2400 | emit_i8(as, -2); | ||
| 2401 | emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); | ||
| 2402 | emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | ||
| 2403 | emit_rr(as, XO_MOV, tmp|REX_64, dest); | ||
| 2404 | } | ||
| 2405 | return dest; | ||
| 2406 | } else { | ||
| 2407 | return RID_NONE; | ||
| 2408 | } | ||
| 2409 | } | ||
| 2410 | #endif | ||
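The typecheck exploits the x64 NaN-tagging layout: a valid 64 bit lightuserdata value has bits 63..48 set and bit 47 clear, which is precisely what the sar-by-47 plus compare-with--2 sequence verifies. As a predicate (a sketch):

```c
#include <stdint.h>

/* Sketch: what the sar 47 + cmp -2 guard above checks. */
static int lightud_ok(uint64_t v)
{
  return ((int64_t)v >> 47) == -2;  /* Bits 63..48 set, bit 47 clear. */
}
```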
| 2411 | |||
| 2412 | static void asm_ahuvload(ASMState *as, IRIns *ir) | ||
| 2413 | { | ||
| 2414 | lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
| 2415 | (LJ_DUALNUM && irt_isint(ir->t))); | ||
| 2416 | #if LJ_64 | ||
| 2417 | if (irt_islightud(ir->t)) { | ||
| 2418 | Reg dest = asm_load_lightud64(as, ir, 1); | ||
| 2419 | if (ra_hasreg(dest)) { | ||
| 2420 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 2421 | emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); | ||
| 2422 | } | ||
| 2423 | return; | ||
| 2424 | } else | ||
| 2425 | #endif | ||
| 2426 | if (ra_used(ir)) { | ||
| 2427 | RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; | ||
| 2428 | Reg dest = ra_dest(as, ir, allow); | ||
| 2429 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 2430 | emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM); | ||
| 2431 | } else { | ||
| 2432 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 2433 | } | ||
| 2434 | /* Always do the type check, even if the load result is unused. */ | ||
| 2435 | as->mrm.ofs += 4; | ||
| 2436 | asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); | ||
| 2437 | if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { | ||
| 2438 | lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); | ||
| 2439 | emit_u32(as, LJ_TISNUM); | ||
| 2440 | emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); | ||
| 2441 | } else { | ||
| 2442 | emit_i8(as, irt_toitype(ir->t)); | ||
| 2443 | emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); | ||
| 2444 | } | ||
| 2445 | } | ||
| 2446 | |||
| 2447 | static void asm_ahustore(ASMState *as, IRIns *ir) | ||
| 2448 | { | ||
| 2449 | if (irt_isnum(ir->t)) { | ||
| 2450 | Reg src = ra_alloc1(as, ir->op2, RSET_FPR); | ||
| 2451 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 2452 | emit_mrm(as, XO_MOVSDto, src, RID_MRM); | ||
| 2453 | #if LJ_64 | ||
| 2454 | } else if (irt_islightud(ir->t)) { | ||
| 2455 | Reg src = ra_alloc1(as, ir->op2, RSET_GPR); | ||
| 2456 | asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); | ||
| 2457 | emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); | ||
| 2458 | #endif | ||
| 2459 | } else { | ||
| 2460 | IRIns *irr = IR(ir->op2); | ||
| 2461 | RegSet allow = RSET_GPR; | ||
| 2462 | Reg src = RID_NONE; | ||
| 2463 | if (!irref_isk(ir->op2)) { | ||
| 2464 | src = ra_alloc1(as, ir->op2, allow); | ||
| 2465 | rset_clear(allow, src); | ||
| 2466 | } | ||
| 2467 | asm_fuseahuref(as, ir->op1, allow); | ||
| 2468 | if (ra_hasreg(src)) { | ||
| 2469 | emit_mrm(as, XO_MOVto, src, RID_MRM); | ||
| 2470 | } else if (!irt_ispri(irr->t)) { | ||
| 2471 | lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
| 2472 | emit_i32(as, irr->i); | ||
| 2473 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
| 2474 | } | ||
| 2475 | as->mrm.ofs += 4; | ||
| 2476 | emit_i32(as, (int32_t)irt_toitype(ir->t)); | ||
| 2477 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
| 2478 | } | ||
| 2479 | } | ||
| 2480 | |||
| 2481 | static void asm_sload(ASMState *as, IRIns *ir) | ||
| 2482 | { | ||
| 2483 | int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); | ||
| 2484 | IRType1 t = ir->t; | ||
| 2485 | Reg base; | ||
| 2486 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | ||
| 2487 | lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | ||
| 2488 | lua_assert(LJ_DUALNUM || | ||
| 2489 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); | ||
| 2490 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | ||
| 2491 | Reg left = ra_scratch(as, RSET_FPR); | ||
| 2492 | asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ | ||
| 2493 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 2494 | emit_rmro(as, XMM_MOVRM(as), left, base, ofs); | ||
| 2495 | t.irt = IRT_NUM; /* Continue with a regular number type check. */ | ||
| 2496 | #if LJ_64 | ||
| 2497 | } else if (irt_islightud(t)) { | ||
| 2498 | Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); | ||
| 2499 | if (ra_hasreg(dest)) { | ||
| 2500 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 2501 | emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); | ||
| 2502 | } | ||
| 2503 | return; | ||
| 2504 | #endif | ||
| 2505 | } else if (ra_used(ir)) { | ||
| 2506 | RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; | ||
| 2507 | Reg dest = ra_dest(as, ir, allow); | ||
| 2508 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 2509 | lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); | ||
| 2510 | if ((ir->op2 & IRSLOAD_CONVERT)) { | ||
| 2511 | t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ | ||
| 2512 | emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs); | ||
| 2513 | } else if (irt_isnum(t)) { | ||
| 2514 | emit_rmro(as, XMM_MOVRM(as), dest, base, ofs); | ||
| 2515 | } else { | ||
| 2516 | emit_rmro(as, XO_MOV, dest, base, ofs); | ||
| 2517 | } | ||
| 2518 | } else { | ||
| 2519 | if (!(ir->op2 & IRSLOAD_TYPECHECK)) | ||
| 2520 | return; /* No type check: avoid base alloc. */ | ||
| 2521 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 2522 | } | ||
| 2523 | if ((ir->op2 & IRSLOAD_TYPECHECK)) { | ||
| 2524 | /* Need type check, even if the load result is unused. */ | ||
| 2525 | asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); | ||
| 2526 | if (LJ_64 && irt_type(t) >= IRT_NUM) { | ||
| 2527 | lua_assert(irt_isinteger(t) || irt_isnum(t)); | ||
| 2528 | emit_u32(as, LJ_TISNUM); | ||
| 2529 | emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); | ||
| 2530 | } else { | ||
| 2531 | emit_i8(as, irt_toitype(t)); | ||
| 2532 | emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); | ||
| 2533 | } | ||
| 2534 | } | ||
| 2535 | } | ||
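As in asm_ahuvload, the guard condition encodes the NaN-tag ordering: a slot holds a number iff its tag word compares strictly below LJ_TISNUM as an unsigned value, so the exit fires on CC_AE; all other types must match irt_toitype(t) exactly, guarded with CC_NE. A one-line sketch of the number predicate, derived from the emitted compare:

```c
#include <stdint.h>

/* Sketch: fast-path condition asserted by the CC_AE guard. */
static int slot_isnum(uint32_t it)
{
  return it < (uint32_t)LJ_TISNUM;  /* Assumes LuaJIT's tag constant. */
}
```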
| 2536 | |||
| 2537 | /* -- Allocations --------------------------------------------------------- */ | 755 | /* -- Allocations --------------------------------------------------------- */ |
| 2538 | 756 | ||
| 757 | static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args); | ||
| 758 | static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci); | ||
| 759 | |||
| 2539 | static void asm_snew(ASMState *as, IRIns *ir) | 760 | static void asm_snew(ASMState *as, IRIns *ir) |
| 2540 | { | 761 | { |
| 2541 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new]; | 762 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new]; |
| @@ -2571,933 +792,6 @@ static void asm_tdup(ASMState *as, IRIns *ir) | |||
| 2571 | asm_gencall(as, ci, args); | 792 | asm_gencall(as, ci, args); |
| 2572 | } | 793 | } |
| 2573 | 794 | ||
| 2574 | #if LJ_HASFFI | ||
| 2575 | static void asm_cnew(ASMState *as, IRIns *ir) | ||
| 2576 | { | ||
| 2577 | CTState *cts = ctype_ctsG(J2G(as->J)); | ||
| 2578 | CTypeID typeid = (CTypeID)IR(ir->op1)->i; | ||
| 2579 | CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? | ||
| 2580 | lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i; | ||
| 2581 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | ||
| 2582 | IRRef args[2]; | ||
| 2583 | lua_assert(sz != CTSIZE_INVALID); | ||
| 2584 | |||
| 2585 | args[0] = ASMREF_L; /* lua_State *L */ | ||
| 2586 | args[1] = ASMREF_TMP1; /* MSize size */ | ||
| 2587 | as->gcsteps++; | ||
| 2588 | asm_setupresult(as, ir, ci); /* GCcdata * */ | ||
| 2589 | |||
| 2590 | /* Initialize immutable cdata object. */ | ||
| 2591 | if (ir->o == IR_CNEWI) { | ||
| 2592 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | ||
| 2593 | #if LJ_64 | ||
| 2594 | Reg r64 = sz == 8 ? REX_64 : 0; | ||
| 2595 | if (irref_isk(ir->op2)) { | ||
| 2596 | IRIns *irk = IR(ir->op2); | ||
| 2597 | uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : | ||
| 2598 | (uint64_t)(uint32_t)irk->i; | ||
| 2599 | if (sz == 4 || checki32((int64_t)k)) { | ||
| 2600 | emit_i32(as, (int32_t)k); | ||
| 2601 | emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); | ||
| 2602 | } else { | ||
| 2603 | emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); | ||
| 2604 | emit_loadu64(as, RID_ECX, k); | ||
| 2605 | } | ||
| 2606 | } else { | ||
| 2607 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 2608 | emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); | ||
| 2609 | } | ||
| 2610 | #else | ||
| 2611 | int32_t ofs = sizeof(GCcdata); | ||
| 2612 | if (LJ_HASFFI && sz == 8) { | ||
| 2613 | ofs += 4; ir++; | ||
| 2614 | lua_assert(ir->o == IR_HIOP); | ||
| 2615 | } | ||
| 2616 | do { | ||
| 2617 | if (irref_isk(ir->op2)) { | ||
| 2618 | emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); | ||
| 2619 | } else { | ||
| 2620 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 2621 | emit_movtomro(as, r, RID_RET, ofs); | ||
| 2622 | rset_clear(allow, r); | ||
| 2623 | } | ||
| 2624 | if (!LJ_HASFFI || ofs == sizeof(GCcdata)) break; | ||
| 2625 | ofs -= 4; ir--; | ||
| 2626 | } while (1); | ||
| 2627 | #endif | ||
| 2628 | lua_assert(sz == 4 || (sz == 8 && (LJ_64 || LJ_HASFFI))); | ||
| 2629 | } | ||
| 2630 | |||
| 2631 | /* Combine initialization of marked, gct and typeid. */ | ||
| 2632 | emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); | ||
| 2633 | emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, | ||
| 2634 | (int32_t)((~LJ_TCDATA<<8)+(typeid<<16))); | ||
| 2635 | emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); | ||
| 2636 | emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); | ||
| 2637 | |||
| 2638 | asm_gencall(as, ci, args); | ||
| 2639 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); | ||
| 2640 | } | ||
| 2641 | #else | ||
| 2642 | #define asm_cnew(as, ir) ((void)0) | ||
| 2643 | #endif | ||
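The "combine initialization" step composes one 32 bit value in ECX so that marked (byte 0), gct (byte 1) and the 16 bit type id (bytes 2-3) are written with a single store. A sketch of the composed word, assuming LuaJIT's LJ_GC_WHITES and LJ_TCDATA constants are in scope:

```c
#include <stdint.h>

/* Sketch: the word stored at offsetof(GCcdata, marked). */
static uint32_t cdata_initword(uint8_t currentwhite, uint16_t id)
{
  return (uint32_t)(currentwhite & LJ_GC_WHITES)  /* marked: current white. */
       | ((uint32_t)(uint8_t)~LJ_TCDATA << 8)     /* gct: object type tag. */
       | ((uint32_t)id << 16);                    /* ctypeid. */
}
```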
| 2644 | |||
| 2645 | /* -- Write barriers ------------------------------------------------------ */ | ||
| 2646 | |||
| 2647 | static void asm_tbar(ASMState *as, IRIns *ir) | ||
| 2648 | { | ||
| 2649 | Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 2650 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); | ||
| 2651 | MCLabel l_end = emit_label(as); | ||
| 2652 | emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); | ||
| 2653 | emit_setgl(as, tab, gc.grayagain); | ||
| 2654 | emit_getgl(as, tmp, gc.grayagain); | ||
| 2655 | emit_i8(as, ~LJ_GC_BLACK); | ||
| 2656 | emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); | ||
| 2657 | emit_sjcc(as, CC_Z, l_end); | ||
| 2658 | emit_i8(as, LJ_GC_BLACK); | ||
| 2659 | emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); | ||
| 2660 | } | ||
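This inlines the backward barrier for tables; in C it corresponds roughly to the grayagain protocol from lj_gc.c (a sketch):

```c
/* Sketch: flip a black table back to gray and queue it for the GC. */
if (t->marked & LJ_GC_BLACK) {
  t->marked &= (uint8_t)~LJ_GC_BLACK;       /* Make it gray again. */
  setgcrefr(t->gclist, g->gc.grayagain);    /* Prepend to grayagain list. */
  setgcref(g->gc.grayagain, obj2gco(t));
}
```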
| 2661 | |||
| 2662 | static void asm_obar(ASMState *as, IRIns *ir) | ||
| 2663 | { | ||
| 2664 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; | ||
| 2665 | IRRef args[2]; | ||
| 2666 | MCLabel l_end; | ||
| 2667 | Reg obj; | ||
| 2668 | /* No need for other object barriers (yet). */ | ||
| 2669 | lua_assert(IR(ir->op1)->o == IR_UREFC); | ||
| 2670 | ra_evictset(as, RSET_SCRATCH); | ||
| 2671 | l_end = emit_label(as); | ||
| 2672 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
| 2673 | args[1] = ir->op1; /* TValue *tv */ | ||
| 2674 | asm_gencall(as, ci, args); | ||
| 2675 | emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); | ||
| 2676 | obj = IR(ir->op1)->r; | ||
| 2677 | emit_sjcc(as, CC_Z, l_end); | ||
| 2678 | emit_i8(as, LJ_GC_WHITES); | ||
| 2679 | if (irref_isk(ir->op2)) { | ||
| 2680 | GCobj *vp = ir_kgc(IR(ir->op2)); | ||
| 2681 | emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); | ||
| 2682 | } else { | ||
| 2683 | Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); | ||
| 2684 | emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); | ||
| 2685 | } | ||
| 2686 | emit_sjcc(as, CC_Z, l_end); | ||
| 2687 | emit_i8(as, LJ_GC_BLACK); | ||
| 2688 | emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, | ||
| 2689 | (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); | ||
| 2690 | } | ||
| 2691 | |||
| 2692 | /* -- FP/int arithmetic and logic operations ------------------------------ */ | ||
| 2693 | |||
| 2694 | /* Load reference onto x87 stack. Force a spill to memory if needed. */ | ||
| 2695 | static void asm_x87load(ASMState *as, IRRef ref) | ||
| 2696 | { | ||
| 2697 | IRIns *ir = IR(ref); | ||
| 2698 | if (ir->o == IR_KNUM) { | ||
| 2699 | cTValue *tv = ir_knum(ir); | ||
| 2700 | if (tvispzero(tv)) /* Use fldz only for +0. */ | ||
| 2701 | emit_x87op(as, XI_FLDZ); | ||
| 2702 | else if (tvispone(tv)) | ||
| 2703 | emit_x87op(as, XI_FLD1); | ||
| 2704 | else | ||
| 2705 | emit_rma(as, XO_FLDq, XOg_FLDq, tv); | ||
| 2706 | } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && | ||
| 2707 | !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { | ||
| 2708 | IRIns *iri = IR(ir->op1); | ||
| 2709 | emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); | ||
| 2710 | } else { | ||
| 2711 | emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); | ||
| 2712 | } | ||
| 2713 | } | ||
| 2714 | |||
| 2715 | /* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */ | ||
| 2716 | static int fpmjoin_pow(ASMState *as, IRIns *ir) | ||
| 2717 | { | ||
| 2718 | IRIns *irp = IR(ir->op1); | ||
| 2719 | if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { | ||
| 2720 | IRIns *irpp = IR(irp->op1); | ||
| 2721 | if (irpp == ir-2 && irpp->o == IR_FPMATH && | ||
| 2722 | irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { | ||
| 2723 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 2724 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); | ||
| 2725 | IRIns *irx; | ||
| 2726 | if (ra_hasreg(ir->r)) | ||
| 2727 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 2728 | ra_evictset(as, drop); | ||
| 2729 | ra_destreg(as, ir, RID_XMM0); | ||
| 2730 | emit_call(as, lj_vm_pow_sse); | ||
| 2731 | irx = IR(irpp->op1); | ||
| 2732 | if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1) | ||
| 2733 | irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */ | ||
| 2734 | ra_left(as, RID_XMM0, irpp->op1); | ||
| 2735 | ra_left(as, RID_XMM1, irp->op2); | ||
| 2736 | return 1; | ||
| 2737 | } | ||
| 2738 | } | ||
| 2739 | return 0; | ||
| 2740 | } | ||
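The rejoin simply inverts the identity the SPLIT pass applied: when the intermediate MUL and LOG2 results are otherwise unused, the chain folds back into one lj_vm_pow_sse call. A sketch of the identity (valid for x > 0):

```c
#include <math.h>

/* Sketch: what the EXP2(MUL(LOG2(x), y)) chain computes. */
static double pow_via_exp2(double x, double y)
{
  return exp2(log2(x) * y);   /* == pow(x, y) for x > 0. */
}
```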
| 2741 | |||
| 2742 | static void asm_fpmath(ASMState *as, IRIns *ir) | ||
| 2743 | { | ||
| 2744 | IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER; | ||
| 2745 | if (fpm == IRFPM_SQRT) { | ||
| 2746 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
| 2747 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
| 2748 | emit_mrm(as, XO_SQRTSD, dest, left); | ||
| 2749 | } else if (fpm <= IRFPM_TRUNC) { | ||
| 2750 | if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ | ||
| 2751 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
| 2752 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
| 2753 | /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. | ||
| 2754 | ** Let's pretend it's a 3-byte opcode, and compensate afterwards. | ||
| 2755 | ** This is atrocious, but the alternatives are much worse. | ||
| 2756 | */ | ||
| 2757 | /* Round down/up/trunc == 1001/1010/1011. */ | ||
| 2758 | emit_i8(as, 0x09 + fpm); | ||
| 2759 | emit_mrm(as, XO_ROUNDSD, dest, left); | ||
| 2760 | if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { | ||
| 2761 | as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ | ||
| 2762 | } | ||
| 2763 | *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ | ||
| 2764 | } else { /* Call helper functions for SSE2 variant. */ | ||
| 2765 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 2766 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); | ||
| 2767 | if (ra_hasreg(ir->r)) | ||
| 2768 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 2769 | ra_evictset(as, drop); | ||
| 2770 | ra_destreg(as, ir, RID_XMM0); | ||
| 2771 | emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : | ||
| 2772 | fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); | ||
| 2773 | ra_left(as, RID_XMM0, ir->op1); | ||
| 2774 | } | ||
| 2775 | } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) { | ||
| 2776 | /* Rejoined to pow(). */ | ||
| 2777 | } else { /* Handle x87 ops. */ | ||
| 2778 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
| 2779 | Reg dest = ir->r; | ||
| 2780 | if (ra_hasreg(dest)) { | ||
| 2781 | ra_free(as, dest); | ||
| 2782 | ra_modified(as, dest); | ||
| 2783 | emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs); | ||
| 2784 | } | ||
| 2785 | emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); | ||
| 2786 | switch (fpm) { /* st0 = lj_vm_*(st0) */ | ||
| 2787 | case IRFPM_EXP: emit_call(as, lj_vm_exp); break; | ||
| 2788 | case IRFPM_EXP2: emit_call(as, lj_vm_exp2); break; | ||
| 2789 | case IRFPM_SIN: emit_x87op(as, XI_FSIN); break; | ||
| 2790 | case IRFPM_COS: emit_x87op(as, XI_FCOS); break; | ||
| 2791 | case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break; | ||
| 2792 | case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10: | ||
| 2793 | /* Note: the use of fyl2xp1 would be pointless here. When computing | ||
| 2794 | ** log(1.0+eps) the precision is already lost after 1.0 is added. | ||
| 2795 | ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense. | ||
| 2796 | */ | ||
| 2797 | emit_x87op(as, XI_FYL2X); break; | ||
| 2798 | case IRFPM_OTHER: | ||
| 2799 | switch (ir->o) { | ||
| 2800 | case IR_ATAN2: | ||
| 2801 | emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break; | ||
| 2802 | case IR_LDEXP: | ||
| 2803 | emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break; | ||
| 2804 | default: lua_assert(0); break; | ||
| 2805 | } | ||
| 2806 | break; | ||
| 2807 | default: lua_assert(0); break; | ||
| 2808 | } | ||
| 2809 | asm_x87load(as, ir->op1); | ||
| 2810 | switch (fpm) { | ||
| 2811 | case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break; | ||
| 2812 | case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break; | ||
| 2813 | case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break; | ||
| 2814 | case IRFPM_OTHER: | ||
| 2815 | if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2); | ||
| 2816 | break; | ||
| 2817 | default: break; | ||
| 2818 | } | ||
| 2819 | } | ||
| 2820 | } | ||
| 2821 | |||
| 2822 | static void asm_fppowi(ASMState *as, IRIns *ir) | ||
| 2823 | { | ||
| 2824 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 2825 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); | ||
| 2826 | if (ra_hasreg(ir->r)) | ||
| 2827 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 2828 | ra_evictset(as, drop); | ||
| 2829 | ra_destreg(as, ir, RID_XMM0); | ||
| 2830 | emit_call(as, lj_vm_powi_sse); | ||
| 2831 | ra_left(as, RID_XMM0, ir->op1); | ||
| 2832 | ra_left(as, RID_EAX, ir->op2); | ||
| 2833 | } | ||
| 2834 | |||
| 2835 | #if LJ_64 && LJ_HASFFI | ||
| 2836 | static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id) | ||
| 2837 | { | ||
| 2838 | const CCallInfo *ci = &lj_ir_callinfo[id]; | ||
| 2839 | IRRef args[2]; | ||
| 2840 | args[0] = ir->op1; | ||
| 2841 | args[1] = ir->op2; | ||
| 2842 | asm_setupresult(as, ir, ci); | ||
| 2843 | asm_gencall(as, ci, args); | ||
| 2844 | } | ||
| 2845 | #endif | ||
| 2846 | |||
| 2847 | /* Find out whether swapping operands might be beneficial. */ | ||
| 2848 | static int swapops(ASMState *as, IRIns *ir) | ||
| 2849 | { | ||
| 2850 | IRIns *irl = IR(ir->op1); | ||
| 2851 | IRIns *irr = IR(ir->op2); | ||
| 2852 | lua_assert(ra_noreg(irr->r)); | ||
| 2853 | if (!irm_iscomm(lj_ir_mode[ir->o])) | ||
| 2854 | return 0; /* Can't swap non-commutative operations. */ | ||
| 2855 | if (irref_isk(ir->op2)) | ||
| 2856 | return 0; /* Don't swap constants to the left. */ | ||
| 2857 | if (ra_hasreg(irl->r)) | ||
| 2858 | return 1; /* Swap if left already has a register. */ | ||
| 2859 | if (ra_samehint(ir->r, irr->r)) | ||
| 2860 | return 1; /* Swap if dest and right have matching hints. */ | ||
| 2861 | if (as->curins > as->loopref) { /* In variant part? */ | ||
| 2862 | if (ir->op2 < as->loopref && !irt_isphi(irr->t)) | ||
| 2863 | return 0; /* Keep invariants on the right. */ | ||
| 2864 | if (ir->op1 < as->loopref && !irt_isphi(irl->t)) | ||
| 2865 | return 1; /* Swap invariants to the right. */ | ||
| 2866 | } | ||
| 2867 | if (opisfusableload(irl->o)) | ||
| 2868 | return 1; /* Swap fusable loads to the right. */ | ||
| 2869 | return 0; /* Otherwise don't swap. */ | ||
| 2870 | } | ||
| 2871 | |||
| 2872 | static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) | ||
| 2873 | { | ||
| 2874 | IRRef lref = ir->op1; | ||
| 2875 | IRRef rref = ir->op2; | ||
| 2876 | RegSet allow = RSET_FPR; | ||
| 2877 | Reg dest; | ||
| 2878 | Reg right = IR(rref)->r; | ||
| 2879 | if (ra_hasreg(right)) { | ||
| 2880 | rset_clear(allow, right); | ||
| 2881 | ra_noweak(as, right); | ||
| 2882 | } | ||
| 2883 | dest = ra_dest(as, ir, allow); | ||
| 2884 | if (lref == rref) { | ||
| 2885 | right = dest; | ||
| 2886 | } else if (ra_noreg(right)) { | ||
| 2887 | if (swapops(as, ir)) { | ||
| 2888 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 2889 | } | ||
| 2890 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
| 2891 | } | ||
| 2892 | emit_mrm(as, xo, dest, right); | ||
| 2893 | ra_left(as, dest, lref); | ||
| 2894 | } | ||
| 2895 | |||
| 2896 | static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) | ||
| 2897 | { | ||
| 2898 | IRRef lref = ir->op1; | ||
| 2899 | IRRef rref = ir->op2; | ||
| 2900 | RegSet allow = RSET_GPR; | ||
| 2901 | Reg dest, right; | ||
| 2902 | int32_t k = 0; | ||
| 2903 | if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ | ||
| 2904 | as->flagmcp = NULL; | ||
| 2905 | as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2; | ||
| 2906 | } | ||
| 2907 | right = IR(rref)->r; | ||
| 2908 | if (ra_hasreg(right)) { | ||
| 2909 | rset_clear(allow, right); | ||
| 2910 | ra_noweak(as, right); | ||
| 2911 | } | ||
| 2912 | dest = ra_dest(as, ir, allow); | ||
| 2913 | if (lref == rref) { | ||
| 2914 | right = dest; | ||
| 2915 | } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { | ||
| 2916 | if (swapops(as, ir)) { | ||
| 2917 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 2918 | } | ||
| 2919 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
| 2920 | } | ||
| 2921 | if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ | ||
| 2922 | asm_guardcc(as, CC_O); | ||
| 2923 | if (xa != XOg_X_IMUL) { | ||
| 2924 | if (ra_hasreg(right)) | ||
| 2925 | emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); | ||
| 2926 | else | ||
| 2927 | emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); | ||
| 2928 | } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ | ||
| 2929 | emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); | ||
| 2930 | } else { /* IMUL r, r, k. */ | ||
| 2931 | /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ | ||
| 2932 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 2933 | x86Op xo; | ||
| 2934 | if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; | ||
| 2935 | } else { emit_i32(as, k); xo = XO_IMULi; } | ||
| 2936 | emit_mrm(as, xo, REX_64IR(ir, dest), left); | ||
| 2937 | return; | ||
| 2938 | } | ||
| 2939 | ra_left(as, dest, lref); | ||
| 2940 | } | ||
| 2941 | |||
| 2942 | /* LEA is really a 4-operand ADD with an independent destination register, | ||
| 2943 | ** up to two source registers and an immediate. One register can be scaled | ||
| 2944 | ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several | ||
| 2945 | ** instructions. | ||
| 2946 | ** | ||
| 2947 | ** Currently only a few common cases are supported: | ||
| 2948 | ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated | ||
| 2949 | ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b | ||
| 2950 | ** - Right ADD fusion: y = a+(b+k) | ||
| 2951 | ** The omitted variants have already been reduced by FOLD. | ||
| 2952 | ** | ||
| 2953 | ** There are more fusion opportunities, like gathering shifts or joining | ||
| 2954 | ** common references. But these are probably not worth the trouble, since | ||
| 2955 | ** array indexing is not decomposed and already makes use of all fields | ||
| 2956 | ** of the ModRM operand. | ||
| 2957 | */ | ||
| 2958 | static int asm_lea(ASMState *as, IRIns *ir) | ||
| 2959 | { | ||
| 2960 | IRIns *irl = IR(ir->op1); | ||
| 2961 | IRIns *irr = IR(ir->op2); | ||
| 2962 | RegSet allow = RSET_GPR; | ||
| 2963 | Reg dest; | ||
| 2964 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
| 2965 | as->mrm.scale = XM_SCALE1; | ||
| 2966 | as->mrm.ofs = 0; | ||
| 2967 | if (ra_hasreg(irl->r)) { | ||
| 2968 | rset_clear(allow, irl->r); | ||
| 2969 | ra_noweak(as, irl->r); | ||
| 2970 | as->mrm.base = irl->r; | ||
| 2971 | if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { | ||
| 2972 | /* The PHI renaming logic does a better job in some cases. */ | ||
| 2973 | if (ra_hasreg(ir->r) && | ||
| 2974 | ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || | ||
| 2975 | (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) | ||
| 2976 | return 0; | ||
| 2977 | if (irref_isk(ir->op2)) { | ||
| 2978 | as->mrm.ofs = irr->i; | ||
| 2979 | } else { | ||
| 2980 | rset_clear(allow, irr->r); | ||
| 2981 | ra_noweak(as, irr->r); | ||
| 2982 | as->mrm.idx = irr->r; | ||
| 2983 | } | ||
| 2984 | } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && | ||
| 2985 | irref_isk(irr->op2)) { | ||
| 2986 | Reg idx = ra_alloc1(as, irr->op1, allow); | ||
| 2987 | rset_clear(allow, idx); | ||
| 2988 | as->mrm.idx = (uint8_t)idx; | ||
| 2989 | as->mrm.ofs = IR(irr->op2)->i; | ||
| 2990 | } else { | ||
| 2991 | return 0; | ||
| 2992 | } | ||
| 2993 | } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && | ||
| 2994 | (irref_isk(ir->op2) || irref_isk(irl->op2))) { | ||
| 2995 | Reg idx, base = ra_alloc1(as, irl->op1, allow); | ||
| 2996 | rset_clear(allow, base); | ||
| 2997 | as->mrm.base = (uint8_t)base; | ||
| 2998 | if (irref_isk(ir->op2)) { | ||
| 2999 | as->mrm.ofs = irr->i; | ||
| 3000 | idx = ra_alloc1(as, irl->op2, allow); | ||
| 3001 | } else { | ||
| 3002 | as->mrm.ofs = IR(irl->op2)->i; | ||
| 3003 | idx = ra_alloc1(as, ir->op2, allow); | ||
| 3004 | } | ||
| 3005 | rset_clear(allow, idx); | ||
| 3006 | as->mrm.idx = (uint8_t)idx; | ||
| 3007 | } else { | ||
| 3008 | return 0; | ||
| 3009 | } | ||
| 3010 | dest = ra_dest(as, ir, allow); | ||
| 3011 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 3012 | return 1; /* Success. */ | ||
| 3013 | } | ||
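For orientation, a C model of the address expression one LEA folds, with no flags clobbered (a sketch; the scale is limited to 1, 2, 4 or 8, i.e. XM_SCALE1..XM_SCALE8):

```c
#include <stdint.h>

/* Sketch: the computation a single lea performs. */
static uintptr_t lea_model(uintptr_t base, uintptr_t idx, int scale,
                           int32_t ofs)
{
  return base + idx * (uintptr_t)scale + (uintptr_t)(intptr_t)ofs;
}
```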
| 3014 | |||
| 3015 | static void asm_add(ASMState *as, IRIns *ir) | ||
| 3016 | { | ||
| 3017 | if (irt_isnum(ir->t)) | ||
| 3018 | asm_fparith(as, ir, XO_ADDSD); | ||
| 3019 | else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || | ||
| 3020 | irt_is64(ir->t) || !asm_lea(as, ir)) | ||
| 3021 | asm_intarith(as, ir, XOg_ADD); | ||
| 3022 | } | ||
| 3023 | |||
| 3024 | static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) | ||
| 3025 | { | ||
| 3026 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 3027 | emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); | ||
| 3028 | ra_left(as, dest, ir->op1); | ||
| 3029 | } | ||
| 3030 | |||
| 3031 | static void asm_min_max(ASMState *as, IRIns *ir, int cc) | ||
| 3032 | { | ||
| 3033 | Reg right, dest = ra_dest(as, ir, RSET_GPR); | ||
| 3034 | IRRef lref = ir->op1, rref = ir->op2; | ||
| 3035 | if (irref_isk(rref)) { lref = rref; rref = ir->op1; } | ||
| 3036 | right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); | ||
| 3037 | emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); | ||
| 3038 | emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); | ||
| 3039 | ra_left(as, dest, lref); | ||
| 3040 | } | ||
| 3041 | |||
| 3042 | static void asm_bitswap(ASMState *as, IRIns *ir) | ||
| 3043 | { | ||
| 3044 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 3045 | as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), | ||
| 3046 | REX_64IR(ir, dest), 0, 0, as->mcp, 1); | ||
| 3047 | ra_left(as, dest, ir->op1); | ||
| 3048 | } | ||
| 3049 | |||
| 3050 | static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) | ||
| 3051 | { | ||
| 3052 | IRRef rref = ir->op2; | ||
| 3053 | IRIns *irr = IR(rref); | ||
| 3054 | Reg dest; | ||
| 3055 | if (irref_isk(rref)) { /* Constant shifts. */ | ||
| 3056 | int shift; | ||
| 3057 | dest = ra_dest(as, ir, RSET_GPR); | ||
| 3058 | shift = irr->i & (irt_is64(ir->t) ? 63 : 31); | ||
| 3059 | switch (shift) { | ||
| 3060 | case 0: break; | ||
| 3061 | case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; | ||
| 3062 | default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; | ||
| 3063 | } | ||
| 3064 | } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ | ||
| 3065 | RegSet allow = rset_exclude(RSET_GPR, RID_ECX); | ||
| 3066 | Reg right = irr->r; | ||
| 3067 | if (ra_noreg(right)) { | ||
| 3068 | right = ra_allocref(as, rref, RID2RSET(RID_ECX)); | ||
| 3069 | } else if (right != RID_ECX) { | ||
| 3070 | rset_clear(allow, right); | ||
| 3071 | ra_scratch(as, RID2RSET(RID_ECX)); | ||
| 3072 | } | ||
| 3073 | dest = ra_dest(as, ir, allow); | ||
| 3074 | emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); | ||
| 3075 | if (right != RID_ECX) { | ||
| 3076 | ra_noweak(as, right); | ||
| 3077 | emit_rr(as, XO_MOV, RID_ECX, right); | ||
| 3078 | } | ||
| 3079 | } | ||
| 3080 | ra_left(as, dest, ir->op1); | ||
| 3081 | /* | ||
| 3082 | ** Note: avoid using the flags resulting from a shift or rotate! | ||
| 3083 | ** All of them cause a partial flag stall, except for r,1 shifts | ||
| 3084 | ** (but not rotates). And a shift count of 0 leaves the flags unmodified. | ||
| 3085 | */ | ||
| 3086 | } | ||
| 3087 | |||
| 3088 | /* -- Comparisons --------------------------------------------------------- */ | ||
| 3089 | |||
| 3090 | /* Virtual flags for unordered FP comparisons. */ | ||
| 3091 | #define VCC_U 0x1000 /* Unordered. */ | ||
| 3092 | #define VCC_P 0x2000 /* Needs extra CC_P branch. */ | ||
| 3093 | #define VCC_S 0x4000 /* Swap avoids CC_P branch. */ | ||
| 3094 | #define VCC_PS (VCC_P|VCC_S) | ||
| 3095 | |||
| 3096 | /* Map of comparisons to flags. ORDER IR. */ | ||
| 3097 | #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) | ||
| 3098 | static const uint16_t asm_compmap[IR_ABC+1] = { | ||
| 3099 | /* signed non-eq unsigned flags */ | ||
| 3100 | /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), | ||
| 3101 | /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), | ||
| 3102 | /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), | ||
| 3103 | /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), | ||
| 3104 | /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), | ||
| 3105 | /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), | ||
| 3106 | /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), | ||
| 3107 | /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), | ||
| 3108 | /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), | ||
| 3109 | /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), | ||
| 3110 | /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ | ||
| 3111 | }; | ||
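Each packed entry decodes into three condition codes plus the virtual flags; a sketch of the unpacking implied by the COMPFLAGS layout above:

```c
#include <stdint.h>

/* Sketch: unpack one asm_compmap entry. */
static void comp_decode(uint16_t m, uint32_t *cc, uint32_t *ccne,
                        uint32_t *ccu, uint32_t *vcc)
{
  *cc   =  m       & 0xf;    /* Signed condition (column 1). */
  *ccu  = (m >> 4) & 0xf;    /* Unsigned/FP condition (column 3). */
  *ccne = (m >> 8) & 0xf;    /* Non-eq variant (column 2). */
  *vcc  =  m & 0xf000u;      /* VCC_* virtual flags (column 4). */
}
```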
| 3112 | |||
| 3113 | /* FP and integer comparisons. */ | ||
| 3114 | static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc) | ||
| 3115 | { | ||
| 3116 | if (irt_isnum(ir->t)) { | ||
| 3117 | IRRef lref = ir->op1; | ||
| 3118 | IRRef rref = ir->op2; | ||
| 3119 | Reg left, right; | ||
| 3120 | MCLabel l_around; | ||
| 3121 | /* | ||
| 3122 | ** An extra CC_P branch is required to preserve ordered/unordered | ||
| 3123 | ** semantics for FP comparisons. This can be avoided by swapping | ||
| 3124 | ** the operands and inverting the condition (except for EQ and UNE). | ||
| 3125 | ** So always try to swap if possible. | ||
| 3126 | ** | ||
| 3127 | ** Another option would be to swap operands to achieve better memory | ||
| 3128 | ** operand fusion. But it's unlikely that this outweighs the cost | ||
| 3129 | ** of the extra branches. | ||
| 3130 | */ | ||
| 3131 | if (cc & VCC_S) { /* Swap? */ | ||
| 3132 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 3133 | cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ | ||
| 3134 | } | ||
| 3135 | left = ra_alloc1(as, lref, RSET_FPR); | ||
| 3136 | right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); | ||
| 3137 | l_around = emit_label(as); | ||
| 3138 | asm_guardcc(as, cc >> 4); | ||
| 3139 | if (cc & VCC_P) { /* Extra CC_P branch required? */ | ||
| 3140 | if (!(cc & VCC_U)) { | ||
| 3141 | asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ | ||
| 3142 | } else if (l_around != as->invmcp) { | ||
| 3143 | emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ | ||
| 3144 | } else { | ||
| 3145 | /* Patched to mcloop by asm_loop_fixup. */ | ||
| 3146 | as->loopinv = 2; | ||
| 3147 | if (as->realign) | ||
| 3148 | emit_sjcc(as, CC_P, as->mcp); | ||
| 3149 | else | ||
| 3150 | emit_jcc(as, CC_P, as->mcp); | ||
| 3151 | } | ||
| 3152 | } | ||
| 3153 | emit_mrm(as, XO_UCOMISD, left, right); | ||
| 3154 | } else { | ||
| 3155 | IRRef lref = ir->op1, rref = ir->op2; | ||
| 3156 | IROp leftop = (IROp)(IR(lref)->o); | ||
| 3157 | Reg r64 = REX_64IR(ir, 0); | ||
| 3158 | int32_t imm = 0; | ||
| 3159 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isaddr(ir->t)); | ||
| 3160 | /* Swap constants (only for ABC) and fusable loads to the right. */ | ||
| 3161 | if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { | ||
| 3162 | if ((cc & 0xc) == 0xc) cc ^= 3; /* L <-> G, LE <-> GE */ | ||
| 3163 | else if ((cc & 0xa) == 0x2) cc ^= 5; /* A <-> B, AE <-> BE */ | ||
| 3164 | lref = ir->op2; rref = ir->op1; | ||
| 3165 | } | ||
| 3166 | if (asm_isk32(as, rref, &imm)) { | ||
| 3167 | IRIns *irl = IR(lref); | ||
| 3168 | /* Check whether we can use test ins. Not for unsigned, since CF=0. */ | ||
| 3169 | int usetest = (imm == 0 && (cc & 0xa) != 0x2); | ||
| 3170 | if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { | ||
| 3171 | /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ | ||
| 3172 | Reg right, left = RID_NONE; | ||
| 3173 | RegSet allow = RSET_GPR; | ||
| 3174 | if (!asm_isk32(as, irl->op2, &imm)) { | ||
| 3175 | left = ra_alloc1(as, irl->op2, allow); | ||
| 3176 | rset_clear(allow, left); | ||
| 3177 | } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */ | ||
| 3178 | IRIns *irll = IR(irl->op1); | ||
| 3179 | if (opisfusableload((IROp)irll->o) && | ||
| 3180 | (irt_isi8(irll->t) || irt_isu8(irll->t))) { | ||
| 3181 | IRType1 origt = irll->t; /* Temporarily flip types. */ | ||
| 3182 | irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; | ||
| 3183 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
| 3184 | right = asm_fuseload(as, irl->op1, RSET_GPR); | ||
| 3185 | as->curins++; | ||
| 3186 | irll->t = origt; | ||
| 3187 | if (right != RID_MRM) goto test_nofuse; | ||
| 3188 | /* Fusion succeeded, emit test byte mrm, imm8. */ | ||
| 3189 | asm_guardcc(as, cc); | ||
| 3190 | emit_i8(as, (imm & 0xff)); | ||
| 3191 | emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); | ||
| 3192 | return; | ||
| 3193 | } | ||
| 3194 | } | ||
| 3195 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
| 3196 | right = asm_fuseload(as, irl->op1, allow); | ||
| 3197 | as->curins++; /* Undo the above. */ | ||
| 3198 | test_nofuse: | ||
| 3199 | asm_guardcc(as, cc); | ||
| 3200 | if (ra_noreg(left)) { | ||
| 3201 | emit_i32(as, imm); | ||
| 3202 | emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); | ||
| 3203 | } else { | ||
| 3204 | emit_mrm(as, XO_TEST, r64 + left, right); | ||
| 3205 | } | ||
| 3206 | } else { | ||
| 3207 | Reg left; | ||
| 3208 | if (opisfusableload((IROp)irl->o) && | ||
| 3209 | ((irt_isu8(irl->t) && checku8(imm)) || | ||
| 3210 | ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || | ||
| 3211 | (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { | ||
| 3212 | /* Only the IRT_INT case is fused by asm_fuseload. | ||
| 3213 | ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads | ||
| 3214 | ** are handled here. | ||
| 3215 | ** Note that cmp word [mem], imm16 should not be generated, | ||
| 3216 | ** since it has a length-changing prefix. Compares of a word | ||
| 3217 | ** against a sign-extended imm8 are ok, however. | ||
| 3218 | */ | ||
| 3219 | IRType1 origt = irl->t; /* Temporarily flip types. */ | ||
| 3220 | irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; | ||
| 3221 | left = asm_fuseload(as, lref, RSET_GPR); | ||
| 3222 | irl->t = origt; | ||
| 3223 | if (left == RID_MRM) { /* Fusion succeeded? */ | ||
| 3224 | asm_guardcc(as, cc); | ||
| 3225 | emit_i8(as, imm); | ||
| 3226 | emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? | ||
| 3227 | XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); | ||
| 3228 | return; | ||
| 3229 | } /* Otherwise handle register case as usual. */ | ||
| 3230 | } else { | ||
| 3231 | left = asm_fuseload(as, lref, RSET_GPR); | ||
| 3232 | } | ||
| 3233 | asm_guardcc(as, cc); | ||
| 3234 | if (usetest && left != RID_MRM) { | ||
| 3235 | /* Use test r,r instead of cmp r,0. */ | ||
| 3236 | emit_rr(as, XO_TEST, r64 + left, left); | ||
| 3237 | if (irl+1 == ir) /* Referencing previous ins? */ | ||
| 3238 | as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ | ||
| 3239 | } else { | ||
| 3240 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); | ||
| 3241 | } | ||
| 3242 | } | ||
| 3243 | } else { | ||
| 3244 | Reg left = ra_alloc1(as, lref, RSET_GPR); | ||
| 3245 | Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left)); | ||
| 3246 | asm_guardcc(as, cc); | ||
| 3247 | emit_mrm(as, XO_CMP, r64 + left, right); | ||
| 3248 | } | ||
| 3249 | } | ||
| 3250 | } | ||
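The parity complication handled above stems from IEEE 754: with a NaN operand, UCOMISD reports "unordered" by setting ZF, PF and CF all at once, so a single condition code cannot separate e.g. "greater or equal" from "unordered" without also testing PF. A minimal sketch of the semantics the guard must preserve (all ordered comparisons are false for NaN):

#include <stdio.h>
#include <math.h>

int main(void)
{
  double a = NAN, b = 1.0;
  printf("a <  b : %d\n", a < b);      /* 0: unordered compares false. */
  printf("a >= b : %d\n", a >= b);     /* 0: so is the inverse... */
  printf("!(a < b): %d\n", !(a < b));  /* 1: naive inversion misfires. */
  return 0;
}

Swapping the operands turns the required condition into B/BE, which happens to be satisfied by the all-ones unordered flag pattern, which is why the VCC_S entries can drop the extra parity branch.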
| 3251 | |||
| 3252 | #if LJ_32 && LJ_HASFFI | ||
| 3253 | /* 64 bit integer comparisons in 32 bit mode. */ | ||
| 3254 | static void asm_comp_int64(ASMState *as, IRIns *ir) | ||
| 3255 | { | ||
| 3256 | uint32_t cc = asm_compmap[(ir-1)->o]; | ||
| 3257 | RegSet allow = RSET_GPR; | ||
| 3258 | Reg lefthi = RID_NONE, leftlo = RID_NONE; | ||
| 3259 | Reg righthi = RID_NONE, rightlo = RID_NONE; | ||
| 3260 | MCLabel l_around; | ||
| 3261 | x86ModRM mrm; | ||
| 3262 | |||
| 3263 | as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ | ||
| 3264 | |||
| 3265 | /* Allocate/fuse hiword operands. */ | ||
| 3266 | if (irref_isk(ir->op2)) { | ||
| 3267 | lefthi = asm_fuseload(as, ir->op1, allow); | ||
| 3268 | } else { | ||
| 3269 | lefthi = ra_alloc1(as, ir->op1, allow); | ||
| 3270 | righthi = asm_fuseload(as, ir->op2, allow); | ||
| 3271 | if (righthi == RID_MRM) { | ||
| 3272 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
| 3273 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
| 3274 | } else { | ||
| 3275 | rset_clear(allow, righthi); | ||
| 3276 | } | ||
| 3277 | } | ||
| 3278 | mrm = as->mrm; /* Save state for hiword instruction. */ | ||
| 3279 | |||
| 3280 | /* Allocate/fuse loword operands. */ | ||
| 3281 | if (irref_isk((ir-1)->op2)) { | ||
| 3282 | leftlo = asm_fuseload(as, (ir-1)->op1, allow); | ||
| 3283 | } else { | ||
| 3284 | leftlo = ra_alloc1(as, (ir-1)->op1, allow); | ||
| 3285 | rightlo = asm_fuseload(as, (ir-1)->op2, allow); | ||
| 3286 | if (rightlo == RID_MRM) { | ||
| 3287 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
| 3288 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
| 3289 | } else { | ||
| 3290 | rset_clear(allow, rightlo); | ||
| 3291 | } | ||
| 3292 | } | ||
| 3293 | |||
| 3294 | /* All register allocations must be performed _before_ this point. */ | ||
| 3295 | l_around = emit_label(as); | ||
| 3296 | as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ | ||
| 3297 | |||
| 3298 | /* Loword comparison and branch. */ | ||
| 3299 | asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ | ||
| 3300 | if (ra_noreg(rightlo)) { | ||
| 3301 | int32_t imm = IR((ir-1)->op2)->i; | ||
| 3302 | if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) | ||
| 3303 | emit_rr(as, XO_TEST, leftlo, leftlo); | ||
| 3304 | else | ||
| 3305 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); | ||
| 3306 | } else { | ||
| 3307 | emit_mrm(as, XO_CMP, leftlo, rightlo); | ||
| 3308 | } | ||
| 3309 | |||
| 3310 | /* Hiword comparison and branches. */ | ||
| 3311 | if ((cc & 15) != CC_NE) | ||
| 3312 | emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ | ||
| 3313 | if ((cc & 15) != CC_E) | ||
| 3314 | asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ | ||
| 3315 | as->mrm = mrm; /* Restore state. */ | ||
| 3316 | if (ra_noreg(righthi)) { | ||
| 3317 | int32_t imm = IR(ir->op2)->i; | ||
| 3318 | if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) | ||
| 3319 | emit_rr(as, XO_TEST, lefthi, lefthi); | ||
| 3320 | else | ||
| 3321 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); | ||
| 3322 | } else { | ||
| 3323 | emit_mrm(as, XO_CMP, lefthi, righthi); | ||
| 3324 | } | ||
| 3325 | } | ||
| 3326 | #endif | ||
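The split works because a 64 bit comparison decomposes cleanly: the hiwords decide with the original (possibly signed) condition unless they are equal, in which case the lowords decide with an unsigned condition, exactly the cc >> 8 and cc >> 4 nibbles used above. A minimal C model of the same decision:

#include <stdio.h>
#include <stdint.h>

static int lt64_split(int64_t a, int64_t b)
{
  int32_t ahi = (int32_t)(a >> 32), bhi = (int32_t)(b >> 32);
  uint32_t alo = (uint32_t)a, blo = (uint32_t)b;
  if (ahi != bhi) return ahi < bhi;  /* Hiword decides, signed. */
  return alo < blo;                  /* Loword tie-break, unsigned. */
}

int main(void)
{
  printf("%d\n", lt64_split(-1, 1));             /* 1 */
  printf("%d\n", lt64_split(0x100000000LL, 1));  /* 0 */
  return 0;
}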
| 3327 | |||
| 3328 | /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ | ||
| 3329 | |||
| 3330 | /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ | ||
| 3331 | static void asm_hiop(ASMState *as, IRIns *ir) | ||
| 3332 | { | ||
| 3333 | #if LJ_32 && LJ_HASFFI | ||
| 3334 | /* HIOP is marked as a store because it needs its own DCE logic. */ | ||
| 3335 | int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ | ||
| 3336 | if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; | ||
| 3337 | if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ | ||
| 3338 | if (usehi || uselo) { | ||
| 3339 | if (irt_isfp(ir->t)) | ||
| 3340 | asm_conv_fp_int64(as, ir); | ||
| 3341 | else | ||
| 3342 | asm_conv_int64_fp(as, ir); | ||
| 3343 | } | ||
| 3344 | as->curins--; /* Always skip the CONV. */ | ||
| 3345 | return; | ||
| 3346 | } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ | ||
| 3347 | asm_comp_int64(as, ir); | ||
| 3348 | return; | ||
| 3349 | } | ||
| 3350 | if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ | ||
| 3351 | switch ((ir-1)->o) { | ||
| 3352 | case IR_ADD: | ||
| 3353 | asm_intarith(as, ir, uselo ? XOg_ADC : XOg_ADD); | ||
| 3354 | break; | ||
| 3355 | case IR_SUB: | ||
| 3356 | asm_intarith(as, ir, uselo ? XOg_SBB : XOg_SUB); | ||
| 3357 | break; | ||
| 3358 | case IR_NEG: { | ||
| 3359 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 3360 | emit_rr(as, XO_GROUP3, XOg_NEG, dest); | ||
| 3361 | if (uselo) { | ||
| 3362 | emit_i8(as, 0); | ||
| 3363 | emit_rr(as, XO_ARITHi8, XOg_ADC, dest); | ||
| 3364 | } | ||
| 3365 | ra_left(as, dest, ir->op1); | ||
| 3366 | break; | ||
| 3367 | } | ||
| 3368 | case IR_CALLN: | ||
| 3369 | case IR_CALLXS: | ||
| 3370 | ra_destreg(as, ir, RID_RETHI); | ||
| 3371 | if (!uselo) | ||
| 3372 | ra_allocref(as, ir->op1, RID2RSET(RID_RET)); /* Mark call as used. */ | ||
| 3373 | break; | ||
| 3374 | case IR_CNEWI: | ||
| 3375 | /* Nothing to do here. Handled by CNEWI itself. */ | ||
| 3376 | break; | ||
| 3377 | default: lua_assert(0); break; | ||
| 3378 | } | ||
| 3379 | #else | ||
| 3380 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ | ||
| 3381 | #endif | ||
| 3382 | } | ||
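For the arithmetic cases the hiword op simply consumes the carry/borrow left behind by the loword op, which is why ADC/SBB are substituted for ADD/SUB whenever the loword result is actually computed. A minimal sketch of the ADD/ADC pairing:

#include <stdio.h>
#include <stdint.h>

static void add64_split(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi,
                        uint32_t *rlo, uint32_t *rhi)
{
  uint32_t carry;
  *rlo = alo + blo;          /* ADD: sets the carry flag on wraparound. */
  carry = (*rlo < alo);
  *rhi = ahi + bhi + carry;  /* ADC: folds the carry into the hiword. */
}

int main(void)
{
  uint32_t lo, hi;
  add64_split(0xffffffffu, 0, 1, 0, &lo, &hi);
  printf("%08x%08x\n", hi, lo);  /* 0000000100000000 */
  return 0;
}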
| 3383 | |||
| 3384 | /* -- Stack handling ------------------------------------------------------ */ | ||
| 3385 | |||
| 3386 | /* Get extent of the stack for a snapshot. */ | ||
| 3387 | static BCReg asm_stack_extent(ASMState *as, SnapShot *snap, BCReg *ptopslot) | ||
| 3388 | { | ||
| 3389 | SnapEntry *map = &as->T->snapmap[snap->mapofs]; | ||
| 3390 | MSize n, nent = snap->nent; | ||
| 3391 | BCReg baseslot = 0, topslot = 0; | ||
| 3392 | /* Must check all frames to find topslot (outer can be larger than inner). */ | ||
| 3393 | for (n = 0; n < nent; n++) { | ||
| 3394 | SnapEntry sn = map[n]; | ||
| 3395 | if ((sn & SNAP_FRAME)) { | ||
| 3396 | IRIns *ir = IR(snap_ref(sn)); | ||
| 3397 | GCfunc *fn = ir_kfunc(ir); | ||
| 3398 | if (isluafunc(fn)) { | ||
| 3399 | BCReg s = snap_slot(sn); | ||
| 3400 | BCReg fs = s + funcproto(fn)->framesize; | ||
| 3401 | if (fs > topslot) topslot = fs; | ||
| 3402 | baseslot = s; | ||
| 3403 | } | ||
| 3404 | } | ||
| 3405 | } | ||
| 3406 | *ptopslot = topslot; | ||
| 3407 | return baseslot; | ||
| 3408 | } | ||
| 3409 | |||
| 3410 | /* Check Lua stack size for overflow. Use exit handler as fallback. */ | ||
| 3411 | static void asm_stack_check(ASMState *as, BCReg topslot, | ||
| 3412 | Reg pbase, RegSet allow, ExitNo exitno) | ||
| 3413 | { | ||
| 3414 | /* Try to get an unused temp. register, otherwise spill/restore eax. */ | ||
| 3415 | Reg r = allow ? rset_pickbot(allow) : RID_EAX; | ||
| 3416 | emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); | ||
| 3417 | if (allow == RSET_EMPTY) /* Restore temp. register. */ | ||
| 3418 | emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); | ||
| 3419 | else | ||
| 3420 | ra_modified(as, r); | ||
| 3421 | emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); | ||
| 3422 | if (ra_hasreg(pbase) && pbase != r) | ||
| 3423 | emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); | ||
| 3424 | else | ||
| 3425 | emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, | ||
| 3426 | ptr2addr(&J2G(as->J)->jit_base)); | ||
| 3427 | emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); | ||
| 3428 | emit_getgl(as, r, jit_L); | ||
| 3429 | if (allow == RSET_EMPTY) /* Spill temp. register. */ | ||
| 3430 | emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); | ||
| 3431 | } | ||
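Read in execution order (the assembler emits backwards), the sequence loads L->maxstack via jit_L, subtracts the current base and takes the CC_B exit when fewer than topslot 8-byte slots remain. A minimal sketch of the predicate, with a hypothetical stand-in for the lua_State fields:

#include <stdio.h>
#include <stdint.h>

typedef struct { char *base, *maxstack; } LSketch;  /* Hypothetical stand-in. */

static int stack_check_fails(const LSketch *L, uint32_t topslot)
{
  /* CC_B: unsigned below on (maxstack - base) vs. 8*topslot. */
  return (uint32_t)(uintptr_t)(L->maxstack - L->base) < 8u*topslot;
}

int main(void)
{
  char stack[8*16];
  LSketch L = { stack, stack + sizeof(stack) };
  printf("%d %d\n", stack_check_fails(&L, 8), stack_check_fails(&L, 32));  /* 0 1 */
  return 0;
}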
| 3432 | |||
| 3433 | /* Restore Lua stack from on-trace state. */ | ||
| 3434 | static void asm_stack_restore(ASMState *as, SnapShot *snap) | ||
| 3435 | { | ||
| 3436 | SnapEntry *map = &as->T->snapmap[snap->mapofs]; | ||
| 3437 | MSize n, nent = snap->nent; | ||
| 3438 | SnapEntry *flinks = map + nent + snap->depth; | ||
| 3439 | /* Store the value of all modified slots to the Lua stack. */ | ||
| 3440 | for (n = 0; n < nent; n++) { | ||
| 3441 | SnapEntry sn = map[n]; | ||
| 3442 | BCReg s = snap_slot(sn); | ||
| 3443 | int32_t ofs = 8*((int32_t)s-1); | ||
| 3444 | IRRef ref = snap_ref(sn); | ||
| 3445 | IRIns *ir = IR(ref); | ||
| 3446 | if ((sn & SNAP_NORESTORE)) | ||
| 3447 | continue; | ||
| 3448 | if (irt_isnum(ir->t)) { | ||
| 3449 | Reg src = ra_alloc1(as, ref, RSET_FPR); | ||
| 3450 | emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); | ||
| 3451 | } else { | ||
| 3452 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
| 3453 | (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
| 3454 | if (!irref_isk(ref)) { | ||
| 3455 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); | ||
| 3456 | emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); | ||
| 3457 | } else if (!irt_ispri(ir->t)) { | ||
| 3458 | emit_movmroi(as, RID_BASE, ofs, ir->i); | ||
| 3459 | } | ||
| 3460 | if ((sn & (SNAP_CONT|SNAP_FRAME))) { | ||
| 3461 | if (s != 0) /* Do not overwrite link to previous frame. */ | ||
| 3462 | emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); | ||
| 3463 | } else { | ||
| 3464 | if (!(LJ_64 && irt_islightud(ir->t))) | ||
| 3465 | emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); | ||
| 3466 | } | ||
| 3467 | } | ||
| 3468 | checkmclim(as); | ||
| 3469 | } | ||
| 3470 | lua_assert(map + nent == flinks); | ||
| 3471 | } | ||
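The offset arithmetic above assumes the usual 8-byte slot layout: slot s sits at 8*(s-1) bytes from BASE, with the value in the low word and the type tag (or frame link) stored at ofs+4. A quick check of the mapping:

#include <stdio.h>

int main(void)
{
  unsigned s;
  for (s = 1; s <= 3; s++)
    printf("slot %u: value at ofs %d, tag/link at ofs %d\n",
           s, 8*((int)s - 1), 8*((int)s - 1) + 4);
  return 0;
}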
| 3472 | |||
| 3473 | /* -- GC handling --------------------------------------------------------- */ | ||
| 3474 | |||
| 3475 | /* Check GC threshold and do one or more GC steps. */ | ||
| 3476 | static void asm_gc_check(ASMState *as) | ||
| 3477 | { | ||
| 3478 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; | ||
| 3479 | IRRef args[2]; | ||
| 3480 | MCLabel l_end; | ||
| 3481 | Reg tmp; | ||
| 3482 | ra_evictset(as, RSET_SCRATCH); | ||
| 3483 | l_end = emit_label(as); | ||
| 3484 | /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ | ||
| 3485 | asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ | ||
| 3486 | emit_rr(as, XO_TEST, RID_RET, RID_RET); | ||
| 3487 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
| 3488 | args[1] = ASMREF_TMP2; /* MSize steps */ | ||
| 3489 | asm_gencall(as, ci, args); | ||
| 3490 | tmp = ra_releasetmp(as, ASMREF_TMP1); | ||
| 3491 | emit_loada(as, tmp, J2G(as->J)); | ||
| 3492 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), (int32_t)as->gcsteps); | ||
| 3493 | /* Jump around GC step if GC total < GC threshold. */ | ||
| 3494 | emit_sjcc(as, CC_B, l_end); | ||
| 3495 | emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); | ||
| 3496 | emit_getgl(as, tmp, gc.total); | ||
| 3497 | as->gcsteps = 0; | ||
| 3498 | checkmclim(as); | ||
| 3499 | } | ||
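Since emission runs bottom-up, the forward-order logic is: compare gc.total against gc.threshold, jump around the call if still below, otherwise call lj_gc_step_jit with the accumulated step count and exit the trace on a nonzero result. A minimal sketch of that control flow, with hypothetical stand-ins for the GC state:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t total, threshold; } GCSketch;  /* Hypothetical. */

static int gc_step_sketch(GCSketch *g, uint32_t steps)
{
  (void)steps; g->total = 0;
  puts("gc step ran");
  return 0;  /* 0 = not in GCSatomic/GCSfinalize. */
}

static void gc_check_sketch(GCSketch *g, uint32_t steps)
{
  if (g->total >= g->threshold) {   /* Inverse of the CC_B jump to l_end. */
    if (gc_step_sketch(g, steps))   /* asm_gencall + TEST of the result. */
      puts("exit trace");           /* asm_guardcc(CC_NE). */
  }
}

int main(void)
{
  GCSketch g = { 100, 64 };
  gc_check_sketch(&g, 1);
  return 0;
}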
| 3500 | |||
| 3501 | /* -- PHI and loop handling ----------------------------------------------- */ | 795 | /* -- PHI and loop handling ----------------------------------------------- */ |
| 3502 | 796 | ||
| 3503 | /* Break a PHI cycle by renaming to a free register (evict if needed). */ | 797 | /* Break a PHI cycle by renaming to a free register (evict if needed). */ |
| @@ -3657,50 +951,8 @@ static void asm_phi(ASMState *as, IRIns *ir) | |||
| 3657 | } | 951 | } |
| 3658 | } | 952 | } |
| 3659 | 953 | ||
| 3660 | /* Fixup the loop branch. */ | 954 | static void asm_gc_check(ASMState *as); |
| 3661 | static void asm_loop_fixup(ASMState *as) | 955 | static void asm_loop_fixup(ASMState *as); |
| 3662 | { | ||
| 3663 | MCode *p = as->mctop; | ||
| 3664 | MCode *target = as->mcp; | ||
| 3665 | if (as->realign) { /* Realigned loops use short jumps. */ | ||
| 3666 | as->realign = NULL; /* Stop another retry. */ | ||
| 3667 | lua_assert(((intptr_t)target & 15) == 0); | ||
| 3668 | if (as->loopinv) { /* Inverted loop branch? */ | ||
| 3669 | p -= 5; | ||
| 3670 | p[0] = XI_JMP; | ||
| 3671 | lua_assert(target - p >= -128); | ||
| 3672 | p[-1] = (MCode)(target - p); /* Patch sjcc. */ | ||
| 3673 | if (as->loopinv == 2) | ||
| 3674 | p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ | ||
| 3675 | } else { | ||
| 3676 | lua_assert(target - p >= -128); | ||
| 3677 | p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ | ||
| 3678 | p[-2] = XI_JMPs; | ||
| 3679 | } | ||
| 3680 | } else { | ||
| 3681 | MCode *newloop; | ||
| 3682 | p[-5] = XI_JMP; | ||
| 3683 | if (as->loopinv) { /* Inverted loop branch? */ | ||
| 3684 | /* asm_guardcc already inverted the jcc and patched the jmp. */ | ||
| 3685 | p -= 5; | ||
| 3686 | newloop = target+4; | ||
| 3687 | *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ | ||
| 3688 | if (as->loopinv == 2) { | ||
| 3689 | *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ | ||
| 3690 | newloop = target+8; | ||
| 3691 | } | ||
| 3692 | } else { /* Otherwise just patch jmp. */ | ||
| 3693 | *(int32_t *)(p-4) = (int32_t)(target - p); | ||
| 3694 | newloop = target+3; | ||
| 3695 | } | ||
| 3696 | /* Realign small loops and shorten the loop branch. */ | ||
| 3697 | if (newloop >= p - 128) { | ||
| 3698 | as->realign = newloop; /* Force a retry and remember alignment. */ | ||
| 3699 | as->curins = as->stopins; /* Abort asm_trace now. */ | ||
| 3700 | as->T->nins = as->orignins; /* Remove any added renames. */ | ||
| 3701 | } | ||
| 3702 | } | ||
| 3703 | } | ||
| 3704 | 956 | ||
| 3705 | /* Middle part of a loop. */ | 957 | /* Middle part of a loop. */ |
| 3706 | static void asm_loop(ASMState *as) | 958 | static void asm_loop(ASMState *as) |
| @@ -3720,29 +972,15 @@ static void asm_loop(ASMState *as) | |||
| 3720 | if (!as->realign) RA_DBG_FLUSH(); | 972 | if (!as->realign) RA_DBG_FLUSH(); |
| 3721 | } | 973 | } |
| 3722 | 974 | ||
| 3723 | /* -- Head of trace ------------------------------------------------------- */ | 975 | /* -- Target-specific assembler ------------------------------------------- */ |
| 3724 | 976 | ||
| 3725 | /* Calculate stack adjustment. */ | 977 | #if LJ_TARGET_X86ORX64 |
| 3726 | static int32_t asm_stack_adjust(ASMState *as) | 978 | #include "lj_asm_x86.h" |
| 3727 | { | 979 | #else |
| 3728 | if (as->evenspill <= SPS_FIXED) | 980 | #error "Missing instruction emitter for target CPU" |
| 3729 | return 0; | 981 | #endif |
| 3730 | return sps_scale((as->evenspill - SPS_FIXED + 3) & ~3); | ||
| 3731 | } | ||
| 3732 | 982 | ||
| 3733 | /* Coalesce BASE register for a root trace. */ | 983 | /* -- Head of trace ------------------------------------------------------- */ |
| 3734 | static void asm_head_root_base(ASMState *as) | ||
| 3735 | { | ||
| 3736 | IRIns *ir = IR(REF_BASE); | ||
| 3737 | Reg r = ir->r; | ||
| 3738 | if (ra_hasreg(r)) { | ||
| 3739 | ra_free(as, r); | ||
| 3740 | if (rset_test(as->modset, r)) | ||
| 3741 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
| 3742 | if (r != RID_BASE) | ||
| 3743 | emit_rr(as, XO_MOV, r, RID_BASE); | ||
| 3744 | } | ||
| 3745 | } | ||
| 3746 | 984 | ||
| 3747 | /* Head of a root trace. */ | 985 | /* Head of a root trace. */ |
| 3748 | static void asm_head_root(ASMState *as) | 986 | static void asm_head_root(ASMState *as) |
| @@ -3757,27 +995,6 @@ static void asm_head_root(ASMState *as) | |||
| 3757 | as->T->topslot = gcref(as->T->startpt)->pt.framesize; | 995 | as->T->topslot = gcref(as->T->startpt)->pt.framesize; |
| 3758 | } | 996 | } |
| 3759 | 997 | ||
| 3760 | /* Coalesce or reload BASE register for a side trace. */ | ||
| 3761 | static RegSet asm_head_side_base(ASMState *as, Reg pbase, RegSet allow) | ||
| 3762 | { | ||
| 3763 | IRIns *ir = IR(REF_BASE); | ||
| 3764 | Reg r = ir->r; | ||
| 3765 | if (ra_hasreg(r)) { | ||
| 3766 | ra_free(as, r); | ||
| 3767 | if (rset_test(as->modset, r)) | ||
| 3768 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
| 3769 | if (pbase == r) { | ||
| 3770 | rset_clear(allow, r); /* Mark same BASE register as coalesced. */ | ||
| 3771 | } else if (ra_hasreg(pbase) && rset_test(as->freeset, pbase)) { | ||
| 3772 | rset_clear(allow, pbase); | ||
| 3773 | emit_rr(as, XO_MOV, r, pbase); /* Move from coalesced parent register. */ | ||
| 3774 | } else { | ||
| 3775 | emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ | ||
| 3776 | } | ||
| 3777 | } | ||
| 3778 | return allow; | ||
| 3779 | } | ||
| 3780 | |||
| 3781 | /* Head of a side trace. | 998 | /* Head of a side trace. |
| 3782 | ** | 999 | ** |
| 3783 | ** The current simplistic algorithm requires that all slots inherited | 1000 | ** The current simplistic algorithm requires that all slots inherited |
| @@ -3976,276 +1193,8 @@ static void asm_tail_link(ASMState *as) | |||
| 3976 | asm_stack_check(as, as->topslot, RID_BASE, as->freeset & RSET_GPR, snapno); | 1193 | asm_stack_check(as, as->topslot, RID_BASE, as->freeset & RSET_GPR, snapno); |
| 3977 | } | 1194 | } |
| 3978 | 1195 | ||
| 3979 | /* Fixup the tail code. */ | ||
| 3980 | static void asm_tail_fixup(ASMState *as, TraceNo lnk) | ||
| 3981 | { | ||
| 3982 | /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ | ||
| 3983 | MCode *p = as->mctop; | ||
| 3984 | MCode *target, *q; | ||
| 3985 | int32_t spadj = as->T->spadjust; | ||
| 3986 | if (spadj == 0) { | ||
| 3987 | p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); | ||
| 3988 | } else { | ||
| 3989 | MCode *p1; | ||
| 3990 | /* Patch stack adjustment. */ | ||
| 3991 | if (checki8(spadj)) { | ||
| 3992 | p -= 3; | ||
| 3993 | p1 = p-6; | ||
| 3994 | *p1 = (MCode)spadj; | ||
| 3995 | } else { | ||
| 3996 | p1 = p-9; | ||
| 3997 | *(int32_t *)p1 = spadj; | ||
| 3998 | } | ||
| 3999 | if ((as->flags & JIT_F_LEA_AGU)) { | ||
| 4000 | #if LJ_64 | ||
| 4001 | p1[-4] = 0x48; | ||
| 4002 | #endif | ||
| 4003 | p1[-3] = (MCode)XI_LEA; | ||
| 4004 | p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); | ||
| 4005 | p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
| 4006 | } else { | ||
| 4007 | #if LJ_64 | ||
| 4008 | p1[-3] = 0x48; | ||
| 4009 | #endif | ||
| 4010 | p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); | ||
| 4011 | p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); | ||
| 4012 | } | ||
| 4013 | } | ||
| 4014 | /* Patch exit branch. */ | ||
| 4015 | target = lnk == TRACE_INTERP ? (MCode *)lj_vm_exit_interp : | ||
| 4016 | traceref(as->J, lnk)->mcode; | ||
| 4017 | *(int32_t *)(p-4) = jmprel(p, target); | ||
| 4018 | p[-5] = XI_JMP; | ||
| 4019 | /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ | ||
| 4020 | for (q = as->mctop-1; q >= p; q--) | ||
| 4021 | *q = XI_NOP; | ||
| 4022 | as->mctop = p; | ||
| 4023 | } | ||
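The patching relies on the fixed shape of a near jmp: opcode E9 followed by a rel32 displacement measured from the end of the instruction, which is what jmprel() computes. A minimal sketch of the displacement arithmetic:

#include <stdio.h>
#include <stdint.h>

static int32_t jmprel_sketch(uintptr_t p_end, uintptr_t target)
{
  return (int32_t)(target - p_end);  /* Relative to the next instruction. */
}

int main(void)
{
  /* A 5 byte jmp at 0x1000..0x1004 targeting 0x2000. */
  printf("0x%x\n", (unsigned)jmprel_sketch(0x1005, 0x2000));  /* 0xffb */
  return 0;
}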
| 4024 | |||
| 4025 | /* Prepare tail of code. */ | ||
| 4026 | static void asm_tail_prep(ASMState *as) | ||
| 4027 | { | ||
| 4028 | MCode *p = as->mctop; | ||
| 4029 | /* Realign and leave room for backwards loop branch or exit branch. */ | ||
| 4030 | if (as->realign) { | ||
| 4031 | int i = ((int)(intptr_t)as->realign) & 15; | ||
| 4032 | /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ | ||
| 4033 | while (i-- > 0) | ||
| 4034 | *--p = XI_NOP; | ||
| 4035 | as->mctop = p; | ||
| 4036 | p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ | ||
| 4037 | } else { | ||
| 4038 | p -= 5; /* Space for exit branch (near jmp). */ | ||
| 4039 | } | ||
| 4040 | if (as->loopref) { | ||
| 4041 | as->invmcp = as->mcp = p; | ||
| 4042 | } else { | ||
| 4043 | /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ | ||
| 4044 | as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); | ||
| 4045 | as->invmcp = NULL; | ||
| 4046 | } | ||
| 4047 | } | ||
| 4048 | |||
| 4049 | /* -- Instruction dispatch ------------------------------------------------ */ | ||
| 4050 | |||
| 4051 | /* Assemble a single instruction. */ | ||
| 4052 | static void asm_ir(ASMState *as, IRIns *ir) | ||
| 4053 | { | ||
| 4054 | switch ((IROp)ir->o) { | ||
| 4055 | /* Miscellaneous ops. */ | ||
| 4056 | case IR_LOOP: asm_loop(as); break; | ||
| 4057 | case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; | ||
| 4058 | case IR_USE: | ||
| 4059 | ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; | ||
| 4060 | case IR_PHI: asm_phi(as, ir); break; | ||
| 4061 | case IR_HIOP: asm_hiop(as, ir); break; | ||
| 4062 | |||
| 4063 | /* Guarded assertions. */ | ||
| 4064 | case IR_LT: case IR_GE: case IR_LE: case IR_GT: | ||
| 4065 | case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: | ||
| 4066 | case IR_EQ: case IR_NE: case IR_ABC: | ||
| 4067 | asm_comp(as, ir, asm_compmap[ir->o]); | ||
| 4068 | break; | ||
| 4069 | |||
| 4070 | case IR_RETF: asm_retf(as, ir); break; | ||
| 4071 | |||
| 4072 | /* Bit ops. */ | ||
| 4073 | case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break; | ||
| 4074 | case IR_BSWAP: asm_bitswap(as, ir); break; | ||
| 4075 | |||
| 4076 | case IR_BAND: asm_intarith(as, ir, XOg_AND); break; | ||
| 4077 | case IR_BOR: asm_intarith(as, ir, XOg_OR); break; | ||
| 4078 | case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break; | ||
| 4079 | |||
| 4080 | case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break; | ||
| 4081 | case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break; | ||
| 4082 | case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break; | ||
| 4083 | case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break; | ||
| 4084 | case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break; | ||
| 4085 | |||
| 4086 | /* Arithmetic ops. */ | ||
| 4087 | case IR_ADD: asm_add(as, ir); break; | ||
| 4088 | case IR_SUB: | ||
| 4089 | if (irt_isnum(ir->t)) | ||
| 4090 | asm_fparith(as, ir, XO_SUBSD); | ||
| 4091 | else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ | ||
| 4092 | asm_intarith(as, ir, XOg_SUB); | ||
| 4093 | break; | ||
| 4094 | case IR_MUL: | ||
| 4095 | if (irt_isnum(ir->t)) | ||
| 4096 | asm_fparith(as, ir, XO_MULSD); | ||
| 4097 | else | ||
| 4098 | asm_intarith(as, ir, XOg_X_IMUL); | ||
| 4099 | break; | ||
| 4100 | case IR_DIV: | ||
| 4101 | #if LJ_64 && LJ_HASFFI | ||
| 4102 | if (!irt_isnum(ir->t)) | ||
| 4103 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : | ||
| 4104 | IRCALL_lj_carith_divu64); | ||
| 4105 | else | ||
| 4106 | #endif | ||
| 4107 | asm_fparith(as, ir, XO_DIVSD); | ||
| 4108 | break; | ||
| 4109 | case IR_MOD: | ||
| 4110 | #if LJ_64 && LJ_HASFFI | ||
| 4111 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : | ||
| 4112 | IRCALL_lj_carith_modu64); | ||
| 4113 | #else | ||
| 4114 | lua_assert(0); | ||
| 4115 | #endif | ||
| 4116 | break; | ||
| 4117 | |||
| 4118 | case IR_NEG: | ||
| 4119 | if (irt_isnum(ir->t)) | ||
| 4120 | asm_fparith(as, ir, XO_XORPS); | ||
| 4121 | else | ||
| 4122 | asm_neg_not(as, ir, XOg_NEG); | ||
| 4123 | break; | ||
| 4124 | case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break; | ||
| 4125 | |||
| 4126 | case IR_MIN: | ||
| 4127 | if (irt_isnum(ir->t)) | ||
| 4128 | asm_fparith(as, ir, XO_MINSD); | ||
| 4129 | else | ||
| 4130 | asm_min_max(as, ir, CC_G); | ||
| 4131 | break; | ||
| 4132 | case IR_MAX: | ||
| 4133 | if (irt_isnum(ir->t)) | ||
| 4134 | asm_fparith(as, ir, XO_MAXSD); | ||
| 4135 | else | ||
| 4136 | asm_min_max(as, ir, CC_L); | ||
| 4137 | break; | ||
| 4138 | |||
| 4139 | case IR_FPMATH: case IR_ATAN2: case IR_LDEXP: | ||
| 4140 | asm_fpmath(as, ir); | ||
| 4141 | break; | ||
| 4142 | case IR_POW: | ||
| 4143 | #if LJ_64 && LJ_HASFFI | ||
| 4144 | if (!irt_isnum(ir->t)) | ||
| 4145 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : | ||
| 4146 | IRCALL_lj_carith_powu64); | ||
| 4147 | else | ||
| 4148 | #endif | ||
| 4149 | asm_fppowi(as, ir); | ||
| 4150 | break; | ||
| 4151 | |||
| 4152 | /* Overflow-checking arithmetic ops. Note: don't use LEA here! */ | ||
| 4153 | case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break; | ||
| 4154 | case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break; | ||
| 4155 | case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break; | ||
| 4156 | |||
| 4157 | /* Memory references. */ | ||
| 4158 | case IR_AREF: asm_aref(as, ir); break; | ||
| 4159 | case IR_HREF: asm_href(as, ir); break; | ||
| 4160 | case IR_HREFK: asm_hrefk(as, ir); break; | ||
| 4161 | case IR_NEWREF: asm_newref(as, ir); break; | ||
| 4162 | case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; | ||
| 4163 | case IR_FREF: asm_fref(as, ir); break; | ||
| 4164 | case IR_STRREF: asm_strref(as, ir); break; | ||
| 4165 | |||
| 4166 | /* Loads and stores. */ | ||
| 4167 | case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: | ||
| 4168 | asm_ahuvload(as, ir); | ||
| 4169 | break; | ||
| 4170 | case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break; | ||
| 4171 | case IR_SLOAD: asm_sload(as, ir); break; | ||
| 4172 | |||
| 4173 | case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; | ||
| 4174 | case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break; | ||
| 4175 | |||
| 4176 | /* Allocations. */ | ||
| 4177 | case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; | ||
| 4178 | case IR_TNEW: asm_tnew(as, ir); break; | ||
| 4179 | case IR_TDUP: asm_tdup(as, ir); break; | ||
| 4180 | case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; | ||
| 4181 | |||
| 4182 | /* Write barriers. */ | ||
| 4183 | case IR_TBAR: asm_tbar(as, ir); break; | ||
| 4184 | case IR_OBAR: asm_obar(as, ir); break; | ||
| 4185 | |||
| 4186 | /* Type conversions. */ | ||
| 4187 | case IR_TOBIT: asm_tobit(as, ir); break; | ||
| 4188 | case IR_CONV: asm_conv(as, ir); break; | ||
| 4189 | case IR_TOSTR: asm_tostr(as, ir); break; | ||
| 4190 | case IR_STRTO: asm_strto(as, ir); break; | ||
| 4191 | |||
| 4192 | /* Calls. */ | ||
| 4193 | case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; | ||
| 4194 | case IR_CALLXS: asm_callx(as, ir); break; | ||
| 4195 | case IR_CARG: break; | ||
| 4196 | |||
| 4197 | default: | ||
| 4198 | setintV(&as->J->errinfo, ir->o); | ||
| 4199 | lj_trace_err_info(as->J, LJ_TRERR_NYIIR); | ||
| 4200 | break; | ||
| 4201 | } | ||
| 4202 | } | ||
| 4203 | |||
| 4204 | /* -- Trace setup --------------------------------------------------------- */ | 1196 | /* -- Trace setup --------------------------------------------------------- */ |
| 4205 | 1197 | ||
| 4206 | /* Ensure there are enough stack slots for call arguments. */ | ||
| 4207 | static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) | ||
| 4208 | { | ||
| 4209 | IRRef args[CCI_NARGS_MAX]; | ||
| 4210 | uint32_t nargs = (int)CCI_NARGS(ci); | ||
| 4211 | int nslots = 0; | ||
| 4212 | asm_collectargs(as, ir, ci, args); | ||
| 4213 | #if LJ_64 | ||
| 4214 | if (LJ_ABI_WIN) { | ||
| 4215 | nslots = (int)(nargs*2); /* Only matters for more than four args. */ | ||
| 4216 | } else { | ||
| 4217 | uint32_t i; | ||
| 4218 | int ngpr = 6, nfpr = 8; | ||
| 4219 | for (i = 0; i < nargs; i++) | ||
| 4220 | if (irt_isfp(IR(args[i])->t)) { | ||
| 4221 | if (nfpr > 0) nfpr--; else nslots += 2; | ||
| 4222 | } else { | ||
| 4223 | if (ngpr > 0) ngpr--; else nslots += 2; | ||
| 4224 | } | ||
| 4225 | } | ||
| 4226 | if (nslots > as->evenspill) /* Leave room for args in stack slots. */ | ||
| 4227 | as->evenspill = nslots; | ||
| 4228 | return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); | ||
| 4229 | #else | ||
| 4230 | if ((ci->flags & CCI_FASTCALL)) { | ||
| 4231 | lua_assert(nargs <= 2); | ||
| 4232 | } else { | ||
| 4233 | uint32_t i; | ||
| 4234 | for (i = 0; i < nargs; i++) | ||
| 4235 | nslots += irt_isnum(IR(args[i])->t) ? 2 : 1; | ||
| 4236 | if (nslots > as->evenspill) /* Leave room for args. */ | ||
| 4237 | as->evenspill = nslots; | ||
| 4238 | } | ||
| 4239 | return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); | ||
| 4240 | #endif | ||
| 4241 | } | ||
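On POSIX/x64 the accounting above reserves spill room only for arguments that overflow the six GPR and eight FPR argument registers; each overflowing argument costs two slot units (one 8 byte stack word). A minimal sketch of the same count:

#include <stdio.h>

static int stack_arg_slots(int ngprargs, int nfprargs)
{
  int nslots = 0;
  if (ngprargs > 6) nslots += 2*(ngprargs - 6);  /* Beyond the 6 GPRs. */
  if (nfprargs > 8) nslots += 2*(nfprargs - 8);  /* Beyond the 8 FPRs. */
  return nslots;
}

int main(void)
{
  printf("%d\n", stack_arg_slots(8, 0));  /* 4: two args spill to stack. */
  return 0;
}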
| 4242 | |||
| 4243 | /* Target-specific setup. */ | ||
| 4244 | static void asm_setup_target(ASMState *as) | ||
| 4245 | { | ||
| 4246 | asm_exitstub_setup(as, as->T->nsnap); | ||
| 4247 | } | ||
| 4248 | |||
| 4249 | /* Clear reg/sp for all instructions and add register hints. */ | 1198 | /* Clear reg/sp for all instructions and add register hints. */ |
| 4250 | static void asm_setup_regsp(ASMState *as) | 1199 | static void asm_setup_regsp(ASMState *as) |
| 4251 | { | 1200 | { |
| @@ -4497,34 +1446,6 @@ void lj_asm_trace(jit_State *J, GCtrace *T) | |||
| 4497 | VG_INVALIDATE(T->mcode, T->szmcode); | 1446 | VG_INVALIDATE(T->mcode, T->szmcode); |
| 4498 | } | 1447 | } |
| 4499 | 1448 | ||
| 4500 | /* Patch exit jumps of existing machine code to a new target. */ | ||
| 4501 | void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | ||
| 4502 | { | ||
| 4503 | MCode *p = T->mcode; | ||
| 4504 | MCode *mcarea = lj_mcode_patch(J, p, 0); | ||
| 4505 | MSize len = T->szmcode; | ||
| 4506 | MCode *px = exitstub_addr(J, exitno) - 6; | ||
| 4507 | MCode *pe = p+len-6; | ||
| 4508 | uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); | ||
| 4509 | if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) | ||
| 4510 | *(int32_t *)(p+len-4) = jmprel(p+len, target); | ||
| 4511 | /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ | ||
| 4512 | for (; p < pe; p++) | ||
| 4513 | if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { | ||
| 4514 | p += LJ_64 ? 11 : 10; | ||
| 4515 | break; | ||
| 4516 | } | ||
| 4517 | lua_assert(p < pe); | ||
| 4518 | for (; p < pe; p++) { | ||
| 4519 | if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { | ||
| 4520 | *(int32_t *)(p+2) = jmprel(p+6, target); | ||
| 4521 | p += 5; | ||
| 4522 | } | ||
| 4523 | } | ||
| 4524 | lj_mcode_patch(J, mcarea, 1); | ||
| 4525 | VG_INVALIDATE(T->mcode, T->szmcode); | ||
| 4526 | } | ||
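The magic compare above decodes as follows: on little-endian x86 the two code bytes 0F 8x load into a uint16_t as 0x8x0f, so masking with 0xf0ff and testing against 0x800f matches all sixteen jcc-near opcodes at once. A minimal sketch (host assumed little-endian, as on x86):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
  unsigned char code[2] = { 0x0f, 0x8c };  /* jl rel32 */
  uint16_t w;
  memcpy(&w, code, sizeof(w));             /* Little-endian load. */
  printf("%d\n", (w & 0xf0ff) == 0x800f);  /* 1: recognized as jcc near. */
  return 0;
}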
| 4527 | |||
| 4528 | #undef IR | 1449 | #undef IR |
| 4529 | 1450 | ||
| 4530 | #endif | 1451 | #endif |
diff --git a/src/lj_asm_x86.h b/src/lj_asm_x86.h new file mode 100644 index 00000000..fb46b3ad --- /dev/null +++ b/src/lj_asm_x86.h | |||
| @@ -0,0 +1,2668 @@ | |||
| 1 | /* | ||
| 2 | ** x86/x64 IR assembler (SSA IR -> machine code). | ||
| 3 | ** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h | ||
| 4 | */ | ||
| 5 | |||
| 6 | /* -- Guard handling ------------------------------------------------------ */ | ||
| 7 | |||
| 8 | /* Generate an exit stub group at the bottom of the reserved MCode memory. */ | ||
| 9 | static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) | ||
| 10 | { | ||
| 11 | ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff; | ||
| 12 | MCode *mxp = as->mcbot; | ||
| 13 | MCode *mxpstart = mxp; | ||
| 14 | if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop) | ||
| 15 | asm_mclimit(as); | ||
| 16 | /* Push low byte of exitno for each exit stub. */ | ||
| 17 | *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs; | ||
| 18 | for (i = 1; i < EXITSTUBS_PER_GROUP; i++) { | ||
| 19 | *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2); | ||
| 20 | *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i); | ||
| 21 | } | ||
| 22 | /* Push the high byte of the exitno for each exit stub group. */ | ||
| 23 | *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8); | ||
| 24 | /* Store DISPATCH at original stack slot 0. Account for the two push ops. */ | ||
| 25 | *mxp++ = XI_MOVmi; | ||
| 26 | *mxp++ = MODRM(XM_OFS8, 0, RID_ESP); | ||
| 27 | *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
| 28 | *mxp++ = 2*sizeof(void *); | ||
| 29 | *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4; | ||
| 30 | /* Jump to exit handler which fills in the ExitState. */ | ||
| 31 | *mxp++ = XI_JMP; mxp += 4; | ||
| 32 | *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); | ||
| 33 | /* Commit the code for this group (even if assembly fails later on). */ | ||
| 34 | lj_mcode_commitbot(as->J, mxp); | ||
| 35 | as->mcbot = mxp; | ||
| 36 | as->mclim = as->mcbot + MCLIM_REDZONE; | ||
| 37 | return mxpstart; | ||
| 38 | } | ||
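The group layout is 2 bytes for the first push, then 2+2 bytes (short jmp plus push) per remaining stub, so the displacement (2+2)*(EXITSTUBS_PER_GROUP-i)-2 makes every stub's short jump converge on the shared tail. A small check of that arithmetic (group size assumed; any value works):

#include <stdio.h>

#define N 32  /* EXITSTUBS_PER_GROUP, assumed value. */

int main(void)
{
  int i, tail = 2 + (2+2)*(N-1);          /* Offset of the shared tail. */
  for (i = 1; i < N; i++) {
    int jmp_end = 2 + (2+2)*(i-1) + 2;    /* Address after stub i's jmps. */
    int disp = (2+2)*(N-i) - 2;
    if (jmp_end + disp != tail) { puts("mismatch"); return 1; }
  }
  puts("all stubs reach the shared tail");
  return 0;
}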
| 39 | |||
| 40 | /* Setup all needed exit stubs. */ | ||
| 41 | static void asm_exitstub_setup(ASMState *as, ExitNo nexits) | ||
| 42 | { | ||
| 43 | ExitNo i; | ||
| 44 | if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) | ||
| 45 | lj_trace_err(as->J, LJ_TRERR_SNAPOV); | ||
| 46 | for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++) | ||
| 47 | if (as->J->exitstubgroup[i] == NULL) | ||
| 48 | as->J->exitstubgroup[i] = asm_exitstub_gen(as, i); | ||
| 49 | } | ||
| 50 | |||
| 51 | /* Emit conditional branch to exit for guard. | ||
| 52 | ** It's important to emit this *after* all registers have been allocated, | ||
| 53 | ** because rematerializations may invalidate the flags. | ||
| 54 | */ | ||
| 55 | static void asm_guardcc(ASMState *as, int cc) | ||
| 56 | { | ||
| 57 | MCode *target = exitstub_addr(as->J, as->snapno); | ||
| 58 | MCode *p = as->mcp; | ||
| 59 | if (LJ_UNLIKELY(p == as->invmcp)) { | ||
| 60 | as->loopinv = 1; | ||
| 61 | *(int32_t *)(p+1) = jmprel(p+5, target); | ||
| 62 | target = p; | ||
| 63 | cc ^= 1; | ||
| 64 | if (as->realign) { | ||
| 65 | emit_sjcc(as, cc, target); | ||
| 66 | return; | ||
| 67 | } | ||
| 68 | } | ||
| 69 | emit_jcc(as, cc, target); | ||
| 70 | } | ||
| 71 | |||
| 72 | /* -- Memory operand fusion ----------------------------------------------- */ | ||
| 73 | |||
| 74 | /* Limit linear search to this distance. Avoids O(n^2) behavior. */ | ||
| 75 | #define CONFLICT_SEARCH_LIM 31 | ||
| 76 | |||
| 77 | /* Check if a reference is a signed 32 bit constant. */ | ||
| 78 | static int asm_isk32(ASMState *as, IRRef ref, int32_t *k) | ||
| 79 | { | ||
| 80 | if (irref_isk(ref)) { | ||
| 81 | IRIns *ir = IR(ref); | ||
| 82 | if (ir->o != IR_KINT64) { | ||
| 83 | *k = ir->i; | ||
| 84 | return 1; | ||
| 85 | } else if (checki32((int64_t)ir_kint64(ir)->u64)) { | ||
| 86 | *k = (int32_t)ir_kint64(ir)->u64; | ||
| 87 | return 1; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | /* Check if there's no conflicting instruction between curins and ref. | ||
| 94 | ** Also avoid fusing loads if there are multiple references. | ||
| 95 | */ | ||
| 96 | static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload) | ||
| 97 | { | ||
| 98 | IRIns *ir = as->ir; | ||
| 99 | IRRef i = as->curins; | ||
| 100 | if (i > ref + CONFLICT_SEARCH_LIM) | ||
| 101 | return 0; /* Give up, ref is too far away. */ | ||
| 102 | while (--i > ref) { | ||
| 103 | if (ir[i].o == conflict) | ||
| 104 | return 0; /* Conflict found. */ | ||
| 105 | else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref)) | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | return 1; /* Ok, no conflict. */ | ||
| 109 | } | ||
| 110 | |||
| 111 | /* Fuse array base into memory operand. */ | ||
| 112 | static IRRef asm_fuseabase(ASMState *as, IRRef ref) | ||
| 113 | { | ||
| 114 | IRIns *irb = IR(ref); | ||
| 115 | as->mrm.ofs = 0; | ||
| 116 | if (irb->o == IR_FLOAD) { | ||
| 117 | IRIns *ira = IR(irb->op1); | ||
| 118 | lua_assert(irb->op2 == IRFL_TAB_ARRAY); | ||
| 119 | /* We can avoid the FLOAD of t->array for colocated arrays. */ | ||
| 120 | if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && | ||
| 121 | noconflict(as, irb->op1, IR_NEWREF, 1)) { | ||
| 122 | as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */ | ||
| 123 | return irb->op1; /* Table obj. */ | ||
| 124 | } | ||
| 125 | } else if (irb->o == IR_ADD && irref_isk(irb->op2)) { | ||
| 126 | /* Fuse base offset (vararg load). */ | ||
| 127 | as->mrm.ofs = IR(irb->op2)->i; | ||
| 128 | return irb->op1; | ||
| 129 | } | ||
| 130 | return ref; /* Otherwise use the given array base. */ | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Fuse array reference into memory operand. */ | ||
| 134 | static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) | ||
| 135 | { | ||
| 136 | IRIns *irx; | ||
| 137 | lua_assert(ir->o == IR_AREF); | ||
| 138 | as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); | ||
| 139 | irx = IR(ir->op2); | ||
| 140 | if (irref_isk(ir->op2)) { | ||
| 141 | as->mrm.ofs += 8*irx->i; | ||
| 142 | as->mrm.idx = RID_NONE; | ||
| 143 | } else { | ||
| 144 | rset_clear(allow, as->mrm.base); | ||
| 145 | as->mrm.scale = XM_SCALE8; | ||
| 146 | /* Fuse a constant ADD (e.g. t[i+1]) into the offset. | ||
| 147 | ** Doesn't help much without ABCelim, but reduces register pressure. | ||
| 148 | */ | ||
| 149 | if (!LJ_64 && /* Has bad effects with negative index on x64. */ | ||
| 150 | mayfuse(as, ir->op2) && ra_noreg(irx->r) && | ||
| 151 | irx->o == IR_ADD && irref_isk(irx->op2)) { | ||
| 152 | as->mrm.ofs += 8*IR(irx->op2)->i; | ||
| 153 | as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow); | ||
| 154 | } else { | ||
| 155 | as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow); | ||
| 156 | } | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 160 | /* Fuse array/hash/upvalue reference into memory operand. | ||
| 161 | ** Caveat: this may allocate GPRs for the base/idx registers. Be sure to | ||
| 162 | ** pass the final allow mask, excluding any GPRs used for other inputs. | ||
| 163 | ** In particular: 2-operand GPR instructions need to call ra_dest() first! | ||
| 164 | */ | ||
| 165 | static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) | ||
| 166 | { | ||
| 167 | IRIns *ir = IR(ref); | ||
| 168 | if (ra_noreg(ir->r)) { | ||
| 169 | switch ((IROp)ir->o) { | ||
| 170 | case IR_AREF: | ||
| 171 | if (mayfuse(as, ref)) { | ||
| 172 | asm_fusearef(as, ir, allow); | ||
| 173 | return; | ||
| 174 | } | ||
| 175 | break; | ||
| 176 | case IR_HREFK: | ||
| 177 | if (mayfuse(as, ref)) { | ||
| 178 | as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); | ||
| 179 | as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); | ||
| 180 | as->mrm.idx = RID_NONE; | ||
| 181 | return; | ||
| 182 | } | ||
| 183 | break; | ||
| 184 | case IR_UREFC: | ||
| 185 | if (irref_isk(ir->op1)) { | ||
| 186 | GCfunc *fn = ir_kfunc(IR(ir->op1)); | ||
| 187 | GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv; | ||
| 188 | as->mrm.ofs = ptr2addr(&uv->tv); | ||
| 189 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
| 190 | return; | ||
| 191 | } | ||
| 192 | break; | ||
| 193 | default: | ||
| 194 | lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO); | ||
| 195 | break; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); | ||
| 199 | as->mrm.ofs = 0; | ||
| 200 | as->mrm.idx = RID_NONE; | ||
| 201 | } | ||
| 202 | |||
| 203 | /* Fuse FLOAD/FREF reference into memory operand. */ | ||
| 204 | static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) | ||
| 205 | { | ||
| 206 | lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); | ||
| 207 | as->mrm.ofs = field_ofs[ir->op2]; | ||
| 208 | as->mrm.idx = RID_NONE; | ||
| 209 | if (irref_isk(ir->op1)) { | ||
| 210 | as->mrm.ofs += IR(ir->op1)->i; | ||
| 211 | as->mrm.base = RID_NONE; | ||
| 212 | } else { | ||
| 213 | as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | /* Fuse string reference into memory operand. */ | ||
| 218 | static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) | ||
| 219 | { | ||
| 220 | IRIns *irr; | ||
| 221 | lua_assert(ir->o == IR_STRREF); | ||
| 222 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
| 223 | as->mrm.scale = XM_SCALE1; | ||
| 224 | as->mrm.ofs = sizeof(GCstr); | ||
| 225 | if (irref_isk(ir->op1)) { | ||
| 226 | as->mrm.ofs += IR(ir->op1)->i; | ||
| 227 | } else { | ||
| 228 | Reg r = ra_alloc1(as, ir->op1, allow); | ||
| 229 | rset_clear(allow, r); | ||
| 230 | as->mrm.base = (uint8_t)r; | ||
| 231 | } | ||
| 232 | irr = IR(ir->op2); | ||
| 233 | if (irref_isk(ir->op2)) { | ||
| 234 | as->mrm.ofs += irr->i; | ||
| 235 | } else { | ||
| 236 | Reg r; | ||
| 237 | /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */ | ||
| 238 | if (!LJ_64 && /* Has bad effects with negative index on x64. */ | ||
| 239 | mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) { | ||
| 240 | as->mrm.ofs += IR(irr->op2)->i; | ||
| 241 | r = ra_alloc1(as, irr->op1, allow); | ||
| 242 | } else { | ||
| 243 | r = ra_alloc1(as, ir->op2, allow); | ||
| 244 | } | ||
| 245 | if (as->mrm.base == RID_NONE) | ||
| 246 | as->mrm.base = (uint8_t)r; | ||
| 247 | else | ||
| 248 | as->mrm.idx = (uint8_t)r; | ||
| 249 | } | ||
| 250 | } | ||
| 251 | |||
| 252 | static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow) | ||
| 253 | { | ||
| 254 | IRIns *ir = IR(ref); | ||
| 255 | as->mrm.idx = RID_NONE; | ||
| 256 | if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { | ||
| 257 | as->mrm.ofs = ir->i; | ||
| 258 | as->mrm.base = RID_NONE; | ||
| 259 | } else if (ir->o == IR_STRREF) { | ||
| 260 | asm_fusestrref(as, ir, allow); | ||
| 261 | } else { | ||
| 262 | as->mrm.ofs = 0; | ||
| 263 | if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) { | ||
| 264 | /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */ | ||
| 265 | IRIns *irx; | ||
| 266 | IRRef idx; | ||
| 267 | Reg r; | ||
| 268 | if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */ | ||
| 269 | ref = ir->op1; | ||
| 270 | ir = IR(ref); | ||
| 271 | if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r))) | ||
| 272 | goto noadd; | ||
| 273 | } | ||
| 274 | as->mrm.scale = XM_SCALE1; | ||
| 275 | idx = ir->op1; | ||
| 276 | ref = ir->op2; | ||
| 277 | irx = IR(idx); | ||
| 278 | if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */ | ||
| 279 | idx = ir->op2; | ||
| 280 | ref = ir->op1; | ||
| 281 | irx = IR(idx); | ||
| 282 | } | ||
| 283 | if (canfuse(as, irx) && ra_noreg(irx->r)) { | ||
| 284 | if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) { | ||
| 285 | /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */ | ||
| 286 | idx = irx->op1; | ||
| 287 | as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6); | ||
| 288 | } else if (irx->o == IR_ADD && irx->op1 == irx->op2) { | ||
| 289 | /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */ | ||
| 290 | idx = irx->op1; | ||
| 291 | as->mrm.scale = XM_SCALE2; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | r = ra_alloc1(as, idx, allow); | ||
| 295 | rset_clear(allow, r); | ||
| 296 | as->mrm.idx = (uint8_t)r; | ||
| 297 | } | ||
| 298 | noadd: | ||
| 299 | as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow); | ||
| 300 | } | ||
| 301 | } | ||
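The scale trick above maps a left-shift amount directly to the SIB encoding: the scale lives in the top two bits of the SIB byte, so idx<<b with b in 0..3 becomes the factor 1/2/4/8 via b<<6. A quick enumeration:

#include <stdio.h>

int main(void)
{
  int b;
  for (b = 0; b <= 3; b++)  /* IR(irx->op2)->i <= 3 above. */
    printf("idx<<%d -> factor %d, SIB scale bits 0x%02x\n", b, 1 << b, b << 6);
  return 0;
}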
| 302 | |||
| 303 | /* Fuse load into memory operand. */ | ||
| 304 | static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow) | ||
| 305 | { | ||
| 306 | IRIns *ir = IR(ref); | ||
| 307 | if (ra_hasreg(ir->r)) { | ||
| 308 | if (allow != RSET_EMPTY) { /* Fast path. */ | ||
| 309 | ra_noweak(as, ir->r); | ||
| 310 | return ir->r; | ||
| 311 | } | ||
| 312 | fusespill: | ||
| 313 | /* Force a spill if only memory operands are allowed (asm_x87load). */ | ||
| 314 | as->mrm.base = RID_ESP; | ||
| 315 | as->mrm.ofs = ra_spill(as, ir); | ||
| 316 | as->mrm.idx = RID_NONE; | ||
| 317 | return RID_MRM; | ||
| 318 | } | ||
| 319 | if (ir->o == IR_KNUM) { | ||
| 320 | RegSet avail = as->freeset & ~as->modset & RSET_FPR; | ||
| 321 | lua_assert(allow != RSET_EMPTY); | ||
| 322 | if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ | ||
| 323 | as->mrm.ofs = ptr2addr(ir_knum(ir)); | ||
| 324 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
| 325 | return RID_MRM; | ||
| 326 | } | ||
| 327 | } else if (mayfuse(as, ref)) { | ||
| 328 | RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR; | ||
| 329 | if (ir->o == IR_SLOAD) { | ||
| 330 | if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) && | ||
| 331 | noconflict(as, ref, IR_RETF, 0)) { | ||
| 332 | as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow); | ||
| 333 | as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0); | ||
| 334 | as->mrm.idx = RID_NONE; | ||
| 335 | return RID_MRM; | ||
| 336 | } | ||
| 337 | } else if (ir->o == IR_FLOAD) { | ||
| 338 | /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */ | ||
| 339 | if ((irt_isint(ir->t) || irt_isaddr(ir->t)) && | ||
| 340 | noconflict(as, ref, IR_FSTORE, 0)) { | ||
| 341 | asm_fusefref(as, ir, xallow); | ||
| 342 | return RID_MRM; | ||
| 343 | } | ||
| 344 | } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) { | ||
| 345 | if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) { | ||
| 346 | asm_fuseahuref(as, ir->op1, xallow); | ||
| 347 | return RID_MRM; | ||
| 348 | } | ||
| 349 | } else if (ir->o == IR_XLOAD) { | ||
| 350 | /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp). | ||
| 351 | ** Fusing unaligned memory operands is ok on x86 (except for SIMD types). | ||
| 352 | */ | ||
| 353 | if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) && | ||
| 354 | noconflict(as, ref, IR_XSTORE, 0)) { | ||
| 355 | asm_fusexref(as, ir->op1, xallow); | ||
| 356 | return RID_MRM; | ||
| 357 | } | ||
| 358 | } else if (ir->o == IR_VLOAD) { | ||
| 359 | asm_fuseahuref(as, ir->op1, xallow); | ||
| 360 | return RID_MRM; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | if (!(as->freeset & allow) && | ||
| 364 | (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref))) | ||
| 365 | goto fusespill; | ||
| 366 | return ra_allocref(as, ref, allow); | ||
| 367 | } | ||
| 368 | |||
| 369 | /* -- Calls --------------------------------------------------------------- */ | ||
| 370 | |||
| 371 | /* Generate a call to a C function. */ | ||
| 372 | static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | ||
| 373 | { | ||
| 374 | uint32_t n, nargs = CCI_NARGS(ci); | ||
| 375 | int32_t ofs = STACKARG_OFS; | ||
| 376 | uint32_t gprs = REGARG_GPRS; | ||
| 377 | #if LJ_64 | ||
| 378 | Reg fpr = REGARG_FIRSTFPR; | ||
| 379 | #endif | ||
| 380 | lua_assert(!(nargs > 2 && (ci->flags&CCI_FASTCALL))); /* Avoid stack adj. */ | ||
| 381 | if ((void *)ci->func) | ||
| 382 | emit_call(as, ci->func); | ||
| 383 | for (n = 0; n < nargs; n++) { /* Setup args. */ | ||
| 384 | IRRef ref = args[n]; | ||
| 385 | IRIns *ir = IR(ref); | ||
| 386 | Reg r; | ||
| 387 | #if LJ_64 && LJ_ABI_WIN | ||
| 388 | /* Windows/x64 argument registers are strictly positional. */ | ||
| 389 | r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31); | ||
| 390 | fpr++; gprs >>= 5; | ||
| 391 | #elif LJ_64 | ||
| 392 | /* POSIX/x64 argument registers are used in order of appearance. */ | ||
| 393 | if (irt_isfp(ir->t)) { | ||
| 394 | r = fpr <= REGARG_LASTFPR ? fpr : 0; fpr++; | ||
| 395 | } else { | ||
| 396 | r = gprs & 31; gprs >>= 5; | ||
| 397 | } | ||
| 398 | #else | ||
| 399 | if (irt_isfp(ir->t) || !(ci->flags & CCI_FASTCALL)) { | ||
| 400 | r = 0; | ||
| 401 | } else { | ||
| 402 | r = gprs & 31; gprs >>= 5; | ||
| 403 | } | ||
| 404 | #endif | ||
| 405 | if (r) { /* Argument is in a register. */ | ||
| 406 | if (r < RID_MAX_GPR && ref < ASMREF_TMP1) { | ||
| 407 | #if LJ_64 | ||
| 408 | if (ir->o == IR_KINT64) | ||
| 409 | emit_loadu64(as, r, ir_kint64(ir)->u64); | ||
| 410 | else | ||
| 411 | #endif | ||
| 412 | emit_loadi(as, r, ir->i); | ||
| 413 | } else { | ||
| 414 | lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ | ||
| 415 | if (ra_hasreg(ir->r)) { | ||
| 416 | ra_noweak(as, ir->r); | ||
| 417 | emit_movrr(as, ir, r, ir->r); | ||
| 418 | } else { | ||
| 419 | ra_allocref(as, ref, RID2RSET(r)); | ||
| 420 | } | ||
| 421 | } | ||
| 422 | } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ | ||
| 423 | lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */ | ||
| 424 | if (LJ_32 && (ofs & 4) && irref_isk(ref)) { | ||
| 425 | /* Split stores for unaligned FP consts. */ | ||
| 426 | emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); | ||
| 427 | emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi); | ||
| 428 | } else { | ||
| 429 | r = ra_alloc1(as, ref, RSET_FPR); | ||
| 430 | emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, | ||
| 431 | r, RID_ESP, ofs); | ||
| 432 | } | ||
| 433 | ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8; | ||
| 434 | } else { /* Non-FP argument is on stack. */ | ||
| 435 | if (LJ_32 && ref < ASMREF_TMP1) { | ||
| 436 | emit_movmroi(as, RID_ESP, ofs, ir->i); | ||
| 437 | } else { | ||
| 438 | r = ra_alloc1(as, ref, RSET_GPR); | ||
| 439 | emit_movtomro(as, REX_64IR(ir, r), RID_ESP, ofs); | ||
| 440 | } | ||
| 441 | ofs += sizeof(intptr_t); | ||
| 442 | } | ||
| 443 | } | ||
| 444 | } | ||
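The gprs walk above consumes a packed register list: REGARG_GPRS holds the argument GPR ids five bits apiece, and each "gprs & 31; gprs >>= 5" pops the next one in ABI order. A minimal sketch with made-up register ids:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* Hypothetical ids packed in call order, 5 bits per register. */
  uint32_t gprs = (7u << 0) | (6u << 5) | (2u << 10) | (1u << 15);
  while (gprs) {
    printf("next arg reg id: %u\n", gprs & 31);
    gprs >>= 5;
  }
  return 0;
}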
| 445 | |||
| 446 | /* Setup result reg/sp for call. Evict scratch regs. */ | ||
| 447 | static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | ||
| 448 | { | ||
| 449 | RegSet drop = RSET_SCRATCH; | ||
| 450 | if ((ci->flags & CCI_NOFPRCLOBBER)) | ||
| 451 | drop &= ~RSET_FPR; | ||
| 452 | if (ra_hasreg(ir->r)) | ||
| 453 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 454 | ra_evictset(as, drop); /* Evictions must be performed first. */ | ||
| 455 | if (ra_used(ir)) { | ||
| 456 | if (irt_isfp(ir->t)) { | ||
| 457 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
| 458 | #if LJ_64 | ||
| 459 | if ((ci->flags & CCI_CASTU64)) { | ||
| 460 | Reg dest = ir->r; | ||
| 461 | if (ra_hasreg(dest)) { | ||
| 462 | ra_free(as, dest); | ||
| 463 | ra_modified(as, dest); | ||
| 464 | emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */ | ||
| 465 | } else { | ||
| 466 | emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs); | ||
| 467 | } | ||
| 468 | } else { | ||
| 469 | ra_destreg(as, ir, RID_FPRET); | ||
| 470 | } | ||
| 471 | #else | ||
| 472 | /* Number result is in x87 st0 for x86 calling convention. */ | ||
| 473 | Reg dest = ir->r; | ||
| 474 | if (ra_hasreg(dest)) { | ||
| 475 | ra_free(as, dest); | ||
| 476 | ra_modified(as, dest); | ||
| 477 | emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, | ||
| 478 | dest, RID_ESP, ofs); | ||
| 479 | } | ||
| 480 | if ((ci->flags & CCI_CASTU64)) { | ||
| 481 | emit_movtomro(as, RID_RET, RID_ESP, ofs); | ||
| 482 | emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4); | ||
| 483 | } else { | ||
| 484 | emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, | ||
| 485 | irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); | ||
| 486 | } | ||
| 487 | #endif | ||
| 488 | } else { | ||
| 489 | lua_assert(!irt_ispri(ir->t)); | ||
| 490 | ra_destreg(as, ir, RID_RET); | ||
| 491 | } | ||
| 492 | } else if (LJ_32 && irt_isfp(ir->t)) { | ||
| 493 | emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */ | ||
| 494 | } | ||
| 495 | } | ||
| 496 | |||
| 497 | static void asm_call(ASMState *as, IRIns *ir) | ||
| 498 | { | ||
| 499 | IRRef args[CCI_NARGS_MAX]; | ||
| 500 | const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; | ||
| 501 | asm_collectargs(as, ir, ci, args); | ||
| 502 | asm_setupresult(as, ir, ci); | ||
| 503 | asm_gencall(as, ci, args); | ||
| 504 | } | ||
| 505 | |||
| 506 | static void asm_callx(ASMState *as, IRIns *ir) | ||
| 507 | { | ||
| 508 | IRRef args[CCI_NARGS_MAX]; | ||
| 509 | CCallInfo ci; | ||
| 510 | IRIns *irf; | ||
| 511 | ci.flags = asm_callx_flags(as, ir); | ||
| 512 | asm_collectargs(as, ir, &ci, args); | ||
| 513 | asm_setupresult(as, ir, &ci); | ||
| 514 | irf = IR(ir->op2); | ||
| 515 | if (LJ_32 && irref_isk(ir->op2)) { /* Call to constant address on x86. */ | ||
| 516 | ci.func = (ASMFunction)(void *)(uintptr_t)(uint32_t)irf->i; | ||
| 517 | } else { | ||
| 518 | /* Prefer a non-argument register or RID_RET for indirect calls. */ | ||
| 519 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH)|RID2RSET(RID_RET); | ||
| 520 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 521 | emit_rr(as, XO_GROUP5, XOg_CALL, r); | ||
| 522 | ci.func = (ASMFunction)(void *)0; | ||
| 523 | } | ||
| 524 | asm_gencall(as, &ci, args); | ||
| 525 | } | ||
| 526 | |||
| 527 | /* -- Returns ------------------------------------------------------------- */ | ||
| 528 | |||
| 529 | /* Return to lower frame. Guard that it goes to the right spot. */ | ||
| 530 | static void asm_retf(ASMState *as, IRIns *ir) | ||
| 531 | { | ||
| 532 | Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 533 | void *pc = ir_kptr(IR(ir->op2)); | ||
| 534 | int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); | ||
| 535 | as->topslot -= (BCReg)delta; | ||
| 536 | if ((int32_t)as->topslot < 0) as->topslot = 0; | ||
| 537 | emit_setgl(as, base, jit_base); | ||
| 538 | emit_addptr(as, base, -8*delta); | ||
| 539 | asm_guardcc(as, CC_NE); | ||
| 540 | emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); | ||
| 541 | } | ||
| 542 | |||
| 543 | /* -- Type conversions ---------------------------------------------------- */ | ||
| 544 | |||
| 545 | static void asm_tointg(ASMState *as, IRIns *ir, Reg left) | ||
| 546 | { | ||
| 547 | Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); | ||
| 548 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 549 | asm_guardcc(as, CC_P); | ||
| 550 | asm_guardcc(as, CC_NE); | ||
| 551 | emit_rr(as, XO_UCOMISD, left, tmp); | ||
| 552 | emit_rr(as, XO_CVTSI2SD, tmp, dest); | ||
| 553 | if (!(as->flags & JIT_F_SPLIT_XMM)) | ||
| 554 | emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */ | ||
| 555 | emit_rr(as, XO_CVTTSD2SI, dest, left); | ||
| 556 | /* Can't fuse since left is needed twice. */ | ||
| 557 | } | ||
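| | /* Illustrative sketch (not part of the original source): the guard above | ||
| | ** round-trips the conversion to check exactness. Read in execution order | ||
| | ** (machine code is emitted backwards), it behaves roughly like: | ||
| | **   int32_t i = (int32_t)n;        -- cvttsd2si, truncates | ||
| | **   if ((double)i != n) exit;      -- cvtsi2sd + ucomisd, guard CC_NE | ||
| | **   if (n is NaN) exit;            -- unordered compare sets PF, guard CC_P | ||
| | */ | ||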
| 558 | |||
| 559 | static void asm_tobit(ASMState *as, IRIns *ir) | ||
| 560 | { | ||
| 561 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 562 | Reg tmp = ra_noreg(IR(ir->op1)->r) ? | ||
| 563 | ra_alloc1(as, ir->op1, RSET_FPR) : | ||
| 564 | ra_scratch(as, RSET_FPR); | ||
| 565 | Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); | ||
| 566 | emit_rr(as, XO_MOVDto, tmp, dest); | ||
| 567 | emit_mrm(as, XO_ADDSD, tmp, right); | ||
| 568 | ra_left(as, tmp, ir->op1); | ||
| 569 | } | ||
| 570 | |||
| 571 | static void asm_conv(ASMState *as, IRIns *ir) | ||
| 572 | { | ||
| 573 | IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); | ||
| 574 | int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); | ||
| 575 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); | ||
| 576 | IRRef lref = ir->op1; | ||
| 577 | lua_assert(irt_type(ir->t) != st); | ||
| 578 | lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ | ||
| 579 | if (irt_isfp(ir->t)) { | ||
| 580 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
| 581 | if (stfp) { /* FP to FP conversion. */ | ||
| 582 | Reg left = asm_fuseload(as, lref, RSET_FPR); | ||
| 583 | emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left); | ||
| 584 | if (left == dest) return; /* Avoid the XO_XORPS. */ | ||
| 585 | } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ | ||
| 586 | /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ | ||
| 587 | cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); | ||
| 588 | Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); | ||
| 589 | if (irt_isfloat(ir->t)) | ||
| 590 | emit_rr(as, XO_CVTSD2SS, dest, dest); | ||
| 591 | emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ | ||
| 592 | emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */ | ||
| 593 | emit_loadn(as, bias, k); | ||
| 594 | emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); | ||
| 595 | return; | ||
| 596 | } else { /* Integer to FP conversion. */ | ||
| 597 | Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ? | ||
| 598 | ra_alloc1(as, lref, RSET_GPR) : | ||
| 599 | asm_fuseload(as, lref, RSET_GPR); | ||
| 600 | if (LJ_64 && st == IRT_U64) { | ||
| 601 | MCLabel l_end = emit_label(as); | ||
| 602 | const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); | ||
| 603 | emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ | ||
| 604 | emit_sjcc(as, CC_NS, l_end); | ||
| 605 | emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ | ||
| 606 | } | ||
| 607 | emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, | ||
| 608 | dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); | ||
| 609 | } | ||
| 610 | if (!(as->flags & JIT_F_SPLIT_XMM)) | ||
| 611 | emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */ | ||
| 612 | } else if (stfp) { /* FP to integer conversion. */ | ||
| 613 | if (irt_isguard(ir->t)) { | ||
| 614 | /* Checked conversions are only supported from number to int. */ | ||
| 615 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | ||
| 616 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | ||
| 617 | } else { | ||
| 618 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 619 | x86Op op = st == IRT_NUM ? | ||
| 620 | ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) : | ||
| 621 | ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI); | ||
| 622 | if (LJ_32 && irt_isu32(ir->t)) { /* FP to U32 conversion on x86. */ | ||
| 623 | /* u32 = (int32_t)(number - 2^31) + 2^31 */ | ||
| 624 | Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : | ||
| 625 | ra_scratch(as, RSET_FPR); | ||
| 626 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); | ||
| 627 | emit_rr(as, op, dest, tmp); | ||
| 628 | if (st == IRT_NUM) | ||
| 629 | emit_rma(as, XO_ADDSD, tmp, | ||
| 630 | lj_ir_k64_find(as->J, U64x(c1e00000,00000000))); | ||
| 631 | else | ||
| 632 | emit_rma(as, XO_ADDSS, tmp, | ||
| 633 | lj_ir_k64_find(as->J, U64x(00000000,cf000000))); | ||
| 634 | ra_left(as, tmp, lref); | ||
| 635 | } else if (LJ_64 && irt_isu64(ir->t)) { | ||
| 636 | /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ | ||
| 637 | Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) : | ||
| 638 | ra_scratch(as, RSET_FPR); | ||
| 639 | MCLabel l_end = emit_label(as); | ||
| 640 | emit_rr(as, op, dest|REX_64, tmp); | ||
| 641 | if (st == IRT_NUM) | ||
| 642 | emit_rma(as, XO_ADDSD, tmp, | ||
| 643 | lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); | ||
| 644 | else | ||
| 645 | emit_rma(as, XO_ADDSS, tmp, | ||
| 646 | lj_ir_k64_find(as->J, U64x(00000000,df800000))); | ||
| 647 | emit_sjcc(as, CC_NS, l_end); | ||
| 648 | emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest < 2^63. */ | ||
| 649 | emit_rr(as, op, dest|REX_64, tmp); | ||
| 650 | ra_left(as, tmp, lref); | ||
| 651 | } else { | ||
| 652 | Reg left = asm_fuseload(as, lref, RSET_FPR); | ||
| 653 | if (LJ_64 && irt_isu32(ir->t)) | ||
| 654 | emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ | ||
| 655 | emit_mrm(as, op, | ||
| 656 | dest|((LJ_64 && | ||
| 657 | (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), | ||
| 658 | left); | ||
| 659 | } | ||
| 660 | } | ||
| 661 | } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ | ||
| 662 | Reg left, dest = ra_dest(as, ir, RSET_GPR); | ||
| 663 | RegSet allow = RSET_GPR; | ||
| 664 | x86Op op; | ||
| 665 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | ||
| 666 | if (st == IRT_I8) { | ||
| 667 | op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; | ||
| 668 | } else if (st == IRT_U8) { | ||
| 669 | op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX; | ||
| 670 | } else if (st == IRT_I16) { | ||
| 671 | op = XO_MOVSXw; | ||
| 672 | } else { | ||
| 673 | op = XO_MOVZXw; | ||
| 674 | } | ||
| 675 | left = asm_fuseload(as, lref, allow); | ||
| 676 | /* Add extra MOV if source is already in wrong register. */ | ||
| 677 | if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) { | ||
| 678 | Reg tmp = ra_scratch(as, allow); | ||
| 679 | emit_rr(as, op, dest, tmp); | ||
| 680 | emit_rr(as, XO_MOV, tmp, left); | ||
| 681 | } else { | ||
| 682 | emit_mrm(as, op, dest, left); | ||
| 683 | } | ||
| 684 | } else { /* 32/64 bit integer conversions. */ | ||
| 685 | if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */ | ||
| 686 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 687 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 688 | } else if (irt_is64(ir->t)) { | ||
| 689 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 690 | if (st64 || !(ir->op2 & IRCONV_SEXT)) { | ||
| 691 | /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */ | ||
| 692 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 693 | } else { /* 32 to 64 bit sign extension. */ | ||
| 694 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 695 | emit_mrm(as, XO_MOVSXd, dest|REX_64, left); | ||
| 696 | } | ||
| 697 | } else { | ||
| 698 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 699 | if (st64) { | ||
| 700 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 701 | /* This is either a 32 bit reg/reg mov which zeroes the hiword | ||
| 702 | ** or a load of the loword from a 64 bit address. | ||
| 703 | */ | ||
| 704 | emit_mrm(as, XO_MOV, dest, left); | ||
| 705 | } else { /* 32/32 bit no-op (cast). */ | ||
| 706 | ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */ | ||
| 707 | } | ||
| 708 | } | ||
| 709 | } | ||
| 710 | } | ||
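| | /* Illustrative notes on the conversion constants above (not part of the | ||
| | ** original source): U64x(43380000,00000000) is the double 2^52+2^51, so | ||
| | ** MOVD+XORPS drops a u32 into the low mantissa bits, yielding exactly | ||
| | ** 2^52+2^51+u, and the SUBSD recovers u (e.g. u=5 -> bits | ||
| | ** 0x4338000000000005 -> 2^52+2^51+5 -> minus bias -> 5.0). | ||
| | ** U64x(43f00000,00000000) is +2^64, which fixes up u64 inputs >= 2^63 | ||
| | ** after a signed convert; c1e00000../cf000000.. are -2^31 as double/float | ||
| | ** and c3f00000../df800000.. are -2^64, shifting out-of-range FP inputs | ||
| | ** into signed range before the cvt. | ||
| | */ | ||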
| 711 | |||
| 712 | #if LJ_32 && LJ_HASFFI | ||
| 713 | /* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */ | ||
| 714 | |||
| 715 | /* 64 bit integer to FP conversion in 32 bit mode. */ | ||
| 716 | static void asm_conv_fp_int64(ASMState *as, IRIns *ir) | ||
| 717 | { | ||
| 718 | Reg hi = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 719 | Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi)); | ||
| 720 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
| 721 | Reg dest = ir->r; | ||
| 722 | if (ra_hasreg(dest)) { | ||
| 723 | ra_free(as, dest); | ||
| 724 | ra_modified(as, dest); | ||
| 725 | emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, | ||
| 726 | dest, RID_ESP, ofs); | ||
| 727 | } | ||
| 728 | emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, | ||
| 729 | irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); | ||
| 730 | if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { | ||
| 731 | /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ | ||
| 732 | MCLabel l_end = emit_label(as); | ||
| 733 | emit_rma(as, XO_FADDq, XOg_FADDq, | ||
| 734 | lj_ir_k64_find(as->J, U64x(43f00000,00000000))); | ||
| 735 | emit_sjcc(as, CC_NS, l_end); | ||
| 736 | emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ | ||
| 737 | } else { | ||
| 738 | lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); | ||
| 739 | } | ||
| 740 | emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); | ||
| 741 | /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ | ||
| 742 | emit_rmro(as, XO_MOVto, hi, RID_ESP, 4); | ||
| 743 | emit_rmro(as, XO_MOVto, lo, RID_ESP, 0); | ||
| 744 | } | ||
| 745 | |||
| 746 | /* FP to 64 bit integer conversion in 32 bit mode. */ | ||
| 747 | static void asm_conv_int64_fp(ASMState *as, IRIns *ir) | ||
| 748 | { | ||
| 749 | IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); | ||
| 750 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); | ||
| 751 | Reg lo, hi; | ||
| 752 | lua_assert(st == IRT_NUM || st == IRT_FLOAT); | ||
| 753 | lua_assert(dt == IRT_I64 || dt == IRT_U64); | ||
| 754 | lua_assert(((ir-1)->op2 & IRCONV_TRUNC)); | ||
| 755 | hi = ra_dest(as, ir, RSET_GPR); | ||
| 756 | lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); | ||
| 757 | if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); | ||
| 758 | /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */ | ||
| 759 | if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */ | ||
| 760 | emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4); | ||
| 761 | emit_rmro(as, XO_MOVto, lo, RID_ESP, 4); | ||
| 762 | emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff); | ||
| 763 | } | ||
| 764 | if (dt == IRT_U64) { | ||
| 765 | /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */ | ||
| 766 | MCLabel l_pop, l_end = emit_label(as); | ||
| 767 | emit_x87op(as, XI_FPOP); | ||
| 768 | l_pop = emit_label(as); | ||
| 769 | emit_sjmp(as, l_end); | ||
| 770 | emit_rmro(as, XO_MOV, hi, RID_ESP, 4); | ||
| 771 | if ((as->flags & JIT_F_SSE3)) | ||
| 772 | emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); | ||
| 773 | else | ||
| 774 | emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); | ||
| 775 | emit_rma(as, XO_FADDq, XOg_FADDq, | ||
| 776 | lj_ir_k64_find(as->J, U64x(c3f00000,00000000))); | ||
| 777 | emit_sjcc(as, CC_NS, l_pop); | ||
| 778 | emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ | ||
| 779 | } | ||
| 780 | emit_rmro(as, XO_MOV, hi, RID_ESP, 4); | ||
| 781 | if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */ | ||
| 782 | emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); | ||
| 783 | } else { /* Otherwise set FPU rounding mode to truncate before the store. */ | ||
| 784 | emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); | ||
| 785 | emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0); | ||
| 786 | emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0); | ||
| 787 | emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0); | ||
| 788 | emit_loadi(as, lo, 0xc00); | ||
| 789 | emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0); | ||
| 790 | } | ||
| 791 | if (dt == IRT_U64) | ||
| 792 | emit_x87op(as, XI_FDUP); | ||
| 793 | emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd, | ||
| 794 | st == IRT_NUM ? XOg_FLDq : XOg_FLDd, | ||
| 795 | asm_fuseload(as, ir->op1, RSET_EMPTY)); | ||
| 796 | } | ||
| 797 | #endif | ||
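| | /* Illustrative note (not part of the original source): without SSE3's | ||
| | ** FISTTP, truncation needs the classic x87 control-word dance. Reading | ||
| | ** the backwards-emitted code above bottom-up gives roughly: | ||
| | **   fnstcw [esp]               -- save the current control word | ||
| | **   or cw, 0xc00; fldcw        -- RC=11: round toward zero | ||
| | **   fistp qword [esp]          -- store with truncation | ||
| | **   and cw, 0xf3ff; fldcw      -- restore RC=00 (round to nearest) | ||
| | */ | ||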
| 798 | |||
| 799 | static void asm_strto(ASMState *as, IRIns *ir) | ||
| 800 | { | ||
| 801 | /* Force a spill slot for the destination register (if any). */ | ||
| 802 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum]; | ||
| 803 | IRRef args[2]; | ||
| 804 | RegSet drop = RSET_SCRATCH; | ||
| 805 | if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r)) | ||
| 806 | rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */ | ||
| 807 | ra_evictset(as, drop); | ||
| 808 | asm_guardcc(as, CC_E); | ||
| 809 | emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */ | ||
| 810 | args[0] = ir->op1; /* GCstr *str */ | ||
| 811 | args[1] = ASMREF_TMP1; /* TValue *n */ | ||
| 812 | asm_gencall(as, ci, args); | ||
| 813 | /* Store the result to the spill slot or temp slots. */ | ||
| 814 | emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, | ||
| 815 | RID_ESP, sps_scale(ir->s)); | ||
| 816 | } | ||
| 817 | |||
| 818 | static void asm_tostr(ASMState *as, IRIns *ir) | ||
| 819 | { | ||
| 820 | IRIns *irl = IR(ir->op1); | ||
| 821 | IRRef args[2]; | ||
| 822 | args[0] = ASMREF_L; | ||
| 823 | as->gcsteps++; | ||
| 824 | if (irt_isnum(irl->t)) { | ||
| 825 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum]; | ||
| 826 | args[1] = ASMREF_TMP1; /* const lua_Number * */ | ||
| 827 | asm_setupresult(as, ir, ci); /* GCstr * */ | ||
| 828 | asm_gencall(as, ci, args); | ||
| 829 | emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64, | ||
| 830 | RID_ESP, ra_spill(as, irl)); | ||
| 831 | } else { | ||
| 832 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint]; | ||
| 833 | args[1] = ir->op1; /* int32_t k */ | ||
| 834 | asm_setupresult(as, ir, ci); /* GCstr * */ | ||
| 835 | asm_gencall(as, ci, args); | ||
| 836 | } | ||
| 837 | } | ||
| 838 | |||
| 839 | /* -- Memory references --------------------------------------------------- */ | ||
| 840 | |||
| 841 | static void asm_aref(ASMState *as, IRIns *ir) | ||
| 842 | { | ||
| 843 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 844 | asm_fusearef(as, ir, RSET_GPR); | ||
| 845 | if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) | ||
| 846 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 847 | else if (as->mrm.base != dest) | ||
| 848 | emit_rr(as, XO_MOV, dest, as->mrm.base); | ||
| 849 | } | ||
| 850 | |||
| 851 | /* Merge NE(HREF, niltv) check. */ | ||
| 852 | static MCode *merge_href_niltv(ASMState *as, IRIns *ir) | ||
| 853 | { | ||
| 854 | /* Assumes nothing else generates NE of HREF. */ | ||
| 855 | if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins && | ||
| 856 | ra_hasreg(ir->r)) { | ||
| 857 | MCode *p = as->mcp; | ||
| 858 | p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6; | ||
| 859 | /* Ensure no loop branch inversion happened. */ | ||
| 860 | if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) { | ||
| 861 | as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */ | ||
| 862 | return p + *(int32_t *)(p-4); /* Return exit address. */ | ||
| 863 | } | ||
| 864 | } | ||
| 865 | return NULL; | ||
| 866 | } | ||
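| | /* Illustrative note (not part of the original source): this peephole | ||
| | ** assumes the HREF tail just emitted is  cmp reg, imm32  (opcode 0x81, | ||
| | ** 6 bytes, or 7 with a REX prefix on x64) followed by a 6 byte near Jcc | ||
| | ** (0F 8x rel32). Advancing as->mcp past 6+6 (or 7+6) bytes discards both | ||
| | ** instructions, and the rel32 at p-4 recovers the guard exit address. | ||
| | */ | ||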
| 867 | |||
| 868 | /* Inlined hash lookup. Specialized for key type and for const keys. | ||
| 869 | ** The equivalent C code is: | ||
| 870 | ** Node *n = hashkey(t, key); | ||
| 871 | ** do { | ||
| 872 | ** if (lj_obj_equal(&n->key, key)) return &n->val; | ||
| 873 | ** } while ((n = nextnode(n))); | ||
| 874 | ** return niltv(L); | ||
| 875 | */ | ||
| 876 | static void asm_href(ASMState *as, IRIns *ir) | ||
| 877 | { | ||
| 878 | MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */ | ||
| 879 | RegSet allow = RSET_GPR; | ||
| 880 | Reg dest = ra_dest(as, ir, allow); | ||
| 881 | Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); | ||
| 882 | Reg key = RID_NONE, tmp = RID_NONE; | ||
| 883 | IRIns *irkey = IR(ir->op2); | ||
| 884 | int isk = irref_isk(ir->op2); | ||
| 885 | IRType1 kt = irkey->t; | ||
| 886 | uint32_t khash; | ||
| 887 | MCLabel l_end, l_loop, l_next; | ||
| 888 | |||
| 889 | if (!isk) { | ||
| 890 | rset_clear(allow, tab); | ||
| 891 | key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); | ||
| 892 | if (!irt_isstr(kt)) | ||
| 893 | tmp = ra_scratch(as, rset_exclude(allow, key)); | ||
| 894 | } | ||
| 895 | |||
| 896 | /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */ | ||
| 897 | l_end = emit_label(as); | ||
| 898 | if (nilexit && ir[1].o == IR_NE) { | ||
| 899 | emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */ | ||
| 900 | nilexit = NULL; | ||
| 901 | } else { | ||
| 902 | emit_loada(as, dest, niltvg(J2G(as->J))); | ||
| 903 | } | ||
| 904 | |||
| 905 | /* Follow hash chain until the end. */ | ||
| 906 | l_loop = emit_sjcc_label(as, CC_NZ); | ||
| 907 | emit_rr(as, XO_TEST, dest, dest); | ||
| 908 | emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); | ||
| 909 | l_next = emit_label(as); | ||
| 910 | |||
| 911 | /* Type and value comparison. */ | ||
| 912 | if (nilexit) | ||
| 913 | emit_jcc(as, CC_E, nilexit); | ||
| 914 | else | ||
| 915 | emit_sjcc(as, CC_E, l_end); | ||
| 916 | if (irt_isnum(kt)) { | ||
| 917 | if (isk) { | ||
| 918 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 919 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo), | ||
| 920 | (int32_t)ir_knum(irkey)->u32.lo); | ||
| 921 | emit_sjcc(as, CC_NE, l_next); | ||
| 922 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi), | ||
| 923 | (int32_t)ir_knum(irkey)->u32.hi); | ||
| 924 | } else { | ||
| 925 | emit_sjcc(as, CC_P, l_next); | ||
| 926 | emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); | ||
| 927 | emit_sjcc(as, CC_AE, l_next); | ||
| 928 | /* The type check avoids NaN penalties and complaints from Valgrind. */ | ||
| 929 | #if LJ_64 | ||
| 930 | emit_u32(as, LJ_TISNUM); | ||
| 931 | emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 932 | #else | ||
| 933 | emit_i8(as, LJ_TISNUM); | ||
| 934 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 935 | #endif | ||
| 936 | } | ||
| 937 | #if LJ_64 | ||
| 938 | } else if (irt_islightud(kt)) { | ||
| 939 | emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); | ||
| 940 | #endif | ||
| 941 | } else { | ||
| 942 | if (!irt_ispri(kt)) { | ||
| 943 | lua_assert(irt_isaddr(kt)); | ||
| 944 | if (isk) | ||
| 945 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), | ||
| 946 | ptr2addr(ir_kgc(irkey))); | ||
| 947 | else | ||
| 948 | emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); | ||
| 949 | emit_sjcc(as, CC_NE, l_next); | ||
| 950 | } | ||
| 951 | lua_assert(!irt_isnil(kt)); | ||
| 952 | emit_i8(as, irt_toitype(kt)); | ||
| 953 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); | ||
| 954 | } | ||
| 955 | emit_sfixup(as, l_loop); | ||
| 956 | checkmclim(as); | ||
| 957 | |||
| 958 | /* Load main position relative to tab->node into dest. */ | ||
| 959 | khash = isk ? ir_khash(irkey) : 1; | ||
| 960 | if (khash == 0) { | ||
| 961 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); | ||
| 962 | } else { | ||
| 963 | emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); | ||
| 964 | if ((as->flags & JIT_F_PREFER_IMUL)) { | ||
| 965 | emit_i8(as, sizeof(Node)); | ||
| 966 | emit_rr(as, XO_IMULi8, dest, dest); | ||
| 967 | } else { | ||
| 968 | emit_shifti(as, XOg_SHL, dest, 3); | ||
| 969 | emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0); | ||
| 970 | } | ||
| 971 | if (isk) { | ||
| 972 | emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); | ||
| 973 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); | ||
| 974 | } else if (irt_isstr(kt)) { | ||
| 975 | emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); | ||
| 976 | emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); | ||
| 977 | } else { /* Must match with hashrot() in lj_tab.c. */ | ||
| 978 | emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); | ||
| 979 | emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp); | ||
| 980 | emit_shifti(as, XOg_ROL, tmp, HASH_ROT3); | ||
| 981 | emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp); | ||
| 982 | emit_shifti(as, XOg_ROL, dest, HASH_ROT2); | ||
| 983 | emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest); | ||
| 984 | emit_shifti(as, XOg_ROL, dest, HASH_ROT1); | ||
| 985 | emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest); | ||
| 986 | if (irt_isnum(kt)) { | ||
| 987 | emit_rr(as, XO_ARITH(XOg_ADD), dest, dest); | ||
| 988 | #if LJ_64 | ||
| 989 | emit_shifti(as, XOg_SHR|REX_64, dest, 32); | ||
| 990 | emit_rr(as, XO_MOV, tmp, dest); | ||
| 991 | emit_rr(as, XO_MOVDto, key|REX_64, dest); | ||
| 992 | #else | ||
| 993 | emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4); | ||
| 994 | emit_rr(as, XO_MOVDto, key, tmp); | ||
| 995 | #endif | ||
| 996 | } else { | ||
| 997 | emit_rr(as, XO_MOV, tmp, key); | ||
| 998 | emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); | ||
| 999 | } | ||
| 1000 | } | ||
| 1001 | } | ||
| 1002 | } | ||
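| | /* Illustrative sketch (not part of the original source): the mix above | ||
| | ** must match hashrot() in lj_tab.c. Read in execution order (emission is | ||
| | ** backwards), the non-constant, non-string case computes roughly: | ||
| | **   hi ^= lo; lo = rol(lo, HASH_ROT1); | ||
| | **   hi -= lo; lo = rol(lo, HASH_ROT2); | ||
| | **   lo ^= hi; hi = rol(hi, HASH_ROT3); | ||
| | **   lo -= hi; slot = lo & t->hmask; | ||
| | ** with lo = key + HASH_BIAS and hi = key for pointer keys; number keys | ||
| | ** feed the two 32 bit halves of the double instead. The slot is then | ||
| | ** scaled by sizeof(Node) and added to t->node. | ||
| | */ | ||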
| 1003 | |||
| 1004 | static void asm_hrefk(ASMState *as, IRIns *ir) | ||
| 1005 | { | ||
| 1006 | IRIns *kslot = IR(ir->op2); | ||
| 1007 | IRIns *irkey = IR(kslot->op1); | ||
| 1008 | int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); | ||
| 1009 | Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; | ||
| 1010 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 1011 | #if !LJ_64 | ||
| 1012 | MCLabel l_exit; | ||
| 1013 | #endif | ||
| 1014 | lua_assert(ofs % sizeof(Node) == 0); | ||
| 1015 | if (ra_hasreg(dest)) { | ||
| 1016 | if (ofs != 0) { | ||
| 1017 | if (dest == node && !(as->flags & JIT_F_LEA_AGU)) | ||
| 1018 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); | ||
| 1019 | else | ||
| 1020 | emit_rmro(as, XO_LEA, dest, node, ofs); | ||
| 1021 | } else if (dest != node) { | ||
| 1022 | emit_rr(as, XO_MOV, dest, node); | ||
| 1023 | } | ||
| 1024 | } | ||
| 1025 | asm_guardcc(as, CC_NE); | ||
| 1026 | #if LJ_64 | ||
| 1027 | if (!irt_ispri(irkey->t)) { | ||
| 1028 | Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); | ||
| 1029 | emit_rmro(as, XO_CMP, key|REX_64, node, | ||
| 1030 | ofs + (int32_t)offsetof(Node, key.u64)); | ||
| 1031 | lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); | ||
| 1032 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 1033 | emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : | ||
| 1034 | ((uint64_t)irt_toitype(irkey->t) << 32) | | ||
| 1035 | (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); | ||
| 1036 | } else { | ||
| 1037 | lua_assert(!irt_isnil(irkey->t)); | ||
| 1038 | emit_i8(as, irt_toitype(irkey->t)); | ||
| 1039 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, | ||
| 1040 | ofs + (int32_t)offsetof(Node, key.it)); | ||
| 1041 | } | ||
| 1042 | #else | ||
| 1043 | l_exit = emit_label(as); | ||
| 1044 | if (irt_isnum(irkey->t)) { | ||
| 1045 | /* Assumes -0.0 is already canonicalized to +0.0. */ | ||
| 1046 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 1047 | ofs + (int32_t)offsetof(Node, key.u32.lo), | ||
| 1048 | (int32_t)ir_knum(irkey)->u32.lo); | ||
| 1049 | emit_sjcc(as, CC_NE, l_exit); | ||
| 1050 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 1051 | ofs + (int32_t)offsetof(Node, key.u32.hi), | ||
| 1052 | (int32_t)ir_knum(irkey)->u32.hi); | ||
| 1053 | } else { | ||
| 1054 | if (!irt_ispri(irkey->t)) { | ||
| 1055 | lua_assert(irt_isgcv(irkey->t)); | ||
| 1056 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | ||
| 1057 | ofs + (int32_t)offsetof(Node, key.gcr), | ||
| 1058 | ptr2addr(ir_kgc(irkey))); | ||
| 1059 | emit_sjcc(as, CC_NE, l_exit); | ||
| 1060 | } | ||
| 1061 | lua_assert(!irt_isnil(irkey->t)); | ||
| 1062 | emit_i8(as, irt_toitype(irkey->t)); | ||
| 1063 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, | ||
| 1064 | ofs + (int32_t)offsetof(Node, key.it)); | ||
| 1065 | } | ||
| 1066 | #endif | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | static void asm_newref(ASMState *as, IRIns *ir) | ||
| 1070 | { | ||
| 1071 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; | ||
| 1072 | IRRef args[3]; | ||
| 1073 | IRIns *irkey; | ||
| 1074 | Reg tmp; | ||
| 1075 | args[0] = ASMREF_L; /* lua_State *L */ | ||
| 1076 | args[1] = ir->op1; /* GCtab *t */ | ||
| 1077 | args[2] = ASMREF_TMP1; /* cTValue *key */ | ||
| 1078 | asm_setupresult(as, ir, ci); /* TValue * */ | ||
| 1079 | asm_gencall(as, ci, args); | ||
| 1080 | tmp = ra_releasetmp(as, ASMREF_TMP1); | ||
| 1081 | irkey = IR(ir->op2); | ||
| 1082 | if (irt_isnum(irkey->t)) { | ||
| 1083 | /* For numbers use the constant itself or a spill slot as a TValue. */ | ||
| 1084 | if (irref_isk(ir->op2)) | ||
| 1085 | emit_loada(as, tmp, ir_knum(irkey)); | ||
| 1086 | else | ||
| 1087 | emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey)); | ||
| 1088 | } else { | ||
| 1089 | /* Otherwise use g->tmptv to hold the TValue. */ | ||
| 1090 | if (!irref_isk(ir->op2)) { | ||
| 1091 | Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp)); | ||
| 1092 | emit_movtomro(as, REX_64IR(irkey, src), tmp, 0); | ||
| 1093 | } else if (!irt_ispri(irkey->t)) { | ||
| 1094 | emit_movmroi(as, tmp, 0, irkey->i); | ||
| 1095 | } | ||
| 1096 | if (!(LJ_64 && irt_islightud(irkey->t))) | ||
| 1097 | emit_movmroi(as, tmp, 4, irt_toitype(irkey->t)); | ||
| 1098 | emit_loada(as, tmp, &J2G(as->J)->tmptv); | ||
| 1099 | } | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | static void asm_uref(ASMState *as, IRIns *ir) | ||
| 1103 | { | ||
| 1104 | /* NYI: Check that UREFO is still open and not aliasing a slot. */ | ||
| 1105 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1106 | if (irref_isk(ir->op1)) { | ||
| 1107 | GCfunc *fn = ir_kfunc(IR(ir->op1)); | ||
| 1108 | MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; | ||
| 1109 | emit_rma(as, XO_MOV, dest, v); | ||
| 1110 | } else { | ||
| 1111 | Reg uv = ra_scratch(as, RSET_GPR); | ||
| 1112 | Reg func = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 1113 | if (ir->o == IR_UREFC) { | ||
| 1114 | emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); | ||
| 1115 | asm_guardcc(as, CC_NE); | ||
| 1116 | emit_i8(as, 1); | ||
| 1117 | emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); | ||
| 1118 | } else { | ||
| 1119 | emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); | ||
| 1120 | } | ||
| 1121 | emit_rmro(as, XO_MOV, uv, func, | ||
| 1122 | (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); | ||
| 1123 | } | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | static void asm_fref(ASMState *as, IRIns *ir) | ||
| 1127 | { | ||
| 1128 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1129 | asm_fusefref(as, ir, RSET_GPR); | ||
| 1130 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | static void asm_strref(ASMState *as, IRIns *ir) | ||
| 1134 | { | ||
| 1135 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1136 | asm_fusestrref(as, ir, RSET_GPR); | ||
| 1137 | if (as->mrm.base == RID_NONE) | ||
| 1138 | emit_loadi(as, dest, as->mrm.ofs); | ||
| 1139 | else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) | ||
| 1140 | emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); | ||
| 1141 | else | ||
| 1142 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | /* -- Loads and stores ---------------------------------------------------- */ | ||
| 1146 | |||
| 1147 | static void asm_fxload(ASMState *as, IRIns *ir) | ||
| 1148 | { | ||
| 1149 | Reg dest = ra_dest(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); | ||
| 1150 | x86Op xo; | ||
| 1151 | if (ir->o == IR_FLOAD) | ||
| 1152 | asm_fusefref(as, ir, RSET_GPR); | ||
| 1153 | else | ||
| 1154 | asm_fusexref(as, ir->op1, RSET_GPR); | ||
| 1155 | /* ir->op2 is ignored -- unaligned loads are ok on x86. */ | ||
| 1156 | switch (irt_type(ir->t)) { | ||
| 1157 | case IRT_I8: xo = XO_MOVSXb; break; | ||
| 1158 | case IRT_U8: xo = XO_MOVZXb; break; | ||
| 1159 | case IRT_I16: xo = XO_MOVSXw; break; | ||
| 1160 | case IRT_U16: xo = XO_MOVZXw; break; | ||
| 1161 | case IRT_NUM: xo = XMM_MOVRM(as); break; | ||
| 1162 | case IRT_FLOAT: xo = XO_MOVSS; break; | ||
| 1163 | default: | ||
| 1164 | if (LJ_64 && irt_is64(ir->t)) | ||
| 1165 | dest |= REX_64; | ||
| 1166 | else | ||
| 1167 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | ||
| 1168 | xo = XO_MOV; | ||
| 1169 | break; | ||
| 1170 | } | ||
| 1171 | emit_mrm(as, xo, dest, RID_MRM); | ||
| 1172 | } | ||
| 1173 | |||
| 1174 | static void asm_fxstore(ASMState *as, IRIns *ir) | ||
| 1175 | { | ||
| 1176 | RegSet allow = RSET_GPR; | ||
| 1177 | Reg src = RID_NONE, osrc = RID_NONE; | ||
| 1178 | int32_t k = 0; | ||
| 1179 | /* The IRT_I16/IRT_U16 stores should never be simplified for constant | ||
| 1180 | ** values since mov word [mem], imm16 has a length-changing prefix. | ||
| 1181 | */ | ||
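| | /* (Illustrative addition, not part of the original source: e.g. | ||
| | ** mov word [eax], 0x1234 encodes as 66 C7 00 34 12. The 66 prefix makes | ||
| | ** the immediate length differ from the opcode's default -- a | ||
| | ** length-changing prefix, which stalls predecode on many x86 cores.) | ||
| | */ | ||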
| 1182 | if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) || | ||
| 1183 | !asm_isk32(as, ir->op2, &k)) { | ||
| 1184 | RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR : | ||
| 1185 | (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR; | ||
| 1186 | src = osrc = ra_alloc1(as, ir->op2, allow8); | ||
| 1187 | if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */ | ||
| 1188 | rset_clear(allow, osrc); | ||
| 1189 | src = ra_scratch(as, allow8); | ||
| 1190 | } | ||
| 1191 | rset_clear(allow, src); | ||
| 1192 | } | ||
| 1193 | if (ir->o == IR_FSTORE) | ||
| 1194 | asm_fusefref(as, IR(ir->op1), allow); | ||
| 1195 | else | ||
| 1196 | asm_fusexref(as, ir->op1, allow); | ||
| 1197 | /* ir->op2 is ignored -- unaligned stores are ok on x86. */ | ||
| 1198 | if (ra_hasreg(src)) { | ||
| 1199 | x86Op xo; | ||
| 1200 | switch (irt_type(ir->t)) { | ||
| 1201 | case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; | ||
| 1202 | case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; | ||
| 1203 | case IRT_NUM: xo = XO_MOVSDto; break; | ||
| 1204 | case IRT_FLOAT: xo = XO_MOVSSto; break; | ||
| 1205 | #if LJ_64 | ||
| 1206 | case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */ | ||
| 1207 | #endif | ||
| 1208 | default: | ||
| 1209 | if (LJ_64 && irt_is64(ir->t)) | ||
| 1210 | src |= REX_64; | ||
| 1211 | else | ||
| 1212 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | ||
| 1213 | xo = XO_MOVto; | ||
| 1214 | break; | ||
| 1215 | } | ||
| 1216 | emit_mrm(as, xo, src, RID_MRM); | ||
| 1217 | if (!LJ_64 && src != osrc) { | ||
| 1218 | ra_noweak(as, osrc); | ||
| 1219 | emit_rr(as, XO_MOV, src, osrc); | ||
| 1220 | } | ||
| 1221 | } else { | ||
| 1222 | if (irt_isi8(ir->t) || irt_isu8(ir->t)) { | ||
| 1223 | emit_i8(as, k); | ||
| 1224 | emit_mrm(as, XO_MOVmib, 0, RID_MRM); | ||
| 1225 | } else { | ||
| 1226 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || | ||
| 1227 | irt_isaddr(ir->t)); | ||
| 1228 | emit_i32(as, k); | ||
| 1229 | emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); | ||
| 1230 | } | ||
| 1231 | } | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | #if LJ_64 | ||
| 1235 | static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) | ||
| 1236 | { | ||
| 1237 | if (ra_used(ir) || typecheck) { | ||
| 1238 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1239 | if (typecheck) { | ||
| 1240 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); | ||
| 1241 | asm_guardcc(as, CC_NE); | ||
| 1242 | emit_i8(as, -2); | ||
| 1243 | emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); | ||
| 1244 | emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | ||
| 1245 | emit_rr(as, XO_MOV, tmp|REX_64, dest); | ||
| 1246 | } | ||
| 1247 | return dest; | ||
| 1248 | } else { | ||
| 1249 | return RID_NONE; | ||
| 1250 | } | ||
| 1251 | } | ||
| 1252 | #endif | ||
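| | /* Illustrative note (not part of the original source): on x64 a | ||
| | ** lightuserdata TValue keeps the 47 bit address in the low bits with the | ||
| | ** upper 17 bits fixed to 1...10, so an arithmetic shift right by 47 | ||
| | ** yields exactly -2 for lightuserdata and something else for any other | ||
| | ** boxed value -- which is all the type check above verifies. | ||
| | */ | ||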
| 1253 | |||
| 1254 | static void asm_ahuvload(ASMState *as, IRIns *ir) | ||
| 1255 | { | ||
| 1256 | lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
| 1257 | (LJ_DUALNUM && irt_isint(ir->t))); | ||
| 1258 | #if LJ_64 | ||
| 1259 | if (irt_islightud(ir->t)) { | ||
| 1260 | Reg dest = asm_load_lightud64(as, ir, 1); | ||
| 1261 | if (ra_hasreg(dest)) { | ||
| 1262 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 1263 | emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); | ||
| 1264 | } | ||
| 1265 | return; | ||
| 1266 | } else | ||
| 1267 | #endif | ||
| 1268 | if (ra_used(ir)) { | ||
| 1269 | RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; | ||
| 1270 | Reg dest = ra_dest(as, ir, allow); | ||
| 1271 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 1272 | emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM); | ||
| 1273 | } else { | ||
| 1274 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 1275 | } | ||
| 1276 | /* Always do the type check, even if the load result is unused. */ | ||
| 1277 | as->mrm.ofs += 4; | ||
| 1278 | asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); | ||
| 1279 | if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { | ||
| 1280 | lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); | ||
| 1281 | emit_u32(as, LJ_TISNUM); | ||
| 1282 | emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); | ||
| 1283 | } else { | ||
| 1284 | emit_i8(as, irt_toitype(ir->t)); | ||
| 1285 | emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); | ||
| 1286 | } | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | static void asm_ahustore(ASMState *as, IRIns *ir) | ||
| 1290 | { | ||
| 1291 | if (irt_isnum(ir->t)) { | ||
| 1292 | Reg src = ra_alloc1(as, ir->op2, RSET_FPR); | ||
| 1293 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
| 1294 | emit_mrm(as, XO_MOVSDto, src, RID_MRM); | ||
| 1295 | #if LJ_64 | ||
| 1296 | } else if (irt_islightud(ir->t)) { | ||
| 1297 | Reg src = ra_alloc1(as, ir->op2, RSET_GPR); | ||
| 1298 | asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); | ||
| 1299 | emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); | ||
| 1300 | #endif | ||
| 1301 | } else { | ||
| 1302 | IRIns *irr = IR(ir->op2); | ||
| 1303 | RegSet allow = RSET_GPR; | ||
| 1304 | Reg src = RID_NONE; | ||
| 1305 | if (!irref_isk(ir->op2)) { | ||
| 1306 | src = ra_alloc1(as, ir->op2, allow); | ||
| 1307 | rset_clear(allow, src); | ||
| 1308 | } | ||
| 1309 | asm_fuseahuref(as, ir->op1, allow); | ||
| 1310 | if (ra_hasreg(src)) { | ||
| 1311 | emit_mrm(as, XO_MOVto, src, RID_MRM); | ||
| 1312 | } else if (!irt_ispri(irr->t)) { | ||
| 1313 | lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
| 1314 | emit_i32(as, irr->i); | ||
| 1315 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
| 1316 | } | ||
| 1317 | as->mrm.ofs += 4; | ||
| 1318 | emit_i32(as, (int32_t)irt_toitype(ir->t)); | ||
| 1319 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
| 1320 | } | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | static void asm_sload(ASMState *as, IRIns *ir) | ||
| 1324 | { | ||
| 1325 | int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); | ||
| 1326 | IRType1 t = ir->t; | ||
| 1327 | Reg base; | ||
| 1328 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | ||
| 1329 | lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | ||
| 1330 | lua_assert(LJ_DUALNUM || | ||
| 1331 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); | ||
| 1332 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | ||
| 1333 | Reg left = ra_scratch(as, RSET_FPR); | ||
| 1334 | asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ | ||
| 1335 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 1336 | emit_rmro(as, XMM_MOVRM(as), left, base, ofs); | ||
| 1337 | t.irt = IRT_NUM; /* Continue with a regular number type check. */ | ||
| 1338 | #if LJ_64 | ||
| 1339 | } else if (irt_islightud(t)) { | ||
| 1340 | Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); | ||
| 1341 | if (ra_hasreg(dest)) { | ||
| 1342 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 1343 | emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); | ||
| 1344 | } | ||
| 1345 | return; | ||
| 1346 | #endif | ||
| 1347 | } else if (ra_used(ir)) { | ||
| 1348 | RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; | ||
| 1349 | Reg dest = ra_dest(as, ir, allow); | ||
| 1350 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 1351 | lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); | ||
| 1352 | if ((ir->op2 & IRSLOAD_CONVERT)) { | ||
| 1353 | t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ | ||
| 1354 | emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs); | ||
| 1355 | } else if (irt_isnum(t)) { | ||
| 1356 | emit_rmro(as, XMM_MOVRM(as), dest, base, ofs); | ||
| 1357 | } else { | ||
| 1358 | emit_rmro(as, XO_MOV, dest, base, ofs); | ||
| 1359 | } | ||
| 1360 | } else { | ||
| 1361 | if (!(ir->op2 & IRSLOAD_TYPECHECK)) | ||
| 1362 | return; /* No type check: avoid base alloc. */ | ||
| 1363 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
| 1364 | } | ||
| 1365 | if ((ir->op2 & IRSLOAD_TYPECHECK)) { | ||
| 1366 | /* Need type check, even if the load result is unused. */ | ||
| 1367 | asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); | ||
| 1368 | if (LJ_64 && irt_type(t) >= IRT_NUM) { | ||
| 1369 | lua_assert(irt_isinteger(t) || irt_isnum(t)); | ||
| 1370 | emit_u32(as, LJ_TISNUM); | ||
| 1371 | emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); | ||
| 1372 | } else { | ||
| 1373 | emit_i8(as, irt_toitype(t)); | ||
| 1374 | emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); | ||
| 1375 | } | ||
| 1376 | } | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | /* -- Allocations --------------------------------------------------------- */ | ||
| 1380 | |||
| 1381 | #if LJ_HASFFI | ||
| 1382 | static void asm_cnew(ASMState *as, IRIns *ir) | ||
| 1383 | { | ||
| 1384 | CTState *cts = ctype_ctsG(J2G(as->J)); | ||
| 1385 | CTypeID typeid = (CTypeID)IR(ir->op1)->i; | ||
| 1386 | CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? | ||
| 1387 | lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i; | ||
| 1388 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | ||
| 1389 | IRRef args[2]; | ||
| 1390 | lua_assert(sz != CTSIZE_INVALID); | ||
| 1391 | |||
| 1392 | args[0] = ASMREF_L; /* lua_State *L */ | ||
| 1393 | args[1] = ASMREF_TMP1; /* MSize size */ | ||
| 1394 | as->gcsteps++; | ||
| 1395 | asm_setupresult(as, ir, ci); /* GCcdata * */ | ||
| 1396 | |||
| 1397 | /* Initialize immutable cdata object. */ | ||
| 1398 | if (ir->o == IR_CNEWI) { | ||
| 1399 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | ||
| 1400 | #if LJ_64 | ||
| 1401 | Reg r64 = sz == 8 ? REX_64 : 0; | ||
| 1402 | if (irref_isk(ir->op2)) { | ||
| 1403 | IRIns *irk = IR(ir->op2); | ||
| 1404 | uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : | ||
| 1405 | (uint64_t)(uint32_t)irk->i; | ||
| 1406 | if (sz == 4 || checki32((int64_t)k)) { | ||
| 1407 | emit_i32(as, (int32_t)k); | ||
| 1408 | emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); | ||
| 1409 | } else { | ||
| 1410 | emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); | ||
| 1411 | emit_loadu64(as, RID_ECX, k); | ||
| 1412 | } | ||
| 1413 | } else { | ||
| 1414 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 1415 | emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); | ||
| 1416 | } | ||
| 1417 | #else | ||
| 1418 | int32_t ofs = sizeof(GCcdata); | ||
| 1419 | if (LJ_HASFFI && sz == 8) { | ||
| 1420 | ofs += 4; ir++; | ||
| 1421 | lua_assert(ir->o == IR_HIOP); | ||
| 1422 | } | ||
| 1423 | do { | ||
| 1424 | if (irref_isk(ir->op2)) { | ||
| 1425 | emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); | ||
| 1426 | } else { | ||
| 1427 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
| 1428 | emit_movtomro(as, r, RID_RET, ofs); | ||
| 1429 | rset_clear(allow, r); | ||
| 1430 | } | ||
| 1431 | if (!LJ_HASFFI || ofs == sizeof(GCcdata)) break; | ||
| 1432 | ofs -= 4; ir--; | ||
| 1433 | } while (1); | ||
| 1434 | #endif | ||
| 1435 | lua_assert(sz == 4 || (sz == 8 && (LJ_64 || LJ_HASFFI))); | ||
| 1436 | } | ||
| 1437 | |||
| 1438 | /* Combine initialization of marked, gct and typeid. */ | ||
| 1439 | emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); | ||
| 1440 | emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, | ||
| 1441 | (int32_t)((~LJ_TCDATA<<8)+(typeid<<16))); | ||
| 1442 | emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); | ||
| 1443 | emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); | ||
| 1444 | |||
| 1445 | asm_gencall(as, ci, args); | ||
| 1446 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); | ||
| 1447 | } | ||
| 1448 | #else | ||
| 1449 | #define asm_cnew(as, ir) ((void)0) | ||
| 1450 | #endif | ||
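| | /* Illustrative note (not part of the original source): the combined | ||
| | ** initialization above, read in execution order, loads | ||
| | ** g->gc.currentwhite, masks it down to the white mark bits, ORs | ||
| | ** ~LJ_TCDATA into byte 1 and the 16 bit typeid into bytes 2-3, then | ||
| | ** initializes marked, gct and typeid of the GCcdata header with a | ||
| | ** single 32 bit store. | ||
| | */ | ||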
| 1451 | |||
| 1452 | /* -- Write barriers ------------------------------------------------------ */ | ||
| 1453 | |||
| 1454 | static void asm_tbar(ASMState *as, IRIns *ir) | ||
| 1455 | { | ||
| 1456 | Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); | ||
| 1457 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); | ||
| 1458 | MCLabel l_end = emit_label(as); | ||
| 1459 | emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); | ||
| 1460 | emit_setgl(as, tab, gc.grayagain); | ||
| 1461 | emit_getgl(as, tmp, gc.grayagain); | ||
| 1462 | emit_i8(as, ~LJ_GC_BLACK); | ||
| 1463 | emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); | ||
| 1464 | emit_sjcc(as, CC_Z, l_end); | ||
| 1465 | emit_i8(as, LJ_GC_BLACK); | ||
| 1466 | emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); | ||
| 1467 | } | ||
| 1468 | |||
| 1469 | static void asm_obar(ASMState *as, IRIns *ir) | ||
| 1470 | { | ||
| 1471 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; | ||
| 1472 | IRRef args[2]; | ||
| 1473 | MCLabel l_end; | ||
| 1474 | Reg obj; | ||
| 1475 | /* No need for other object barriers (yet). */ | ||
| 1476 | lua_assert(IR(ir->op1)->o == IR_UREFC); | ||
| 1477 | ra_evictset(as, RSET_SCRATCH); | ||
| 1478 | l_end = emit_label(as); | ||
| 1479 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
| 1480 | args[1] = ir->op1; /* TValue *tv */ | ||
| 1481 | asm_gencall(as, ci, args); | ||
| 1482 | emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); | ||
| 1483 | obj = IR(ir->op1)->r; | ||
| 1484 | emit_sjcc(as, CC_Z, l_end); | ||
| 1485 | emit_i8(as, LJ_GC_WHITES); | ||
| 1486 | if (irref_isk(ir->op2)) { | ||
| 1487 | GCobj *vp = ir_kgc(IR(ir->op2)); | ||
| 1488 | emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); | ||
| 1489 | } else { | ||
| 1490 | Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); | ||
| 1491 | emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); | ||
| 1492 | } | ||
| 1493 | emit_sjcc(as, CC_Z, l_end); | ||
| 1494 | emit_i8(as, LJ_GC_BLACK); | ||
| 1495 | emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, | ||
| 1496 | (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); | ||
| 1497 | } | ||
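| | /* Illustrative note (not part of the original source): read in execution | ||
| | ** order, asm_tbar tests whether the table is black and, if so, clears | ||
| | ** the black bit and links the table onto g->gc.grayagain (a backward | ||
| | ** barrier); asm_obar only calls lj_gc_barrieruv when the closed upvalue | ||
| | ** is black and the stored value is white (a forward barrier), with the | ||
| | ** two CC_Z branches skipping the slow path otherwise. | ||
| | */ | ||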
| 1498 | |||
| 1499 | /* -- FP/int arithmetic and logic operations ------------------------------ */ | ||
| 1500 | |||
| 1501 | /* Load reference onto x87 stack. Force a spill to memory if needed. */ | ||
| 1502 | static void asm_x87load(ASMState *as, IRRef ref) | ||
| 1503 | { | ||
| 1504 | IRIns *ir = IR(ref); | ||
| 1505 | if (ir->o == IR_KNUM) { | ||
| 1506 | cTValue *tv = ir_knum(ir); | ||
| 1507 | if (tvispzero(tv)) /* Use fldz only for +0. */ | ||
| 1508 | emit_x87op(as, XI_FLDZ); | ||
| 1509 | else if (tvispone(tv)) | ||
| 1510 | emit_x87op(as, XI_FLD1); | ||
| 1511 | else | ||
| 1512 | emit_rma(as, XO_FLDq, XOg_FLDq, tv); | ||
| 1513 | } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && | ||
| 1514 | !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { | ||
| 1515 | IRIns *iri = IR(ir->op1); | ||
| 1516 | emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); | ||
| 1517 | } else { | ||
| 1518 | emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); | ||
| 1519 | } | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | /* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */ | ||
| 1523 | static int fpmjoin_pow(ASMState *as, IRIns *ir) | ||
| 1524 | { | ||
| 1525 | IRIns *irp = IR(ir->op1); | ||
| 1526 | if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { | ||
| 1527 | IRIns *irpp = IR(irp->op1); | ||
| 1528 | if (irpp == ir-2 && irpp->o == IR_FPMATH && | ||
| 1529 | irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { | ||
| 1530 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 1531 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); | ||
| 1532 | IRIns *irx; | ||
| 1533 | if (ra_hasreg(ir->r)) | ||
| 1534 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 1535 | ra_evictset(as, drop); | ||
| 1536 | ra_destreg(as, ir, RID_XMM0); | ||
| 1537 | emit_call(as, lj_vm_pow_sse); | ||
| 1538 | irx = IR(irpp->op1); | ||
| 1539 | if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1) | ||
| 1540 | irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */ | ||
| 1541 | ra_left(as, RID_XMM0, irpp->op1); | ||
| 1542 | ra_left(as, RID_XMM1, irp->op2); | ||
| 1543 | return 1; | ||
| 1544 | } | ||
| 1545 | } | ||
| 1546 | return 0; | ||
| 1547 | } | ||
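| | /* Illustrative note (not part of the original source): an earlier | ||
| | ** optimization pass rewrites x^y as exp2(y*log2(x)); when the LOG2, MUL | ||
| | ** and EXP2 instructions are still adjacent and otherwise unused, the | ||
| | ** code above fuses them back into one call to lj_vm_pow_sse, with x | ||
| | ** expected in xmm0 and y in xmm1. | ||
| | */ | ||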
| 1548 | |||
| 1549 | static void asm_fpmath(ASMState *as, IRIns *ir) | ||
| 1550 | { | ||
| 1551 | IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER; | ||
| 1552 | if (fpm == IRFPM_SQRT) { | ||
| 1553 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
| 1554 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
| 1555 | emit_mrm(as, XO_SQRTSD, dest, left); | ||
| 1556 | } else if (fpm <= IRFPM_TRUNC) { | ||
| 1557 | if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ | ||
| 1558 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
| 1559 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
| 1560 | /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. | ||
| 1561 | ** Let's pretend it's a 3-byte opcode, and compensate afterwards. | ||
| 1562 | ** This is atrocious, but the alternatives are much worse. | ||
| 1563 | */ | ||
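| | /* (Illustrative addition, not part of the original source: ROUNDSD is | ||
| | ** 66 0F 3A 0B /r ib, so only 0F 3A 0B fits in x86Op; the 66 byte is | ||
| | ** prepended afterwards and, on x64, swapped ahead of any REX byte, | ||
| | ** since legacy prefixes must precede REX.) | ||
| | */ | ||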
| 1564 | /* Round down/up/trunc == 1001/1010/1011. */ | ||
| 1565 | emit_i8(as, 0x09 + fpm); | ||
| 1566 | emit_mrm(as, XO_ROUNDSD, dest, left); | ||
| 1567 | if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { | ||
| 1568 | as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ | ||
| 1569 | } | ||
| 1570 | *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ | ||
| 1571 | } else { /* Call helper functions for SSE2 variant. */ | ||
| 1572 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 1573 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); | ||
| 1574 | if (ra_hasreg(ir->r)) | ||
| 1575 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 1576 | ra_evictset(as, drop); | ||
| 1577 | ra_destreg(as, ir, RID_XMM0); | ||
| 1578 | emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : | ||
| 1579 | fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); | ||
| 1580 | ra_left(as, RID_XMM0, ir->op1); | ||
| 1581 | } | ||
| 1582 | } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) { | ||
| 1583 | /* Rejoined to pow(). */ | ||
| 1584 | } else { /* Handle x87 ops. */ | ||
| 1585 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
| 1586 | Reg dest = ir->r; | ||
| 1587 | if (ra_hasreg(dest)) { | ||
| 1588 | ra_free(as, dest); | ||
| 1589 | ra_modified(as, dest); | ||
| 1590 | emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs); | ||
| 1591 | } | ||
| 1592 | emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); | ||
| 1593 | switch (fpm) { /* st0 = lj_vm_*(st0) */ | ||
| 1594 | case IRFPM_EXP: emit_call(as, lj_vm_exp); break; | ||
| 1595 | case IRFPM_EXP2: emit_call(as, lj_vm_exp2); break; | ||
| 1596 | case IRFPM_SIN: emit_x87op(as, XI_FSIN); break; | ||
| 1597 | case IRFPM_COS: emit_x87op(as, XI_FCOS); break; | ||
| 1598 | case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break; | ||
| 1599 | case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10: | ||
| 1600 | /* Note: the use of fyl2xp1 would be pointless here. When computing | ||
| 1601 | ** log(1.0+eps) the precision is already lost after 1.0 is added. | ||
| 1602 | ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense. | ||
| 1603 | */ | ||
| 1604 | emit_x87op(as, XI_FYL2X); break; | ||
| 1605 | case IRFPM_OTHER: | ||
| 1606 | switch (ir->o) { | ||
| 1607 | case IR_ATAN2: | ||
| 1608 | emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break; | ||
| 1609 | case IR_LDEXP: | ||
| 1610 | emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break; | ||
| 1611 | default: lua_assert(0); break; | ||
| 1612 | } | ||
| 1613 | break; | ||
| 1614 | default: lua_assert(0); break; | ||
| 1615 | } | ||
| 1616 | asm_x87load(as, ir->op1); | ||
| 1617 | switch (fpm) { | ||
| 1618 | case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break; | ||
| 1619 | case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break; | ||
| 1620 | case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break; | ||
| 1621 | case IRFPM_OTHER: | ||
| 1622 | if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2); | ||
| 1623 | break; | ||
| 1624 | default: break; | ||
| 1625 | } | ||
| 1626 | } | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | static void asm_fppowi(ASMState *as, IRIns *ir) | ||
| 1630 | { | ||
| 1631 | /* The modified regs must match with the *.dasc implementation. */ | ||
| 1632 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); | ||
| 1633 | if (ra_hasreg(ir->r)) | ||
| 1634 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
| 1635 | ra_evictset(as, drop); | ||
| 1636 | ra_destreg(as, ir, RID_XMM0); | ||
| 1637 | emit_call(as, lj_vm_powi_sse); | ||
| 1638 | ra_left(as, RID_XMM0, ir->op1); | ||
| 1639 | ra_left(as, RID_EAX, ir->op2); | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | #if LJ_64 && LJ_HASFFI | ||
| 1643 | static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id) | ||
| 1644 | { | ||
| 1645 | const CCallInfo *ci = &lj_ir_callinfo[id]; | ||
| 1646 | IRRef args[2]; | ||
| 1647 | args[0] = ir->op1; | ||
| 1648 | args[1] = ir->op2; | ||
| 1649 | asm_setupresult(as, ir, ci); | ||
| 1650 | asm_gencall(as, ci, args); | ||
| 1651 | } | ||
| 1652 | #endif | ||
| 1653 | |||
| 1654 | static int asm_swapops(ASMState *as, IRIns *ir) | ||
| 1655 | { | ||
| 1656 | IRIns *irl = IR(ir->op1); | ||
| 1657 | IRIns *irr = IR(ir->op2); | ||
| 1658 | lua_assert(ra_noreg(irr->r)); | ||
| 1659 | if (!irm_iscomm(lj_ir_mode[ir->o])) | ||
| 1660 | return 0; /* Can't swap non-commutative operations. */ | ||
| 1661 | if (irref_isk(ir->op2)) | ||
| 1662 | return 0; /* Don't swap constants to the left. */ | ||
| 1663 | if (ra_hasreg(irl->r)) | ||
| 1664 | return 1; /* Swap if left already has a register. */ | ||
| 1665 | if (ra_samehint(ir->r, irr->r)) | ||
| 1666 | return 1; /* Swap if dest and right have matching hints. */ | ||
| 1667 | if (as->curins > as->loopref) { /* In variant part? */ | ||
| 1668 | if (ir->op2 < as->loopref && !irt_isphi(irr->t)) | ||
| 1669 | return 0; /* Keep invariants on the right. */ | ||
| 1670 | if (ir->op1 < as->loopref && !irt_isphi(irl->t)) | ||
| 1671 | return 1; /* Swap invariants to the right. */ | ||
| 1672 | } | ||
| 1673 | if (opisfusableload(irl->o)) | ||
| 1674 | return 1; /* Swap fusable loads to the right. */ | ||
| 1675 | return 0; /* Otherwise don't swap. */ | ||
| 1676 | } | ||
| 1677 | |||
| 1678 | static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) | ||
| 1679 | { | ||
| 1680 | IRRef lref = ir->op1; | ||
| 1681 | IRRef rref = ir->op2; | ||
| 1682 | RegSet allow = RSET_FPR; | ||
| 1683 | Reg dest; | ||
| 1684 | Reg right = IR(rref)->r; | ||
| 1685 | if (ra_hasreg(right)) { | ||
| 1686 | rset_clear(allow, right); | ||
| 1687 | ra_noweak(as, right); | ||
| 1688 | } | ||
| 1689 | dest = ra_dest(as, ir, allow); | ||
| 1690 | if (lref == rref) { | ||
| 1691 | right = dest; | ||
| 1692 | } else if (ra_noreg(right)) { | ||
| 1693 | if (asm_swapops(as, ir)) { | ||
| 1694 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 1695 | } | ||
| 1696 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
| 1697 | } | ||
| 1698 | emit_mrm(as, xo, dest, right); | ||
| 1699 | ra_left(as, dest, lref); | ||
| 1700 | } | ||
| 1701 | |||
| 1702 | static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) | ||
| 1703 | { | ||
| 1704 | IRRef lref = ir->op1; | ||
| 1705 | IRRef rref = ir->op2; | ||
| 1706 | RegSet allow = RSET_GPR; | ||
| 1707 | Reg dest, right; | ||
| 1708 | int32_t k = 0; | ||
| 1709 | if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ | ||
| 1710 | as->flagmcp = NULL; | ||
| 1711 | as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2; | ||
| 1712 | } | ||
| 1713 | right = IR(rref)->r; | ||
| 1714 | if (ra_hasreg(right)) { | ||
| 1715 | rset_clear(allow, right); | ||
| 1716 | ra_noweak(as, right); | ||
| 1717 | } | ||
| 1718 | dest = ra_dest(as, ir, allow); | ||
| 1719 | if (lref == rref) { | ||
| 1720 | right = dest; | ||
| 1721 | } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { | ||
| 1722 | if (asm_swapops(as, ir)) { | ||
| 1723 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 1724 | } | ||
| 1725 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
| 1726 | } | ||
| 1727 | if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ | ||
| 1728 | asm_guardcc(as, CC_O); | ||
| 1729 | if (xa != XOg_X_IMUL) { | ||
| 1730 | if (ra_hasreg(right)) | ||
| 1731 | emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); | ||
| 1732 | else | ||
| 1733 | emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); | ||
| 1734 | } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ | ||
| 1735 | emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); | ||
| 1736 | } else { /* IMUL r, r, k. */ | ||
| 1737 | /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ | ||
| 1738 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
| 1739 | x86Op xo; | ||
| 1740 | if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; | ||
| 1741 | } else { emit_i32(as, k); xo = XO_IMULi; } | ||
| 1742 | emit_mrm(as, xo, REX_64IR(ir, dest), left); | ||
| 1743 | return; | ||
| 1744 | } | ||
| 1745 | ra_left(as, dest, lref); | ||
| 1746 | } | ||
| 1747 | |||
| 1748 | /* LEA is really a 4-operand ADD with an independent destination register, | ||
| 1749 | ** up to two source registers and an immediate. One register can be scaled | ||
| 1750 | ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several | ||
| 1751 | ** instructions. | ||
| 1752 | ** | ||
| 1753 | ** Currently only a few common cases are supported: | ||
| 1754 | ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated | ||
| 1755 | ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b | ||
| 1756 | ** - Right ADD fusion: y = a+(b+k) | ||
| 1757 | ** The omitted variants have already been reduced by FOLD. | ||
| 1758 | ** | ||
| 1759 | ** There are more fusion opportunities, like gathering shifts or joining | ||
| 1760 | ** common references. But these are probably not worth the trouble, since | ||
| 1761 | ** array indexing is not decomposed and already makes use of all fields | ||
| 1762 | ** of the ModRM operand. | ||
| 1763 | */ | ||
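| | /* Example (illustrative, not part of the original source): for y = a+b | ||
| | ** with a in eax and b in ecx, asm_lea emits  lea y, [eax+ecx]  instead | ||
| | ** of a mov+add pair, and y = (a+k)+b folds into  lea y, [eax+ecx+k], | ||
| | ** leaving both source registers intact. | ||
| | */ | ||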
| 1764 | static int asm_lea(ASMState *as, IRIns *ir) | ||
| 1765 | { | ||
| 1766 | IRIns *irl = IR(ir->op1); | ||
| 1767 | IRIns *irr = IR(ir->op2); | ||
| 1768 | RegSet allow = RSET_GPR; | ||
| 1769 | Reg dest; | ||
| 1770 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
| 1771 | as->mrm.scale = XM_SCALE1; | ||
| 1772 | as->mrm.ofs = 0; | ||
| 1773 | if (ra_hasreg(irl->r)) { | ||
| 1774 | rset_clear(allow, irl->r); | ||
| 1775 | ra_noweak(as, irl->r); | ||
| 1776 | as->mrm.base = irl->r; | ||
| 1777 | if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { | ||
| 1778 | /* The PHI renaming logic does a better job in some cases. */ | ||
| 1779 | if (ra_hasreg(ir->r) && | ||
| 1780 | ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || | ||
| 1781 | (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) | ||
| 1782 | return 0; | ||
| 1783 | if (irref_isk(ir->op2)) { | ||
| 1784 | as->mrm.ofs = irr->i; | ||
| 1785 | } else { | ||
| 1786 | rset_clear(allow, irr->r); | ||
| 1787 | ra_noweak(as, irr->r); | ||
| 1788 | as->mrm.idx = irr->r; | ||
| 1789 | } | ||
| 1790 | } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && | ||
| 1791 | irref_isk(irr->op2)) { | ||
| 1792 | Reg idx = ra_alloc1(as, irr->op1, allow); | ||
| 1793 | rset_clear(allow, idx); | ||
| 1794 | as->mrm.idx = (uint8_t)idx; | ||
| 1795 | as->mrm.ofs = IR(irr->op2)->i; | ||
| 1796 | } else { | ||
| 1797 | return 0; | ||
| 1798 | } | ||
| 1799 | } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && | ||
| 1800 | (irref_isk(ir->op2) || irref_isk(irl->op2))) { | ||
| 1801 | Reg idx, base = ra_alloc1(as, irl->op1, allow); | ||
| 1802 | rset_clear(allow, base); | ||
| 1803 | as->mrm.base = (uint8_t)base; | ||
| 1804 | if (irref_isk(ir->op2)) { | ||
| 1805 | as->mrm.ofs = irr->i; | ||
| 1806 | idx = ra_alloc1(as, irl->op2, allow); | ||
| 1807 | } else { | ||
| 1808 | as->mrm.ofs = IR(irl->op2)->i; | ||
| 1809 | idx = ra_alloc1(as, ir->op2, allow); | ||
| 1810 | } | ||
| 1811 | rset_clear(allow, idx); | ||
| 1812 | as->mrm.idx = (uint8_t)idx; | ||
| 1813 | } else { | ||
| 1814 | return 0; | ||
| 1815 | } | ||
| 1816 | dest = ra_dest(as, ir, allow); | ||
| 1817 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
| 1818 | return 1; /* Success. */ | ||
| 1819 | } | ||
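As an illustration of the supported cases listed above, the IR shapes asm_lea() handles all collapse into a single instruction; a sketch, assuming a and b are already in registers and k is a 32 bit constant:

/* Illustrative sketch, not part of the source:
**   y = a+b      ->  lea y, [a+b]      (3-operand ADD, base+index)
**   y = a+k      ->  lea y, [a+k]      (3-operand ADD, base+disp)
**   y = (a+b)+k  ->  lea y, [a+b+k]    (left ADD fusion)
**   y = (a+k)+b  ->  lea y, [a+b+k]    (left ADD fusion)
**   y = a+(b+k)  ->  lea y, [a+b+k]    (right ADD fusion)
*/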
| 1820 | |||
| 1821 | static void asm_add(ASMState *as, IRIns *ir) | ||
| 1822 | { | ||
| 1823 | if (irt_isnum(ir->t)) | ||
| 1824 | asm_fparith(as, ir, XO_ADDSD); | ||
| 1825 | else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || | ||
| 1826 | irt_is64(ir->t) || !asm_lea(as, ir)) | ||
| 1827 | asm_intarith(as, ir, XOg_ADD); | ||
| 1828 | } | ||
| 1829 | |||
| 1830 | static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) | ||
| 1831 | { | ||
| 1832 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1833 | emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); | ||
| 1834 | ra_left(as, dest, ir->op1); | ||
| 1835 | } | ||
| 1836 | |||
| 1837 | static void asm_min_max(ASMState *as, IRIns *ir, int cc) | ||
| 1838 | { | ||
| 1839 | Reg right, dest = ra_dest(as, ir, RSET_GPR); | ||
| 1840 | IRRef lref = ir->op1, rref = ir->op2; | ||
| 1841 | if (irref_isk(rref)) { lref = rref; rref = ir->op1; } | ||
| 1842 | right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); | ||
| 1843 | emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); | ||
| 1844 | emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); | ||
| 1845 | ra_left(as, dest, lref); | ||
| 1846 | } | ||
| 1847 | |||
| 1848 | static void asm_bitswap(ASMState *as, IRIns *ir) | ||
| 1849 | { | ||
| 1850 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 1851 | as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), | ||
| 1852 | REX_64IR(ir, dest), 0, 0, as->mcp, 1); | ||
| 1853 | ra_left(as, dest, ir->op1); | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) | ||
| 1857 | { | ||
| 1858 | IRRef rref = ir->op2; | ||
| 1859 | IRIns *irr = IR(rref); | ||
| 1860 | Reg dest; | ||
| 1861 | if (irref_isk(rref)) { /* Constant shifts. */ | ||
| 1862 | int shift; | ||
| 1863 | dest = ra_dest(as, ir, RSET_GPR); | ||
| 1864 | shift = irr->i & (irt_is64(ir->t) ? 63 : 31); | ||
| 1865 | switch (shift) { | ||
| 1866 | case 0: break; | ||
| 1867 | case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; | ||
| 1868 | default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; | ||
| 1869 | } | ||
| 1870 | } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ | ||
| 1871 | RegSet allow = rset_exclude(RSET_GPR, RID_ECX); | ||
| 1872 | Reg right = irr->r; | ||
| 1873 | if (ra_noreg(right)) { | ||
| 1874 | right = ra_allocref(as, rref, RID2RSET(RID_ECX)); | ||
| 1875 | } else if (right != RID_ECX) { | ||
| 1876 | rset_clear(allow, right); | ||
| 1877 | ra_scratch(as, RID2RSET(RID_ECX)); | ||
| 1878 | } | ||
| 1879 | dest = ra_dest(as, ir, allow); | ||
| 1880 | emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); | ||
| 1881 | if (right != RID_ECX) { | ||
| 1882 | ra_noweak(as, right); | ||
| 1883 | emit_rr(as, XO_MOV, RID_ECX, right); | ||
| 1884 | } | ||
| 1885 | } | ||
| 1886 | ra_left(as, dest, ir->op1); | ||
| 1887 | /* | ||
| 1888 | ** Note: avoid using the flags resulting from a shift or rotate! | ||
| 1889 | ** All of them cause a partial flag stall, except for r,1 shifts | ||
| 1890 | ** (but not rotates). And a shift count of 0 leaves the flags unmodified. | ||
| 1891 | */ | ||
| 1892 | } | ||
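Since the assembler emits machine code backwards, the last emit_rr() call above produces the first instruction to execute. A variable shift whose count is not already in ecx therefore runs in this forward order (a sketch):

/* Forward execution order (sketch):
**   mov ecx, right    ; shift count into cl
**   shl dest, cl      ; xs selects shl/shr/sar/rol/ror
*/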
| 1893 | |||
| 1894 | /* -- Comparisons --------------------------------------------------------- */ | ||
| 1895 | |||
| 1896 | /* Virtual flags for unordered FP comparisons. */ | ||
| 1897 | #define VCC_U 0x1000 /* Unordered. */ | ||
| 1898 | #define VCC_P 0x2000 /* Needs extra CC_P branch. */ | ||
| 1899 | #define VCC_S 0x4000 /* Swap avoids CC_P branch. */ | ||
| 1900 | #define VCC_PS (VCC_P|VCC_S) | ||
| 1901 | |||
| 1902 | /* Map of comparisons to flags. ORDER IR. */ | ||
| 1903 | #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) | ||
| 1904 | static const uint16_t asm_compmap[IR_ABC+1] = { | ||
| 1905 | /* signed non-eq unsigned flags */ | ||
| 1906 | /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), | ||
| 1907 | /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), | ||
| 1908 | /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), | ||
| 1909 | /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), | ||
| 1910 | /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), | ||
| 1911 | /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), | ||
| 1912 | /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), | ||
| 1913 | /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), | ||
| 1914 | /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), | ||
| 1915 | /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), | ||
| 1916 | /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ | ||
| 1917 | }; | ||
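The packed fields are recovered with the same shifts asm_comp() and asm_comp_int64() use below; a hypothetical decode helper (not in the source) makes the layout explicit:

/* Hypothetical decoder for an asm_compmap entry (illustration only). */
static void comp_decode(uint32_t cc)
{
  uint32_t ci  = cc & 15;        /* Inverted signed cc (integer compares). */
  uint32_t cu  = (cc >> 4) & 15; /* Inverted unsigned cc (FP and loword). */
  uint32_t cin = (cc >> 8) & 15; /* Inverted cc without equality (hiword). */
  uint32_t cf  = cc & (VCC_U|VCC_P|VCC_S);  /* Virtual FP flags. */
  UNUSED(ci); UNUSED(cu); UNUSED(cin); UNUSED(cf);
}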
| 1918 | |||
| 1919 | /* FP and integer comparisons. */ | ||
| 1920 | static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc) | ||
| 1921 | { | ||
| 1922 | if (irt_isnum(ir->t)) { | ||
| 1923 | IRRef lref = ir->op1; | ||
| 1924 | IRRef rref = ir->op2; | ||
| 1925 | Reg left, right; | ||
| 1926 | MCLabel l_around; | ||
| 1927 | /* | ||
| 1928 | ** An extra CC_P branch is required to preserve ordered/unordered | ||
| 1929 | ** semantics for FP comparisons. This can be avoided by swapping | ||
| 1930 | ** the operands and inverting the condition (except for EQ and UNE). | ||
| 1931 | ** So always try to swap if possible. | ||
| 1932 | ** | ||
| 1933 | ** Another option would be to swap operands to achieve better memory | ||
| 1934 | ** operand fusion. But it's unlikely that this outweighs the cost | ||
| 1935 | ** of the extra branches. | ||
| 1936 | */ | ||
| 1937 | if (cc & VCC_S) { /* Swap? */ | ||
| 1938 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
| 1939 | cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ | ||
| 1940 | } | ||
| 1941 | left = ra_alloc1(as, lref, RSET_FPR); | ||
| 1942 | right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); | ||
| 1943 | l_around = emit_label(as); | ||
| 1944 | asm_guardcc(as, cc >> 4); | ||
| 1945 | if (cc & VCC_P) { /* Extra CC_P branch required? */ | ||
| 1946 | if (!(cc & VCC_U)) { | ||
| 1947 | asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ | ||
| 1948 | } else if (l_around != as->invmcp) { | ||
| 1949 | emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ | ||
| 1950 | } else { | ||
| 1951 | /* Patched to mcloop by asm_loop_fixup. */ | ||
| 1952 | as->loopinv = 2; | ||
| 1953 | if (as->realign) | ||
| 1954 | emit_sjcc(as, CC_P, as->mcp); | ||
| 1955 | else | ||
| 1956 | emit_jcc(as, CC_P, as->mcp); | ||
| 1957 | } | ||
| 1958 | } | ||
| 1959 | emit_mrm(as, XO_UCOMISD, left, right); | ||
| 1960 | } else { | ||
| 1961 | IRRef lref = ir->op1, rref = ir->op2; | ||
| 1962 | IROp leftop = (IROp)(IR(lref)->o); | ||
| 1963 | Reg r64 = REX_64IR(ir, 0); | ||
| 1964 | int32_t imm = 0; | ||
| 1965 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isaddr(ir->t)); | ||
| 1966 | /* Swap constants (only for ABC) and fusable loads to the right. */ | ||
| 1967 | if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { | ||
| 1968 | if ((cc & 0xc) == 0xc) cc ^= 3; /* L <-> G, LE <-> GE */ | ||
| 1969 | else if ((cc & 0xa) == 0x2) cc ^= 5; /* A <-> B, AE <-> BE */ | ||
| 1970 | lref = ir->op2; rref = ir->op1; | ||
| 1971 | } | ||
| 1972 | if (asm_isk32(as, rref, &imm)) { | ||
| 1973 | IRIns *irl = IR(lref); | ||
| 1974 | /* Check whether we can use test ins. Not for unsigned, since CF=0. */ | ||
| 1975 | int usetest = (imm == 0 && (cc & 0xa) != 0x2); | ||
| 1976 | if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { | ||
| 1977 | /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ | ||
| 1978 | Reg right, left = RID_NONE; | ||
| 1979 | RegSet allow = RSET_GPR; | ||
| 1980 | if (!asm_isk32(as, irl->op2, &imm)) { | ||
| 1981 | left = ra_alloc1(as, irl->op2, allow); | ||
| 1982 | rset_clear(allow, left); | ||
| 1983 | } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */ | ||
| 1984 | IRIns *irll = IR(irl->op1); | ||
| 1985 | if (opisfusableload((IROp)irll->o) && | ||
| 1986 | (irt_isi8(irll->t) || irt_isu8(irll->t))) { | ||
| 1987 | IRType1 origt = irll->t; /* Temporarily flip types. */ | ||
| 1988 | irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; | ||
| 1989 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
| 1990 | right = asm_fuseload(as, irl->op1, RSET_GPR); | ||
| 1991 | as->curins++; | ||
| 1992 | irll->t = origt; | ||
| 1993 | if (right != RID_MRM) goto test_nofuse; | ||
| 1994 | /* Fusion succeeded, emit test byte mrm, imm8. */ | ||
| 1995 | asm_guardcc(as, cc); | ||
| 1996 | emit_i8(as, (imm & 0xff)); | ||
| 1997 | emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); | ||
| 1998 | return; | ||
| 1999 | } | ||
| 2000 | } | ||
| 2001 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
| 2002 | right = asm_fuseload(as, irl->op1, allow); | ||
| 2003 | as->curins++; /* Undo the above. */ | ||
| 2004 | test_nofuse: | ||
| 2005 | asm_guardcc(as, cc); | ||
| 2006 | if (ra_noreg(left)) { | ||
| 2007 | emit_i32(as, imm); | ||
| 2008 | emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); | ||
| 2009 | } else { | ||
| 2010 | emit_mrm(as, XO_TEST, r64 + left, right); | ||
| 2011 | } | ||
| 2012 | } else { | ||
| 2013 | Reg left; | ||
| 2014 | if (opisfusableload((IROp)irl->o) && | ||
| 2015 | ((irt_isu8(irl->t) && checku8(imm)) || | ||
| 2016 | ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || | ||
| 2017 | (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { | ||
| 2018 | /* Only the IRT_INT case is fused by asm_fuseload. | ||
| 2019 | ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads | ||
| 2020 | ** are handled here. | ||
| 2021 | ** Note that cmp word [mem], imm16 should not be generated, | ||
| 2022 | ** since it has a length-changing prefix. Compares of a word | ||
| 2023 | ** against a sign-extended imm8 are ok, however. | ||
| 2024 | */ | ||
| 2025 | IRType1 origt = irl->t; /* Temporarily flip types. */ | ||
| 2026 | irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; | ||
| 2027 | left = asm_fuseload(as, lref, RSET_GPR); | ||
| 2028 | irl->t = origt; | ||
| 2029 | if (left == RID_MRM) { /* Fusion succeeded? */ | ||
| 2030 | asm_guardcc(as, cc); | ||
| 2031 | emit_i8(as, imm); | ||
| 2032 | emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? | ||
| 2033 | XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); | ||
| 2034 | return; | ||
| 2035 | } /* Otherwise handle register case as usual. */ | ||
| 2036 | } else { | ||
| 2037 | left = asm_fuseload(as, lref, RSET_GPR); | ||
| 2038 | } | ||
| 2039 | asm_guardcc(as, cc); | ||
| 2040 | if (usetest && left != RID_MRM) { | ||
| 2041 | /* Use test r,r instead of cmp r,0. */ | ||
| 2042 | emit_rr(as, XO_TEST, r64 + left, left); | ||
| 2043 | if (irl+1 == ir) /* Referencing previous ins? */ | ||
| 2044 | as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ | ||
| 2045 | } else { | ||
| 2046 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); | ||
| 2047 | } | ||
| 2048 | } | ||
| 2049 | } else { | ||
| 2050 | Reg left = ra_alloc1(as, lref, RSET_GPR); | ||
| 2051 | Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left)); | ||
| 2052 | asm_guardcc(as, cc); | ||
| 2053 | emit_mrm(as, XO_CMP, r64 + left, right); | ||
| 2054 | } | ||
| 2055 | } | ||
| 2056 | } | ||
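A worked example of the swap trick above, assuming the usual guard semantics (the exit is taken when the inverted condition holds): for IR_LT on doubles the map entry is COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), so VCC_S is set, a < b is assembled as b > a, and cc ^= (VCC_PS|(5<<4)) flips CC_AE to CC_BE in the unsigned field (the x86 cc encodings satisfy AE^5 == BE) while clearing VCC_P:

/* Sketch of the resulting code for a < b (operands swapped to b, a):
**   ucomisd b, a
**   jbe <exit>    ; inverted CC_A; unordered sets ZF and CF, so a NaN
**                 ; exits too -- no extra jp branch is needed
*/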
| 2057 | |||
| 2058 | #if LJ_32 && LJ_HASFFI | ||
| 2059 | /* 64 bit integer comparisons in 32 bit mode. */ | ||
| 2060 | static void asm_comp_int64(ASMState *as, IRIns *ir) | ||
| 2061 | { | ||
| 2062 | uint32_t cc = asm_compmap[(ir-1)->o]; | ||
| 2063 | RegSet allow = RSET_GPR; | ||
| 2064 | Reg lefthi = RID_NONE, leftlo = RID_NONE; | ||
| 2065 | Reg righthi = RID_NONE, rightlo = RID_NONE; | ||
| 2066 | MCLabel l_around; | ||
| 2067 | x86ModRM mrm; | ||
| 2068 | |||
| 2069 | as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ | ||
| 2070 | |||
| 2071 | /* Allocate/fuse hiword operands. */ | ||
| 2072 | if (irref_isk(ir->op2)) { | ||
| 2073 | lefthi = asm_fuseload(as, ir->op1, allow); | ||
| 2074 | } else { | ||
| 2075 | lefthi = ra_alloc1(as, ir->op1, allow); | ||
| 2076 | righthi = asm_fuseload(as, ir->op2, allow); | ||
| 2077 | if (righthi == RID_MRM) { | ||
| 2078 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
| 2079 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
| 2080 | } else { | ||
| 2081 | rset_clear(allow, righthi); | ||
| 2082 | } | ||
| 2083 | } | ||
| 2084 | mrm = as->mrm; /* Save state for hiword instruction. */ | ||
| 2085 | |||
| 2086 | /* Allocate/fuse loword operands. */ | ||
| 2087 | if (irref_isk((ir-1)->op2)) { | ||
| 2088 | leftlo = asm_fuseload(as, (ir-1)->op1, allow); | ||
| 2089 | } else { | ||
| 2090 | leftlo = ra_alloc1(as, (ir-1)->op1, allow); | ||
| 2091 | rightlo = asm_fuseload(as, (ir-1)->op2, allow); | ||
| 2092 | if (rightlo == RID_MRM) { | ||
| 2093 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
| 2094 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
| 2095 | } else { | ||
| 2096 | rset_clear(allow, rightlo); | ||
| 2097 | } | ||
| 2098 | } | ||
| 2099 | |||
| 2100 | /* All register allocations must be performed _before_ this point. */ | ||
| 2101 | l_around = emit_label(as); | ||
| 2102 | as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ | ||
| 2103 | |||
| 2104 | /* Loword comparison and branch. */ | ||
| 2105 | asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ | ||
| 2106 | if (ra_noreg(rightlo)) { | ||
| 2107 | int32_t imm = IR((ir-1)->op2)->i; | ||
| 2108 | if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) | ||
| 2109 | emit_rr(as, XO_TEST, leftlo, leftlo); | ||
| 2110 | else | ||
| 2111 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); | ||
| 2112 | } else { | ||
| 2113 | emit_mrm(as, XO_CMP, leftlo, rightlo); | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | /* Hiword comparison and branches. */ | ||
| 2117 | if ((cc & 15) != CC_NE) | ||
| 2118 | emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ | ||
| 2119 | if ((cc & 15) != CC_E) | ||
| 2120 | asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ | ||
| 2121 | as->mrm = mrm; /* Restore state. */ | ||
| 2122 | if (ra_noreg(righthi)) { | ||
| 2123 | int32_t imm = IR(ir->op2)->i; | ||
| 2124 | if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) | ||
| 2125 | emit_rr(as, XO_TEST, lefthi, lefthi); | ||
| 2126 | else | ||
| 2127 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); | ||
| 2128 | } else { | ||
| 2129 | emit_mrm(as, XO_CMP, lefthi, righthi); | ||
| 2130 | } | ||
| 2131 | } | ||
| 2132 | #endif | ||
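Unrolled into forward execution order, the compare chain emitted backwards by asm_comp_int64() for a signed 64 bit a < b (map entry COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS)) looks like this sketch:

/* Forward execution order (sketch):
**   cmp  hi(a), hi(b)
**   jg   <exit>      ; cc>>8: hiword compare without equality check
**   jne  l_around    ; hiwords unequal (and not >): a < b holds
**   cmp  lo(a), lo(b)
**   jae  <exit>      ; cc>>4: unsigned loword compare
** l_around:
*/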
| 2133 | |||
| 2134 | /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ | ||
| 2135 | |||
| 2136 | /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ | ||
| 2137 | static void asm_hiop(ASMState *as, IRIns *ir) | ||
| 2138 | { | ||
| 2139 | #if LJ_32 && LJ_HASFFI | ||
| 2140 | /* HIOP is marked as a store because it needs its own DCE logic. */ | ||
| 2141 | int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ | ||
| 2142 | if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; | ||
| 2143 | if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ | ||
| 2144 | if (usehi || uselo) { | ||
| 2145 | if (irt_isfp(ir->t)) | ||
| 2146 | asm_conv_fp_int64(as, ir); | ||
| 2147 | else | ||
| 2148 | asm_conv_int64_fp(as, ir); | ||
| 2149 | } | ||
| 2150 | as->curins--; /* Always skip the CONV. */ | ||
| 2151 | return; | ||
| 2152 | } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ | ||
| 2153 | asm_comp_int64(as, ir); | ||
| 2154 | return; | ||
| 2155 | } | ||
| 2156 | if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ | ||
| 2157 | switch ((ir-1)->o) { | ||
| 2158 | case IR_ADD: | ||
| 2159 | asm_intarith(as, ir, uselo ? XOg_ADC : XOg_ADD); | ||
| 2160 | break; | ||
| 2161 | case IR_SUB: | ||
| 2162 | asm_intarith(as, ir, uselo ? XOg_SBB : XOg_SUB); | ||
| 2163 | break; | ||
| 2164 | case IR_NEG: { | ||
| 2165 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
| 2166 | emit_rr(as, XO_GROUP3, XOg_NEG, dest); | ||
| 2167 | if (uselo) { | ||
| 2168 | emit_i8(as, 0); | ||
| 2169 | emit_rr(as, XO_ARITHi8, XOg_ADC, dest); | ||
| 2170 | } | ||
| 2171 | ra_left(as, dest, ir->op1); | ||
| 2172 | break; | ||
| 2173 | } | ||
| 2174 | case IR_CALLN: | ||
| 2175 | case IR_CALLXS: | ||
| 2176 | ra_destreg(as, ir, RID_RETHI); | ||
| 2177 | if (!uselo) | ||
| 2178 | ra_allocref(as, ir->op1, RID2RSET(RID_RET)); /* Mark call as used. */ | ||
| 2179 | break; | ||
| 2180 | case IR_CNEWI: | ||
| 2181 | /* Nothing to do here. Handled by CNEWI itself. */ | ||
| 2182 | break; | ||
| 2183 | default: lua_assert(0); break; | ||
| 2184 | } | ||
| 2185 | #else | ||
| 2186 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ | ||
| 2187 | #endif | ||
| 2188 | } | ||
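For example, a 64 bit addition split into an IR_ADD loword op directly followed by IR_HIOP ends up as an add/adc pair in forward order (a sketch); if the loword op was eliminated by DCE (uselo == 0), there is no carry to consume and the plain XOg_ADD/XOg_SUB is emitted instead:

/* Forward execution order (sketch):
**   add lo(dest), lo(right)   ; loword op, sets CF
**   adc hi(dest), hi(right)   ; hiword op, consumes CF
*/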
| 2189 | |||
| 2190 | /* -- Stack handling ------------------------------------------------------ */ | ||
| 2191 | |||
| 2192 | /* Check Lua stack size for overflow. Use exit handler as fallback. */ | ||
| 2193 | static void asm_stack_check(ASMState *as, BCReg topslot, | ||
| 2194 | Reg pbase, RegSet allow, ExitNo exitno) | ||
| 2195 | { | ||
| 2196 | /* Try to get an unused temp. register, otherwise spill/restore eax. */ | ||
| 2197 | Reg r = allow ? rset_pickbot(allow) : RID_EAX; | ||
| 2198 | emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); | ||
| 2199 | if (allow == RSET_EMPTY) /* Restore temp. register. */ | ||
| 2200 | emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); | ||
| 2201 | else | ||
| 2202 | ra_modified(as, r); | ||
| 2203 | emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); | ||
| 2204 | if (ra_hasreg(pbase) && pbase != r) | ||
| 2205 | emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); | ||
| 2206 | else | ||
| 2207 | emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, | ||
| 2208 | ptr2addr(&J2G(as->J)->jit_base)); | ||
| 2209 | emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); | ||
| 2210 | emit_getgl(as, r, jit_L); | ||
| 2211 | if (allow == RSET_EMPTY) /* Spill temp. register. */ | ||
| 2212 | emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); | ||
| 2213 | } | ||
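Read forwards, the emitted check computes the remaining stack space and exits if it is too small; a sketch, where the spill/restore of the temp register only appears when allow is empty:

/* Forward execution order (sketch):
**   mov [esp], r            ; only if no temp register is free
**   mov r, [&G->jit_L]      ; current lua_State *L
**   mov r, [r+maxstack]
**   sub r, pbase            ; or: sub r, [&G->jit_base]
**   cmp r, 8*topslot
**   mov r, [esp]            ; only if spilled above
**   jb  <exit stub>         ; not enough stack slots left
*/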
| 2214 | |||
| 2215 | /* Restore Lua stack from on-trace state. */ | ||
| 2216 | static void asm_stack_restore(ASMState *as, SnapShot *snap) | ||
| 2217 | { | ||
| 2218 | SnapEntry *map = &as->T->snapmap[snap->mapofs]; | ||
| 2219 | MSize n, nent = snap->nent; | ||
| 2220 | SnapEntry *flinks = map + nent + snap->depth; | ||
| 2221 | /* Store the value of all modified slots to the Lua stack. */ | ||
| 2222 | for (n = 0; n < nent; n++) { | ||
| 2223 | SnapEntry sn = map[n]; | ||
| 2224 | BCReg s = snap_slot(sn); | ||
| 2225 | int32_t ofs = 8*((int32_t)s-1); | ||
| 2226 | IRRef ref = snap_ref(sn); | ||
| 2227 | IRIns *ir = IR(ref); | ||
| 2228 | if ((sn & SNAP_NORESTORE)) | ||
| 2229 | continue; | ||
| 2230 | if (irt_isnum(ir->t)) { | ||
| 2231 | Reg src = ra_alloc1(as, ref, RSET_FPR); | ||
| 2232 | emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); | ||
| 2233 | } else { | ||
| 2234 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
| 2235 | (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
| 2236 | if (!irref_isk(ref)) { | ||
| 2237 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); | ||
| 2238 | emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); | ||
| 2239 | } else if (!irt_ispri(ir->t)) { | ||
| 2240 | emit_movmroi(as, RID_BASE, ofs, ir->i); | ||
| 2241 | } | ||
| 2242 | if ((sn & (SNAP_CONT|SNAP_FRAME))) { | ||
| 2243 | if (s != 0) /* Do not overwrite link to previous frame. */ | ||
| 2244 | emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); | ||
| 2245 | } else { | ||
| 2246 | if (!(LJ_64 && irt_islightud(ir->t))) | ||
| 2247 | emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); | ||
| 2248 | } | ||
| 2249 | } | ||
| 2250 | checkmclim(as); | ||
| 2251 | } | ||
| 2252 | lua_assert(map + nent == flinks); | ||
| 2253 | } | ||
| 2254 | |||
| 2255 | /* -- GC handling --------------------------------------------------------- */ | ||
| 2256 | |||
| 2257 | /* Check GC threshold and do one or more GC steps. */ | ||
| 2258 | static void asm_gc_check(ASMState *as) | ||
| 2259 | { | ||
| 2260 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; | ||
| 2261 | IRRef args[2]; | ||
| 2262 | MCLabel l_end; | ||
| 2263 | Reg tmp; | ||
| 2264 | ra_evictset(as, RSET_SCRATCH); | ||
| 2265 | l_end = emit_label(as); | ||
| 2266 | /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ | ||
| 2267 | asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ | ||
| 2268 | emit_rr(as, XO_TEST, RID_RET, RID_RET); | ||
| 2269 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
| 2270 | args[1] = ASMREF_TMP2; /* MSize steps */ | ||
| 2271 | asm_gencall(as, ci, args); | ||
| 2272 | tmp = ra_releasetmp(as, ASMREF_TMP1); | ||
| 2273 | emit_loada(as, tmp, J2G(as->J)); | ||
| 2274 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), (int32_t)as->gcsteps); | ||
| 2275 | /* Jump around GC step if GC total < GC threshold. */ | ||
| 2276 | emit_sjcc(as, CC_B, l_end); | ||
| 2277 | emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); | ||
| 2278 | emit_getgl(as, tmp, gc.total); | ||
| 2279 | as->gcsteps = 0; | ||
| 2280 | checkmclim(as); | ||
| 2281 | } | ||
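In forward execution order the check reads roughly as follows (a sketch; the argument setup is whatever asm_gencall() arranges for the ABI):

/* Forward execution order (sketch):
**   mov tmp, [&g->gc.total]
**   cmp tmp, [&g->gc.threshold]
**   jb  l_end               ; below threshold: skip the GC step
**   mov <arg1>, g           ; lj_gc_step_jit(g, steps)
**   mov <arg2>, steps
**   call lj_gc_step_jit
**   test eax, eax
**   jne <exit>              ; exit trace if in GCSatomic/GCSfinalize
** l_end:
*/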
| 2282 | |||
| 2283 | /* -- Loop handling ------------------------------------------------------- */ | ||
| 2284 | |||
| 2285 | /* Fixup the loop branch. */ | ||
| 2286 | static void asm_loop_fixup(ASMState *as) | ||
| 2287 | { | ||
| 2288 | MCode *p = as->mctop; | ||
| 2289 | MCode *target = as->mcp; | ||
| 2290 | if (as->realign) { /* Realigned loops use short jumps. */ | ||
| 2291 | as->realign = NULL; /* Stop another retry. */ | ||
| 2292 | lua_assert(((intptr_t)target & 15) == 0); | ||
| 2293 | if (as->loopinv) { /* Inverted loop branch? */ | ||
| 2294 | p -= 5; | ||
| 2295 | p[0] = XI_JMP; | ||
| 2296 | lua_assert(target - p >= -128); | ||
| 2297 | p[-1] = (MCode)(target - p); /* Patch sjcc. */ | ||
| 2298 | if (as->loopinv == 2) | ||
| 2299 | p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ | ||
| 2300 | } else { | ||
| 2301 | lua_assert(target - p >= -128); | ||
| 2302 | p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ | ||
| 2303 | p[-2] = XI_JMPs; | ||
| 2304 | } | ||
| 2305 | } else { | ||
| 2306 | MCode *newloop; | ||
| 2307 | p[-5] = XI_JMP; | ||
| 2308 | if (as->loopinv) { /* Inverted loop branch? */ | ||
| 2309 | /* asm_guardcc already inverted the jcc and patched the jmp. */ | ||
| 2310 | p -= 5; | ||
| 2311 | newloop = target+4; | ||
| 2312 | *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ | ||
| 2313 | if (as->loopinv == 2) { | ||
| 2314 | *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ | ||
| 2315 | newloop = target+8; | ||
| 2316 | } | ||
| 2317 | } else { /* Otherwise just patch jmp. */ | ||
| 2318 | *(int32_t *)(p-4) = (int32_t)(target - p); | ||
| 2319 | newloop = target+3; | ||
| 2320 | } | ||
| 2321 | /* Realign small loops and shorten the loop branch. */ | ||
| 2322 | if (newloop >= p - 128) { | ||
| 2323 | as->realign = newloop; /* Force a retry and remember alignment. */ | ||
| 2324 | as->curins = as->stopins; /* Abort asm_trace now. */ | ||
| 2325 | as->T->nins = as->orignins; /* Remove any added renames. */ | ||
| 2326 | } | ||
| 2327 | } | ||
| 2328 | } | ||
| 2329 | |||
| 2330 | /* -- Head of trace ------------------------------------------------------- */ | ||
| 2331 | |||
| 2332 | /* Coalesce BASE register for a root trace. */ | ||
| 2333 | static void asm_head_root_base(ASMState *as) | ||
| 2334 | { | ||
| 2335 | IRIns *ir = IR(REF_BASE); | ||
| 2336 | Reg r = ir->r; | ||
| 2337 | if (ra_hasreg(r)) { | ||
| 2338 | ra_free(as, r); | ||
| 2339 | if (rset_test(as->modset, r)) | ||
| 2340 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
| 2341 | if (r != RID_BASE) | ||
| 2342 | emit_rr(as, XO_MOV, r, RID_BASE); | ||
| 2343 | } | ||
| 2344 | } | ||
| 2345 | |||
| 2346 | /* Coalesce or reload BASE register for a side trace. */ | ||
| 2347 | static RegSet asm_head_side_base(ASMState *as, Reg pbase, RegSet allow) | ||
| 2348 | { | ||
| 2349 | IRIns *ir = IR(REF_BASE); | ||
| 2350 | Reg r = ir->r; | ||
| 2351 | if (ra_hasreg(r)) { | ||
| 2352 | ra_free(as, r); | ||
| 2353 | if (rset_test(as->modset, r)) | ||
| 2354 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
| 2355 | if (pbase == r) { | ||
| 2356 | rset_clear(allow, r); /* Mark same BASE register as coalesced. */ | ||
| 2357 | } else if (ra_hasreg(pbase) && rset_test(as->freeset, pbase)) { | ||
| 2358 | rset_clear(allow, pbase); | ||
| 2359 | emit_rr(as, XO_MOV, r, pbase); /* Move from coalesced parent register. */ | ||
| 2360 | } else { | ||
| 2361 | emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ | ||
| 2362 | } | ||
| 2363 | } | ||
| 2364 | return allow; | ||
| 2365 | } | ||
| 2366 | |||
| 2367 | /* -- Tail of trace ------------------------------------------------------- */ | ||
| 2368 | |||
| 2369 | /* Fixup the tail code. */ | ||
| 2370 | static void asm_tail_fixup(ASMState *as, TraceNo lnk) | ||
| 2371 | { | ||
| 2372 | /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ | ||
| 2373 | MCode *p = as->mctop; | ||
| 2374 | MCode *target, *q; | ||
| 2375 | int32_t spadj = as->T->spadjust; | ||
| 2376 | if (spadj == 0) { | ||
| 2377 | p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); | ||
| 2378 | } else { | ||
| 2379 | MCode *p1; | ||
| 2380 | /* Patch stack adjustment. */ | ||
| 2381 | if (checki8(spadj)) { | ||
| 2382 | p -= 3; | ||
| 2383 | p1 = p-6; | ||
| 2384 | *p1 = (MCode)spadj; | ||
| 2385 | } else { | ||
| 2386 | p1 = p-9; | ||
| 2387 | *(int32_t *)p1 = spadj; | ||
| 2388 | } | ||
| 2389 | if ((as->flags & JIT_F_LEA_AGU)) { | ||
| 2390 | #if LJ_64 | ||
| 2391 | p1[-4] = 0x48; | ||
| 2392 | #endif | ||
| 2393 | p1[-3] = (MCode)XI_LEA; | ||
| 2394 | p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); | ||
| 2395 | p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
| 2396 | } else { | ||
| 2397 | #if LJ_64 | ||
| 2398 | p1[-3] = 0x48; | ||
| 2399 | #endif | ||
| 2400 | p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); | ||
| 2401 | p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); | ||
| 2402 | } | ||
| 2403 | } | ||
| 2404 | /* Patch exit branch. */ | ||
| 2405 | target = lnk == TRACE_INTERP ? (MCode *)lj_vm_exit_interp : | ||
| 2406 | traceref(as->J, lnk)->mcode; | ||
| 2407 | *(int32_t *)(p-4) = jmprel(p, target); | ||
| 2408 | p[-5] = XI_JMP; | ||
| 2409 | /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ | ||
| 2410 | for (q = as->mctop-1; q >= p; q--) | ||
| 2411 | *q = XI_NOP; | ||
| 2412 | as->mctop = p; | ||
| 2413 | } | ||
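The patched stack adjustment comes in two shapes, selected by the JIT_F_LEA_AGU CPU flag; a sketch with 32 bit registers (x64 prepends the 0x48 REX.W byte shown in the code above):

/* The two patched forms (sketch):
**   add esp, imm8/imm32           ; default
**   lea esp, [esp+imm8/imm32]     ; JIT_F_LEA_AGU: LEA leaves EFLAGS
**                                 ; untouched and can use the AGU
*/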
| 2414 | |||
| 2415 | /* Prepare tail of code. */ | ||
| 2416 | static void asm_tail_prep(ASMState *as) | ||
| 2417 | { | ||
| 2418 | MCode *p = as->mctop; | ||
| 2419 | /* Realign and leave room for backwards loop branch or exit branch. */ | ||
| 2420 | if (as->realign) { | ||
| 2421 | int i = ((int)(intptr_t)as->realign) & 15; | ||
| 2422 | /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ | ||
| 2423 | while (i-- > 0) | ||
| 2424 | *--p = XI_NOP; | ||
| 2425 | as->mctop = p; | ||
| 2426 | p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ | ||
| 2427 | } else { | ||
| 2428 | p -= 5; /* Space for exit branch (near jmp). */ | ||
| 2429 | } | ||
| 2430 | if (as->loopref) { | ||
| 2431 | as->invmcp = as->mcp = p; | ||
| 2432 | } else { | ||
| 2433 | /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ | ||
| 2434 | as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); | ||
| 2435 | as->invmcp = NULL; | ||
| 2436 | } | ||
| 2437 | } | ||
| 2438 | |||
| 2439 | /* -- Instruction dispatch ------------------------------------------------ */ | ||
| 2440 | |||
| 2441 | /* Assemble a single instruction. */ | ||
| 2442 | static void asm_ir(ASMState *as, IRIns *ir) | ||
| 2443 | { | ||
| 2444 | switch ((IROp)ir->o) { | ||
| 2445 | /* Miscellaneous ops. */ | ||
| 2446 | case IR_LOOP: asm_loop(as); break; | ||
| 2447 | case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; | ||
| 2448 | case IR_USE: | ||
| 2449 | ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; | ||
| 2450 | case IR_PHI: asm_phi(as, ir); break; | ||
| 2451 | case IR_HIOP: asm_hiop(as, ir); break; | ||
| 2452 | |||
| 2453 | /* Guarded assertions. */ | ||
| 2454 | case IR_LT: case IR_GE: case IR_LE: case IR_GT: | ||
| 2455 | case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: | ||
| 2456 | case IR_EQ: case IR_NE: case IR_ABC: | ||
| 2457 | asm_comp(as, ir, asm_compmap[ir->o]); | ||
| 2458 | break; | ||
| 2459 | |||
| 2460 | case IR_RETF: asm_retf(as, ir); break; | ||
| 2461 | |||
| 2462 | /* Bit ops. */ | ||
| 2463 | case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break; | ||
| 2464 | case IR_BSWAP: asm_bitswap(as, ir); break; | ||
| 2465 | |||
| 2466 | case IR_BAND: asm_intarith(as, ir, XOg_AND); break; | ||
| 2467 | case IR_BOR: asm_intarith(as, ir, XOg_OR); break; | ||
| 2468 | case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break; | ||
| 2469 | |||
| 2470 | case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break; | ||
| 2471 | case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break; | ||
| 2472 | case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break; | ||
| 2473 | case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break; | ||
| 2474 | case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break; | ||
| 2475 | |||
| 2476 | /* Arithmetic ops. */ | ||
| 2477 | case IR_ADD: asm_add(as, ir); break; | ||
| 2478 | case IR_SUB: | ||
| 2479 | if (irt_isnum(ir->t)) | ||
| 2480 | asm_fparith(as, ir, XO_SUBSD); | ||
| 2481 | else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ | ||
| 2482 | asm_intarith(as, ir, XOg_SUB); | ||
| 2483 | break; | ||
| 2484 | case IR_MUL: | ||
| 2485 | if (irt_isnum(ir->t)) | ||
| 2486 | asm_fparith(as, ir, XO_MULSD); | ||
| 2487 | else | ||
| 2488 | asm_intarith(as, ir, XOg_X_IMUL); | ||
| 2489 | break; | ||
| 2490 | case IR_DIV: | ||
| 2491 | #if LJ_64 && LJ_HASFFI | ||
| 2492 | if (!irt_isnum(ir->t)) | ||
| 2493 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : | ||
| 2494 | IRCALL_lj_carith_divu64); | ||
| 2495 | else | ||
| 2496 | #endif | ||
| 2497 | asm_fparith(as, ir, XO_DIVSD); | ||
| 2498 | break; | ||
| 2499 | case IR_MOD: | ||
| 2500 | #if LJ_64 && LJ_HASFFI | ||
| 2501 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : | ||
| 2502 | IRCALL_lj_carith_modu64); | ||
| 2503 | #else | ||
| 2504 | lua_assert(0); | ||
| 2505 | #endif | ||
| 2506 | break; | ||
| 2507 | |||
| 2508 | case IR_NEG: | ||
| 2509 | if (irt_isnum(ir->t)) | ||
| 2510 | asm_fparith(as, ir, XO_XORPS); | ||
| 2511 | else | ||
| 2512 | asm_neg_not(as, ir, XOg_NEG); | ||
| 2513 | break; | ||
| 2514 | case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break; | ||
| 2515 | |||
| 2516 | case IR_MIN: | ||
| 2517 | if (irt_isnum(ir->t)) | ||
| 2518 | asm_fparith(as, ir, XO_MINSD); | ||
| 2519 | else | ||
| 2520 | asm_min_max(as, ir, CC_G); | ||
| 2521 | break; | ||
| 2522 | case IR_MAX: | ||
| 2523 | if (irt_isnum(ir->t)) | ||
| 2524 | asm_fparith(as, ir, XO_MAXSD); | ||
| 2525 | else | ||
| 2526 | asm_min_max(as, ir, CC_L); | ||
| 2527 | break; | ||
| 2528 | |||
| 2529 | case IR_FPMATH: case IR_ATAN2: case IR_LDEXP: | ||
| 2530 | asm_fpmath(as, ir); | ||
| 2531 | break; | ||
| 2532 | case IR_POW: | ||
| 2533 | #if LJ_64 && LJ_HASFFI | ||
| 2534 | if (!irt_isnum(ir->t)) | ||
| 2535 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : | ||
| 2536 | IRCALL_lj_carith_powu64); | ||
| 2537 | else | ||
| 2538 | #endif | ||
| 2539 | asm_fppowi(as, ir); | ||
| 2540 | break; | ||
| 2541 | |||
| 2542 | /* Overflow-checking arithmetic ops. Note: don't use LEA here! */ | ||
| 2543 | case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break; | ||
| 2544 | case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break; | ||
| 2545 | case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break; | ||
| 2546 | |||
| 2547 | /* Memory references. */ | ||
| 2548 | case IR_AREF: asm_aref(as, ir); break; | ||
| 2549 | case IR_HREF: asm_href(as, ir); break; | ||
| 2550 | case IR_HREFK: asm_hrefk(as, ir); break; | ||
| 2551 | case IR_NEWREF: asm_newref(as, ir); break; | ||
| 2552 | case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; | ||
| 2553 | case IR_FREF: asm_fref(as, ir); break; | ||
| 2554 | case IR_STRREF: asm_strref(as, ir); break; | ||
| 2555 | |||
| 2556 | /* Loads and stores. */ | ||
| 2557 | case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: | ||
| 2558 | asm_ahuvload(as, ir); | ||
| 2559 | break; | ||
| 2560 | case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break; | ||
| 2561 | case IR_SLOAD: asm_sload(as, ir); break; | ||
| 2562 | |||
| 2563 | case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; | ||
| 2564 | case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break; | ||
| 2565 | |||
| 2566 | /* Allocations. */ | ||
| 2567 | case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; | ||
| 2568 | case IR_TNEW: asm_tnew(as, ir); break; | ||
| 2569 | case IR_TDUP: asm_tdup(as, ir); break; | ||
| 2570 | case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; | ||
| 2571 | |||
| 2572 | /* Write barriers. */ | ||
| 2573 | case IR_TBAR: asm_tbar(as, ir); break; | ||
| 2574 | case IR_OBAR: asm_obar(as, ir); break; | ||
| 2575 | |||
| 2576 | /* Type conversions. */ | ||
| 2577 | case IR_TOBIT: asm_tobit(as, ir); break; | ||
| 2578 | case IR_CONV: asm_conv(as, ir); break; | ||
| 2579 | case IR_TOSTR: asm_tostr(as, ir); break; | ||
| 2580 | case IR_STRTO: asm_strto(as, ir); break; | ||
| 2581 | |||
| 2582 | /* Calls. */ | ||
| 2583 | case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; | ||
| 2584 | case IR_CALLXS: asm_callx(as, ir); break; | ||
| 2585 | case IR_CARG: break; | ||
| 2586 | |||
| 2587 | default: | ||
| 2588 | setintV(&as->J->errinfo, ir->o); | ||
| 2589 | lj_trace_err_info(as->J, LJ_TRERR_NYIIR); | ||
| 2590 | break; | ||
| 2591 | } | ||
| 2592 | } | ||
| 2593 | |||
| 2594 | /* -- Trace setup --------------------------------------------------------- */ | ||
| 2595 | |||
| 2596 | /* Ensure there are enough stack slots for call arguments. */ | ||
| 2597 | static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) | ||
| 2598 | { | ||
| 2599 | IRRef args[CCI_NARGS_MAX]; | ||
| 2600 | uint32_t nargs = (int)CCI_NARGS(ci); | ||
| 2601 | int nslots = 0; | ||
| 2602 | asm_collectargs(as, ir, ci, args); | ||
| 2603 | #if LJ_64 | ||
| 2604 | if (LJ_ABI_WIN) { | ||
| 2605 | nslots = (int)(nargs*2); /* Only matters for more than four args. */ | ||
| 2606 | } else { | ||
| 2607 | uint32_t i; | ||
| 2608 | int ngpr = 6, nfpr = 8; | ||
| 2609 | for (i = 0; i < nargs; i++) | ||
| 2610 | if (irt_isfp(IR(args[i])->t)) { | ||
| 2611 | if (nfpr > 0) nfpr--; else nslots += 2; | ||
| 2612 | } else { | ||
| 2613 | if (ngpr > 0) ngpr--; else nslots += 2; | ||
| 2614 | } | ||
| 2615 | } | ||
| 2616 | if (nslots > as->evenspill) /* Leave room for args in stack slots. */ | ||
| 2617 | as->evenspill = nslots; | ||
| 2618 | return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); | ||
| 2619 | #else | ||
| 2620 | if ((ci->flags & CCI_FASTCALL)) { | ||
| 2621 | lua_assert(nargs <= 2); | ||
| 2622 | } else { | ||
| 2623 | uint32_t i; | ||
| 2624 | for (i = 0; i < nargs; i++) | ||
| 2625 | nslots += irt_isnum(IR(args[i])->t) ? 2 : 1; | ||
| 2626 | if (nslots > as->evenspill) /* Leave room for args. */ | ||
| 2627 | as->evenspill = nslots; | ||
| 2628 | } | ||
| 2629 | return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); | ||
| 2630 | #endif | ||
| 2631 | } | ||
| 2632 | |||
| 2633 | /* Target-specific setup. */ | ||
| 2634 | static void asm_setup_target(ASMState *as) | ||
| 2635 | { | ||
| 2636 | asm_exitstub_setup(as, as->T->nsnap); | ||
| 2637 | } | ||
| 2638 | |||
| 2639 | /* -- Trace patching ------------------------------------------------------ */ | ||
| 2640 | |||
| 2641 | /* Patch exit jumps of existing machine code to a new target. */ | ||
| 2642 | void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | ||
| 2643 | { | ||
| 2644 | MCode *p = T->mcode; | ||
| 2645 | MCode *mcarea = lj_mcode_patch(J, p, 0); | ||
| 2646 | MSize len = T->szmcode; | ||
| 2647 | MCode *px = exitstub_addr(J, exitno) - 6; | ||
| 2648 | MCode *pe = p+len-6; | ||
| 2649 | uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); | ||
| 2650 | if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) | ||
| 2651 | *(int32_t *)(p+len-4) = jmprel(p+len, target); | ||
| 2652 | /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ | ||
| 2653 | for (; p < pe; p++) | ||
| 2654 | if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { | ||
| 2655 | p += LJ_64 ? 11 : 10; | ||
| 2656 | break; | ||
| 2657 | } | ||
| 2658 | lua_assert(p < pe); | ||
| 2659 | for (; p < pe; p++) { | ||
| 2660 | if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { | ||
| 2661 | *(int32_t *)(p+2) = jmprel(p+6, target); | ||
| 2662 | p += 5; | ||
| 2663 | } | ||
| 2664 | } | ||
| 2665 | lj_mcode_patch(J, mcarea, 1); | ||
| 2666 | VG_INVALIDATE(T->mcode, T->szmcode); | ||
| 2667 | } | ||
| 2668 | |||
diff --git a/src/lj_emit_x86.h b/src/lj_emit_x86.h new file mode 100644 index 00000000..c781e3d3 --- /dev/null +++ b/src/lj_emit_x86.h | |||
| @@ -0,0 +1,456 @@ | |||
| 1 | /* | ||
| 2 | ** x86/x64 instruction emitter. | ||
| 3 | ** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h | ||
| 4 | */ | ||
| 5 | |||
| 6 | /* -- Emit basic instructions --------------------------------------------- */ | ||
| 7 | |||
| 8 | #define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7))) | ||
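The ModRM byte packs the addressing mode into bits 6-7, the register/opcode-extension field into bits 3-5 and the r/m field into bits 0-2; two worked values, assuming the usual XM_REG == 0xc0 and XM_OFS8 == 0x40 from lj_target_x86.h:

/* Illustration (assumed XM_* values, not part of the source):
**   MODRM(XM_REG,  1, 2) == 0xca  ; register-direct, reg=ecx, r/m=edx
**   MODRM(XM_OFS8, 0, 5) == 0x45  ; [ebp+disp8], reg=eax
*/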
| 9 | |||
| 10 | #if LJ_64 | ||
| 11 | #define REXRB(p, rr, rb) \ | ||
| 12 | { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \ | ||
| 13 | if (rex != 0x40) *--(p) = rex; } | ||
| 14 | #define FORCE_REX 0x200 | ||
| 15 | #define REX_64 (FORCE_REX|0x080000) | ||
| 16 | #else | ||
| 17 | #define REXRB(p, rr, rb) ((void)0) | ||
| 18 | #define FORCE_REX 0 | ||
| 19 | #define REX_64 0 | ||
| 20 | #endif | ||
| 21 | |||
| 22 | #define emit_i8(as, i) (*--as->mcp = (MCode)(i)) | ||
| 23 | #define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4) | ||
| 24 | #define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4) | ||
| 25 | |||
| 26 | #define emit_x87op(as, xo) \ | ||
| 27 | (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2) | ||
| 28 | |||
| 29 | /* op */ | ||
| 30 | static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx, | ||
| 31 | MCode *p, int delta) | ||
| 32 | { | ||
| 33 | int n = (int8_t)xo; | ||
| 34 | #if defined(__GNUC__) | ||
| 35 | if (__builtin_constant_p(xo) && n == -2) | ||
| 36 | p[delta-2] = (MCode)(xo >> 24); | ||
| 37 | else if (__builtin_constant_p(xo) && n == -3) | ||
| 38 | *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16); | ||
| 39 | else | ||
| 40 | #endif | ||
| 41 | *(uint32_t *)(p+delta-5) = (uint32_t)xo; | ||
| 42 | p += n + delta; | ||
| 43 | #if LJ_64 | ||
| 44 | { | ||
| 45 | uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1); | ||
| 46 | if (rex != 0x40) { | ||
| 47 | rex |= (rr >> 16); | ||
| 48 | if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); } | ||
| 49 | else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; } | ||
| 50 | *--p = (MCode)rex; | ||
| 51 | } | ||
| 52 | } | ||
| 53 | #else | ||
| 54 | UNUSED(rr); UNUSED(rb); UNUSED(rx); | ||
| 55 | #endif | ||
| 56 | return p; | ||
| 57 | } | ||
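A note on the x86Op encoding this relies on: the opcode bytes live in the high bytes of the 32 bit constant and the low byte holds the negative length of opcode plus ModRM. The single 32 bit store above thus places the real opcode bytes adjacent to the ModRM byte, while the unused filler bytes land before the instruction start and are overwritten by the next (earlier) emit. Worked through for a one-byte opcode:

/* Sketch for XO_MOV (opcode 0x8b, i.e. mov r,r/m):
**   xo = 0x8b0000fe   ; low byte 0xfe == -2: 1 opcode byte + 1 ModRM byte
**   the store writes fe 00 00 8b at p-5..p-2; p += -2 leaves "8b <modrm>"
*/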
| 58 | |||
| 59 | /* op + modrm */ | ||
| 60 | #define emit_opm(xo, mode, rr, rb, p, delta) \ | ||
| 61 | (p[(delta)-1] = MODRM((mode), (rr), (rb)), \ | ||
| 62 | emit_op((xo), (rr), (rb), 0, (p), (delta))) | ||
| 63 | |||
| 64 | /* op + modrm + sib */ | ||
| 65 | #define emit_opmx(xo, mode, scale, rr, rb, rx, p) \ | ||
| 66 | (p[-1] = MODRM((scale), (rx), (rb)), \ | ||
| 67 | p[-2] = MODRM((mode), (rr), RID_ESP), \ | ||
| 68 | emit_op((xo), (rr), (rb), (rx), (p), -1)) | ||
| 69 | |||
| 70 | /* op r1, r2 */ | ||
| 71 | static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2) | ||
| 72 | { | ||
| 73 | MCode *p = as->mcp; | ||
| 74 | as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0); | ||
| 75 | } | ||
| 76 | |||
| 77 | #if LJ_64 && defined(LUA_USE_ASSERT) | ||
| 78 | /* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */ | ||
| 79 | static int32_t ptr2addr(const void *p) | ||
| 80 | { | ||
| 81 | lua_assert((uintptr_t)p < (uintptr_t)0x80000000); | ||
| 82 | return i32ptr(p); | ||
| 83 | } | ||
| 84 | #else | ||
| 85 | #define ptr2addr(p) (i32ptr((p))) | ||
| 86 | #endif | ||
| 87 | |||
| 88 | /* op r, [addr] */ | ||
| 89 | static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr) | ||
| 90 | { | ||
| 91 | MCode *p = as->mcp; | ||
| 92 | *(int32_t *)(p-4) = ptr2addr(addr); | ||
| 93 | #if LJ_64 | ||
| 94 | p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); | ||
| 95 | as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5); | ||
| 96 | #else | ||
| 97 | as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4); | ||
| 98 | #endif | ||
| 99 | } | ||
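The extra SIB byte in the LJ_64 path is needed because the plain mod=00/r/m=EBP encoding, which means [disp32] on x86, became RIP-relative on x64; encoding r/m=ESP plus a SIB byte with base=EBP and no index recovers the absolute [disp32] form. This is also why ptr2addr() asserts that the address fits in the sign-extended low 2G. Byte-level sketch:

/* "mov eax, [addr]" on x64 (sketch):
**   8b 04 25 <addr32>   ; 8b=opcode, 04=modrm(mod=00,reg=eax,rm=esp->SIB),
**                       ; 25=sib(index=none, base=ebp -> disp32 absolute)
*/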
| 100 | |||
| 101 | /* op r, [base+ofs] */ | ||
| 102 | static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs) | ||
| 103 | { | ||
| 104 | MCode *p = as->mcp; | ||
| 105 | x86Mode mode; | ||
| 106 | if (ra_hasreg(rb)) { | ||
| 107 | if (ofs == 0 && (rb&7) != RID_EBP) { | ||
| 108 | mode = XM_OFS0; | ||
| 109 | } else if (checki8(ofs)) { | ||
| 110 | *--p = (MCode)ofs; | ||
| 111 | mode = XM_OFS8; | ||
| 112 | } else { | ||
| 113 | p -= 4; | ||
| 114 | *(int32_t *)p = ofs; | ||
| 115 | mode = XM_OFS32; | ||
| 116 | } | ||
| 117 | if ((rb&7) == RID_ESP) | ||
| 118 | *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
| 119 | } else { | ||
| 120 | *(int32_t *)(p-4) = ofs; | ||
| 121 | #if LJ_64 | ||
| 122 | p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP); | ||
| 123 | p -= 5; | ||
| 124 | rb = RID_ESP; | ||
| 125 | #else | ||
| 126 | p -= 4; | ||
| 127 | rb = RID_EBP; | ||
| 128 | #endif | ||
| 129 | mode = XM_OFS0; | ||
| 130 | } | ||
| 131 | as->mcp = emit_opm(xo, mode, rr, rb, p, 0); | ||
| 132 | } | ||
| 133 | |||
| 134 | /* op r, [base+idx*scale+ofs] */ | ||
| 135 | static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx, | ||
| 136 | x86Mode scale, int32_t ofs) | ||
| 137 | { | ||
| 138 | MCode *p = as->mcp; | ||
| 139 | x86Mode mode; | ||
| 140 | if (ofs == 0 && (rb&7) != RID_EBP) { | ||
| 141 | mode = XM_OFS0; | ||
| 142 | } else if (checki8(ofs)) { | ||
| 143 | mode = XM_OFS8; | ||
| 144 | *--p = (MCode)ofs; | ||
| 145 | } else { | ||
| 146 | mode = XM_OFS32; | ||
| 147 | p -= 4; | ||
| 148 | *(int32_t *)p = ofs; | ||
| 149 | } | ||
| 150 | as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p); | ||
| 151 | } | ||
| 152 | |||
| 153 | /* op r, i */ | ||
| 154 | static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i) | ||
| 155 | { | ||
| 156 | MCode *p = as->mcp; | ||
| 157 | x86Op xo; | ||
| 158 | if (checki8(i)) { | ||
| 159 | *--p = (MCode)i; | ||
| 160 | xo = XG_TOXOi8(xg); | ||
| 161 | } else { | ||
| 162 | p -= 4; | ||
| 163 | *(int32_t *)p = i; | ||
| 164 | xo = XG_TOXOi(xg); | ||
| 165 | } | ||
| 166 | as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0); | ||
| 167 | } | ||
| 168 | |||
| 169 | /* op [base+ofs], i */ | ||
| 170 | static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs, | ||
| 171 | int32_t i) | ||
| 172 | { | ||
| 173 | x86Op xo; | ||
| 174 | if (checki8(i)) { | ||
| 175 | emit_i8(as, i); | ||
| 176 | xo = XG_TOXOi8(xg); | ||
| 177 | } else { | ||
| 178 | emit_i32(as, i); | ||
| 179 | xo = XG_TOXOi(xg); | ||
| 180 | } | ||
| 181 | emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs); | ||
| 182 | } | ||
| 183 | |||
| 184 | #define emit_shifti(as, xg, r, i) \ | ||
| 185 | (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r))) | ||
| 186 | |||
| 187 | /* op r, rm/mrm */ | ||
| 188 | static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb) | ||
| 189 | { | ||
| 190 | MCode *p = as->mcp; | ||
| 191 | x86Mode mode = XM_REG; | ||
| 192 | if (rb == RID_MRM) { | ||
| 193 | rb = as->mrm.base; | ||
| 194 | if (rb == RID_NONE) { | ||
| 195 | rb = RID_EBP; | ||
| 196 | mode = XM_OFS0; | ||
| 197 | p -= 4; | ||
| 198 | *(int32_t *)p = as->mrm.ofs; | ||
| 199 | if (as->mrm.idx != RID_NONE) | ||
| 200 | goto mrmidx; | ||
| 201 | #if LJ_64 | ||
| 202 | *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP); | ||
| 203 | rb = RID_ESP; | ||
| 204 | #endif | ||
| 205 | } else { | ||
| 206 | if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) { | ||
| 207 | mode = XM_OFS0; | ||
| 208 | } else if (checki8(as->mrm.ofs)) { | ||
| 209 | *--p = (MCode)as->mrm.ofs; | ||
| 210 | mode = XM_OFS8; | ||
| 211 | } else { | ||
| 212 | p -= 4; | ||
| 213 | *(int32_t *)p = as->mrm.ofs; | ||
| 214 | mode = XM_OFS32; | ||
| 215 | } | ||
| 216 | if (as->mrm.idx != RID_NONE) { | ||
| 217 | mrmidx: | ||
| 218 | as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p); | ||
| 219 | return; | ||
| 220 | } | ||
| 221 | if ((rb&7) == RID_ESP) | ||
| 222 | *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
| 223 | } | ||
| 224 | } | ||
| 225 | as->mcp = emit_opm(xo, mode, rr, rb, p, 0); | ||
| 226 | } | ||
| 227 | |||
| 228 | /* op rm/mrm, i */ | ||
| 229 | static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i) | ||
| 230 | { | ||
| 231 | x86Op xo; | ||
| 232 | if (checki8(i)) { | ||
| 233 | emit_i8(as, i); | ||
| 234 | xo = XG_TOXOi8(xg); | ||
| 235 | } else { | ||
| 236 | emit_i32(as, i); | ||
| 237 | xo = XG_TOXOi(xg); | ||
| 238 | } | ||
| 239 | emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64)); | ||
| 240 | } | ||
| 241 | |||
| 242 | /* -- Emit loads/stores --------------------------------------------------- */ | ||
| 243 | |||
| 244 | /* Instruction selection for XMM moves. */ | ||
| 245 | #define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS) | ||
| 246 | #define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD) | ||
| 247 | |||
| 248 | /* mov [base+ofs], i */ | ||
| 249 | static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i) | ||
| 250 | { | ||
| 251 | emit_i32(as, i); | ||
| 252 | emit_rmro(as, XO_MOVmi, 0, base, ofs); | ||
| 253 | } | ||
| 254 | |||
| 255 | /* mov [base+ofs], r */ | ||
| 256 | #define emit_movtomro(as, r, base, ofs) \ | ||
| 257 | emit_rmro(as, XO_MOVto, (r), (base), (ofs)) | ||
| 258 | |||
| 259 | /* Get/set global_State fields. */ | ||
| 260 | #define emit_opgl(as, xo, r, field) \ | ||
| 261 | emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field) | ||
| 262 | #define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field) | ||
| 263 | #define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field) | ||
| 264 | #define emit_setgli(as, field, i) \ | ||
| 265 | (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, field)) | ||
| 266 | |||
| 267 | /* mov r, i / xor r, r */ | ||
| 268 | static void emit_loadi(ASMState *as, Reg r, int32_t i) | ||
| 269 | { | ||
| 270 | /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */ | ||
| 271 | if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP || | ||
| 272 | (as->curins+1 < as->T->nins && | ||
| 273 | IR(as->curins+1)->o == IR_HIOP)))) { | ||
| 274 | emit_rr(as, XO_ARITH(XOg_XOR), r, r); | ||
| 275 | } else { | ||
| 276 | MCode *p = as->mcp; | ||
| 277 | *(int32_t *)(p-4) = i; | ||
| 278 | p[-5] = (MCode)(XI_MOVri+(r&7)); | ||
| 279 | p -= 5; | ||
| 280 | REXRB(p, 0, r); | ||
| 281 | as->mcp = p; | ||
| 282 | } | ||
| 283 | } | ||
| 284 | |||
| 285 | /* mov r, addr */ | ||
| 286 | #define emit_loada(as, r, addr) \ | ||
| 287 | emit_loadi(as, (r), ptr2addr((addr))) | ||
| 288 | |||
| 289 | #if LJ_64 | ||
| 290 | /* mov r, imm64 or shorter 32 bit extended load. */ | ||
| 291 | static void emit_loadu64(ASMState *as, Reg r, uint64_t u64) | ||
| 292 | { | ||
| 293 | if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */ | ||
| 294 | emit_loadi(as, r, (int32_t)u64); | ||
| 295 | } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */ | ||
| 296 | MCode *p = as->mcp; | ||
| 297 | *(int32_t *)(p-4) = (int32_t)u64; | ||
| 298 | as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4); | ||
| 299 | } else { /* Full-size 64 bit load. */ | ||
| 300 | MCode *p = as->mcp; | ||
| 301 | *(uint64_t *)(p-8) = u64; | ||
| 302 | p[-9] = (MCode)(XI_MOVri+(r&7)); | ||
| 303 | p[-10] = 0x48 + ((r>>3)&1); | ||
| 304 | p -= 10; | ||
| 305 | as->mcp = p; | ||
| 306 | } | ||
| 307 | } | ||
| 308 | #endif | ||
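A sketch of the three cases, by example constants:

/* Example constants for the three emit_loadu64() cases (sketch):
**   0x00000000deadbeef -> mov r32, 0xdeadbeef   ; zero-extends to 64 bit
**   0xffffffff80000000 -> mov r64, -0x80000000  ; sign-extended imm32
**   0x0123456789abcdef -> mov r64, imm64        ; full 10 byte load
*/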
| 309 | |||
| 310 | /* movsd r, [&tv->n] / xorps r, r */ | ||
| 311 | static void emit_loadn(ASMState *as, Reg r, cTValue *tv) | ||
| 312 | { | ||
| 313 | if (tvispzero(tv)) /* Use xor only for +0. */ | ||
| 314 | emit_rr(as, XO_XORPS, r, r); | ||
| 315 | else | ||
| 316 | emit_rma(as, XMM_MOVRM(as), r, &tv->n); | ||
| 317 | } | ||
| 318 | |||
| 319 | /* -- Emit control-flow instructions -------------------------------------- */ | ||
| 320 | |||
| 321 | /* Label for short jumps. */ | ||
| 322 | typedef MCode *MCLabel; | ||
| 323 | |||
| 324 | #if LJ_32 && LJ_HASFFI | ||
| 325 | /* jmp short target */ | ||
| 326 | static void emit_sjmp(ASMState *as, MCLabel target) | ||
| 327 | { | ||
| 328 | MCode *p = as->mcp; | ||
| 329 | ptrdiff_t delta = target - p; | ||
| 330 | lua_assert(delta == (int8_t)delta); | ||
| 331 | p[-1] = (MCode)(int8_t)delta; | ||
| 332 | p[-2] = XI_JMPs; | ||
| 333 | as->mcp = p - 2; | ||
| 334 | } | ||
| 335 | #endif | ||
| 336 | |||
| 337 | /* jcc short target */ | ||
| 338 | static void emit_sjcc(ASMState *as, int cc, MCLabel target) | ||
| 339 | { | ||
| 340 | MCode *p = as->mcp; | ||
| 341 | ptrdiff_t delta = target - p; | ||
| 342 | lua_assert(delta == (int8_t)delta); | ||
| 343 | p[-1] = (MCode)(int8_t)delta; | ||
| 344 | p[-2] = (MCode)(XI_JCCs+(cc&15)); | ||
| 345 | as->mcp = p - 2; | ||
| 346 | } | ||
| 347 | |||
| 348 | /* jcc short (pending target) */ | ||
| 349 | static MCLabel emit_sjcc_label(ASMState *as, int cc) | ||
| 350 | { | ||
| 351 | MCode *p = as->mcp; | ||
| 352 | p[-1] = 0; | ||
| 353 | p[-2] = (MCode)(XI_JCCs+(cc&15)); | ||
| 354 | as->mcp = p - 2; | ||
| 355 | return p; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Fixup jcc short target. */ | ||
| 359 | static void emit_sfixup(ASMState *as, MCLabel source) | ||
| 360 | { | ||
| 361 | source[-1] = (MCode)(as->mcp-source); | ||
| 362 | } | ||
| 363 | |||
| 364 | /* Return label pointing to current PC. */ | ||
| 365 | #define emit_label(as) ((as)->mcp) | ||
| 366 | |||
| 367 | /* Compute relative 32 bit offset for jump and call instructions. */ | ||
| 368 | static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) | ||
| 369 | { | ||
| 370 | ptrdiff_t delta = target - p; | ||
| 371 | lua_assert(delta == (int32_t)delta); | ||
| 372 | return (int32_t)delta; | ||
| 373 | } | ||
| 374 | |||
| 375 | /* jcc target */ | ||
| 376 | static void emit_jcc(ASMState *as, int cc, MCode *target) | ||
| 377 | { | ||
| 378 | MCode *p = as->mcp; | ||
| 379 | *(int32_t *)(p-4) = jmprel(p, target); | ||
| 380 | p[-5] = (MCode)(XI_JCCn+(cc&15)); | ||
| 381 | p[-6] = 0x0f; | ||
| 382 | as->mcp = p - 6; | ||
| 383 | } | ||
| 384 | |||
| 385 | /* call target */ | ||
| 386 | static void emit_call_(ASMState *as, MCode *target) | ||
| 387 | { | ||
| 388 | MCode *p = as->mcp; | ||
| 389 | #if LJ_64 | ||
| 390 | if (target-p != (int32_t)(target-p)) { | ||
| 391 | /* Assumes RID_RET is never an argument to calls and always clobbered. */ | ||
| 392 | emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET); | ||
| 393 | emit_loadu64(as, RID_RET, (uint64_t)target); | ||
| 394 | return; | ||
| 395 | } | ||
| 396 | #endif | ||
| 397 | *(int32_t *)(p-4) = jmprel(p, target); | ||
| 398 | p[-5] = XI_CALL; | ||
| 399 | as->mcp = p - 5; | ||
| 400 | } | ||
| 401 | |||
| 402 | #define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f)) | ||
| 403 | |||
| 404 | /* -- Emit generic operations --------------------------------------------- */ | ||
| 405 | |||
| 406 | /* Use 64 bit operations to handle 64 bit IR types. */ | ||
| 407 | #if LJ_64 | ||
| 408 | #define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0)) | ||
| 409 | #else | ||
| 410 | #define REX_64IR(ir, r) (r) | ||
| 411 | #endif | ||
| 412 | |||
| 413 | /* Generic move between two regs. */ | ||
| 414 | static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) | ||
| 415 | { | ||
| 416 | UNUSED(ir); | ||
| 417 | if (dst < RID_MAX_GPR) | ||
| 418 | emit_rr(as, XO_MOV, REX_64IR(ir, dst), src); | ||
| 419 | else | ||
| 420 | emit_rr(as, XMM_MOVRR(as), dst, src); | ||
| 421 | } | ||
| 422 | |||
| 423 | /* Generic load of register from stack slot. */ | ||
| 424 | static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) | ||
| 425 | { | ||
| 426 | if (r < RID_MAX_GPR) | ||
| 427 | emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs); | ||
| 428 | else | ||
| 429 | emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs); | ||
| 430 | } | ||
| 431 | |||
| 432 | /* Generic store of register to stack slot. */ | ||
| 433 | static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) | ||
| 434 | { | ||
| 435 | if (r < RID_MAX_GPR) | ||
| 436 | emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs); | ||
| 437 | else | ||
| 438 | emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs); | ||
| 439 | } | ||
| 440 | |||
| 441 | /* Add offset to pointer. */ | ||
| 442 | static void emit_addptr(ASMState *as, Reg r, int32_t ofs) | ||
| 443 | { | ||
| 444 | if (ofs) { | ||
| 445 | if ((as->flags & JIT_F_LEA_AGU)) | ||
| 446 | emit_rmro(as, XO_LEA, r, r, ofs); | ||
| 447 | else | ||
| 448 | emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs); | ||
| 449 | } | ||
| 450 | } | ||
| 451 | |||
| 452 | #define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs)) | ||
| 453 | |||
| 454 | /* Prefer rematerialization of BASE/L from global_State over spills. */ | ||
| 455 | #define emit_canremat(ref) ((ref) <= REF_BASE) | ||
| 456 | |||
