aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/.gitignore2
-rw-r--r--src/Makefile96
-rw-r--r--src/Makefile.dep217
-rw-r--r--src/host/buildvm.c26
-rw-r--r--src/host/buildvm.h1
-rw-r--r--src/host/buildvm_asm.c56
-rw-r--r--src/host/buildvm_lib.c61
-rw-r--r--src/host/buildvm_libbc.h56
-rw-r--r--src/host/buildvm_peobj.c43
-rw-r--r--src/host/genlibbc.lua197
-rw-r--r--src/jit/bc.lua19
-rw-r--r--src/jit/bcsave.lua106
-rw-r--r--src/jit/dis_arm.lua18
-rw-r--r--src/jit/dis_arm64.lua1216
-rw-r--r--src/jit/dis_arm64be.lua12
-rw-r--r--src/jit/dis_mips.lua372
-rw-r--r--src/jit/dis_mips64.lua17
-rw-r--r--src/jit/dis_mips64el.lua17
-rw-r--r--src/jit/dis_mips64r6.lua17
-rw-r--r--src/jit/dis_mips64r6el.lua17
-rw-r--r--src/jit/dis_mipsel.lua15
-rw-r--r--src/jit/dis_ppc.lua18
-rw-r--r--src/jit/dis_x64.lua15
-rw-r--r--src/jit/dis_x86.lua297
-rw-r--r--src/jit/dump.lua45
-rw-r--r--src/jit/p.lua311
-rw-r--r--src/jit/v.lua17
-rw-r--r--src/jit/zone.lua45
-rw-r--r--src/lauxlib.h34
-rw-r--r--src/lib_aux.c82
-rw-r--r--src/lib_base.c134
-rw-r--r--src/lib_bit.c134
-rw-r--r--src/lib_debug.c10
-rw-r--r--src/lib_ffi.c82
-rw-r--r--src/lib_io.c55
-rw-r--r--src/lib_jit.c237
-rw-r--r--src/lib_math.c92
-rw-r--r--src/lib_os.c37
-rw-r--r--src/lib_package.c67
-rw-r--r--src/lib_string.c457
-rw-r--r--src/lib_table.c187
-rw-r--r--src/lj.supp41
-rw-r--r--src/lj_alloc.c275
-rw-r--r--src/lj_alloc.h3
-rw-r--r--src/lj_api.c372
-rw-r--r--src/lj_arch.h362
-rw-r--r--src/lj_asm.c870
-rw-r--r--src/lj_asm_arm.h537
-rw-r--r--src/lj_asm_arm64.h2018
-rw-r--r--src/lj_asm_mips.h1656
-rw-r--r--src/lj_asm_ppc.h819
-rw-r--r--src/lj_asm_x86.h1282
-rw-r--r--src/lj_assert.c28
-rw-r--r--src/lj_bc.h4
-rw-r--r--src/lj_bcdump.h6
-rw-r--r--src/lj_bcread.c152
-rw-r--r--src/lj_bcwrite.c245
-rw-r--r--src/lj_buf.c232
-rw-r--r--src/lj_buf.h103
-rw-r--r--src/lj_carith.c83
-rw-r--r--src/lj_carith.h10
-rw-r--r--src/lj_ccall.c395
-rw-r--r--src/lj_ccall.h49
-rw-r--r--src/lj_ccallback.c273
-rw-r--r--src/lj_cconv.c61
-rw-r--r--src/lj_cconv.h5
-rw-r--r--src/lj_cdata.c67
-rw-r--r--src/lj_cdata.h14
-rw-r--r--src/lj_clib.c47
-rw-r--r--src/lj_cparse.c178
-rw-r--r--src/lj_cparse.h2
-rw-r--r--src/lj_crecord.c337
-rw-r--r--src/lj_crecord.h7
-rw-r--r--src/lj_ctype.c30
-rw-r--r--src/lj_ctype.h16
-rw-r--r--src/lj_debug.c204
-rw-r--r--src/lj_debug.h8
-rw-r--r--src/lj_def.h56
-rw-r--r--src/lj_dispatch.c103
-rw-r--r--src/lj_dispatch.h43
-rw-r--r--src/lj_emit_arm.h71
-rw-r--r--src/lj_emit_arm64.h422
-rw-r--r--src/lj_emit_mips.h161
-rw-r--r--src/lj_emit_ppc.h34
-rw-r--r--src/lj_emit_x86.h200
-rw-r--r--src/lj_err.c211
-rw-r--r--src/lj_errmsg.h9
-rw-r--r--src/lj_ffrecord.c629
-rw-r--r--src/lj_frame.h160
-rw-r--r--src/lj_func.c18
-rw-r--r--src/lj_gc.c171
-rw-r--r--src/lj_gc.h16
-rw-r--r--src/lj_gdbjit.c55
-rw-r--r--src/lj_ir.c174
-rw-r--r--src/lj_ir.h87
-rw-r--r--src/lj_ircall.h211
-rw-r--r--src/lj_iropt.h15
-rw-r--r--src/lj_jit.h217
-rw-r--r--src/lj_lex.c385
-rw-r--r--src/lj_lex.h23
-rw-r--r--src/lj_lib.c75
-rw-r--r--src/lj_lib.h34
-rw-r--r--src/lj_load.c6
-rw-r--r--src/lj_mcode.c57
-rw-r--r--src/lj_meta.c129
-rw-r--r--src/lj_meta.h1
-rw-r--r--src/lj_obj.c17
-rw-r--r--src/lj_obj.h257
-rw-r--r--src/lj_opt_fold.c535
-rw-r--r--src/lj_opt_loop.c44
-rw-r--r--src/lj_opt_mem.c131
-rw-r--r--src/lj_opt_narrow.c48
-rw-r--r--src/lj_opt_sink.c14
-rw-r--r--src/lj_opt_split.c194
-rw-r--r--src/lj_parse.c301
-rw-r--r--src/lj_prng.c244
-rw-r--r--src/lj_prng.h24
-rw-r--r--src/lj_profile.c368
-rw-r--r--src/lj_profile.h21
-rw-r--r--src/lj_record.c780
-rw-r--r--src/lj_record.h1
-rw-r--r--src/lj_snap.c238
-rw-r--r--src/lj_snap.h3
-rw-r--r--src/lj_state.c105
-rw-r--r--src/lj_state.h4
-rw-r--r--src/lj_str.c519
-rw-r--r--src/lj_str.h39
-rw-r--r--src/lj_strfmt.c472
-rw-r--r--src/lj_strfmt.h126
-rw-r--r--src/lj_strfmt_num.c592
-rw-r--r--src/lj_strscan.c79
-rw-r--r--src/lj_strscan.h3
-rw-r--r--src/lj_tab.c157
-rw-r--r--src/lj_tab.h8
-rw-r--r--src/lj_target.h9
-rw-r--r--src/lj_target_arm.h4
-rw-r--r--src/lj_target_arm64.h332
-rw-r--r--src/lj_target_mips.h193
-rw-r--r--src/lj_target_ppc.h2
-rw-r--r--src/lj_target_x86.h36
-rw-r--r--src/lj_trace.c182
-rw-r--r--src/lj_trace.h2
-rw-r--r--src/lj_traceerr.h4
-rw-r--r--src/lj_vm.h31
-rw-r--r--src/lj_vmevent.c1
-rw-r--r--src/lj_vmmath.c75
-rw-r--r--src/ljamalg.c16
-rw-r--r--src/lua.h11
-rw-r--r--src/luaconf.h10
-rw-r--r--src/luajit.c134
-rw-r--r--src/luajit.h15
-rw-r--r--src/msvcbuild.bat12
-rw-r--r--src/ps4build.bat32
-rw-r--r--src/vm_arm.dasc356
-rw-r--r--src/vm_arm64.dasc3988
-rw-r--r--src/vm_mips.dasc2547
-rw-r--r--src/vm_mips64.dasc5453
-rw-r--r--src/vm_ppc.dasc1645
-rw-r--r--src/vm_ppcspe.dasc3691
-rw-r--r--src/vm_x64.dasc4907
-rw-r--r--src/vm_x86.dasc1573
-rw-r--r--src/xb1build.bat101
162 files changed, 37715 insertions, 12472 deletions
diff --git a/src/.gitignore b/src/.gitignore
index fc94e82c..1a30573c 100644
--- a/src/.gitignore
+++ b/src/.gitignore
@@ -4,4 +4,4 @@ lj_ffdef.h
4lj_libdef.h 4lj_libdef.h
5lj_recdef.h 5lj_recdef.h
6lj_folddef.h 6lj_folddef.h
7lj_vm.s 7lj_vm.[sS]
diff --git a/src/Makefile b/src/Makefile
index ce1537a8..e65b55ee 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -11,8 +11,8 @@
11############################################################################## 11##############################################################################
12 12
13MAJVER= 2 13MAJVER= 2
14MINVER= 0 14MINVER= 1
15RELVER= 5 15RELVER= 0
16ABIVER= 5.1 16ABIVER= 5.1
17NODOTABIVER= 51 17NODOTABIVER= 51
18 18
@@ -44,17 +44,14 @@ CCOPT= -O2 -fomit-frame-pointer
44# 44#
45# Target-specific compiler options: 45# Target-specific compiler options:
46# 46#
47# x86 only: it's recommended to compile at least for i686. Better yet,
48# compile for an architecture that has SSE2, too (-msse -msse2).
49#
50# x86/x64 only: For GCC 4.2 or higher and if you don't intend to distribute 47# x86/x64 only: For GCC 4.2 or higher and if you don't intend to distribute
51# the binaries to a different machine you could also use: -march=native 48# the binaries to a different machine you could also use: -march=native
52# 49#
53CCOPT_x86= -march=i686 50CCOPT_x86= -march=i686 -msse -msse2 -mfpmath=sse
54CCOPT_x64= 51CCOPT_x64=
55CCOPT_arm= 52CCOPT_arm=
53CCOPT_arm64=
56CCOPT_ppc= 54CCOPT_ppc=
57CCOPT_ppcspe=
58CCOPT_mips= 55CCOPT_mips=
59# 56#
60CCDEBUG= 57CCDEBUG=
@@ -113,6 +110,9 @@ XCFLAGS=
113#XCFLAGS+= -DLUAJIT_NUMMODE=1 110#XCFLAGS+= -DLUAJIT_NUMMODE=1
114#XCFLAGS+= -DLUAJIT_NUMMODE=2 111#XCFLAGS+= -DLUAJIT_NUMMODE=2
115# 112#
113# Disable LJ_GC64 mode for x64.
114#XCFLAGS+= -DLUAJIT_DISABLE_GC64
115#
116############################################################################## 116##############################################################################
117 117
118############################################################################## 118##############################################################################
@@ -124,15 +124,14 @@ XCFLAGS=
124# 124#
125# Use the system provided memory allocator (realloc) instead of the 125# Use the system provided memory allocator (realloc) instead of the
126# bundled memory allocator. This is slower, but sometimes helpful for 126# bundled memory allocator. This is slower, but sometimes helpful for
127# debugging. This option cannot be enabled on x64, since realloc usually 127# debugging. This option cannot be enabled on x64 without GC64, since
128# doesn't return addresses in the right address range. 128# realloc usually doesn't return addresses in the right address range.
129# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and 129# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and
130# the only way to get useful results from it for all other architectures. 130# the only way to get useful results from it for all other architectures.
131#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC 131#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC
132# 132#
133# This define is required to run LuaJIT under Valgrind. The Valgrind 133# This define is required to run LuaJIT under Valgrind. The Valgrind
134# header files must be installed. You should enable debug information, too. 134# header files must be installed. You should enable debug information, too.
135# Use --suppressions=lj.supp to avoid some false positives.
136#XCFLAGS+= -DLUAJIT_USE_VALGRIND 135#XCFLAGS+= -DLUAJIT_USE_VALGRIND
137# 136#
138# This is the client for the GDB JIT API. GDB 7.0 or higher is required 137# This is the client for the GDB JIT API. GDB 7.0 or higher is required
@@ -189,7 +188,8 @@ endif
189# make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows 188# make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows
190# make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu- 189# make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-
191 190
192CCOPTIONS= $(CCDEBUG) $(CCOPT) $(CCWARN) $(XCFLAGS) $(CFLAGS) 191ASOPTIONS= $(CCOPT) $(CCWARN) $(XCFLAGS) $(CFLAGS)
192CCOPTIONS= $(CCDEBUG) $(ASOPTIONS)
193LDOPTIONS= $(CCDEBUG) $(LDFLAGS) 193LDOPTIONS= $(CCDEBUG) $(LDFLAGS)
194 194
195HOST_CC= $(CC) 195HOST_CC= $(CC)
@@ -211,7 +211,7 @@ TARGET_CC= $(STATIC_CC)
211TARGET_STCC= $(STATIC_CC) 211TARGET_STCC= $(STATIC_CC)
212TARGET_DYNCC= $(DYNAMIC_CC) 212TARGET_DYNCC= $(DYNAMIC_CC)
213TARGET_LD= $(CROSS)$(CC) 213TARGET_LD= $(CROSS)$(CC)
214TARGET_AR= $(CROSS)ar rcus 214TARGET_AR= $(CROSS)ar rcus 2>/dev/null
215TARGET_STRIP= $(CROSS)strip 215TARGET_STRIP= $(CROSS)strip
216 216
217TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) 217TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib)
@@ -229,6 +229,7 @@ TARGET_XLDFLAGS=
229TARGET_XLIBS= -lm 229TARGET_XLIBS= -lm
230TARGET_TCFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) 230TARGET_TCFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
231TARGET_ACFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS) 231TARGET_ACFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
232TARGET_ASFLAGS= $(ASOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
232TARGET_ALDFLAGS= $(LDOPTIONS) $(TARGET_XLDFLAGS) $(TARGET_FLAGS) $(TARGET_LDFLAGS) 233TARGET_ALDFLAGS= $(LDOPTIONS) $(TARGET_XLDFLAGS) $(TARGET_FLAGS) $(TARGET_LDFLAGS)
233TARGET_ASHLDFLAGS= $(LDOPTIONS) $(TARGET_XSHLDFLAGS) $(TARGET_FLAGS) $(TARGET_SHLDFLAGS) 234TARGET_ASHLDFLAGS= $(LDOPTIONS) $(TARGET_XSHLDFLAGS) $(TARGET_FLAGS) $(TARGET_SHLDFLAGS)
234TARGET_ALIBS= $(TARGET_XLIBS) $(LIBS) $(TARGET_LIBS) 235TARGET_ALIBS= $(TARGET_XLIBS) $(LIBS) $(TARGET_LIBS)
@@ -243,17 +244,29 @@ else
243ifneq (,$(findstring LJ_TARGET_ARM ,$(TARGET_TESTARCH))) 244ifneq (,$(findstring LJ_TARGET_ARM ,$(TARGET_TESTARCH)))
244 TARGET_LJARCH= arm 245 TARGET_LJARCH= arm
245else 246else
247ifneq (,$(findstring LJ_TARGET_ARM64 ,$(TARGET_TESTARCH)))
248 ifneq (,$(findstring __AARCH64EB__ ,$(TARGET_TESTARCH)))
249 TARGET_ARCH= -D__AARCH64EB__=1
250 endif
251 TARGET_LJARCH= arm64
252else
246ifneq (,$(findstring LJ_TARGET_PPC ,$(TARGET_TESTARCH))) 253ifneq (,$(findstring LJ_TARGET_PPC ,$(TARGET_TESTARCH)))
254 ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH)))
255 TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_LE
256 else
257 TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_BE
258 endif
247 TARGET_LJARCH= ppc 259 TARGET_LJARCH= ppc
248else 260else
249ifneq (,$(findstring LJ_TARGET_PPCSPE ,$(TARGET_TESTARCH)))
250 TARGET_LJARCH= ppcspe
251else
252ifneq (,$(findstring LJ_TARGET_MIPS ,$(TARGET_TESTARCH))) 261ifneq (,$(findstring LJ_TARGET_MIPS ,$(TARGET_TESTARCH)))
253 ifneq (,$(findstring MIPSEL ,$(TARGET_TESTARCH))) 262 ifneq (,$(findstring MIPSEL ,$(TARGET_TESTARCH)))
254 TARGET_ARCH= -D__MIPSEL__=1 263 TARGET_ARCH= -D__MIPSEL__=1
255 endif 264 endif
256 TARGET_LJARCH= mips 265 ifneq (,$(findstring LJ_TARGET_MIPS64 ,$(TARGET_TESTARCH)))
266 TARGET_LJARCH= mips64
267 else
268 TARGET_LJARCH= mips
269 endif
257else 270else
258 $(error Unsupported target architecture) 271 $(error Unsupported target architecture)
259endif 272endif
@@ -267,6 +280,7 @@ ifneq (,$(findstring LJ_TARGET_PS3 1,$(TARGET_TESTARCH)))
267 TARGET_SYS= PS3 280 TARGET_SYS= PS3
268 TARGET_ARCH+= -D__CELLOS_LV2__ 281 TARGET_ARCH+= -D__CELLOS_LV2__
269 TARGET_XCFLAGS+= -DLUAJIT_USE_SYSMALLOC 282 TARGET_XCFLAGS+= -DLUAJIT_USE_SYSMALLOC
283 TARGET_XLIBS+= -lpthread
270endif 284endif
271 285
272TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) 286TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH))
@@ -297,7 +311,6 @@ ifeq (Windows,$(TARGET_SYS))
297 TARGET_XSHLDFLAGS= -shared -Wl,--out-implib,$(TARGET_DLLDOTANAME) 311 TARGET_XSHLDFLAGS= -shared -Wl,--out-implib,$(TARGET_DLLDOTANAME)
298 TARGET_DYNXLDOPTS= 312 TARGET_DYNXLDOPTS=
299else 313else
300 TARGET_AR+= 2>/dev/null
301ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1)) 314ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1))
302 TARGET_XCFLAGS+= -fno-stack-protector 315 TARGET_XCFLAGS+= -fno-stack-protector
303endif 316endif
@@ -319,6 +332,9 @@ ifeq (iOS,$(TARGET_SYS))
319 TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC 332 TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC
320 TARGET_DYNXLDOPTS= 333 TARGET_DYNXLDOPTS=
321 TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER) 334 TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER)
335 ifeq (arm64,$(TARGET_LJARCH))
336 TARGET_XCFLAGS+= -fno-omit-frame-pointer
337 endif
322else 338else
323 ifneq (SunOS,$(TARGET_SYS)) 339 ifneq (SunOS,$(TARGET_SYS))
324 ifneq (PS3,$(TARGET_SYS)) 340 ifneq (PS3,$(TARGET_SYS))
@@ -346,7 +362,7 @@ ifneq ($(HOST_SYS),$(TARGET_SYS))
346 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX 362 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX
347 else 363 else
348 ifeq (iOS,$(TARGET_SYS)) 364 ifeq (iOS,$(TARGET_SYS))
349 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX 365 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX -DTARGET_OS_IPHONE=1
350 else 366 else
351 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OTHER 367 HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OTHER
352 endif 368 endif
@@ -379,6 +395,11 @@ DASM_XFLAGS=
379DASM_AFLAGS= 395DASM_AFLAGS=
380DASM_ARCH= $(TARGET_LJARCH) 396DASM_ARCH= $(TARGET_LJARCH)
381 397
398ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH)))
399 DASM_AFLAGS+= -D ENDIAN_LE
400else
401 DASM_AFLAGS+= -D ENDIAN_BE
402endif
382ifneq (,$(findstring LJ_ARCH_BITS 64,$(TARGET_TESTARCH))) 403ifneq (,$(findstring LJ_ARCH_BITS 64,$(TARGET_TESTARCH)))
383 DASM_AFLAGS+= -D P64 404 DASM_AFLAGS+= -D P64
384endif 405endif
@@ -411,19 +432,19 @@ DASM_AFLAGS+= -D VER=$(subst LJ_ARCH_VERSION_,,$(filter LJ_ARCH_VERSION_%,$(subs
411ifeq (Windows,$(TARGET_SYS)) 432ifeq (Windows,$(TARGET_SYS))
412 DASM_AFLAGS+= -D WIN 433 DASM_AFLAGS+= -D WIN
413endif 434endif
414ifeq (x86,$(TARGET_LJARCH))
415 ifneq (,$(findstring __SSE2__ 1,$(TARGET_TESTARCH)))
416 DASM_AFLAGS+= -D SSE
417 endif
418else
419ifeq (x64,$(TARGET_LJARCH)) 435ifeq (x64,$(TARGET_LJARCH))
420 DASM_ARCH= x86 436 ifeq (,$(findstring LJ_FR2 1,$(TARGET_TESTARCH)))
437 DASM_ARCH= x86
438 endif
421else 439else
422ifeq (arm,$(TARGET_LJARCH)) 440ifeq (arm,$(TARGET_LJARCH))
423 ifeq (iOS,$(TARGET_SYS)) 441 ifeq (iOS,$(TARGET_SYS))
424 DASM_AFLAGS+= -D IOS 442 DASM_AFLAGS+= -D IOS
425 endif 443 endif
426else 444else
445ifneq (,$(findstring LJ_TARGET_MIPSR6 ,$(TARGET_TESTARCH)))
446 DASM_AFLAGS+= -D MIPSR6
447endif
427ifeq (ppc,$(TARGET_LJARCH)) 448ifeq (ppc,$(TARGET_LJARCH))
428 ifneq (,$(findstring LJ_ARCH_SQRT 1,$(TARGET_TESTARCH))) 449 ifneq (,$(findstring LJ_ARCH_SQRT 1,$(TARGET_TESTARCH)))
429 DASM_AFLAGS+= -D SQRT 450 DASM_AFLAGS+= -D SQRT
@@ -431,7 +452,7 @@ ifeq (ppc,$(TARGET_LJARCH))
431 ifneq (,$(findstring LJ_ARCH_ROUND 1,$(TARGET_TESTARCH))) 452 ifneq (,$(findstring LJ_ARCH_ROUND 1,$(TARGET_TESTARCH)))
432 DASM_AFLAGS+= -D ROUND 453 DASM_AFLAGS+= -D ROUND
433 endif 454 endif
434 ifneq (,$(findstring LJ_ARCH_PPC64 1,$(TARGET_TESTARCH))) 455 ifneq (,$(findstring LJ_ARCH_PPC32ON64 1,$(TARGET_TESTARCH)))
435 DASM_AFLAGS+= -D GPR64 456 DASM_AFLAGS+= -D GPR64
436 endif 457 endif
437 ifeq (PS3,$(TARGET_SYS)) 458 ifeq (PS3,$(TARGET_SYS))
@@ -440,7 +461,6 @@ ifeq (ppc,$(TARGET_LJARCH))
440endif 461endif
441endif 462endif
442endif 463endif
443endif
444 464
445DASM_FLAGS= $(DASM_XFLAGS) $(DASM_AFLAGS) 465DASM_FLAGS= $(DASM_XFLAGS) $(DASM_AFLAGS)
446DASM_DASC= vm_$(DASM_ARCH).dasc 466DASM_DASC= vm_$(DASM_ARCH).dasc
@@ -453,7 +473,7 @@ BUILDVM_X= $(BUILDVM_T)
453HOST_O= $(MINILUA_O) $(BUILDVM_O) 473HOST_O= $(MINILUA_O) $(BUILDVM_O)
454HOST_T= $(MINILUA_T) $(BUILDVM_T) 474HOST_T= $(MINILUA_T) $(BUILDVM_T)
455 475
456LJVM_S= lj_vm.s 476LJVM_S= lj_vm.S
457LJVM_O= lj_vm.o 477LJVM_O= lj_vm.o
458LJVM_BOUT= $(LJVM_S) 478LJVM_BOUT= $(LJVM_S)
459LJVM_MODE= elfasm 479LJVM_MODE= elfasm
@@ -462,10 +482,11 @@ LJLIB_O= lib_base.o lib_math.o lib_bit.o lib_string.o lib_table.o \
462 lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o 482 lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o
463LJLIB_C= $(LJLIB_O:.o=.c) 483LJLIB_C= $(LJLIB_O:.o=.c)
464 484
465LJCORE_O= lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o \ 485LJCORE_O= lj_assert.o lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \
466 lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \ 486 lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \
467 lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o lj_strscan.o \ 487 lj_prng.o lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o \
468 lj_api.o lj_lex.o lj_parse.o lj_bcread.o lj_bcwrite.o lj_load.o \ 488 lj_strscan.o lj_strfmt.o lj_strfmt_num.o lj_api.o lj_profile.o \
489 lj_lex.o lj_parse.o lj_bcread.o lj_bcwrite.o lj_load.o \
469 lj_ir.o lj_opt_mem.o lj_opt_fold.o lj_opt_narrow.o \ 490 lj_ir.o lj_opt_mem.o lj_opt_fold.o lj_opt_narrow.o \
470 lj_opt_dce.o lj_opt_loop.o lj_opt_split.o lj_opt_sink.o \ 491 lj_opt_dce.o lj_opt_loop.o lj_opt_split.o lj_opt_sink.o \
471 lj_mcode.o lj_snap.o lj_record.o lj_crecord.o lj_ffrecord.o \ 492 lj_mcode.o lj_snap.o lj_record.o lj_crecord.o lj_ffrecord.o \
@@ -580,12 +601,15 @@ E= @echo
580default all: $(TARGET_T) 601default all: $(TARGET_T)
581 602
582amalg: 603amalg:
583 @grep "^[+|]" ljamalg.c
584 $(MAKE) all "LJCORE_O=ljamalg.o" 604 $(MAKE) all "LJCORE_O=ljamalg.o"
585 605
586clean: 606clean:
587 $(HOST_RM) $(ALL_RM) 607 $(HOST_RM) $(ALL_RM)
588 608
609libbc:
610 ./$(LUAJIT_T) host/genlibbc.lua -o host/buildvm_libbc.h $(LJLIB_C)
611 $(MAKE) all
612
589depend: 613depend:
590 @for file in $(ALL_HDRGEN); do \ 614 @for file in $(ALL_HDRGEN); do \
591 test -f $$file || touch $$file; \ 615 test -f $$file || touch $$file; \
@@ -600,7 +624,7 @@ depend:
600 test -s $$file || $(HOST_RM) $$file; \ 624 test -s $$file || $(HOST_RM) $$file; \
601 done 625 done
602 626
603.PHONY: default all amalg clean depend 627.PHONY: default all amalg clean libbc depend
604 628
605############################################################################## 629##############################################################################
606# Rules for generated files. 630# Rules for generated files.
@@ -610,7 +634,7 @@ $(MINILUA_T): $(MINILUA_O)
610 $(E) "HOSTLINK $@" 634 $(E) "HOSTLINK $@"
611 $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(MINILUA_O) $(MINILUA_LIBS) $(HOST_ALIBS) 635 $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(MINILUA_O) $(MINILUA_LIBS) $(HOST_ALIBS)
612 636
613host/buildvm_arch.h: $(DASM_DASC) $(DASM_DEP) lj_arch.h lua.h luaconf.h 637host/buildvm_arch.h: $(DASM_DASC) $(DASM_DEP) $(DASM_DIR)/*.lua lj_arch.h lua.h luaconf.h
614 $(E) "DYNASM $@" 638 $(E) "DYNASM $@"
615 $(Q)$(DASM) $(DASM_FLAGS) -o $@ $(DASM_DASC) 639 $(Q)$(DASM) $(DASM_FLAGS) -o $@ $(DASM_DASC)
616 640
@@ -657,10 +681,10 @@ lj_folddef.h: $(BUILDVM_T) lj_opt_fold.c
657 $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $< 681 $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $<
658 $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $< 682 $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $<
659 683
660%.o: %.s 684%.o: %.S
661 $(E) "ASM $@" 685 $(E) "ASM $@"
662 $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $< 686 $(Q)$(TARGET_DYNCC) $(TARGET_ASFLAGS) -c -o $(@:.o=_dyn.o) $<
663 $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $< 687 $(Q)$(TARGET_CC) $(TARGET_ASFLAGS) -c -o $@ $<
664 688
665$(LUAJIT_O): 689$(LUAJIT_O):
666 $(E) "CC $@" 690 $(E) "CC $@"
diff --git a/src/Makefile.dep b/src/Makefile.dep
index 9e14d617..3f26599e 100644
--- a/src/Makefile.dep
+++ b/src/Makefile.dep
@@ -1,66 +1,75 @@
1lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ 1lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
2 lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \ 2 lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \
3 lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h lj_alloc.h 3 lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h
4lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 4lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
5 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \ 5 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \
6 lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \ 6 lj_tab.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cconv.h \
7 lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \ 7 lj_ff.h lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \
8 lj_lib.h lj_libdef.h 8 lj_strfmt.h lj_lib.h lj_libdef.h
9lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ 9lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
10 lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_lib.h lj_libdef.h 10 lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \
11 lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \
12 lj_ffdef.h lj_lib.h lj_libdef.h
11lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 13lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
12 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \ 14 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \
13 lj_libdef.h 15 lj_libdef.h
14lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ 16lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
15 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \ 17 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \
16 lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \ 18 lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \
17 lj_ccallback.h lj_clib.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h 19 lj_ccallback.h lj_clib.h lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h \
20 lj_libdef.h
18lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h 21lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h
19lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ 22lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
20 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_ff.h \ 23 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_state.h \
21 lj_ffdef.h lj_lib.h lj_libdef.h 24 lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
22lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h \ 25lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
23 lj_obj.h lj_def.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \ 26 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \
24 lj_bc.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_target.h \ 27 lj_state.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \
25 lj_target_*.h lj_dispatch.h lj_vm.h lj_vmevent.h lj_lib.h luajit.h \ 28 lj_target.h lj_target_*.h lj_trace.h lj_dispatch.h lj_traceerr.h \
26 lj_libdef.h 29 lj_vm.h lj_vmevent.h lj_lib.h luajit.h lj_libdef.h
27lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 30lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
28 lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h 31 lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_prng.h lj_libdef.h
29lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \ 32lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
30 lj_arch.h lj_err.h lj_errmsg.h lj_lib.h lj_libdef.h 33 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_lib.h \
34 lj_libdef.h
31lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 35lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
32 lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h 36 lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h
33lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 37lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
34 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h \ 38 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \
35 lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h lj_char.h \ 39 lj_tab.h lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h \
36 lj_lib.h lj_libdef.h 40 lj_char.h lj_strfmt.h lj_lib.h lj_libdef.h
37lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \ 41lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
38 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_lib.h \ 42 lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \
39 lj_libdef.h 43 lj_tab.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
40lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h 44lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h \
45 lj_prng.h
41lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 46lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
42 lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \ 47 lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
43 lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \ 48 lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \
44 lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h 49 lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h lj_strfmt.h
45lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 50lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
46 lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \ 51 lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \
47 lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \ 52 lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \
48 lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \ 53 lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \
49 lj_asm_*.h 54 lj_asm_*.h
55lj_assert.o: lj_assert.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
50lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \ 56lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
51 lj_bcdef.h 57 lj_bcdef.h
52lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 58lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
53 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_bc.h lj_ctype.h \ 59 lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_bc.h \
54 lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h 60 lj_ctype.h lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h \
61 lj_strfmt.h
55lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 62lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
56 lj_gc.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h lj_ir.h \ 63 lj_gc.h lj_buf.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h \
57 lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h 64 lj_ir.h lj_strfmt.h lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h
65lj_buf.o: lj_buf.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
66 lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_strfmt.h
58lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 67lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
59 lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ctype.h lj_cconv.h \ 68 lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ir.h lj_ctype.h \
60 lj_cdata.h lj_carith.h 69 lj_cconv.h lj_cdata.h lj_carith.h lj_strscan.h
61lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 70lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
62 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \ 71 lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h \
63 lj_cdata.h lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ 72 lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
64 lj_traceerr.h 73 lj_traceerr.h
65lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \ 74lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \
66 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \ 75 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \
@@ -71,107 +80,120 @@ lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
71 lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \ 80 lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \
72 lj_ccallback.h 81 lj_ccallback.h
73lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 82lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
74 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \ 83 lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h
75 lj_cdata.h
76lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h 84lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h
77lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 85lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
78 lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \ 86 lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \
79 lj_cdata.h lj_clib.h 87 lj_cdata.h lj_clib.h lj_strfmt.h
80lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 88lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
81 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_ctype.h lj_cparse.h lj_frame.h \ 89 lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_ctype.h lj_cparse.h \
82 lj_bc.h lj_vm.h lj_char.h lj_strscan.h 90 lj_frame.h lj_bc.h lj_vm.h lj_char.h lj_strscan.h lj_strfmt.h
83lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 91lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
84 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h \ 92 lj_err.h lj_errmsg.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_gc.h \
85 lj_gc.h lj_cdata.h lj_cparse.h lj_cconv.h lj_clib.h lj_ccall.h lj_ff.h \ 93 lj_cdata.h lj_cparse.h lj_cconv.h lj_carith.h lj_clib.h lj_ccall.h \
86 lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ 94 lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
87 lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \ 95 lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \
88 lj_crecord.h 96 lj_crecord.h lj_strfmt.h
89lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 97lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
90 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_ccallback.h 98 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_strfmt.h lj_ctype.h \
99 lj_ccallback.h lj_buf.h
91lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 100lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
92 lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_state.h lj_frame.h \ 101 lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_gc.h lj_str.h lj_tab.h \
93 lj_bc.h lj_vm.h lj_jit.h lj_ir.h 102 lj_state.h lj_frame.h lj_bc.h lj_strfmt.h lj_jit.h lj_ir.h
94lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 103lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
95 lj_err.h lj_errmsg.h lj_func.h lj_str.h lj_tab.h lj_meta.h lj_debug.h \ 104 lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_func.h lj_tab.h \
96 lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h lj_jit.h lj_ir.h \ 105 lj_meta.h lj_debug.h lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h \
97 lj_ccallback.h lj_ctype.h lj_gc.h lj_trace.h lj_dispatch.h lj_traceerr.h \ 106 lj_strfmt.h lj_jit.h lj_ir.h lj_ccallback.h lj_ctype.h lj_trace.h \
98 lj_vm.h luajit.h 107 lj_dispatch.h lj_traceerr.h lj_profile.h lj_vm.h luajit.h
99lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \ 108lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \
100 lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \ 109 lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \
101 lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \ 110 lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
102 lj_traceerr.h lj_vm.h 111 lj_traceerr.h lj_vm.h lj_strfmt.h
103lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 112lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
104 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \ 113 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \
105 lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ 114 lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
106 lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \ 115 lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \
107 lj_vm.h lj_strscan.h lj_recdef.h 116 lj_vm.h lj_strscan.h lj_strfmt.h lj_recdef.h
108lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 117lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
109 lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \ 118 lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
110 lj_traceerr.h lj_vm.h 119 lj_traceerr.h lj_vm.h
111lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 120lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
112 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h \ 121 lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
113 lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h \ 122 lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h \
114 lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h 123 lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h
115lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 124lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
116 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_jit.h \ 125 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_buf.h \
117 lj_ir.h lj_dispatch.h 126 lj_str.h lj_strfmt.h lj_jit.h lj_ir.h lj_dispatch.h
118lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 127lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
119 lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \ 128 lj_buf.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \
120 lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h lj_carith.h \ 129 lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h \
121 lj_vm.h lj_strscan.h lj_lib.h 130 lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_prng.h
122lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 131lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
123 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h lualib.h \ 132 lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h \
124 lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h 133 lualib.h lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h \
134 lj_strfmt.h
125lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \ 135lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \
126 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \ 136 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \
127 lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_lib.h 137 lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lex.h \
138 lj_bcdump.h lj_lib.h
128lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \ 139lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
129 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_func.h lj_frame.h \ 140 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_func.h \
130 lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h 141 lj_frame.h lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h
131lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 142lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
132 lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \ 143 lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \
133 lj_dispatch.h lj_bc.h lj_traceerr.h lj_vm.h 144 lj_dispatch.h lj_bc.h lj_traceerr.h lj_prng.h lj_vm.h
134lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 145lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
135 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \ 146 lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_meta.h lj_frame.h \
136 lj_vm.h lj_strscan.h 147 lj_bc.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h
137lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h 148lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
138lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 149lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
139 lj_ir.h lj_jit.h lj_iropt.h 150 lj_ir.h lj_jit.h lj_iropt.h
140lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 151lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
141 lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ 152 lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h \
142 lj_bc.h lj_traceerr.h lj_ctype.h lj_gc.h lj_carith.h lj_vm.h \ 153 lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h \
143 lj_strscan.h lj_folddef.h 154 lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_folddef.h
144lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 155lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
145 lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \ 156 lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h lj_jit.h \
146 lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h lj_vm.h 157 lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \
158 lj_vm.h
147lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 159lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
148 lj_tab.h lj_ir.h lj_jit.h lj_iropt.h 160 lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h lj_dispatch.h lj_bc.h
149lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \ 161lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \
150 lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ 162 lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
151 lj_traceerr.h lj_vm.h lj_strscan.h 163 lj_traceerr.h lj_vm.h lj_strscan.h
152lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 164lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
153 lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h 165 lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h
154lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \ 166lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \
155 lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_ircall.h \ 167 lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h \
156 lj_iropt.h lj_vm.h 168 lj_jit.h lj_ircall.h lj_iropt.h lj_dispatch.h lj_bc.h lj_vm.h
157lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 169lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
158 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h \ 170 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_str.h lj_tab.h \
159 lj_state.h lj_bc.h lj_ctype.h lj_lex.h lj_parse.h lj_vm.h lj_vmevent.h 171 lj_func.h lj_state.h lj_bc.h lj_ctype.h lj_strfmt.h lj_lex.h lj_parse.h \
172 lj_vm.h lj_vmevent.h
173lj_profile.o: lj_profile.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
174 lj_buf.h lj_gc.h lj_str.h lj_frame.h lj_bc.h lj_debug.h lj_dispatch.h \
175 lj_jit.h lj_ir.h lj_trace.h lj_traceerr.h lj_profile.h luajit.h
176lj_prng.o: lj_prng.c lj_def.h lua.h luaconf.h lj_arch.h lj_prng.h
160lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 177lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
161 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \ 178 lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
162 lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h \ 179 lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_debug.h lj_ir.h lj_jit.h \
163 lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h lj_record.h \ 180 lj_ircall.h lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h \
164 lj_ffrecord.h lj_snap.h lj_vm.h 181 lj_record.h lj_ffrecord.h lj_snap.h lj_vm.h lj_prng.h
165lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 182lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
166 lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \ 183 lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \
167 lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \ 184 lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \
168 lj_target_*.h lj_ctype.h lj_cdata.h 185 lj_target_*.h lj_ctype.h lj_cdata.h
169lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 186lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
170 lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_meta.h \ 187 lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h \
171 lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h lj_ir.h \ 188 lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h \
172 lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h 189 lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_prng.h lj_lex.h \
190 lj_alloc.h luajit.h
173lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 191lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
174 lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_char.h 192 lj_err.h lj_errmsg.h lj_str.h lj_char.h
193lj_strfmt.o: lj_strfmt.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
194 lj_buf.h lj_gc.h lj_str.h lj_state.h lj_char.h lj_strfmt.h
195lj_strfmt_num.o: lj_strfmt_num.c lj_obj.h lua.h luaconf.h lj_def.h \
196 lj_arch.h lj_buf.h lj_gc.h lj_str.h lj_strfmt.h
175lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 197lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
176 lj_char.h lj_strscan.h 198 lj_char.h lj_strscan.h
177lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ 199lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
@@ -180,7 +202,7 @@ lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
180 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \ 202 lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \
181 lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \ 203 lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \
182 lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \ 204 lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \
183 lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h 205 lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h lj_prng.h
184lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 206lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
185 lj_gc.h lj_udata.h 207 lj_gc.h lj_udata.h
186lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 208lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
@@ -188,19 +210,21 @@ lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
188 lj_vm.h lj_vmevent.h 210 lj_vm.h lj_vmevent.h
189lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ 211lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
190 lj_ir.h lj_vm.h 212 lj_ir.h lj_vm.h
191ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \ 213ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_assert.c lj_obj.h \
192 lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h \ 214 lj_def.h lj_arch.h lj_gc.c lj_gc.h lj_err.h lj_errmsg.h lj_buf.h \
193 lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h \ 215 lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h \
194 lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_err.c \ 216 lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
195 lj_debug.h lj_ff.h lj_ffdef.h lj_char.c lj_char.h lj_bc.c lj_bcdef.h \ 217 lj_traceerr.h lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h \
196 lj_obj.c lj_str.c lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h \ 218 lj_char.c lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c \
197 lj_debug.c lj_state.c lj_lex.h lj_alloc.h lj_dispatch.c lj_ccallback.h \ 219 lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h lj_debug.c \
198 luajit.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c lj_api.c \ 220 lj_prng.c lj_prng.h lj_state.c lj_lex.h lj_alloc.h luajit.h \
199 lj_lex.c lualib.h lj_parse.h lj_parse.c lj_bcread.c lj_bcdump.h \ 221 lj_dispatch.c lj_ccallback.h lj_profile.h lj_vmevent.c lj_vmevent.h \
200 lj_bcwrite.c lj_load.c lj_ctype.c lj_cdata.c lj_cconv.h lj_cconv.c \ 222 lj_vmmath.c lj_strscan.c lj_strfmt.c lj_strfmt_num.c lj_api.c \
201 lj_ccall.c lj_ccall.h lj_ccallback.c lj_target.h lj_target_*.h \ 223 lj_profile.c lj_lex.c lualib.h lj_parse.h lj_parse.c lj_bcread.c \
202 lj_mcode.h lj_carith.c lj_carith.h lj_clib.c lj_clib.h lj_cparse.c \ 224 lj_bcdump.h lj_bcwrite.c lj_load.c lj_ctype.c lj_cdata.c lj_cconv.h \
203 lj_cparse.h lj_lib.c lj_lib.h lj_ir.c lj_ircall.h lj_iropt.h \ 225 lj_cconv.c lj_ccall.c lj_ccall.h lj_ccallback.c lj_target.h \
226 lj_target_*.h lj_mcode.h lj_carith.c lj_carith.h lj_clib.c lj_clib.h \
227 lj_cparse.c lj_cparse.h lj_lib.c lj_ir.c lj_ircall.h lj_iropt.h \
204 lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c lj_opt_dce.c \ 228 lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c lj_opt_dce.c \
205 lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c lj_mcode.c \ 229 lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c lj_mcode.c \
206 lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \ 230 lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \
@@ -220,7 +244,8 @@ host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \
220host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \ 244host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \
221 luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h 245 luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h
222host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \ 246host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \
223 lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_lib.h lj_obj.h 247 lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_bc.h lj_lib.h lj_obj.h \
248 host/buildvm_libbc.h
224host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \ 249host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \
225 luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h 250 luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h
226host/minilua.o: host/minilua.c 251host/minilua.o: host/minilua.c
diff --git a/src/host/buildvm.c b/src/host/buildvm.c
index 05e0dbdb..27e14d57 100644
--- a/src/host/buildvm.c
+++ b/src/host/buildvm.c
@@ -59,10 +59,10 @@ static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type);
59#include "../dynasm/dasm_x86.h" 59#include "../dynasm/dasm_x86.h"
60#elif LJ_TARGET_ARM 60#elif LJ_TARGET_ARM
61#include "../dynasm/dasm_arm.h" 61#include "../dynasm/dasm_arm.h"
62#elif LJ_TARGET_ARM64
63#include "../dynasm/dasm_arm64.h"
62#elif LJ_TARGET_PPC 64#elif LJ_TARGET_PPC
63#include "../dynasm/dasm_ppc.h" 65#include "../dynasm/dasm_ppc.h"
64#elif LJ_TARGET_PPCSPE
65#include "../dynasm/dasm_ppc.h"
66#elif LJ_TARGET_MIPS 66#elif LJ_TARGET_MIPS
67#include "../dynasm/dasm_mips.h" 67#include "../dynasm/dasm_mips.h"
68#else 68#else
@@ -110,11 +110,11 @@ static const char *sym_decorate(BuildCtx *ctx,
110 if (p) { 110 if (p) {
111#if LJ_TARGET_X86ORX64 111#if LJ_TARGET_X86ORX64
112 if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj)) 112 if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj))
113 name[0] = '@'; 113 name[0] = name[1] == 'R' ? '_' : '@'; /* Just for _RtlUnwind@16. */
114 else 114 else
115 *p = '\0'; 115 *p = '\0';
116#elif (LJ_TARGET_PPC || LJ_TARGET_PPCSPE) && !LJ_TARGET_CONSOLE 116#elif LJ_TARGET_PPC && !LJ_TARGET_CONSOLE
117 /* Keep @plt. */ 117 /* Keep @plt etc. */
118#else 118#else
119 *p = '\0'; 119 *p = '\0';
120#endif 120#endif
@@ -179,6 +179,7 @@ static int build_code(BuildCtx *ctx)
179 ctx->nreloc = 0; 179 ctx->nreloc = 0;
180 180
181 ctx->globnames = globnames; 181 ctx->globnames = globnames;
182 ctx->extnames = extnames;
182 ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *)); 183 ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
183 ctx->nrelocsym = 0; 184 ctx->nrelocsym = 0;
184 for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1; 185 for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
@@ -320,20 +321,20 @@ static void emit_vmdef(BuildCtx *ctx)
320 char buf[80]; 321 char buf[80];
321 int i; 322 int i;
322 fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n"); 323 fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n");
323 fprintf(ctx->fp, "module(...)\n\n"); 324 fprintf(ctx->fp, "return {\n\n");
324 325
325 fprintf(ctx->fp, "bcnames = \""); 326 fprintf(ctx->fp, "bcnames = \"");
326 for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]); 327 for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]);
327 fprintf(ctx->fp, "\"\n\n"); 328 fprintf(ctx->fp, "\",\n\n");
328 329
329 fprintf(ctx->fp, "irnames = \""); 330 fprintf(ctx->fp, "irnames = \"");
330 for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]); 331 for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]);
331 fprintf(ctx->fp, "\"\n\n"); 332 fprintf(ctx->fp, "\",\n\n");
332 333
333 fprintf(ctx->fp, "irfpm = { [0]="); 334 fprintf(ctx->fp, "irfpm = { [0]=");
334 for (i = 0; irfpm_names[i]; i++) 335 for (i = 0; irfpm_names[i]; i++)
335 fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i])); 336 fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i]));
336 fprintf(ctx->fp, "}\n\n"); 337 fprintf(ctx->fp, "},\n\n");
337 338
338 fprintf(ctx->fp, "irfield = { [0]="); 339 fprintf(ctx->fp, "irfield = { [0]=");
339 for (i = 0; irfield_names[i]; i++) { 340 for (i = 0; irfield_names[i]; i++) {
@@ -343,17 +344,17 @@ static void emit_vmdef(BuildCtx *ctx)
343 if (p) *p = '.'; 344 if (p) *p = '.';
344 fprintf(ctx->fp, "\"%s\", ", buf); 345 fprintf(ctx->fp, "\"%s\", ", buf);
345 } 346 }
346 fprintf(ctx->fp, "}\n\n"); 347 fprintf(ctx->fp, "},\n\n");
347 348
348 fprintf(ctx->fp, "ircall = {\n[0]="); 349 fprintf(ctx->fp, "ircall = {\n[0]=");
349 for (i = 0; ircall_names[i]; i++) 350 for (i = 0; ircall_names[i]; i++)
350 fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]); 351 fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]);
351 fprintf(ctx->fp, "}\n\n"); 352 fprintf(ctx->fp, "},\n\n");
352 353
353 fprintf(ctx->fp, "traceerr = {\n[0]="); 354 fprintf(ctx->fp, "traceerr = {\n[0]=");
354 for (i = 0; trace_errors[i]; i++) 355 for (i = 0; trace_errors[i]; i++)
355 fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]); 356 fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]);
356 fprintf(ctx->fp, "}\n\n"); 357 fprintf(ctx->fp, "},\n\n");
357} 358}
358 359
359/* -- Argument parsing ---------------------------------------------------- */ 360/* -- Argument parsing ---------------------------------------------------- */
@@ -490,6 +491,7 @@ int main(int argc, char **argv)
490 case BUILD_vmdef: 491 case BUILD_vmdef:
491 emit_vmdef(ctx); 492 emit_vmdef(ctx);
492 emit_lib(ctx); 493 emit_lib(ctx);
494 fprintf(ctx->fp, "}\n\n");
493 break; 495 break;
494 case BUILD_ffdef: 496 case BUILD_ffdef:
495 case BUILD_libdef: 497 case BUILD_libdef:
diff --git a/src/host/buildvm.h b/src/host/buildvm.h
index a440cfc3..3fdff65b 100644
--- a/src/host/buildvm.h
+++ b/src/host/buildvm.h
@@ -82,6 +82,7 @@ typedef struct BuildCtx {
82 const char *beginsym; 82 const char *beginsym;
83 /* Strings generated by DynASM. */ 83 /* Strings generated by DynASM. */
84 const char *const *globnames; 84 const char *const *globnames;
85 const char *const *extnames;
85 const char *dasm_ident; 86 const char *dasm_ident;
86 const char *dasm_arch; 87 const char *dasm_arch;
87 /* Relocations. */ 88 /* Relocations. */
diff --git a/src/host/buildvm_asm.c b/src/host/buildvm_asm.c
index 2cb7d451..b9cfa049 100644
--- a/src/host/buildvm_asm.c
+++ b/src/host/buildvm_asm.c
@@ -51,8 +51,8 @@ static const char *const jccnames[] = {
51 "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg" 51 "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
52}; 52};
53 53
54/* Emit relocation for the incredibly stupid OSX assembler. */ 54/* Emit x86/x64 text relocations. */
55static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n, 55static void emit_asm_reloc_text(BuildCtx *ctx, uint8_t *cp, int n,
56 const char *sym) 56 const char *sym)
57{ 57{
58 const char *opname = NULL; 58 const char *opname = NULL;
@@ -71,6 +71,20 @@ err:
71 exit(1); 71 exit(1);
72 } 72 }
73 emit_asm_bytes(ctx, cp, n); 73 emit_asm_bytes(ctx, cp, n);
74 if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) {
75 /* Various fixups for external symbols outside of our binary. */
76 if (ctx->mode == BUILD_elfasm) {
77 if (LJ_32)
78 fprintf(ctx->fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym);
79 fprintf(ctx->fp, "\t%s %s@PLT\n", opname, sym);
80 if (LJ_32)
81 fprintf(ctx->fp, "#endif\n");
82 return;
83 } else if (LJ_32 && ctx->mode == BUILD_machasm) {
84 fprintf(ctx->fp, "\t%s L%s$stub\n", opname, sym);
85 return;
86 }
87 }
74 fprintf(ctx->fp, "\t%s %s\n", opname, sym); 88 fprintf(ctx->fp, "\t%s %s\n", opname, sym);
75} 89}
76#else 90#else
@@ -79,10 +93,14 @@ static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n)
79{ 93{
80 int i; 94 int i;
81 for (i = 0; i < n; i += 4) { 95 for (i = 0; i < n; i += 4) {
96 uint32_t ins = *(uint32_t *)(p+i);
97#if LJ_TARGET_ARM64 && LJ_BE
98 ins = lj_bswap(ins); /* ARM64 instructions are always little-endian. */
99#endif
82 if ((i & 15) == 0) 100 if ((i & 15) == 0)
83 fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i)); 101 fprintf(ctx->fp, "\t.long 0x%08x", ins);
84 else 102 else
85 fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i)); 103 fprintf(ctx->fp, ",0x%08x", ins);
86 if ((i & 15) == 12) putc('\n', ctx->fp); 104 if ((i & 15) == 12) putc('\n', ctx->fp);
87 } 105 }
88 if ((n & 15) != 0) putc('\n', ctx->fp); 106 if ((n & 15) != 0) putc('\n', ctx->fp);
@@ -107,7 +125,16 @@ static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n,
107 ins, sym); 125 ins, sym);
108 exit(1); 126 exit(1);
109 } 127 }
110#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE 128#elif LJ_TARGET_ARM64
129 if ((ins >> 26) == 0x25u) {
130 fprintf(ctx->fp, "\tbl %s\n", sym);
131 } else {
132 fprintf(stderr,
133 "Error: unsupported opcode %08x for %s symbol relocation.\n",
134 ins, sym);
135 exit(1);
136 }
137#elif LJ_TARGET_PPC
111#if LJ_TARGET_PS3 138#if LJ_TARGET_PS3
112#define TOCPREFIX "." 139#define TOCPREFIX "."
113#else 140#else
@@ -228,11 +255,20 @@ void emit_asm(BuildCtx *ctx)
228 255
229#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND 256#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
230 /* This should really be moved into buildvm_arm.dasc. */ 257 /* This should really be moved into buildvm_arm.dasc. */
258#if LJ_ARCH_HASFPU
259 fprintf(ctx->fp,
260 ".fnstart\n"
261 ".save {r5, r6, r7, r8, r9, r10, r11, lr}\n"
262 ".vsave {d8-d15}\n"
263 ".save {r4}\n"
264 ".pad #28\n");
265#else
231 fprintf(ctx->fp, 266 fprintf(ctx->fp,
232 ".fnstart\n" 267 ".fnstart\n"
233 ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n" 268 ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
234 ".pad #28\n"); 269 ".pad #28\n");
235#endif 270#endif
271#endif
236#if LJ_TARGET_MIPS 272#if LJ_TARGET_MIPS
237 fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n"); 273 fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n");
238#endif 274#endif
@@ -255,8 +291,9 @@ void emit_asm(BuildCtx *ctx)
255 BuildReloc *r = &ctx->reloc[rel]; 291 BuildReloc *r = &ctx->reloc[rel];
256 int n = r->ofs - ofs; 292 int n = r->ofs - ofs;
257#if LJ_TARGET_X86ORX64 293#if LJ_TARGET_X86ORX64
258 if (ctx->mode == BUILD_machasm && r->type != 0) { 294 if (r->type != 0 &&
259 emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]); 295 (ctx->mode == BUILD_elfasm || ctx->mode == BUILD_machasm)) {
296 emit_asm_reloc_text(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
260 } else { 297 } else {
261 emit_asm_bytes(ctx, ctx->code+ofs, n); 298 emit_asm_bytes(ctx, ctx->code+ofs, n);
262 emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]); 299 emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
@@ -290,10 +327,7 @@ void emit_asm(BuildCtx *ctx)
290#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA) 327#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA)
291 fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n"); 328 fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n");
292#endif 329#endif
293#if LJ_TARGET_PPCSPE 330#if LJ_TARGET_PPC && !LJ_TARGET_PS3 && !LJ_ABI_SOFTFP
294 /* Soft-float ABI + SPE. */
295 fprintf(ctx->fp, "\t.gnu_attribute 4, 2\n\t.gnu_attribute 8, 3\n");
296#elif LJ_TARGET_PPC && !LJ_TARGET_PS3
297 /* Hard-float ABI. */ 331 /* Hard-float ABI. */
298 fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n"); 332 fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n");
299#endif 333#endif
diff --git a/src/host/buildvm_lib.c b/src/host/buildvm_lib.c
index 3c64626c..88014b23 100644
--- a/src/host/buildvm_lib.c
+++ b/src/host/buildvm_lib.c
@@ -5,7 +5,9 @@
5 5
6#include "buildvm.h" 6#include "buildvm.h"
7#include "lj_obj.h" 7#include "lj_obj.h"
8#include "lj_bc.h"
8#include "lj_lib.h" 9#include "lj_lib.h"
10#include "buildvm_libbc.h"
9 11
10/* Context for library definitions. */ 12/* Context for library definitions. */
11static uint8_t obuf[8192]; 13static uint8_t obuf[8192];
@@ -151,6 +153,62 @@ static void libdef_func(BuildCtx *ctx, char *p, int arg)
151 regfunc = REGFUNC_OK; 153 regfunc = REGFUNC_OK;
152} 154}
153 155
156static uint8_t *libdef_uleb128(uint8_t *p, uint32_t *vv)
157{
158 uint32_t v = *p++;
159 if (v >= 0x80) {
160 int sh = 0; v &= 0x7f;
161 do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
162 }
163 *vv = v;
164 return p;
165}
166
167static void libdef_fixupbc(uint8_t *p)
168{
169 uint32_t i, sizebc;
170 p += 4;
171 p = libdef_uleb128(p, &sizebc);
172 p = libdef_uleb128(p, &sizebc);
173 p = libdef_uleb128(p, &sizebc);
174 for (i = 0; i < sizebc; i++, p += 4) {
175 uint8_t op = p[libbc_endian ? 3 : 0];
176 uint8_t ra = p[libbc_endian ? 2 : 1];
177 uint8_t rc = p[libbc_endian ? 1 : 2];
178 uint8_t rb = p[libbc_endian ? 0 : 3];
179 if (!LJ_DUALNUM && op == BC_ISTYPE && rc == ~LJ_TNUMX+1) {
180 op = BC_ISNUM; rc++;
181 }
182 p[LJ_ENDIAN_SELECT(0, 3)] = op;
183 p[LJ_ENDIAN_SELECT(1, 2)] = ra;
184 p[LJ_ENDIAN_SELECT(2, 1)] = rc;
185 p[LJ_ENDIAN_SELECT(3, 0)] = rb;
186 }
187}
188
189static void libdef_lua(BuildCtx *ctx, char *p, int arg)
190{
191 UNUSED(arg);
192 if (ctx->mode == BUILD_libdef) {
193 int i;
194 for (i = 0; libbc_map[i].name != NULL; i++) {
195 if (!strcmp(libbc_map[i].name, p)) {
196 int ofs = libbc_map[i].ofs;
197 int len = libbc_map[i+1].ofs - ofs;
198 obuf[2]++; /* Bump hash table size. */
199 *optr++ = LIBINIT_LUA;
200 libdef_name(p, 0);
201 memcpy(optr, libbc_code + ofs, len);
202 libdef_fixupbc(optr);
203 optr += len;
204 return;
205 }
206 }
207 fprintf(stderr, "Error: missing libbc definition for %s\n", p);
208 exit(1);
209 }
210}
211
154static uint32_t find_rec(char *name) 212static uint32_t find_rec(char *name)
155{ 213{
156 char *p = (char *)obuf; 214 char *p = (char *)obuf;
@@ -277,6 +335,7 @@ static const LibDefHandler libdef_handlers[] = {
277 { "CF(", ")", libdef_func, LIBINIT_CF }, 335 { "CF(", ")", libdef_func, LIBINIT_CF },
278 { "ASM(", ")", libdef_func, LIBINIT_ASM }, 336 { "ASM(", ")", libdef_func, LIBINIT_ASM },
279 { "ASM_(", ")", libdef_func, LIBINIT_ASM_ }, 337 { "ASM_(", ")", libdef_func, LIBINIT_ASM_ },
338 { "LUA(", ")", libdef_lua, 0 },
280 { "REC(", ")", libdef_rec, 0 }, 339 { "REC(", ")", libdef_rec, 0 },
281 { "PUSH(", ")", libdef_push, 0 }, 340 { "PUSH(", ")", libdef_push, 0 },
282 { "SET(", ")", libdef_set, 0 }, 341 { "SET(", ")", libdef_set, 0 },
@@ -373,7 +432,7 @@ void emit_lib(BuildCtx *ctx)
373 "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n", 432 "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n",
374 ffasmfunc); 433 ffasmfunc);
375 } else if (ctx->mode == BUILD_vmdef) { 434 } else if (ctx->mode == BUILD_vmdef) {
376 fprintf(ctx->fp, "}\n\n"); 435 fprintf(ctx->fp, "},\n\n");
377 } else if (ctx->mode == BUILD_bcdef) { 436 } else if (ctx->mode == BUILD_bcdef) {
378 int i; 437 int i;
379 fprintf(ctx->fp, "\n};\n\n"); 438 fprintf(ctx->fp, "\n};\n\n");
diff --git a/src/host/buildvm_libbc.h b/src/host/buildvm_libbc.h
new file mode 100644
index 00000000..b2600bd5
--- /dev/null
+++ b/src/host/buildvm_libbc.h
@@ -0,0 +1,56 @@
1/* This is a generated file. DO NOT EDIT! */
2
3static const int libbc_endian = 0;
4
5static const uint8_t libbc_code[] = {
6#if LJ_FR2
70,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0,
80,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3,
916,0,5,0,21,1,0,0,76,1,2,0,0,2,10,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3,
100,0,41,4,1,0,77,2,8,128,18,6,1,0,18,8,5,0,59,9,5,0,66,6,3,2,10,6,0,0,88,7,1,
11128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,11,0,0,0,16,16,0,12,0,16,1,9,0,43,2,
120,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,9,5,0,18,10,6,0,66,7,3,2,10,7,
130,0,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12,
140,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128,
158,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14,
160,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2,
170,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4,
182,0,76,3,2,0,75,0,1,0,0,2,0,5,12,0,0,0,35,16,0,12,0,16,1,14,0,16,2,14,0,16,
193,14,0,11,4,0,0,88,5,1,128,18,4,0,0,16,4,12,0,3,1,2,0,88,5,24,128,33,5,1,3,
200,2,3,0,88,6,4,128,2,3,1,0,88,6,2,128,4,4,0,0,88,6,9,128,18,6,1,0,18,7,2,0,
2141,8,1,0,77,6,4,128,32,10,5,9,59,11,9,0,64,11,10,4,79,6,252,127,88,6,8,128,
2218,6,2,0,18,7,1,0,41,8,255,255,77,6,4,128,32,10,5,9,59,11,9,0,64,11,10,4,79,
236,252,127,76,4,2,0,0
24#else
250,1,2,0,0,1,2,24,1,0,0,76,1,2,0,241,135,158,166,3,220,203,178,130,4,0,1,2,0,
260,1,2,24,1,0,0,76,1,2,0,243,244,148,165,20,198,190,199,252,3,0,1,2,0,0,0,3,
2716,0,5,0,21,1,0,0,76,1,2,0,0,2,9,0,0,0,15,16,0,12,0,16,1,9,0,41,2,1,0,21,3,
280,0,41,4,1,0,77,2,8,128,18,6,1,0,18,7,5,0,59,8,5,0,66,6,3,2,10,6,0,0,88,7,1,
29128,76,6,2,0,79,2,248,127,75,0,1,0,0,2,10,0,0,0,16,16,0,12,0,16,1,9,0,43,2,
300,0,18,3,0,0,41,4,0,0,88,5,7,128,18,7,1,0,18,8,5,0,18,9,6,0,66,7,3,2,10,7,0,
310,88,8,1,128,76,7,2,0,70,5,3,3,82,5,247,127,75,0,1,0,0,1,2,0,0,0,3,16,0,12,
320,21,1,0,0,76,1,2,0,0,2,10,0,0,2,30,16,0,12,0,21,2,0,0,11,1,0,0,88,3,7,128,
338,2,0,0,88,3,23,128,59,3,2,0,43,4,0,0,64,4,2,0,76,3,2,0,88,3,18,128,16,1,14,
340,41,3,1,0,3,3,1,0,88,3,14,128,3,1,2,0,88,3,12,128,59,3,1,0,22,4,1,1,18,5,2,
350,41,6,1,0,77,4,4,128,23,8,1,7,59,9,7,0,64,9,8,0,79,4,252,127,43,4,0,0,64,4,
362,0,76,3,2,0,75,0,1,0,0,2,0,5,12,0,0,0,35,16,0,12,0,16,1,14,0,16,2,14,0,16,
373,14,0,11,4,0,0,88,5,1,128,18,4,0,0,16,4,12,0,3,1,2,0,88,5,24,128,33,5,1,3,
380,2,3,0,88,6,4,128,2,3,1,0,88,6,2,128,4,4,0,0,88,6,9,128,18,6,1,0,18,7,2,0,
3941,8,1,0,77,6,4,128,32,10,5,9,59,11,9,0,64,11,10,4,79,6,252,127,88,6,8,128,
4018,6,2,0,18,7,1,0,41,8,255,255,77,6,4,128,32,10,5,9,59,11,9,0,64,11,10,4,79,
416,252,127,76,4,2,0,0
42#endif
43};
44
45static const struct { const char *name; int ofs; } libbc_map[] = {
46{"math_deg",0},
47{"math_rad",25},
48{"string_len",50},
49{"table_foreachi",69},
50{"table_foreach",136},
51{"table_getn",207},
52{"table_remove",226},
53{"table_move",355},
54{NULL,502}
55};
56
diff --git a/src/host/buildvm_peobj.c b/src/host/buildvm_peobj.c
index 876b0add..01f9dac4 100644
--- a/src/host/buildvm_peobj.c
+++ b/src/host/buildvm_peobj.c
@@ -9,7 +9,7 @@
9#include "buildvm.h" 9#include "buildvm.h"
10#include "lj_bc.h" 10#include "lj_bc.h"
11 11
12#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC 12#if LJ_TARGET_X86ORX64
13 13
14/* Context for PE object emitter. */ 14/* Context for PE object emitter. */
15static char *strtab; 15static char *strtab;
@@ -93,12 +93,6 @@ typedef struct PEsymaux {
93#define PEOBJ_RELOC_ADDR32NB 0x03 93#define PEOBJ_RELOC_ADDR32NB 0x03
94#define PEOBJ_RELOC_OFS 0 94#define PEOBJ_RELOC_OFS 0
95#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */ 95#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
96#elif LJ_TARGET_PPC
97#define PEOBJ_ARCH_TARGET 0x01f2
98#define PEOBJ_RELOC_REL32 0x06
99#define PEOBJ_RELOC_DIR32 0x02
100#define PEOBJ_RELOC_OFS (-4)
101#define PEOBJ_TEXT_FLAGS 0x60400020 /* 60=r+x, 40=align8, 20=code. */
102#endif 96#endif
103 97
104/* Section numbers (0-based). */ 98/* Section numbers (0-based). */
@@ -109,6 +103,8 @@ enum {
109#if LJ_TARGET_X64 103#if LJ_TARGET_X64
110 PEOBJ_SECT_PDATA, 104 PEOBJ_SECT_PDATA,
111 PEOBJ_SECT_XDATA, 105 PEOBJ_SECT_XDATA,
106#elif LJ_TARGET_X86
107 PEOBJ_SECT_SXDATA,
112#endif 108#endif
113 PEOBJ_SECT_RDATA_Z, 109 PEOBJ_SECT_RDATA_Z,
114 PEOBJ_NSECTIONS 110 PEOBJ_NSECTIONS
@@ -208,6 +204,13 @@ void emit_peobj(BuildCtx *ctx)
208 sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE; 204 sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE;
209 /* Flags: 40 = read, 30 = align4, 40 = initialized data. */ 205 /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
210 pesect[PEOBJ_SECT_XDATA].flags = 0x40300040; 206 pesect[PEOBJ_SECT_XDATA].flags = 0x40300040;
207#elif LJ_TARGET_X86
208 memcpy(pesect[PEOBJ_SECT_SXDATA].name, ".sxdata", sizeof(".sxdata")-1);
209 pesect[PEOBJ_SECT_SXDATA].ofs = sofs;
210 sofs += (pesect[PEOBJ_SECT_SXDATA].size = 4);
211 pesect[PEOBJ_SECT_SXDATA].relocofs = sofs;
212 /* Flags: 40 = read, 30 = align4, 02 = lnk_info, 40 = initialized data. */
213 pesect[PEOBJ_SECT_SXDATA].flags = 0x40300240;
211#endif 214#endif
212 215
213 memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1); 216 memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1);
@@ -232,7 +235,7 @@ void emit_peobj(BuildCtx *ctx)
232 nrsym = ctx->nrelocsym; 235 nrsym = ctx->nrelocsym;
233 pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym; 236 pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym;
234#if LJ_TARGET_X64 237#if LJ_TARGET_X64
235 pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */ 238 pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win. */
236#endif 239#endif
237 240
238 /* Write PE object header and all sections. */ 241 /* Write PE object header and all sections. */
@@ -242,15 +245,8 @@ void emit_peobj(BuildCtx *ctx)
242 /* Write .text section. */ 245 /* Write .text section. */
243 host_endian.u = 1; 246 host_endian.u = 1;
244 if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) { 247 if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) {
245#if LJ_TARGET_PPC
246 uint32_t *p = (uint32_t *)ctx->code;
247 int n = (int)(ctx->codesz >> 2);
248 for (i = 0; i < n; i++, p++)
249 *p = lj_bswap(*p); /* Byteswap .text section. */
250#else
251 fprintf(stderr, "Error: different byte order for host and target\n"); 248 fprintf(stderr, "Error: different byte order for host and target\n");
252 exit(1); 249 exit(1);
253#endif
254 } 250 }
255 owrite(ctx, ctx->code, ctx->codesz); 251 owrite(ctx, ctx->code, ctx->codesz);
256 for (i = 0; i < ctx->nreloc; i++) { 252 for (i = 0; i < ctx->nreloc; i++) {
@@ -312,6 +308,19 @@ void emit_peobj(BuildCtx *ctx)
312 reloc.type = PEOBJ_RELOC_ADDR32NB; 308 reloc.type = PEOBJ_RELOC_ADDR32NB;
313 owrite(ctx, &reloc, PEOBJ_RELOC_SIZE); 309 owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
314 } 310 }
311#elif LJ_TARGET_X86
312 /* Write .sxdata section. */
313 for (i = 0; i < nrsym; i++) {
314 if (!strcmp(ctx->relocsym[i], "_lj_err_unwind_win")) {
315 uint32_t symidx = 1+2+i;
316 owrite(ctx, &symidx, 4);
317 break;
318 }
319 }
320 if (i == nrsym) {
321 fprintf(stderr, "Error: extern lj_err_unwind_win not used\n");
322 exit(1);
323 }
315#endif 324#endif
316 325
317 /* Write .rdata$Z section. */ 326 /* Write .rdata$Z section. */
@@ -333,8 +342,10 @@ void emit_peobj(BuildCtx *ctx)
333#if LJ_TARGET_X64 342#if LJ_TARGET_X64
334 emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA); 343 emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA);
335 emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA); 344 emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA);
336 emit_peobj_sym(ctx, "lj_err_unwind_win64", 0, 345 emit_peobj_sym(ctx, "lj_err_unwind_win", 0,
337 PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN); 346 PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
347#elif LJ_TARGET_X86
348 emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_SXDATA);
338#endif 349#endif
339 350
340 emit_peobj_sym(ctx, ctx->beginsym, 0, 351 emit_peobj_sym(ctx, ctx->beginsym, 0,
diff --git a/src/host/genlibbc.lua b/src/host/genlibbc.lua
new file mode 100644
index 00000000..56899546
--- /dev/null
+++ b/src/host/genlibbc.lua
@@ -0,0 +1,197 @@
1----------------------------------------------------------------------------
2-- Lua script to dump the bytecode of the library functions written in Lua.
3-- The resulting 'buildvm_libbc.h' is used for the build process of LuaJIT.
4----------------------------------------------------------------------------
5-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
6-- Released under the MIT license. See Copyright Notice in luajit.h
7----------------------------------------------------------------------------
8
9local ffi = require("ffi")
10local bit = require("bit")
11local vmdef = require("jit.vmdef")
12local bcnames = vmdef.bcnames
13
14local format = string.format
15
16local isbe = (string.byte(string.dump(function() end), 5) % 2 == 1)
17
18local function usage(arg)
19 io.stderr:write("Usage: ", arg and arg[0] or "genlibbc",
20 " [-o buildvm_libbc.h] lib_*.c\n")
21 os.exit(1)
22end
23
24local function parse_arg(arg)
25 local outfile = "-"
26 if not (arg and arg[1]) then
27 usage(arg)
28 end
29 if arg[1] == "-o" then
30 outfile = arg[2]
31 if not outfile then usage(arg) end
32 table.remove(arg, 1)
33 table.remove(arg, 1)
34 end
35 return outfile
36end
37
38local function read_files(names)
39 local src = ""
40 for _,name in ipairs(names) do
41 local fp = assert(io.open(name))
42 src = src .. fp:read("*a")
43 fp:close()
44 end
45 return src
46end
47
48local function transform_lua(code)
49 local fixup = {}
50 local n = -30000
51 code = string.gsub(code, "CHECK_(%w*)%((.-)%)", function(tp, var)
52 n = n + 1
53 fixup[n] = { "CHECK", tp }
54 return format("%s=%d", var, n)
55 end)
56 code = string.gsub(code, "PAIRS%((.-)%)", function(var)
57 fixup.PAIRS = true
58 return format("nil, %s, 0", var)
59 end)
60 return "return "..code, fixup
61end
62
63local function read_uleb128(p)
64 local v = p[0]; p = p + 1
65 if v >= 128 then
66 local sh = 7; v = v - 128
67 repeat
68 local r = p[0]
69 v = v + bit.lshift(bit.band(r, 127), sh)
70 sh = sh + 7
71 p = p + 1
72 until r < 128
73 end
74 return p, v
75end
76
77-- ORDER LJ_T
78local name2itype = {
79 str = 5, func = 9, tab = 12, int = 14, num = 15
80}
81
82local BC = {}
83for i=0,#bcnames/6-1 do
84 BC[string.gsub(string.sub(bcnames, i*6+1, i*6+6), " ", "")] = i
85end
86local xop, xra = isbe and 3 or 0, isbe and 2 or 1
87local xrc, xrb = isbe and 1 or 2, isbe and 0 or 3
88
89local function fixup_dump(dump, fixup)
90 local buf = ffi.new("uint8_t[?]", #dump+1, dump)
91 local p = buf+5
92 local n, sizebc
93 p, n = read_uleb128(p)
94 local start = p
95 p = p + 4
96 p = read_uleb128(p)
97 p = read_uleb128(p)
98 p, sizebc = read_uleb128(p)
99 local rawtab = {}
100 for i=0,sizebc-1 do
101 local op = p[xop]
102 if op == BC.KSHORT then
103 local rd = p[xrc] + 256*p[xrb]
104 rd = bit.arshift(bit.lshift(rd, 16), 16)
105 local f = fixup[rd]
106 if f then
107 if f[1] == "CHECK" then
108 local tp = f[2]
109 if tp == "tab" then rawtab[p[xra]] = true end
110 p[xop] = tp == "num" and BC.ISNUM or BC.ISTYPE
111 p[xrb] = 0
112 p[xrc] = name2itype[tp]
113 else
114 error("unhandled fixup type: "..f[1])
115 end
116 end
117 elseif op == BC.TGETV then
118 if rawtab[p[xrb]] then
119 p[xop] = BC.TGETR
120 end
121 elseif op == BC.TSETV then
122 if rawtab[p[xrb]] then
123 p[xop] = BC.TSETR
124 end
125 elseif op == BC.ITERC then
126 if fixup.PAIRS then
127 p[xop] = BC.ITERN
128 end
129 end
130 p = p + 4
131 end
132 return ffi.string(start, n)
133end
134
135local function find_defs(src)
136 local defs = {}
137 for name, code in string.gmatch(src, "LJLIB_LUA%(([^)]*)%)%s*/%*(.-)%*/") do
138 local env = {}
139 local tcode, fixup = transform_lua(code)
140 local func = assert(load(tcode, "", nil, env))()
141 defs[name] = fixup_dump(string.dump(func, true), fixup)
142 defs[#defs+1] = name
143 end
144 return defs
145end
146
147local function gen_header(defs)
148 local t = {}
149 local function w(x) t[#t+1] = x end
150 w("/* This is a generated file. DO NOT EDIT! */\n\n")
151 w("static const int libbc_endian = ") w(isbe and 1 or 0) w(";\n\n")
152 local s = ""
153 for _,name in ipairs(defs) do
154 s = s .. defs[name]
155 end
156 w("static const uint8_t libbc_code[] = {\n")
157 local n = 0
158 for i=1,#s do
159 local x = string.byte(s, i)
160 w(x); w(",")
161 n = n + (x < 10 and 2 or (x < 100 and 3 or 4))
162 if n >= 75 then n = 0; w("\n") end
163 end
164 w("0\n};\n\n")
165 w("static const struct { const char *name; int ofs; } libbc_map[] = {\n")
166 local m = 0
167 for _,name in ipairs(defs) do
168 w('{"'); w(name); w('",'); w(m) w('},\n')
169 m = m + #defs[name]
170 end
171 w("{NULL,"); w(m); w("}\n};\n\n")
172 return table.concat(t)
173end
174
175local function write_file(name, data)
176 if name == "-" then
177 assert(io.write(data))
178 assert(io.flush())
179 else
180 local fp = io.open(name)
181 if fp then
182 local old = fp:read("*a")
183 fp:close()
184 if data == old then return end
185 end
186 fp = assert(io.open(name, "w"))
187 assert(fp:write(data))
188 assert(fp:close())
189 end
190end
191
192local outfile = parse_arg(arg)
193local src = read_files(arg)
194local defs = find_defs(src)
195local hdr = gen_header(defs)
196write_file(outfile, hdr)
197
diff --git a/src/jit/bc.lua b/src/jit/bc.lua
index 7ca4c61c..45ba40e2 100644
--- a/src/jit/bc.lua
+++ b/src/jit/bc.lua
@@ -41,7 +41,7 @@
41 41
42-- Cache some library functions and objects. 42-- Cache some library functions and objects.
43local jit = require("jit") 43local jit = require("jit")
44assert(jit.version_num == 20005, "LuaJIT core/library version mismatch") 44assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
45local jutil = require("jit.util") 45local jutil = require("jit.util")
46local vmdef = require("jit.vmdef") 46local vmdef = require("jit.vmdef")
47local bit = require("bit") 47local bit = require("bit")
@@ -179,13 +179,12 @@ local function bcliston(outfile)
179end 179end
180 180
181-- Public module functions. 181-- Public module functions.
182module(...) 182return {
183 183 line = bcline,
184line = bcline 184 dump = bcdump,
185dump = bcdump 185 targets = bctargets,
186targets = bctargets 186 on = bcliston,
187 187 off = bclistoff,
188on = bcliston 188 start = bcliston -- For -j command line option.
189off = bclistoff 189}
190start = bcliston -- For -j command line option.
191 190
diff --git a/src/jit/bcsave.lua b/src/jit/bcsave.lua
index 58351c16..42d7240b 100644
--- a/src/jit/bcsave.lua
+++ b/src/jit/bcsave.lua
@@ -11,12 +11,16 @@
11------------------------------------------------------------------------------ 11------------------------------------------------------------------------------
12 12
13local jit = require("jit") 13local jit = require("jit")
14assert(jit.version_num == 20005, "LuaJIT core/library version mismatch") 14assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
15local bit = require("bit") 15local bit = require("bit")
16 16
17-- Symbol name prefix for LuaJIT bytecode. 17-- Symbol name prefix for LuaJIT bytecode.
18local LJBC_PREFIX = "luaJIT_BC_" 18local LJBC_PREFIX = "luaJIT_BC_"
19 19
20local type, assert = type, assert
21local format = string.format
22local tremove, tconcat = table.remove, table.concat
23
20------------------------------------------------------------------------------ 24------------------------------------------------------------------------------
21 25
22local function usage() 26local function usage()
@@ -63,8 +67,18 @@ local map_type = {
63} 67}
64 68
65local map_arch = { 69local map_arch = {
66 x86 = true, x64 = true, arm = true, ppc = true, ppcspe = true, 70 x86 = { e = "le", b = 32, m = 3, p = 0x14c, },
67 mips = true, mipsel = true, 71 x64 = { e = "le", b = 64, m = 62, p = 0x8664, },
72 arm = { e = "le", b = 32, m = 40, p = 0x1c0, },
73 arm64 = { e = "le", b = 64, m = 183, p = 0xaa64, },
74 arm64be = { e = "be", b = 64, m = 183, },
75 ppc = { e = "be", b = 32, m = 20, },
76 mips = { e = "be", b = 32, m = 8, f = 0x50001006, },
77 mipsel = { e = "le", b = 32, m = 8, f = 0x50001006, },
78 mips64 = { e = "be", b = 64, m = 8, f = 0x80000007, },
79 mips64el = { e = "le", b = 64, m = 8, f = 0x80000007, },
80 mips64r6 = { e = "be", b = 64, m = 8, f = 0xa0000407, },
81 mips64r6el = { e = "le", b = 64, m = 8, f = 0xa0000407, },
68} 82}
69 83
70local map_os = { 84local map_os = {
@@ -73,33 +87,33 @@ local map_os = {
73} 87}
74 88
75local function checkarg(str, map, err) 89local function checkarg(str, map, err)
76 str = string.lower(str) 90 str = str:lower()
77 local s = check(map[str], "unknown ", err) 91 local s = check(map[str], "unknown ", err)
78 return s == true and str or s 92 return type(s) == "string" and s or str
79end 93end
80 94
81local function detecttype(str) 95local function detecttype(str)
82 local ext = string.match(string.lower(str), "%.(%a+)$") 96 local ext = str:lower():match("%.(%a+)$")
83 return map_type[ext] or "raw" 97 return map_type[ext] or "raw"
84end 98end
85 99
86local function checkmodname(str) 100local function checkmodname(str)
87 check(string.match(str, "^[%w_.%-]+$"), "bad module name") 101 check(str:match("^[%w_.%-]+$"), "bad module name")
88 return string.gsub(str, "[%.%-]", "_") 102 return str:gsub("[%.%-]", "_")
89end 103end
90 104
91local function detectmodname(str) 105local function detectmodname(str)
92 if type(str) == "string" then 106 if type(str) == "string" then
93 local tail = string.match(str, "[^/\\]+$") 107 local tail = str:match("[^/\\]+$")
94 if tail then str = tail end 108 if tail then str = tail end
95 local head = string.match(str, "^(.*)%.[^.]*$") 109 local head = str:match("^(.*)%.[^.]*$")
96 if head then str = head end 110 if head then str = head end
97 str = string.match(str, "^[%w_.%-]+") 111 str = str:match("^[%w_.%-]+")
98 else 112 else
99 str = nil 113 str = nil
100 end 114 end
101 check(str, "cannot derive module name, use -n name") 115 check(str, "cannot derive module name, use -n name")
102 return string.gsub(str, "[%.%-]", "_") 116 return str:gsub("[%.%-]", "_")
103end 117end
104 118
105------------------------------------------------------------------------------ 119------------------------------------------------------------------------------
@@ -118,19 +132,19 @@ end
118local function bcsave_c(ctx, output, s) 132local function bcsave_c(ctx, output, s)
119 local fp = savefile(output, "w") 133 local fp = savefile(output, "w")
120 if ctx.type == "c" then 134 if ctx.type == "c" then
121 fp:write(string.format([[ 135 fp:write(format([[
122#ifdef __cplusplus 136#ifdef __cplusplus
123extern "C" 137extern "C"
124#endif 138#endif
125#ifdef _WIN32 139#ifdef _WIN32
126__declspec(dllexport) 140__declspec(dllexport)
127#endif 141#endif
128const char %s%s[] = { 142const unsigned char %s%s[] = {
129]], LJBC_PREFIX, ctx.modname)) 143]], LJBC_PREFIX, ctx.modname))
130 else 144 else
131 fp:write(string.format([[ 145 fp:write(format([[
132#define %s%s_SIZE %d 146#define %s%s_SIZE %d
133static const char %s%s[] = { 147static const unsigned char %s%s[] = {
134]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname)) 148]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname))
135 end 149 end
136 local t, n, m = {}, 0, 0 150 local t, n, m = {}, 0, 0
@@ -138,13 +152,13 @@ static const char %s%s[] = {
138 local b = tostring(string.byte(s, i)) 152 local b = tostring(string.byte(s, i))
139 m = m + #b + 1 153 m = m + #b + 1
140 if m > 78 then 154 if m > 78 then
141 fp:write(table.concat(t, ",", 1, n), ",\n") 155 fp:write(tconcat(t, ",", 1, n), ",\n")
142 n, m = 0, #b + 1 156 n, m = 0, #b + 1
143 end 157 end
144 n = n + 1 158 n = n + 1
145 t[n] = b 159 t[n] = b
146 end 160 end
147 bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n") 161 bcsave_tail(fp, output, tconcat(t, ",", 1, n).."\n};\n")
148end 162end
149 163
150local function bcsave_elfobj(ctx, output, s, ffi) 164local function bcsave_elfobj(ctx, output, s, ffi)
@@ -199,12 +213,8 @@ typedef struct {
199} ELF64obj; 213} ELF64obj;
200]] 214]]
201 local symname = LJBC_PREFIX..ctx.modname 215 local symname = LJBC_PREFIX..ctx.modname
202 local is64, isbe = false, false 216 local ai = assert(map_arch[ctx.arch])
203 if ctx.arch == "x64" then 217 local is64, isbe = ai.b == 64, ai.e == "be"
204 is64 = true
205 elseif ctx.arch == "ppc" or ctx.arch == "ppcspe" or ctx.arch == "mips" then
206 isbe = true
207 end
208 218
209 -- Handle different host/target endianess. 219 -- Handle different host/target endianess.
210 local function f32(x) return x end 220 local function f32(x) return x end
@@ -237,10 +247,8 @@ typedef struct {
237 hdr.eendian = isbe and 2 or 1 247 hdr.eendian = isbe and 2 or 1
238 hdr.eversion = 1 248 hdr.eversion = 1
239 hdr.type = f16(1) 249 hdr.type = f16(1)
240 hdr.machine = f16(({ x86=3, x64=62, arm=40, ppc=20, ppcspe=20, mips=8, mipsel=8 })[ctx.arch]) 250 hdr.machine = f16(ai.m)
241 if ctx.arch == "mips" or ctx.arch == "mipsel" then 251 hdr.flags = f32(ai.f or 0)
242 hdr.flags = f32(0x50001006)
243 end
244 hdr.version = f32(1) 252 hdr.version = f32(1)
245 hdr.shofs = fofs(ffi.offsetof(o, "sect")) 253 hdr.shofs = fofs(ffi.offsetof(o, "sect"))
246 hdr.ehsize = f16(ffi.sizeof(hdr)) 254 hdr.ehsize = f16(ffi.sizeof(hdr))
@@ -336,12 +344,8 @@ typedef struct {
336} PEobj; 344} PEobj;
337]] 345]]
338 local symname = LJBC_PREFIX..ctx.modname 346 local symname = LJBC_PREFIX..ctx.modname
339 local is64 = false 347 local ai = assert(map_arch[ctx.arch])
340 if ctx.arch == "x86" then 348 local is64 = ai.b == 64
341 symname = "_"..symname
342 elseif ctx.arch == "x64" then
343 is64 = true
344 end
345 local symexport = " /EXPORT:"..symname..",DATA " 349 local symexport = " /EXPORT:"..symname..",DATA "
346 350
347 -- The file format is always little-endian. Swap if the host is big-endian. 351 -- The file format is always little-endian. Swap if the host is big-endian.
@@ -355,7 +359,7 @@ typedef struct {
355 -- Create PE object and fill in header. 359 -- Create PE object and fill in header.
356 local o = ffi.new("PEobj") 360 local o = ffi.new("PEobj")
357 local hdr = o.hdr 361 local hdr = o.hdr
358 hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2, mips=0x366, mipsel=0x366 })[ctx.arch]) 362 hdr.arch = f16(assert(ai.p))
359 hdr.nsects = f16(2) 363 hdr.nsects = f16(2)
360 hdr.symtabofs = f32(ffi.offsetof(o, "sym0")) 364 hdr.symtabofs = f32(ffi.offsetof(o, "sym0"))
361 hdr.nsyms = f32(6) 365 hdr.nsyms = f32(6)
@@ -477,13 +481,13 @@ typedef struct {
477} mach_obj_64; 481} mach_obj_64;
478typedef struct { 482typedef struct {
479 mach_fat_header fat; 483 mach_fat_header fat;
480 mach_fat_arch fat_arch[4]; 484 mach_fat_arch fat_arch[2];
481 struct { 485 struct {
482 mach_header hdr; 486 mach_header hdr;
483 mach_segment_command seg; 487 mach_segment_command seg;
484 mach_section sec; 488 mach_section sec;
485 mach_symtab_command sym; 489 mach_symtab_command sym;
486 } arch[4]; 490 } arch[2];
487 mach_nlist sym_entry; 491 mach_nlist sym_entry;
488 uint8_t space[4096]; 492 uint8_t space[4096];
489} mach_fat_obj; 493} mach_fat_obj;
@@ -494,6 +498,8 @@ typedef struct {
494 is64, align, mobj = true, 8, "mach_obj_64" 498 is64, align, mobj = true, 8, "mach_obj_64"
495 elseif ctx.arch == "arm" then 499 elseif ctx.arch == "arm" then
496 isfat, mobj = true, "mach_fat_obj" 500 isfat, mobj = true, "mach_fat_obj"
501 elseif ctx.arch == "arm64" then
502 is64, align, isfat, mobj = true, 8, true, "mach_fat_obj"
497 else 503 else
498 check(ctx.arch == "x86", "unsupported architecture for OSX") 504 check(ctx.arch == "x86", "unsupported architecture for OSX")
499 end 505 end
@@ -503,8 +509,8 @@ typedef struct {
503 -- Create Mach-O object and fill in header. 509 -- Create Mach-O object and fill in header.
504 local o = ffi.new(mobj) 510 local o = ffi.new(mobj)
505 local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align) 511 local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align)
506 local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12,12,12} })[ctx.arch] 512 local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12}, arm64={0x01000007,0x0100000c} })[ctx.arch]
507 local cpusubtype = ({ x86={3}, x64={3}, arm={3,6,9,11} })[ctx.arch] 513 local cpusubtype = ({ x86={3}, x64={3}, arm={3,9}, arm64={3,0} })[ctx.arch]
508 if isfat then 514 if isfat then
509 o.fat.magic = be32(0xcafebabe) 515 o.fat.magic = be32(0xcafebabe)
510 o.fat.nfat_arch = be32(#cpusubtype) 516 o.fat.nfat_arch = be32(#cpusubtype)
@@ -603,16 +609,16 @@ local function docmd(...)
603 local n = 1 609 local n = 1
604 local list = false 610 local list = false
605 local ctx = { 611 local ctx = {
606 strip = true, arch = jit.arch, os = string.lower(jit.os), 612 strip = true, arch = jit.arch, os = jit.os:lower(),
607 type = false, modname = false, 613 type = false, modname = false,
608 } 614 }
609 while n <= #arg do 615 while n <= #arg do
610 local a = arg[n] 616 local a = arg[n]
611 if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then 617 if type(a) == "string" and a:sub(1, 1) == "-" and a ~= "-" then
612 table.remove(arg, n) 618 tremove(arg, n)
613 if a == "--" then break end 619 if a == "--" then break end
614 for m=2,#a do 620 for m=2,#a do
615 local opt = string.sub(a, m, m) 621 local opt = a:sub(m, m)
616 if opt == "l" then 622 if opt == "l" then
617 list = true 623 list = true
618 elseif opt == "s" then 624 elseif opt == "s" then
@@ -625,13 +631,13 @@ local function docmd(...)
625 if n ~= 1 then usage() end 631 if n ~= 1 then usage() end
626 arg[1] = check(loadstring(arg[1])) 632 arg[1] = check(loadstring(arg[1]))
627 elseif opt == "n" then 633 elseif opt == "n" then
628 ctx.modname = checkmodname(table.remove(arg, n)) 634 ctx.modname = checkmodname(tremove(arg, n))
629 elseif opt == "t" then 635 elseif opt == "t" then
630 ctx.type = checkarg(table.remove(arg, n), map_type, "file type") 636 ctx.type = checkarg(tremove(arg, n), map_type, "file type")
631 elseif opt == "a" then 637 elseif opt == "a" then
632 ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture") 638 ctx.arch = checkarg(tremove(arg, n), map_arch, "architecture")
633 elseif opt == "o" then 639 elseif opt == "o" then
634 ctx.os = checkarg(table.remove(arg, n), map_os, "OS name") 640 ctx.os = checkarg(tremove(arg, n), map_os, "OS name")
635 else 641 else
636 usage() 642 usage()
637 end 643 end
@@ -653,7 +659,7 @@ end
653------------------------------------------------------------------------------ 659------------------------------------------------------------------------------
654 660
655-- Public module functions. 661-- Public module functions.
656module(...) 662return {
657 663 start = docmd -- Process -b command line option.
658start = docmd -- Process -b command line option. 664}
659 665
diff --git a/src/jit/dis_arm.lua b/src/jit/dis_arm.lua
index 152d91bb..cafd2f74 100644
--- a/src/jit/dis_arm.lua
+++ b/src/jit/dis_arm.lua
@@ -658,7 +658,7 @@ local function disass_block(ctx, ofs, len)
658end 658end
659 659
660-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). 660-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
661local function create_(code, addr, out) 661local function create(code, addr, out)
662 local ctx = {} 662 local ctx = {}
663 ctx.code = code 663 ctx.code = code
664 ctx.addr = addr or 0 664 ctx.addr = addr or 0
@@ -670,20 +670,20 @@ local function create_(code, addr, out)
670end 670end
671 671
672-- Simple API: disassemble code (a string) at address and output via out. 672-- Simple API: disassemble code (a string) at address and output via out.
673local function disass_(code, addr, out) 673local function disass(code, addr, out)
674 create_(code, addr, out):disass() 674 create(code, addr, out):disass()
675end 675end
676 676
677-- Return register name for RID. 677-- Return register name for RID.
678local function regname_(r) 678local function regname(r)
679 if r < 16 then return map_gpr[r] end 679 if r < 16 then return map_gpr[r] end
680 return "d"..(r-16) 680 return "d"..(r-16)
681end 681end
682 682
683-- Public module functions. 683-- Public module functions.
684module(...) 684return {
685 685 create = create,
686create = create_ 686 disass = disass,
687disass = disass_ 687 regname = regname
688regname = regname_ 688}
689 689
diff --git a/src/jit/dis_arm64.lua b/src/jit/dis_arm64.lua
new file mode 100644
index 00000000..d1596ebc
--- /dev/null
+++ b/src/jit/dis_arm64.lua
@@ -0,0 +1,1216 @@
1----------------------------------------------------------------------------
2-- LuaJIT ARM64 disassembler module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6--
7-- Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
8-- Sponsored by Cisco Systems, Inc.
9----------------------------------------------------------------------------
10-- This is a helper module used by the LuaJIT machine code dumper module.
11--
12-- It disassembles most user-mode AArch64 instructions.
13-- NYI: Advanced SIMD and VFP instructions.
14------------------------------------------------------------------------------
15
16local type = type
17local sub, byte, format = string.sub, string.byte, string.format
18local match, gmatch, gsub = string.match, string.gmatch, string.gsub
19local concat = table.concat
20local bit = require("bit")
21local band, bor, bxor, tohex = bit.band, bit.bor, bit.bxor, bit.tohex
22local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
23local ror = bit.ror
24
25------------------------------------------------------------------------------
26-- Opcode maps
27------------------------------------------------------------------------------
28
29local map_adr = { -- PC-relative addressing.
30 shift = 31, mask = 1,
31 [0] = "adrDBx", "adrpDBx"
32}
33
34local map_addsubi = { -- Add/subtract immediate.
35 shift = 29, mask = 3,
36 [0] = "add|movDNIg", "adds|cmnD0NIg", "subDNIg", "subs|cmpD0NIg",
37}
38
39local map_logi = { -- Logical immediate.
40 shift = 31, mask = 1,
41 [0] = {
42 shift = 22, mask = 1,
43 [0] = {
44 shift = 29, mask = 3,
45 [0] = "andDNig", "orr|movDN0ig", "eorDNig", "ands|tstD0Nig"
46 },
47 false -- unallocated
48 },
49 {
50 shift = 29, mask = 3,
51 [0] = "andDNig", "orr|movDN0ig", "eorDNig", "ands|tstD0Nig"
52 }
53}
54
55local map_movwi = { -- Move wide immediate.
56 shift = 31, mask = 1,
57 [0] = {
58 shift = 22, mask = 1,
59 [0] = {
60 shift = 29, mask = 3,
61 [0] = "movnDWRg", false, "movz|movDYRg", "movkDWRg"
62 }, false -- unallocated
63 },
64 {
65 shift = 29, mask = 3,
66 [0] = "movnDWRg", false, "movz|movDYRg", "movkDWRg"
67 },
68}
69
70local map_bitf = { -- Bitfield.
71 shift = 31, mask = 1,
72 [0] = {
73 shift = 22, mask = 1,
74 [0] = {
75 shift = 29, mask = 3,
76 [0] = "sbfm|sbfiz|sbfx|asr|sxtw|sxth|sxtbDN12w",
77 "bfm|bfi|bfxilDN13w",
78 "ubfm|ubfiz|ubfx|lsr|lsl|uxth|uxtbDN12w"
79 }
80 },
81 {
82 shift = 22, mask = 1,
83 {
84 shift = 29, mask = 3,
85 [0] = "sbfm|sbfiz|sbfx|asr|sxtw|sxth|sxtbDN12x",
86 "bfm|bfi|bfxilDN13x",
87 "ubfm|ubfiz|ubfx|lsr|lsl|uxth|uxtbDN12x"
88 }
89 }
90}
91
92local map_datai = { -- Data processing - immediate.
93 shift = 23, mask = 7,
94 [0] = map_adr, map_adr, map_addsubi, false,
95 map_logi, map_movwi, map_bitf,
96 {
97 shift = 15, mask = 0x1c0c1,
98 [0] = "extr|rorDNM4w", [0x10080] = "extr|rorDNM4x",
99 [0x10081] = "extr|rorDNM4x"
100 }
101}
102
103local map_logsr = { -- Logical, shifted register.
104 shift = 31, mask = 1,
105 [0] = {
106 shift = 15, mask = 1,
107 [0] = {
108 shift = 29, mask = 3,
109 [0] = {
110 shift = 21, mask = 7,
111 [0] = "andDNMSg", "bicDNMSg", "andDNMSg", "bicDNMSg",
112 "andDNMSg", "bicDNMSg", "andDNMg", "bicDNMg"
113 },
114 {
115 shift = 21, mask = 7,
116 [0] ="orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0MSg", "orn|mvnDN0MSg",
117 "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0Mg", "orn|mvnDN0Mg"
118 },
119 {
120 shift = 21, mask = 7,
121 [0] = "eorDNMSg", "eonDNMSg", "eorDNMSg", "eonDNMSg",
122 "eorDNMSg", "eonDNMSg", "eorDNMg", "eonDNMg"
123 },
124 {
125 shift = 21, mask = 7,
126 [0] = "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMSg", "bicsDNMSg",
127 "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMg", "bicsDNMg"
128 }
129 },
130 false -- unallocated
131 },
132 {
133 shift = 29, mask = 3,
134 [0] = {
135 shift = 21, mask = 7,
136 [0] = "andDNMSg", "bicDNMSg", "andDNMSg", "bicDNMSg",
137 "andDNMSg", "bicDNMSg", "andDNMg", "bicDNMg"
138 },
139 {
140 shift = 21, mask = 7,
141 [0] = "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0MSg", "orn|mvnDN0MSg",
142 "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0Mg", "orn|mvnDN0Mg"
143 },
144 {
145 shift = 21, mask = 7,
146 [0] = "eorDNMSg", "eonDNMSg", "eorDNMSg", "eonDNMSg",
147 "eorDNMSg", "eonDNMSg", "eorDNMg", "eonDNMg"
148 },
149 {
150 shift = 21, mask = 7,
151 [0] = "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMSg", "bicsDNMSg",
152 "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMg", "bicsDNMg"
153 }
154 }
155}
156
157local map_assh = {
158 shift = 31, mask = 1,
159 [0] = {
160 shift = 15, mask = 1,
161 [0] = {
162 shift = 29, mask = 3,
163 [0] = {
164 shift = 22, mask = 3,
165 [0] = "addDNMSg", "addDNMSg", "addDNMSg", "addDNMg"
166 },
167 {
168 shift = 22, mask = 3,
169 [0] = "adds|cmnD0NMSg", "adds|cmnD0NMSg",
170 "adds|cmnD0NMSg", "adds|cmnD0NMg"
171 },
172 {
173 shift = 22, mask = 3,
174 [0] = "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0Mg"
175 },
176 {
177 shift = 22, mask = 3,
178 [0] = "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0MzSg",
179 "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0Mzg"
180 },
181 },
182 false -- unallocated
183 },
184 {
185 shift = 29, mask = 3,
186 [0] = {
187 shift = 22, mask = 3,
188 [0] = "addDNMSg", "addDNMSg", "addDNMSg", "addDNMg"
189 },
190 {
191 shift = 22, mask = 3,
192 [0] = "adds|cmnD0NMSg", "adds|cmnD0NMSg", "adds|cmnD0NMSg",
193 "adds|cmnD0NMg"
194 },
195 {
196 shift = 22, mask = 3,
197 [0] = "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0Mg"
198 },
199 {
200 shift = 22, mask = 3,
201 [0] = "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0MzSg",
202 "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0Mzg"
203 }
204 }
205}
206
207local map_addsubsh = { -- Add/subtract, shifted register.
208 shift = 22, mask = 3,
209 [0] = map_assh, map_assh, map_assh
210}
211
212local map_addsubex = { -- Add/subtract, extended register.
213 shift = 22, mask = 3,
214 [0] = {
215 shift = 29, mask = 3,
216 [0] = "addDNMXg", "adds|cmnD0NMXg", "subDNMXg", "subs|cmpD0NMzXg",
217 }
218}
219
220local map_addsubc = { -- Add/subtract, with carry.
221 shift = 10, mask = 63,
222 [0] = {
223 shift = 29, mask = 3,
224 [0] = "adcDNMg", "adcsDNMg", "sbc|ngcDN0Mg", "sbcs|ngcsDN0Mg",
225 }
226}
227
228local map_ccomp = {
229 shift = 4, mask = 1,
230 [0] = {
231 shift = 10, mask = 3,
232 [0] = { -- Conditional compare register.
233 shift = 29, mask = 3,
234 "ccmnNMVCg", false, "ccmpNMVCg",
235 },
236 [2] = { -- Conditional compare immediate.
237 shift = 29, mask = 3,
238 "ccmnN5VCg", false, "ccmpN5VCg",
239 }
240 }
241}
242
243local map_csel = { -- Conditional select.
244 shift = 11, mask = 1,
245 [0] = {
246 shift = 10, mask = 1,
247 [0] = {
248 shift = 29, mask = 3,
249 [0] = "cselDNMzCg", false, "csinv|cinv|csetmDNMcg", false,
250 },
251 {
252 shift = 29, mask = 3,
253 [0] = "csinc|cinc|csetDNMcg", false, "csneg|cnegDNMcg", false,
254 }
255 }
256}
257
258local map_data1s = { -- Data processing, 1 source.
259 shift = 29, mask = 1,
260 [0] = {
261 shift = 31, mask = 1,
262 [0] = {
263 shift = 10, mask = 0x7ff,
264 [0] = "rbitDNg", "rev16DNg", "revDNw", false, "clzDNg", "clsDNg"
265 },
266 {
267 shift = 10, mask = 0x7ff,
268 [0] = "rbitDNg", "rev16DNg", "rev32DNx", "revDNx", "clzDNg", "clsDNg"
269 }
270 }
271}
272
273local map_data2s = { -- Data processing, 2 sources.
274 shift = 29, mask = 1,
275 [0] = {
276 shift = 10, mask = 63,
277 false, "udivDNMg", "sdivDNMg", false, false, false, false, "lslDNMg",
278 "lsrDNMg", "asrDNMg", "rorDNMg"
279 }
280}
281
282local map_data3s = { -- Data processing, 3 sources.
283 shift = 29, mask = 7,
284 [0] = {
285 shift = 21, mask = 7,
286 [0] = {
287 shift = 15, mask = 1,
288 [0] = "madd|mulDNMA0g", "msub|mnegDNMA0g"
289 }
290 }, false, false, false,
291 {
292 shift = 15, mask = 1,
293 [0] = {
294 shift = 21, mask = 7,
295 [0] = "madd|mulDNMA0g", "smaddl|smullDxNMwA0x", "smulhDNMx", false,
296 false, "umaddl|umullDxNMwA0x", "umulhDNMx"
297 },
298 {
299 shift = 21, mask = 7,
300 [0] = "msub|mnegDNMA0g", "smsubl|smneglDxNMwA0x", false, false,
301 false, "umsubl|umneglDxNMwA0x"
302 }
303 }
304}
305
306local map_datar = { -- Data processing, register.
307 shift = 28, mask = 1,
308 [0] = {
309 shift = 24, mask = 1,
310 [0] = map_logsr,
311 {
312 shift = 21, mask = 1,
313 [0] = map_addsubsh, map_addsubex
314 }
315 },
316 {
317 shift = 21, mask = 15,
318 [0] = map_addsubc, false, map_ccomp, false, map_csel, false,
319 {
320 shift = 30, mask = 1,
321 [0] = map_data2s, map_data1s
322 },
323 false, map_data3s, map_data3s, map_data3s, map_data3s, map_data3s,
324 map_data3s, map_data3s, map_data3s
325 }
326}
327
328local map_lrl = { -- Load register, literal.
329 shift = 26, mask = 1,
330 [0] = {
331 shift = 30, mask = 3,
332 [0] = "ldrDwB", "ldrDxB", "ldrswDxB"
333 },
334 {
335 shift = 30, mask = 3,
336 [0] = "ldrDsB", "ldrDdB"
337 }
338}
339
340local map_lsriind = { -- Load/store register, immediate pre/post-indexed.
341 shift = 30, mask = 3,
342 [0] = {
343 shift = 26, mask = 1,
344 [0] = {
345 shift = 22, mask = 3,
346 [0] = "strbDwzL", "ldrbDwzL", "ldrsbDxzL", "ldrsbDwzL"
347 }
348 },
349 {
350 shift = 26, mask = 1,
351 [0] = {
352 shift = 22, mask = 3,
353 [0] = "strhDwzL", "ldrhDwzL", "ldrshDxzL", "ldrshDwzL"
354 }
355 },
356 {
357 shift = 26, mask = 1,
358 [0] = {
359 shift = 22, mask = 3,
360 [0] = "strDwzL", "ldrDwzL", "ldrswDxzL"
361 },
362 {
363 shift = 22, mask = 3,
364 [0] = "strDszL", "ldrDszL"
365 }
366 },
367 {
368 shift = 26, mask = 1,
369 [0] = {
370 shift = 22, mask = 3,
371 [0] = "strDxzL", "ldrDxzL"
372 },
373 {
374 shift = 22, mask = 3,
375 [0] = "strDdzL", "ldrDdzL"
376 }
377 }
378}
379
380local map_lsriro = {
381 shift = 21, mask = 1,
382 [0] = { -- Load/store register immediate.
383 shift = 10, mask = 3,
384 [0] = { -- Unscaled immediate.
385 shift = 26, mask = 1,
386 [0] = {
387 shift = 30, mask = 3,
388 [0] = {
389 shift = 22, mask = 3,
390 [0] = "sturbDwK", "ldurbDwK"
391 },
392 {
393 shift = 22, mask = 3,
394 [0] = "sturhDwK", "ldurhDwK"
395 },
396 {
397 shift = 22, mask = 3,
398 [0] = "sturDwK", "ldurDwK"
399 },
400 {
401 shift = 22, mask = 3,
402 [0] = "sturDxK", "ldurDxK"
403 }
404 }
405 }, map_lsriind, false, map_lsriind
406 },
407 { -- Load/store register, register offset.
408 shift = 10, mask = 3,
409 [2] = {
410 shift = 26, mask = 1,
411 [0] = {
412 shift = 30, mask = 3,
413 [0] = {
414 shift = 22, mask = 3,
415 [0] = "strbDwO", "ldrbDwO", "ldrsbDxO", "ldrsbDwO"
416 },
417 {
418 shift = 22, mask = 3,
419 [0] = "strhDwO", "ldrhDwO", "ldrshDxO", "ldrshDwO"
420 },
421 {
422 shift = 22, mask = 3,
423 [0] = "strDwO", "ldrDwO", "ldrswDxO"
424 },
425 {
426 shift = 22, mask = 3,
427 [0] = "strDxO", "ldrDxO"
428 }
429 },
430 {
431 shift = 30, mask = 3,
432 [2] = {
433 shift = 22, mask = 3,
434 [0] = "strDsO", "ldrDsO"
435 },
436 [3] = {
437 shift = 22, mask = 3,
438 [0] = "strDdO", "ldrDdO"
439 }
440 }
441 }
442 }
443}
444
445local map_lsp = { -- Load/store register pair, offset.
446 shift = 22, mask = 1,
447 [0] = {
448 shift = 30, mask = 3,
449 [0] = {
450 shift = 26, mask = 1,
451 [0] = "stpDzAzwP", "stpDzAzsP",
452 },
453 {
454 shift = 26, mask = 1,
455 "stpDzAzdP"
456 },
457 {
458 shift = 26, mask = 1,
459 [0] = "stpDzAzxP"
460 }
461 },
462 {
463 shift = 30, mask = 3,
464 [0] = {
465 shift = 26, mask = 1,
466 [0] = "ldpDzAzwP", "ldpDzAzsP",
467 },
468 {
469 shift = 26, mask = 1,
470 [0] = "ldpswDAxP", "ldpDzAzdP"
471 },
472 {
473 shift = 26, mask = 1,
474 [0] = "ldpDzAzxP"
475 }
476 }
477}
478
479local map_ls = { -- Loads and stores.
480 shift = 24, mask = 0x31,
481 [0x10] = map_lrl, [0x30] = map_lsriro,
482 [0x20] = {
483 shift = 23, mask = 3,
484 map_lsp, map_lsp, map_lsp
485 },
486 [0x21] = {
487 shift = 23, mask = 3,
488 map_lsp, map_lsp, map_lsp
489 },
490 [0x31] = {
491 shift = 26, mask = 1,
492 [0] = {
493 shift = 30, mask = 3,
494 [0] = {
495 shift = 22, mask = 3,
496 [0] = "strbDwzU", "ldrbDwzU"
497 },
498 {
499 shift = 22, mask = 3,
500 [0] = "strhDwzU", "ldrhDwzU"
501 },
502 {
503 shift = 22, mask = 3,
504 [0] = "strDwzU", "ldrDwzU"
505 },
506 {
507 shift = 22, mask = 3,
508 [0] = "strDxzU", "ldrDxzU"
509 }
510 },
511 {
512 shift = 30, mask = 3,
513 [2] = {
514 shift = 22, mask = 3,
515 [0] = "strDszU", "ldrDszU"
516 },
517 [3] = {
518 shift = 22, mask = 3,
519 [0] = "strDdzU", "ldrDdzU"
520 }
521 }
522 },
523}
524
525local map_datafp = { -- Data processing, SIMD and FP.
526 shift = 28, mask = 7,
527 { -- 001
528 shift = 24, mask = 1,
529 [0] = {
530 shift = 21, mask = 1,
531 {
532 shift = 10, mask = 3,
533 [0] = {
534 shift = 12, mask = 1,
535 [0] = {
536 shift = 13, mask = 1,
537 [0] = {
538 shift = 14, mask = 1,
539 [0] = {
540 shift = 15, mask = 1,
541 [0] = { -- FP/int conversion.
542 shift = 31, mask = 1,
543 [0] = {
544 shift = 16, mask = 0xff,
545 [0x20] = "fcvtnsDwNs", [0x21] = "fcvtnuDwNs",
546 [0x22] = "scvtfDsNw", [0x23] = "ucvtfDsNw",
547 [0x24] = "fcvtasDwNs", [0x25] = "fcvtauDwNs",
548 [0x26] = "fmovDwNs", [0x27] = "fmovDsNw",
549 [0x28] = "fcvtpsDwNs", [0x29] = "fcvtpuDwNs",
550 [0x30] = "fcvtmsDwNs", [0x31] = "fcvtmuDwNs",
551 [0x38] = "fcvtzsDwNs", [0x39] = "fcvtzuDwNs",
552 [0x60] = "fcvtnsDwNd", [0x61] = "fcvtnuDwNd",
553 [0x62] = "scvtfDdNw", [0x63] = "ucvtfDdNw",
554 [0x64] = "fcvtasDwNd", [0x65] = "fcvtauDwNd",
555 [0x68] = "fcvtpsDwNd", [0x69] = "fcvtpuDwNd",
556 [0x70] = "fcvtmsDwNd", [0x71] = "fcvtmuDwNd",
557 [0x78] = "fcvtzsDwNd", [0x79] = "fcvtzuDwNd"
558 },
559 {
560 shift = 16, mask = 0xff,
561 [0x20] = "fcvtnsDxNs", [0x21] = "fcvtnuDxNs",
562 [0x22] = "scvtfDsNx", [0x23] = "ucvtfDsNx",
563 [0x24] = "fcvtasDxNs", [0x25] = "fcvtauDxNs",
564 [0x28] = "fcvtpsDxNs", [0x29] = "fcvtpuDxNs",
565 [0x30] = "fcvtmsDxNs", [0x31] = "fcvtmuDxNs",
566 [0x38] = "fcvtzsDxNs", [0x39] = "fcvtzuDxNs",
567 [0x60] = "fcvtnsDxNd", [0x61] = "fcvtnuDxNd",
568 [0x62] = "scvtfDdNx", [0x63] = "ucvtfDdNx",
569 [0x64] = "fcvtasDxNd", [0x65] = "fcvtauDxNd",
570 [0x66] = "fmovDxNd", [0x67] = "fmovDdNx",
571 [0x68] = "fcvtpsDxNd", [0x69] = "fcvtpuDxNd",
572 [0x70] = "fcvtmsDxNd", [0x71] = "fcvtmuDxNd",
573 [0x78] = "fcvtzsDxNd", [0x79] = "fcvtzuDxNd"
574 }
575 }
576 },
577 { -- FP data-processing, 1 source.
578 shift = 31, mask = 1,
579 [0] = {
580 shift = 22, mask = 3,
581 [0] = {
582 shift = 15, mask = 63,
583 [0] = "fmovDNf", "fabsDNf", "fnegDNf",
584 "fsqrtDNf", false, "fcvtDdNs", false, false,
585 "frintnDNf", "frintpDNf", "frintmDNf", "frintzDNf",
586 "frintaDNf", false, "frintxDNf", "frintiDNf",
587 },
588 {
589 shift = 15, mask = 63,
590 [0] = "fmovDNf", "fabsDNf", "fnegDNf",
591 "fsqrtDNf", "fcvtDsNd", false, false, false,
592 "frintnDNf", "frintpDNf", "frintmDNf", "frintzDNf",
593 "frintaDNf", false, "frintxDNf", "frintiDNf",
594 }
595 }
596 }
597 },
598 { -- FP compare.
599 shift = 31, mask = 1,
600 [0] = {
601 shift = 14, mask = 3,
602 [0] = {
603 shift = 23, mask = 1,
604 [0] = {
605 shift = 0, mask = 31,
606 [0] = "fcmpNMf", [8] = "fcmpNZf",
607 [16] = "fcmpeNMf", [24] = "fcmpeNZf",
608 }
609 }
610 }
611 }
612 },
613 { -- FP immediate.
614 shift = 31, mask = 1,
615 [0] = {
616 shift = 5, mask = 31,
617 [0] = {
618 shift = 23, mask = 1,
619 [0] = "fmovDFf"
620 }
621 }
622 }
623 },
624 { -- FP conditional compare.
625 shift = 31, mask = 1,
626 [0] = {
627 shift = 23, mask = 1,
628 [0] = {
629 shift = 4, mask = 1,
630 [0] = "fccmpNMVCf", "fccmpeNMVCf"
631 }
632 }
633 },
634 { -- FP data-processing, 2 sources.
635 shift = 31, mask = 1,
636 [0] = {
637 shift = 23, mask = 1,
638 [0] = {
639 shift = 12, mask = 15,
640 [0] = "fmulDNMf", "fdivDNMf", "faddDNMf", "fsubDNMf",
641 "fmaxDNMf", "fminDNMf", "fmaxnmDNMf", "fminnmDNMf",
642 "fnmulDNMf"
643 }
644 }
645 },
646 { -- FP conditional select.
647 shift = 31, mask = 1,
648 [0] = {
649 shift = 23, mask = 1,
650 [0] = "fcselDNMCf"
651 }
652 }
653 }
654 },
655 { -- FP data-processing, 3 sources.
656 shift = 31, mask = 1,
657 [0] = {
658 shift = 15, mask = 1,
659 [0] = {
660 shift = 21, mask = 5,
661 [0] = "fmaddDNMAf", "fnmaddDNMAf"
662 },
663 {
664 shift = 21, mask = 5,
665 [0] = "fmsubDNMAf", "fnmsubDNMAf"
666 }
667 }
668 }
669 }
670}
671
672local map_br = { -- Branches, exception generating and system instructions.
673 shift = 29, mask = 7,
674 [0] = "bB",
675 { -- Compare & branch, immediate.
676 shift = 24, mask = 3,
677 [0] = "cbzDBg", "cbnzDBg", "tbzDTBw", "tbnzDTBw"
678 },
679 { -- Conditional branch, immediate.
680 shift = 24, mask = 3,
681 [0] = {
682 shift = 4, mask = 1,
683 [0] = {
684 shift = 0, mask = 15,
685 [0] = "beqB", "bneB", "bhsB", "bloB", "bmiB", "bplB", "bvsB", "bvcB",
686 "bhiB", "blsB", "bgeB", "bltB", "bgtB", "bleB", "balB"
687 }
688 }
689 }, false, "blB",
690 { -- Compare & branch, immediate.
691 shift = 24, mask = 3,
692 [0] = "cbzDBg", "cbnzDBg", "tbzDTBx", "tbnzDTBx"
693 },
694 {
695 shift = 24, mask = 3,
696 [0] = { -- Exception generation.
697 shift = 0, mask = 0xe0001f,
698 [0x200000] = "brkW"
699 },
700 { -- System instructions.
701 shift = 0, mask = 0x3fffff,
702 [0x03201f] = "nop"
703 },
704 { -- Unconditional branch, register.
705 shift = 0, mask = 0xfffc1f,
706 [0x1f0000] = "brNx", [0x3f0000] = "blrNx",
707 [0x5f0000] = "retNx"
708 },
709 }
710}
711
712local map_init = {
713 shift = 25, mask = 15,
714 [0] = false, false, false, false, map_ls, map_datar, map_ls, map_datafp,
715 map_datai, map_datai, map_br, map_br, map_ls, map_datar, map_ls, map_datafp
716}
717
718------------------------------------------------------------------------------
719
720local map_regs = { x = {}, w = {}, d = {}, s = {} }
721
722for i=0,30 do
723 map_regs.x[i] = "x"..i
724 map_regs.w[i] = "w"..i
725 map_regs.d[i] = "d"..i
726 map_regs.s[i] = "s"..i
727end
728map_regs.x[31] = "sp"
729map_regs.w[31] = "wsp"
730map_regs.d[31] = "d31"
731map_regs.s[31] = "s31"
732
733local map_cond = {
734 [0] = "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
735 "hi", "ls", "ge", "lt", "gt", "le", "al",
736}
737
738local map_shift = { [0] = "lsl", "lsr", "asr", }
739
740local map_extend = {
741 [0] = "uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx",
742}
743
744------------------------------------------------------------------------------
745
746-- Output a nicely formatted line with an opcode and operands.
747local function putop(ctx, text, operands)
748 local pos = ctx.pos
749 local extra = ""
750 if ctx.rel then
751 local sym = ctx.symtab[ctx.rel]
752 if sym then
753 extra = "\t->"..sym
754 end
755 end
756 if ctx.hexdump > 0 then
757 ctx.out(format("%08x %s %-5s %s%s\n",
758 ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
759 else
760 ctx.out(format("%08x %-5s %s%s\n",
761 ctx.addr+pos, text, concat(operands, ", "), extra))
762 end
763 ctx.pos = pos + 4
764end
765
766-- Fallback for unknown opcodes.
767local function unknown(ctx)
768 return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
769end
770
771local function match_reg(p, pat, regnum)
772 return map_regs[match(pat, p.."%w-([xwds])")][regnum]
773end
774
775local function fmt_hex32(x)
776 if x < 0 then
777 return tohex(x)
778 else
779 return format("%x", x)
780 end
781end
782
783local imm13_rep = { 0x55555555, 0x11111111, 0x01010101, 0x00010001, 0x00000001 }
784
785local function decode_imm13(op)
786 local imms = band(rshift(op, 10), 63)
787 local immr = band(rshift(op, 16), 63)
788 if band(op, 0x00400000) == 0 then
789 local len = 5
790 if imms >= 56 then
791 if imms >= 60 then len = 1 else len = 2 end
792 elseif imms >= 48 then len = 3 elseif imms >= 32 then len = 4 end
793 local l = lshift(1, len)-1
794 local s = band(imms, l)
795 local r = band(immr, l)
796 local imm = ror(rshift(-1, 31-s), r)
797 if len ~= 5 then imm = band(imm, lshift(1, l)-1) + rshift(imm, 31-l) end
798 imm = imm * imm13_rep[len]
799 local ix = fmt_hex32(imm)
800 if rshift(op, 31) ~= 0 then
801 return ix..tohex(imm)
802 else
803 return ix
804 end
805 else
806 local lo, hi = -1, 0
807 if imms < 32 then lo = rshift(-1, 31-imms) else hi = rshift(-1, 63-imms) end
808 if immr ~= 0 then
809 lo, hi = ror(lo, immr), ror(hi, immr)
810 local x = immr == 32 and 0 or band(bxor(lo, hi), lshift(-1, 32-immr))
811 lo, hi = bxor(lo, x), bxor(hi, x)
812 if immr >= 32 then lo, hi = hi, lo end
813 end
814 if hi ~= 0 then
815 return fmt_hex32(hi)..tohex(lo)
816 else
817 return fmt_hex32(lo)
818 end
819 end
820end
821
822local function parse_immpc(op, name)
823 if name == "b" or name == "bl" then
824 return arshift(lshift(op, 6), 4)
825 elseif name == "adr" or name == "adrp" then
826 local immlo = band(rshift(op, 29), 3)
827 local immhi = lshift(arshift(lshift(op, 8), 13), 2)
828 return bor(immhi, immlo)
829 elseif name == "tbz" or name == "tbnz" then
830 return lshift(arshift(lshift(op, 13), 18), 2)
831 else
832 return lshift(arshift(lshift(op, 8), 13), 2)
833 end
834end
835
836local function parse_fpimm8(op)
837 local sign = band(op, 0x100000) == 0 and 1 or -1
838 local exp = bxor(rshift(arshift(lshift(op, 12), 5), 24), 0x80) - 131
839 local frac = 16+band(rshift(op, 13), 15)
840 return sign * frac * 2^exp
841end
842
843local function prefer_bfx(sf, uns, imms, immr)
844 if imms < immr or imms == 31 or imms == 63 then
845 return false
846 end
847 if immr == 0 then
848 if sf == 0 and (imms == 7 or imms == 15) then
849 return false
850 end
851 if sf ~= 0 and uns == 0 and (imms == 7 or imms == 15 or imms == 31) then
852 return false
853 end
854 end
855 return true
856end
857
858-- Disassemble a single instruction.
859local function disass_ins(ctx)
860 local pos = ctx.pos
861 local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
862 local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
863 local operands = {}
864 local suffix = ""
865 local last, name, pat
866 local map_reg
867 ctx.op = op
868 ctx.rel = nil
869 last = nil
870 local opat
871 opat = map_init[band(rshift(op, 25), 15)]
872 while type(opat) ~= "string" do
873 if not opat then return unknown(ctx) end
874 opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
875 end
876 name, pat = match(opat, "^([a-z0-9]*)(.*)")
877 local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
878 if altname then pat = pat2 end
879 if sub(pat, 1, 1) == "." then
880 local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)")
881 suffix = suffix..s2
882 pat = p2
883 end
884
885 local rt = match(pat, "[gf]")
886 if rt then
887 if rt == "g" then
888 map_reg = band(op, 0x80000000) ~= 0 and map_regs.x or map_regs.w
889 else
890 map_reg = band(op, 0x400000) ~= 0 and map_regs.d or map_regs.s
891 end
892 end
893
894 local second0, immr
895
896 for p in gmatch(pat, ".") do
897 local x = nil
898 if p == "D" then
899 local regnum = band(op, 31)
900 x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
901 elseif p == "N" then
902 local regnum = band(rshift(op, 5), 31)
903 x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
904 elseif p == "M" then
905 local regnum = band(rshift(op, 16), 31)
906 x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
907 elseif p == "A" then
908 local regnum = band(rshift(op, 10), 31)
909 x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
910 elseif p == "B" then
911 local addr = ctx.addr + pos + parse_immpc(op, name)
912 ctx.rel = addr
913 x = "0x"..tohex(addr)
914 elseif p == "T" then
915 x = bor(band(rshift(op, 26), 32), band(rshift(op, 19), 31))
916 elseif p == "V" then
917 x = band(op, 15)
918 elseif p == "C" then
919 x = map_cond[band(rshift(op, 12), 15)]
920 elseif p == "c" then
921 local rn = band(rshift(op, 5), 31)
922 local rm = band(rshift(op, 16), 31)
923 local cond = band(rshift(op, 12), 15)
924 local invc = bxor(cond, 1)
925 x = map_cond[cond]
926 if altname and cond ~= 14 and cond ~= 15 then
927 local a1, a2 = match(altname, "([^|]*)|(.*)")
928 if rn == rm then
929 local n = #operands
930 operands[n] = nil
931 x = map_cond[invc]
932 if rn ~= 31 then
933 if a1 then name = a1 else name = altname end
934 else
935 operands[n-1] = nil
936 name = a2
937 end
938 end
939 end
940 elseif p == "W" then
941 x = band(rshift(op, 5), 0xffff)
942 elseif p == "Y" then
943 x = band(rshift(op, 5), 0xffff)
944 local hw = band(rshift(op, 21), 3)
945 if altname and (hw == 0 or x ~= 0) then
946 name = altname
947 end
948 elseif p == "L" then
949 local rn = map_regs.x[band(rshift(op, 5), 31)]
950 local imm9 = arshift(lshift(op, 11), 23)
951 if band(op, 0x800) ~= 0 then
952 x = "["..rn..", #"..imm9.."]!"
953 else
954 x = "["..rn.."], #"..imm9
955 end
956 elseif p == "U" then
957 local rn = map_regs.x[band(rshift(op, 5), 31)]
958 local sz = band(rshift(op, 30), 3)
959 local imm12 = lshift(arshift(lshift(op, 10), 20), sz)
960 if imm12 ~= 0 then
961 x = "["..rn..", #"..imm12.."]"
962 else
963 x = "["..rn.."]"
964 end
965 elseif p == "K" then
966 local rn = map_regs.x[band(rshift(op, 5), 31)]
967 local imm9 = arshift(lshift(op, 11), 23)
968 if imm9 ~= 0 then
969 x = "["..rn..", #"..imm9.."]"
970 else
971 x = "["..rn.."]"
972 end
973 elseif p == "O" then
974 local rn, rm = map_regs.x[band(rshift(op, 5), 31)]
975 local m = band(rshift(op, 13), 1)
976 if m == 0 then
977 rm = map_regs.w[band(rshift(op, 16), 31)]
978 else
979 rm = map_regs.x[band(rshift(op, 16), 31)]
980 end
981 x = "["..rn..", "..rm
982 local opt = band(rshift(op, 13), 7)
983 local s = band(rshift(op, 12), 1)
984 local sz = band(rshift(op, 30), 3)
985 -- extension to be applied
986 if opt == 3 then
987 if s == 0 then x = x.."]"
988 else x = x..", lsl #"..sz.."]" end
989 elseif opt == 2 or opt == 6 or opt == 7 then
990 if s == 0 then x = x..", "..map_extend[opt].."]"
991 else x = x..", "..map_extend[opt].." #"..sz.."]" end
992 else
993 x = x.."]"
994 end
995 elseif p == "P" then
996 local opcv, sh = rshift(op, 26), 2
997 if opcv >= 0x2a then sh = 4 elseif opcv >= 0x1b then sh = 3 end
998 local imm7 = lshift(arshift(lshift(op, 10), 25), sh)
999 local rn = map_regs.x[band(rshift(op, 5), 31)]
1000 local ind = band(rshift(op, 23), 3)
1001 if ind == 1 then
1002 x = "["..rn.."], #"..imm7
1003 elseif ind == 2 then
1004 if imm7 == 0 then
1005 x = "["..rn.."]"
1006 else
1007 x = "["..rn..", #"..imm7.."]"
1008 end
1009 elseif ind == 3 then
1010 x = "["..rn..", #"..imm7.."]!"
1011 end
1012 elseif p == "I" then
1013 local shf = band(rshift(op, 22), 3)
1014 local imm12 = band(rshift(op, 10), 0x0fff)
1015 local rn, rd = band(rshift(op, 5), 31), band(op, 31)
1016 if altname == "mov" and shf == 0 and imm12 == 0 and (rn == 31 or rd == 31) then
1017 name = altname
1018 x = nil
1019 elseif shf == 0 then
1020 x = imm12
1021 elseif shf == 1 then
1022 x = imm12..", lsl #12"
1023 end
1024 elseif p == "i" then
1025 x = "#0x"..decode_imm13(op)
1026 elseif p == "1" then
1027 immr = band(rshift(op, 16), 63)
1028 x = immr
1029 elseif p == "2" then
1030 x = band(rshift(op, 10), 63)
1031 if altname then
1032 local a1, a2, a3, a4, a5, a6 =
1033 match(altname, "([^|]*)|([^|]*)|([^|]*)|([^|]*)|([^|]*)|(.*)")
1034 local sf = band(rshift(op, 26), 32)
1035 local uns = band(rshift(op, 30), 1)
1036 if prefer_bfx(sf, uns, x, immr) then
1037 name = a2
1038 x = x - immr + 1
1039 elseif immr == 0 and x == 7 then
1040 local n = #operands
1041 operands[n] = nil
1042 if sf ~= 0 then
1043 operands[n-1] = gsub(operands[n-1], "x", "w")
1044 end
1045 last = operands[n-1]
1046 name = a6
1047 x = nil
1048 elseif immr == 0 and x == 15 then
1049 local n = #operands
1050 operands[n] = nil
1051 if sf ~= 0 then
1052 operands[n-1] = gsub(operands[n-1], "x", "w")
1053 end
1054 last = operands[n-1]
1055 name = a5
1056 x = nil
1057 elseif x == 31 or x == 63 then
1058 if x == 31 and immr == 0 and name == "sbfm" then
1059 name = a4
1060 local n = #operands
1061 operands[n] = nil
1062 if sf ~= 0 then
1063 operands[n-1] = gsub(operands[n-1], "x", "w")
1064 end
1065 last = operands[n-1]
1066 else
1067 name = a3
1068 end
1069 x = nil
1070 elseif band(x, 31) ~= 31 and immr == x+1 and name == "ubfm" then
1071 name = a4
1072 last = "#"..(sf+32 - immr)
1073 operands[#operands] = last
1074 x = nil
1075 elseif x < immr then
1076 name = a1
1077 last = "#"..(sf+32 - immr)
1078 operands[#operands] = last
1079 x = x + 1
1080 end
1081 end
1082 elseif p == "3" then
1083 x = band(rshift(op, 10), 63)
1084 if altname then
1085 local a1, a2 = match(altname, "([^|]*)|(.*)")
1086 if x < immr then
1087 name = a1
1088 local sf = band(rshift(op, 26), 32)
1089 last = "#"..(sf+32 - immr)
1090 operands[#operands] = last
1091 x = x + 1
1092 elseif x >= immr then
1093 name = a2
1094 x = x - immr + 1
1095 end
1096 end
1097 elseif p == "4" then
1098 x = band(rshift(op, 10), 63)
1099 local rn = band(rshift(op, 5), 31)
1100 local rm = band(rshift(op, 16), 31)
1101 if altname and rn == rm then
1102 local n = #operands
1103 operands[n] = nil
1104 last = operands[n-1]
1105 name = altname
1106 end
1107 elseif p == "5" then
1108 x = band(rshift(op, 16), 31)
1109 elseif p == "S" then
1110 x = band(rshift(op, 10), 63)
1111 if x == 0 then x = nil
1112 else x = map_shift[band(rshift(op, 22), 3)].." #"..x end
1113 elseif p == "X" then
1114 local opt = band(rshift(op, 13), 7)
1115 -- Width specifier <R>.
1116 if opt ~= 3 and opt ~= 7 then
1117 last = map_regs.w[band(rshift(op, 16), 31)]
1118 operands[#operands] = last
1119 end
1120 x = band(rshift(op, 10), 7)
1121 -- Extension.
1122 if opt == 2 + band(rshift(op, 31), 1) and
1123 band(rshift(op, second0 and 5 or 0), 31) == 31 then
1124 if x == 0 then x = nil
1125 else x = "lsl #"..x end
1126 else
1127 if x == 0 then x = map_extend[band(rshift(op, 13), 7)]
1128 else x = map_extend[band(rshift(op, 13), 7)].." #"..x end
1129 end
1130 elseif p == "R" then
1131 x = band(rshift(op,21), 3)
1132 if x == 0 then x = nil
1133 else x = "lsl #"..x*16 end
1134 elseif p == "z" then
1135 local n = #operands
1136 if operands[n] == "sp" then operands[n] = "xzr"
1137 elseif operands[n] == "wsp" then operands[n] = "wzr"
1138 end
1139 elseif p == "Z" then
1140 x = 0
1141 elseif p == "F" then
1142 x = parse_fpimm8(op)
1143 elseif p == "g" or p == "f" or p == "x" or p == "w" or
1144 p == "d" or p == "s" then
1145 -- These are handled in D/N/M/A.
1146 elseif p == "0" then
1147 if last == "sp" or last == "wsp" then
1148 local n = #operands
1149 operands[n] = nil
1150 last = operands[n-1]
1151 if altname then
1152 local a1, a2 = match(altname, "([^|]*)|(.*)")
1153 if not a1 then
1154 name = altname
1155 elseif second0 then
1156 name, altname = a2, a1
1157 else
1158 name, altname = a1, a2
1159 end
1160 end
1161 end
1162 second0 = true
1163 else
1164 assert(false)
1165 end
1166 if x then
1167 last = x
1168 if type(x) == "number" then x = "#"..x end
1169 operands[#operands+1] = x
1170 end
1171 end
1172
1173 return putop(ctx, name..suffix, operands)
1174end
1175
1176------------------------------------------------------------------------------
1177
1178-- Disassemble a block of code.
1179local function disass_block(ctx, ofs, len)
1180 if not ofs then ofs = 0 end
1181 local stop = len and ofs+len or #ctx.code
1182 ctx.pos = ofs
1183 ctx.rel = nil
1184 while ctx.pos < stop do disass_ins(ctx) end
1185end
1186
1187-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
1188local function create(code, addr, out)
1189 local ctx = {}
1190 ctx.code = code
1191 ctx.addr = addr or 0
1192 ctx.out = out or io.write
1193 ctx.symtab = {}
1194 ctx.disass = disass_block
1195 ctx.hexdump = 8
1196 return ctx
1197end
1198
1199-- Simple API: disassemble code (a string) at address and output via out.
1200local function disass(code, addr, out)
1201 create(code, addr, out):disass()
1202end
1203
1204-- Return register name for RID.
1205local function regname(r)
1206 if r < 32 then return map_regs.x[r] end
1207 return map_regs.d[r-32]
1208end
1209
1210-- Public module functions.
1211return {
1212 create = create,
1213 disass = disass,
1214 regname = regname
1215}
1216
diff --git a/src/jit/dis_arm64be.lua b/src/jit/dis_arm64be.lua
new file mode 100644
index 00000000..9f4077af
--- /dev/null
+++ b/src/jit/dis_arm64be.lua
@@ -0,0 +1,12 @@
1----------------------------------------------------------------------------
2-- LuaJIT ARM64BE disassembler wrapper module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7-- ARM64 instructions are always little-endian. So just forward to the
8-- common ARM64 disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------
10
11return require((string.match(..., ".*%.") or "").."dis_arm64")
12
diff --git a/src/jit/dis_mips.lua b/src/jit/dis_mips.lua
index c720b537..791ac91d 100644
--- a/src/jit/dis_mips.lua
+++ b/src/jit/dis_mips.lua
@@ -19,13 +19,34 @@ local band, bor, tohex = bit.band, bit.bor, bit.tohex
19local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift 19local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
20 20
21------------------------------------------------------------------------------ 21------------------------------------------------------------------------------
22-- Primary and extended opcode maps 22-- Extended opcode maps common to all MIPS releases
23------------------------------------------------------------------------------ 23------------------------------------------------------------------------------
24 24
25local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
26local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", } 25local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", }
27local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", } 26local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", }
28 27
28local map_cop0 = {
29 shift = 25, mask = 1,
30 [0] = {
31 shift = 21, mask = 15,
32 [0] = "mfc0TDW", [4] = "mtc0TDW",
33 [10] = "rdpgprDT",
34 [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
35 [14] = "wrpgprDT",
36 }, {
37 shift = 0, mask = 63,
38 [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
39 [24] = "eret", [31] = "deret",
40 [32] = "wait",
41 },
42}
43
44------------------------------------------------------------------------------
45-- Primary and extended opcode maps for MIPS R1-R5
46------------------------------------------------------------------------------
47
48local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
49
29local map_special = { 50local map_special = {
30 shift = 0, mask = 63, 51 shift = 0, mask = 63,
31 [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" }, 52 [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
@@ -34,15 +55,17 @@ local map_special = {
34 "jrS", "jalrD1S", "movzDST", "movnDST", 55 "jrS", "jalrD1S", "movzDST", "movnDST",
35 "syscallY", "breakY", false, "sync", 56 "syscallY", "breakY", false, "sync",
36 "mfhiD", "mthiS", "mfloD", "mtloS", 57 "mfhiD", "mthiS", "mfloD", "mtloS",
37 false, false, false, false, 58 "dsllvDST", false, "dsrlvDST", "dsravDST",
38 "multST", "multuST", "divST", "divuST", 59 "multST", "multuST", "divST", "divuST",
39 false, false, false, false, 60 "dmultST", "dmultuST", "ddivST", "ddivuST",
40 "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T", 61 "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
41 "andDST", "orDST", "xorDST", "nor|notDST0", 62 "andDST", "or|moveDST0", "xorDST", "nor|notDST0",
42 false, false, "sltDST", "sltuDST", 63 false, false, "sltDST", "sltuDST",
43 false, false, false, false, 64 "daddDST", "dadduDST", "dsubDST", "dsubuDST",
44 "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ", 65 "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
45 "teqSTZ", false, "tneSTZ", 66 "teqSTZ", false, "tneSTZ", false,
67 "dsllDTA", false, "dsrlDTA", "dsraDTA",
68 "dsll32DTA", false, "dsrl32DTA", "dsra32DTA",
46} 69}
47 70
48local map_special2 = { 71local map_special2 = {
@@ -60,11 +83,17 @@ local map_bshfl = {
60 [24] = "sehDT", 83 [24] = "sehDT",
61} 84}
62 85
86local map_dbshfl = {
87 shift = 6, mask = 31,
88 [2] = "dsbhDT",
89 [5] = "dshdDT",
90}
91
63local map_special3 = { 92local map_special3 = {
64 shift = 0, mask = 63, 93 shift = 0, mask = 63,
65 [0] = "extTSAK", [4] = "insTSAL", 94 [0] = "extTSAK", [1] = "dextmTSAP", [3] = "dextTSAK",
66 [32] = map_bshfl, 95 [4] = "insTSAL", [6] = "dinsuTSEQ", [7] = "dinsTSAL",
67 [59] = "rdhwrTD", 96 [32] = map_bshfl, [36] = map_dbshfl, [59] = "rdhwrTD",
68} 97}
69 98
70local map_regimm = { 99local map_regimm = {
@@ -79,22 +108,6 @@ local map_regimm = {
79 false, false, false, "synciSO", 108 false, false, false, "synciSO",
80} 109}
81 110
82local map_cop0 = {
83 shift = 25, mask = 1,
84 [0] = {
85 shift = 21, mask = 15,
86 [0] = "mfc0TDW", [4] = "mtc0TDW",
87 [10] = "rdpgprDT",
88 [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
89 [14] = "wrpgprDT",
90 }, {
91 shift = 0, mask = 63,
92 [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
93 [24] = "eret", [31] = "deret",
94 [32] = "wait",
95 },
96}
97
98local map_cop1s = { 111local map_cop1s = {
99 shift = 0, mask = 63, 112 shift = 0, mask = 63,
100 [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH", 113 [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
@@ -178,8 +191,8 @@ local map_cop1bc = {
178 191
179local map_cop1 = { 192local map_cop1 = {
180 shift = 21, mask = 31, 193 shift = 21, mask = 31,
181 [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG", 194 [0] = "mfc1TG", "dmfc1TG", "cfc1TG", "mfhc1TG",
182 "mtc1TG", false, "ctc1TG", "mthc1TG", 195 "mtc1TG", "dmtc1TG", "ctc1TG", "mthc1TG",
183 map_cop1bc, false, false, false, 196 map_cop1bc, false, false, false,
184 false, false, false, false, 197 false, false, false, false,
185 map_cop1s, map_cop1d, false, false, 198 map_cop1s, map_cop1d, false, false,
@@ -213,16 +226,218 @@ local map_pri = {
213 "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU", 226 "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU",
214 map_cop0, map_cop1, false, map_cop1x, 227 map_cop0, map_cop1, false, map_cop1x,
215 "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB", 228 "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB",
216 false, false, false, false, 229 "daddiTSI", "daddiuTSI", false, false,
217 map_special2, false, false, map_special3, 230 map_special2, "jalxJ", false, map_special3,
218 "lbTSO", "lhTSO", "lwlTSO", "lwTSO", 231 "lbTSO", "lhTSO", "lwlTSO", "lwTSO",
219 "lbuTSO", "lhuTSO", "lwrTSO", false, 232 "lbuTSO", "lhuTSO", "lwrTSO", false,
220 "sbTSO", "shTSO", "swlTSO", "swTSO", 233 "sbTSO", "shTSO", "swlTSO", "swTSO",
221 false, false, "swrTSO", "cacheNSO", 234 false, false, "swrTSO", "cacheNSO",
222 "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO", 235 "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO",
223 false, "ldc1HSO", "ldc2TSO", false, 236 false, "ldc1HSO", "ldc2TSO", "ldTSO",
224 "scTSO", "swc1HSO", "swc2TSO", false, 237 "scTSO", "swc1HSO", "swc2TSO", false,
225 false, "sdc1HSO", "sdc2TSO", false, 238 false, "sdc1HSO", "sdc2TSO", "sdTSO",
239}
240
241------------------------------------------------------------------------------
242-- Primary and extended opcode maps for MIPS R6
243------------------------------------------------------------------------------
244
245local map_mul_r6 = { shift = 6, mask = 3, [2] = "mulDST", [3] = "muhDST" }
246local map_mulu_r6 = { shift = 6, mask = 3, [2] = "muluDST", [3] = "muhuDST" }
247local map_div_r6 = { shift = 6, mask = 3, [2] = "divDST", [3] = "modDST" }
248local map_divu_r6 = { shift = 6, mask = 3, [2] = "divuDST", [3] = "moduDST" }
249local map_dmul_r6 = { shift = 6, mask = 3, [2] = "dmulDST", [3] = "dmuhDST" }
250local map_dmulu_r6 = { shift = 6, mask = 3, [2] = "dmuluDST", [3] = "dmuhuDST" }
251local map_ddiv_r6 = { shift = 6, mask = 3, [2] = "ddivDST", [3] = "dmodDST" }
252local map_ddivu_r6 = { shift = 6, mask = 3, [2] = "ddivuDST", [3] = "dmoduDST" }
253
254local map_special_r6 = {
255 shift = 0, mask = 63,
256 [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
257 false, map_srl, "sraDTA",
258 "sllvDTS", false, map_srlv, "sravDTS",
259 "jrS", "jalrD1S", false, false,
260 "syscallY", "breakY", false, "sync",
261 "clzDS", "cloDS", "dclzDS", "dcloDS",
262 "dsllvDST", "dlsaDSTA", "dsrlvDST", "dsravDST",
263 map_mul_r6, map_mulu_r6, map_div_r6, map_divu_r6,
264 map_dmul_r6, map_dmulu_r6, map_ddiv_r6, map_ddivu_r6,
265 "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
266 "andDST", "or|moveDST0", "xorDST", "nor|notDST0",
267 false, false, "sltDST", "sltuDST",
268 "daddDST", "dadduDST", "dsubDST", "dsubuDST",
269 "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
270 "teqSTZ", "seleqzDST", "tneSTZ", "selnezDST",
271 "dsllDTA", false, "dsrlDTA", "dsraDTA",
272 "dsll32DTA", false, "dsrl32DTA", "dsra32DTA",
273}
274
275local map_bshfl_r6 = {
276 shift = 9, mask = 3,
277 [1] = "alignDSTa",
278 _ = {
279 shift = 6, mask = 31,
280 [0] = "bitswapDT",
281 [2] = "wsbhDT",
282 [16] = "sebDT",
283 [24] = "sehDT",
284 }
285}
286
287local map_dbshfl_r6 = {
288 shift = 9, mask = 3,
289 [1] = "dalignDSTa",
290 _ = {
291 shift = 6, mask = 31,
292 [0] = "dbitswapDT",
293 [2] = "dsbhDT",
294 [5] = "dshdDT",
295 }
296}
297
298local map_special3_r6 = {
299 shift = 0, mask = 63,
300 [0] = "extTSAK", [1] = "dextmTSAP", [3] = "dextTSAK",
301 [4] = "insTSAL", [6] = "dinsuTSEQ", [7] = "dinsTSAL",
302 [32] = map_bshfl_r6, [36] = map_dbshfl_r6, [59] = "rdhwrTD",
303}
304
305local map_regimm_r6 = {
306 shift = 16, mask = 31,
307 [0] = "bltzSB", [1] = "bgezSB",
308 [6] = "dahiSI", [30] = "datiSI",
309 [23] = "sigrieI", [31] = "synciSO",
310}
311
312local map_pcrel_r6 = {
313 shift = 19, mask = 3,
314 [0] = "addiupcS2", "lwpcS2", "lwupcS2", {
315 shift = 18, mask = 1,
316 [0] = "ldpcS3", { shift = 16, mask = 3, [2] = "auipcSI", [3] = "aluipcSI" }
317 }
318}
319
320local map_cop1s_r6 = {
321 shift = 0, mask = 63,
322 [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
323 "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
324 "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
325 "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
326 "sel.sFGH", false, false, false,
327 "seleqz.sFGH", "recip.sFG", "rsqrt.sFG", "selnez.sFGH",
328 "maddf.sFGH", "msubf.sFGH", "rint.sFG", "class.sFG",
329 "min.sFGH", "mina.sFGH", "max.sFGH", "maxa.sFGH",
330 false, "cvt.d.sFG", false, false,
331 "cvt.w.sFG", "cvt.l.sFG",
332}
333
334local map_cop1d_r6 = {
335 shift = 0, mask = 63,
336 [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
337 "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
338 "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
339 "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
340 "sel.dFGH", false, false, false,
341 "seleqz.dFGH", "recip.dFG", "rsqrt.dFG", "selnez.dFGH",
342 "maddf.dFGH", "msubf.dFGH", "rint.dFG", "class.dFG",
343 "min.dFGH", "mina.dFGH", "max.dFGH", "maxa.dFGH",
344 "cvt.s.dFG", false, false, false,
345 "cvt.w.dFG", "cvt.l.dFG",
346}
347
348local map_cop1w_r6 = {
349 shift = 0, mask = 63,
350 [0] = "cmp.af.sFGH", "cmp.un.sFGH", "cmp.eq.sFGH", "cmp.ueq.sFGH",
351 "cmp.lt.sFGH", "cmp.ult.sFGH", "cmp.le.sFGH", "cmp.ule.sFGH",
352 "cmp.saf.sFGH", "cmp.sun.sFGH", "cmp.seq.sFGH", "cmp.sueq.sFGH",
353 "cmp.slt.sFGH", "cmp.sult.sFGH", "cmp.sle.sFGH", "cmp.sule.sFGH",
354 false, "cmp.or.sFGH", "cmp.une.sFGH", "cmp.ne.sFGH",
355 false, false, false, false,
356 false, "cmp.sor.sFGH", "cmp.sune.sFGH", "cmp.sne.sFGH",
357 false, false, false, false,
358 "cvt.s.wFG", "cvt.d.wFG",
359}
360
361local map_cop1l_r6 = {
362 shift = 0, mask = 63,
363 [0] = "cmp.af.dFGH", "cmp.un.dFGH", "cmp.eq.dFGH", "cmp.ueq.dFGH",
364 "cmp.lt.dFGH", "cmp.ult.dFGH", "cmp.le.dFGH", "cmp.ule.dFGH",
365 "cmp.saf.dFGH", "cmp.sun.dFGH", "cmp.seq.dFGH", "cmp.sueq.dFGH",
366 "cmp.slt.dFGH", "cmp.sult.dFGH", "cmp.sle.dFGH", "cmp.sule.dFGH",
367 false, "cmp.or.dFGH", "cmp.une.dFGH", "cmp.ne.dFGH",
368 false, false, false, false,
369 false, "cmp.sor.dFGH", "cmp.sune.dFGH", "cmp.sne.dFGH",
370 false, false, false, false,
371 "cvt.s.lFG", "cvt.d.lFG",
372}
373
374local map_cop1_r6 = {
375 shift = 21, mask = 31,
376 [0] = "mfc1TG", "dmfc1TG", "cfc1TG", "mfhc1TG",
377 "mtc1TG", "dmtc1TG", "ctc1TG", "mthc1TG",
378 false, "bc1eqzHB", false, false,
379 false, "bc1nezHB", false, false,
380 map_cop1s_r6, map_cop1d_r6, false, false,
381 map_cop1w_r6, map_cop1l_r6,
382}
383
384local function maprs_popTS(rs, rt)
385 if rt == 0 then return 0 elseif rs == 0 then return 1
386 elseif rs == rt then return 2 else return 3 end
387end
388
389local map_pop06_r6 = {
390 maprs = maprs_popTS, [0] = "blezSB", "blezalcTB", "bgezalcTB", "bgeucSTB"
391}
392local map_pop07_r6 = {
393 maprs = maprs_popTS, [0] = "bgtzSB", "bgtzalcTB", "bltzalcTB", "bltucSTB"
394}
395local map_pop26_r6 = {
396 maprs = maprs_popTS, "blezcTB", "bgezcTB", "bgecSTB"
397}
398local map_pop27_r6 = {
399 maprs = maprs_popTS, "bgtzcTB", "bltzcTB", "bltcSTB"
400}
401
402local function maprs_popS(rs, rt)
403 if rs == 0 then return 0 else return 1 end
404end
405
406local map_pop66_r6 = {
407 maprs = maprs_popS, [0] = "jicTI", "beqzcSb"
408}
409local map_pop76_r6 = {
410 maprs = maprs_popS, [0] = "jialcTI", "bnezcSb"
411}
412
413local function maprs_popST(rs, rt)
414 if rs >= rt then return 0 elseif rs == 0 then return 1 else return 2 end
415end
416
417local map_pop10_r6 = {
418 maprs = maprs_popST, [0] = "bovcSTB", "beqzalcTB", "beqcSTB"
419}
420local map_pop30_r6 = {
421 maprs = maprs_popST, [0] = "bnvcSTB", "bnezalcTB", "bnecSTB"
422}
423
424local map_pri_r6 = {
425 [0] = map_special_r6, map_regimm_r6, "jJ", "jalJ",
426 "beq|beqz|bST00B", "bne|bnezST0B", map_pop06_r6, map_pop07_r6,
427 map_pop10_r6, "addiu|liTS0I", "sltiTSI", "sltiuTSI",
428 "andiTSU", "ori|liTS0U", "xoriTSU", "aui|luiTS0U",
429 map_cop0, map_cop1_r6, false, false,
430 false, false, map_pop26_r6, map_pop27_r6,
431 map_pop30_r6, "daddiuTSI", false, false,
432 false, "dauiTSI", false, map_special3_r6,
433 "lbTSO", "lhTSO", false, "lwTSO",
434 "lbuTSO", "lhuTSO", false, false,
435 "sbTSO", "shTSO", false, "swTSO",
436 false, false, false, false,
437 false, "lwc1HSO", "bc#", false,
438 false, "ldc1HSO", map_pop66_r6, "ldTSO",
439 false, "swc1HSO", "balc#", map_pcrel_r6,
440 false, "sdc1HSO", map_pop76_r6, "sdTSO",
226} 441}
227 442
228------------------------------------------------------------------------------ 443------------------------------------------------------------------------------
@@ -279,10 +494,14 @@ local function disass_ins(ctx)
279 ctx.op = op 494 ctx.op = op
280 ctx.rel = nil 495 ctx.rel = nil
281 496
282 local opat = map_pri[rshift(op, 26)] 497 local opat = ctx.map_pri[rshift(op, 26)]
283 while type(opat) ~= "string" do 498 while type(opat) ~= "string" do
284 if not opat then return unknown(ctx) end 499 if not opat then return unknown(ctx) end
285 opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._ 500 if opat.maprs then
501 opat = opat[opat.maprs(band(rshift(op,21),31), band(rshift(op,16),31))]
502 else
503 opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
504 end
286 end 505 end
287 local name, pat = match(opat, "^([a-z0-9_.]*)(.*)") 506 local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
288 local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)") 507 local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
@@ -306,6 +525,10 @@ local function disass_ins(ctx)
306 x = "f"..band(rshift(op, 21), 31) 525 x = "f"..band(rshift(op, 21), 31)
307 elseif p == "A" then 526 elseif p == "A" then
308 x = band(rshift(op, 6), 31) 527 x = band(rshift(op, 6), 31)
528 elseif p == "a" then
529 x = band(rshift(op, 6), 7)
530 elseif p == "E" then
531 x = band(rshift(op, 6), 31) + 32
309 elseif p == "M" then 532 elseif p == "M" then
310 x = band(rshift(op, 11), 31) 533 x = band(rshift(op, 11), 31)
311 elseif p == "N" then 534 elseif p == "N" then
@@ -315,10 +538,18 @@ local function disass_ins(ctx)
315 if x == 0 then x = nil end 538 if x == 0 then x = nil end
316 elseif p == "K" then 539 elseif p == "K" then
317 x = band(rshift(op, 11), 31) + 1 540 x = band(rshift(op, 11), 31) + 1
541 elseif p == "P" then
542 x = band(rshift(op, 11), 31) + 33
318 elseif p == "L" then 543 elseif p == "L" then
319 x = band(rshift(op, 11), 31) - last + 1 544 x = band(rshift(op, 11), 31) - last + 1
545 elseif p == "Q" then
546 x = band(rshift(op, 11), 31) - last + 33
320 elseif p == "I" then 547 elseif p == "I" then
321 x = arshift(lshift(op, 16), 16) 548 x = arshift(lshift(op, 16), 16)
549 elseif p == "2" then
550 x = arshift(lshift(op, 13), 11)
551 elseif p == "3" then
552 x = arshift(lshift(op, 14), 11)
322 elseif p == "U" then 553 elseif p == "U" then
323 x = band(op, 0xffff) 554 x = band(op, 0xffff)
324 elseif p == "O" then 555 elseif p == "O" then
@@ -328,13 +559,22 @@ local function disass_ins(ctx)
328 local index = map_gpr[band(rshift(op, 16), 31)] 559 local index = map_gpr[band(rshift(op, 16), 31)]
329 operands[#operands] = format("%s(%s)", index, last) 560 operands[#operands] = format("%s(%s)", index, last)
330 elseif p == "B" then 561 elseif p == "B" then
331 x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4 562 x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 14) + 4
563 ctx.rel = x
564 x = format("0x%08x", x)
565 elseif p == "b" then
566 x = ctx.addr + ctx.pos + arshift(lshift(op, 11), 9) + 4
332 ctx.rel = x 567 ctx.rel = x
333 x = "0x"..tohex(x) 568 x = format("0x%08x", x)
569 elseif p == "#" then
570 x = ctx.addr + ctx.pos + arshift(lshift(op, 6), 4) + 4
571 ctx.rel = x
572 x = format("0x%08x", x)
334 elseif p == "J" then 573 elseif p == "J" then
335 x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4 574 local a = ctx.addr + ctx.pos
575 x = a - band(a, 0x0fffffff) + band(op, 0x03ffffff)*4
336 ctx.rel = x 576 ctx.rel = x
337 x = "0x"..tohex(x) 577 x = format("0x%08x", x)
338 elseif p == "V" then 578 elseif p == "V" then
339 x = band(rshift(op, 8), 7) 579 x = band(rshift(op, 8), 7)
340 if x == 0 then x = nil end 580 if x == 0 then x = nil end
@@ -384,7 +624,7 @@ local function disass_block(ctx, ofs, len)
384end 624end
385 625
386-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). 626-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
387local function create_(code, addr, out) 627local function create(code, addr, out)
388 local ctx = {} 628 local ctx = {}
389 ctx.code = code 629 ctx.code = code
390 ctx.addr = addr or 0 630 ctx.addr = addr or 0
@@ -393,36 +633,62 @@ local function create_(code, addr, out)
393 ctx.disass = disass_block 633 ctx.disass = disass_block
394 ctx.hexdump = 8 634 ctx.hexdump = 8
395 ctx.get = get_be 635 ctx.get = get_be
636 ctx.map_pri = map_pri
637 return ctx
638end
639
640local function create_el(code, addr, out)
641 local ctx = create(code, addr, out)
642 ctx.get = get_le
643 return ctx
644end
645
646local function create_r6(code, addr, out)
647 local ctx = create(code, addr, out)
648 ctx.map_pri = map_pri_r6
396 return ctx 649 return ctx
397end 650end
398 651
399local function create_el_(code, addr, out) 652local function create_r6_el(code, addr, out)
400 local ctx = create_(code, addr, out) 653 local ctx = create(code, addr, out)
401 ctx.get = get_le 654 ctx.get = get_le
655 ctx.map_pri = map_pri_r6
402 return ctx 656 return ctx
403end 657end
404 658
405-- Simple API: disassemble code (a string) at address and output via out. 659-- Simple API: disassemble code (a string) at address and output via out.
406local function disass_(code, addr, out) 660local function disass(code, addr, out)
407 create_(code, addr, out):disass() 661 create(code, addr, out):disass()
662end
663
664local function disass_el(code, addr, out)
665 create_el(code, addr, out):disass()
408end 666end
409 667
410local function disass_el_(code, addr, out) 668local function disass_r6(code, addr, out)
411 create_el_(code, addr, out):disass() 669 create_r6(code, addr, out):disass()
670end
671
672local function disass_r6_el(code, addr, out)
673 create_r6_el(code, addr, out):disass()
412end 674end
413 675
414-- Return register name for RID. 676-- Return register name for RID.
415local function regname_(r) 677local function regname(r)
416 if r < 32 then return map_gpr[r] end 678 if r < 32 then return map_gpr[r] end
417 return "f"..(r-32) 679 return "f"..(r-32)
418end 680end
419 681
420-- Public module functions. 682-- Public module functions.
421module(...) 683return {
422 684 create = create,
423create = create_ 685 create_el = create_el,
424create_el = create_el_ 686 create_r6 = create_r6,
425disass = disass_ 687 create_r6_el = create_r6_el,
426disass_el = disass_el_ 688 disass = disass,
427regname = regname_ 689 disass_el = disass_el,
690 disass_r6 = disass_r6,
691 disass_r6_el = disass_r6_el,
692 regname = regname
693}
428 694
diff --git a/src/jit/dis_mips64.lua b/src/jit/dis_mips64.lua
new file mode 100644
index 00000000..018e6058
--- /dev/null
+++ b/src/jit/dis_mips64.lua
@@ -0,0 +1,17 @@
1----------------------------------------------------------------------------
2-- LuaJIT MIPS64 disassembler wrapper module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7-- This module just exports the big-endian functions from the
8-- MIPS disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------
10
11local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
12return {
13 create = dis_mips.create,
14 disass = dis_mips.disass,
15 regname = dis_mips.regname
16}
17
diff --git a/src/jit/dis_mips64el.lua b/src/jit/dis_mips64el.lua
new file mode 100644
index 00000000..ef3af475
--- /dev/null
+++ b/src/jit/dis_mips64el.lua
@@ -0,0 +1,17 @@
1----------------------------------------------------------------------------
2-- LuaJIT MIPS64EL disassembler wrapper module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7-- This module just exports the little-endian functions from the
8-- MIPS disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------
10
11local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
12return {
13 create = dis_mips.create_el,
14 disass = dis_mips.disass_el,
15 regname = dis_mips.regname
16}
17
diff --git a/src/jit/dis_mips64r6.lua b/src/jit/dis_mips64r6.lua
new file mode 100644
index 00000000..2bfc2429
--- /dev/null
+++ b/src/jit/dis_mips64r6.lua
@@ -0,0 +1,17 @@
1----------------------------------------------------------------------------
2-- LuaJIT MIPS64R6 disassembler wrapper module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7-- This module just exports the r6 big-endian functions from the
8-- MIPS disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------
10
11local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
12return {
13 create = dis_mips.create_r6,
14 disass = dis_mips.disass_r6,
15 regname = dis_mips.regname
16}
17
diff --git a/src/jit/dis_mips64r6el.lua b/src/jit/dis_mips64r6el.lua
new file mode 100644
index 00000000..30597552
--- /dev/null
+++ b/src/jit/dis_mips64r6el.lua
@@ -0,0 +1,17 @@
1----------------------------------------------------------------------------
2-- LuaJIT MIPS64R6EL disassembler wrapper module.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7-- This module just exports the r6 little-endian functions from the
8-- MIPS disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------
10
11local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
12return {
13 create = dis_mips.create_r6_el,
14 disass = dis_mips.disass_r6_el,
15 regname = dis_mips.regname
16}
17
diff --git a/src/jit/dis_mipsel.lua b/src/jit/dis_mipsel.lua
index a2d05690..a6bb9565 100644
--- a/src/jit/dis_mipsel.lua
+++ b/src/jit/dis_mipsel.lua
@@ -8,13 +8,10 @@
8-- MIPS disassembler module. All the interesting stuff is there. 8-- MIPS disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------ 9------------------------------------------------------------------------------
10 10
11local require = require 11local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
12 12return {
13module(...) 13 create = dis_mips.create_el,
14 14 disass = dis_mips.disass_el,
15local dis_mips = require(_PACKAGE.."dis_mips") 15 regname = dis_mips.regname
16 16}
17create = dis_mips.create_el
18disass = dis_mips.disass_el
19regname = dis_mips.regname
20 17
diff --git a/src/jit/dis_ppc.lua b/src/jit/dis_ppc.lua
index dfc6cbce..31d7a4d5 100644
--- a/src/jit/dis_ppc.lua
+++ b/src/jit/dis_ppc.lua
@@ -560,7 +560,7 @@ local function disass_block(ctx, ofs, len)
560end 560end
561 561
562-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). 562-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
563local function create_(code, addr, out) 563local function create(code, addr, out)
564 local ctx = {} 564 local ctx = {}
565 ctx.code = code 565 ctx.code = code
566 ctx.addr = addr or 0 566 ctx.addr = addr or 0
@@ -572,20 +572,20 @@ local function create_(code, addr, out)
572end 572end
573 573
574-- Simple API: disassemble code (a string) at address and output via out. 574-- Simple API: disassemble code (a string) at address and output via out.
575local function disass_(code, addr, out) 575local function disass(code, addr, out)
576 create_(code, addr, out):disass() 576 create(code, addr, out):disass()
577end 577end
578 578
579-- Return register name for RID. 579-- Return register name for RID.
580local function regname_(r) 580local function regname(r)
581 if r < 32 then return map_gpr[r] end 581 if r < 32 then return map_gpr[r] end
582 return "f"..(r-32) 582 return "f"..(r-32)
583end 583end
584 584
585-- Public module functions. 585-- Public module functions.
586module(...) 586return {
587 587 create = create,
588create = create_ 588 disass = disass,
589disass = disass_ 589 regname = regname
590regname = regname_ 590}
591 591
diff --git a/src/jit/dis_x64.lua b/src/jit/dis_x64.lua
index 1027b5a1..88032f1e 100644
--- a/src/jit/dis_x64.lua
+++ b/src/jit/dis_x64.lua
@@ -8,13 +8,10 @@
8-- x86/x64 disassembler module. All the interesting stuff is there. 8-- x86/x64 disassembler module. All the interesting stuff is there.
9------------------------------------------------------------------------------ 9------------------------------------------------------------------------------
10 10
11local require = require 11local dis_x86 = require((string.match(..., ".*%.") or "").."dis_x86")
12 12return {
13module(...) 13 create = dis_x86.create64,
14 14 disass = dis_x86.disass64,
15local dis_x86 = require(_PACKAGE.."dis_x86") 15 regname = dis_x86.regname64
16 16}
17create = dis_x86.create64
18disass = dis_x86.disass64
19regname = dis_x86.regname64
20 17
diff --git a/src/jit/dis_x86.lua b/src/jit/dis_x86.lua
index 9246820d..364a3184 100644
--- a/src/jit/dis_x86.lua
+++ b/src/jit/dis_x86.lua
@@ -15,19 +15,20 @@
15-- Intel and AMD manuals. The supported instruction set is quite extensive 15-- Intel and AMD manuals. The supported instruction set is quite extensive
16-- and reflects what a current generation Intel or AMD CPU implements in 16-- and reflects what a current generation Intel or AMD CPU implements in
17-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3, 17-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3,
18-- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM) 18-- SSE4.1, SSE4.2, SSE4a, AVX, AVX2 and even privileged and hypervisor
19-- instructions. 19-- (VMX/SVM) instructions.
20-- 20--
21-- Notes: 21-- Notes:
22-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported. 22-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported.
23-- * No attempt at optimization has been made -- it's fast enough for my needs. 23-- * No attempt at optimization has been made -- it's fast enough for my needs.
24-- * The public API may change when more architectures are added.
25------------------------------------------------------------------------------ 24------------------------------------------------------------------------------
26 25
27local type = type 26local type = type
28local sub, byte, format = string.sub, string.byte, string.format 27local sub, byte, format = string.sub, string.byte, string.format
29local match, gmatch, gsub = string.match, string.gmatch, string.gsub 28local match, gmatch, gsub = string.match, string.gmatch, string.gsub
30local lower, rep = string.lower, string.rep 29local lower, rep = string.lower, string.rep
30local bit = require("bit")
31local tohex = bit.tohex
31 32
32-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on. 33-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on.
33local map_opc1_32 = { 34local map_opc1_32 = {
@@ -76,7 +77,7 @@ local map_opc1_32 = {
76"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi", 77"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi",
77"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI", 78"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI",
78--Cx 79--Cx
79"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi", 80"shift!Bmu","shift!Vmu","retBw","ret","vex*3$lesVrm","vex*2$ldsVrm","movBmi","movVmi",
80"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS", 81"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS",
81--Dx 82--Dx
82"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb", 83"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb",
@@ -101,7 +102,7 @@ local map_opc1_64 = setmetatable({
101 [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb", 102 [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb",
102 [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb", 103 [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb",
103 [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb", 104 [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb",
104 [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false, 105 [0x82]=false, [0x9a]=false, [0xc4]="vex*3", [0xc5]="vex*2", [0xce]=false,
105 [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false, 106 [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false,
106}, { __index = map_opc1_32 }) 107}, { __index = map_opc1_32 })
107 108
@@ -112,12 +113,12 @@ local map_opc2 = {
112[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret", 113[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret",
113"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu", 114"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu",
114--1x 115--1x
115"movupsXrm|movssXrm|movupdXrm|movsdXrm", 116"movupsXrm|movssXrvm|movupdXrm|movsdXrvm",
116"movupsXmr|movssXmr|movupdXmr|movsdXmr", 117"movupsXmr|movssXmvr|movupdXmr|movsdXmvr",
117"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm", 118"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm",
118"movlpsXmr||movlpdXmr", 119"movlpsXmr||movlpdXmr",
119"unpcklpsXrm||unpcklpdXrm", 120"unpcklpsXrvm||unpcklpdXrvm",
120"unpckhpsXrm||unpckhpdXrm", 121"unpckhpsXrvm||unpckhpdXrvm",
121"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm", 122"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm",
122"movhpsXmr||movhpdXmr", 123"movhpsXmr||movhpdXmr",
123"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm", 124"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm",
@@ -126,7 +127,7 @@ local map_opc2 = {
126"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil, 127"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil,
127"movapsXrm||movapdXrm", 128"movapsXrm||movapdXrm",
128"movapsXmr||movapdXmr", 129"movapsXmr||movapdXmr",
129"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt", 130"cvtpi2psXrMm|cvtsi2ssXrvVmt|cvtpi2pdXrMm|cvtsi2sdXrvVmt",
130"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr", 131"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr",
131"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm", 132"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm",
132"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm", 133"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm",
@@ -142,27 +143,27 @@ local map_opc2 = {
142"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm", 143"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm",
143--5x 144--5x
144"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm", 145"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm",
145"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm", 146"rsqrtpsXrm|rsqrtssXrvm","rcppsXrm|rcpssXrvm",
146"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm", 147"andpsXrvm||andpdXrvm","andnpsXrvm||andnpdXrvm",
147"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm", 148"orpsXrvm||orpdXrvm","xorpsXrvm||xorpdXrvm",
148"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm", 149"addpsXrvm|addssXrvm|addpdXrvm|addsdXrvm","mulpsXrvm|mulssXrvm|mulpdXrvm|mulsdXrvm",
149"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm", 150"cvtps2pdXrm|cvtss2sdXrvm|cvtpd2psXrm|cvtsd2ssXrvm",
150"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm", 151"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm",
151"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm", 152"subpsXrvm|subssXrvm|subpdXrvm|subsdXrvm","minpsXrvm|minssXrvm|minpdXrvm|minsdXrvm",
152"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm", 153"divpsXrvm|divssXrvm|divpdXrvm|divsdXrvm","maxpsXrvm|maxssXrvm|maxpdXrvm|maxsdXrvm",
153--6x 154--6x
154"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm", 155"punpcklbwPrvm","punpcklwdPrvm","punpckldqPrvm","packsswbPrvm",
155"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm", 156"pcmpgtbPrvm","pcmpgtwPrvm","pcmpgtdPrvm","packuswbPrvm",
156"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm", 157"punpckhbwPrvm","punpckhwdPrvm","punpckhdqPrvm","packssdwPrvm",
157"||punpcklqdqXrm","||punpckhqdqXrm", 158"||punpcklqdqXrvm","||punpckhqdqXrvm",
158"movPrVSm","movqMrm|movdquXrm|movdqaXrm", 159"movPrVSm","movqMrm|movdquXrm|movdqaXrm",
159--7x 160--7x
160"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu", 161"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pvmu",
161"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu", 162"pshiftd!Pvmu","pshiftq!Mvmu||pshiftdq!Xvmu",
162"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|", 163"pcmpeqbPrvm","pcmpeqwPrvm","pcmpeqdPrvm","emms*|",
163"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$", 164"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$",
164nil,nil, 165nil,nil,
165"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm", 166"||haddpdXrvm|haddpsXrvm","||hsubpdXrvm|hsubpsXrvm",
166"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr", 167"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr",
167--8x 168--8x
168"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj", 169"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj",
@@ -180,27 +181,27 @@ nil,nil,
180"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt", 181"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt",
181--Cx 182--Cx
182"xaddBmr","xaddVmr", 183"xaddBmr","xaddVmr",
183"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|", 184"cmppsXrvmu|cmpssXrvmu|cmppdXrvmu|cmpsdXrvmu","$movntiVmr|",
184"pinsrwPrWmu","pextrwDrPmu", 185"pinsrwPrvWmu","pextrwDrPmu",
185"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp", 186"shufpsXrvmu||shufpdXrvmu","$cmpxchg!Qmp",
186"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR", 187"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR",
187--Dx 188--Dx
188"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm", 189"||addsubpdXrvm|addsubpsXrvm","psrlwPrvm","psrldPrvm","psrlqPrvm",
189"paddqPrm","pmullwPrm", 190"paddqPrvm","pmullwPrvm",
190"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm", 191"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm",
191"psubusbPrm","psubuswPrm","pminubPrm","pandPrm", 192"psubusbPrvm","psubuswPrvm","pminubPrvm","pandPrvm",
192"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm", 193"paddusbPrvm","padduswPrvm","pmaxubPrvm","pandnPrvm",
193--Ex 194--Ex
194"pavgbPrm","psrawPrm","psradPrm","pavgwPrm", 195"pavgbPrvm","psrawPrvm","psradPrvm","pavgwPrvm",
195"pmulhuwPrm","pmulhwPrm", 196"pmulhuwPrvm","pmulhwPrvm",
196"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr", 197"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr",
197"psubsbPrm","psubswPrm","pminswPrm","porPrm", 198"psubsbPrvm","psubswPrvm","pminswPrvm","porPrvm",
198"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm", 199"paddsbPrvm","paddswPrvm","pmaxswPrvm","pxorPrvm",
199--Fx 200--Fx
200"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm", 201"|||lddquXrm","psllwPrvm","pslldPrvm","psllqPrvm",
201"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$", 202"pmuludqPrvm","pmaddwdPrvm","psadbwPrvm","maskmovqMrm||maskmovdquXrm$",
202"psubbPrm","psubwPrm","psubdPrm","psubqPrm", 203"psubbPrvm","psubwPrvm","psubdPrvm","psubqPrvm",
203"paddbPrm","paddwPrm","padddPrm","ud", 204"paddbPrvm","paddwPrvm","padddPrvm","ud",
204} 205}
205assert(map_opc2[255] == "ud") 206assert(map_opc2[255] == "ud")
206 207
@@ -208,49 +209,91 @@ assert(map_opc2[255] == "ud")
208local map_opc3 = { 209local map_opc3 = {
209["38"] = { -- [66] 0f 38 xx 210["38"] = { -- [66] 0f 38 xx
210--0x 211--0x
211[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm", 212[0]="pshufbPrvm","phaddwPrvm","phadddPrvm","phaddswPrvm",
212"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm", 213"pmaddubswPrvm","phsubwPrvm","phsubdPrvm","phsubswPrvm",
213"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm", 214"psignbPrvm","psignwPrvm","psigndPrvm","pmulhrswPrvm",
214nil,nil,nil,nil, 215"||permilpsXrvm","||permilpdXrvm",nil,nil,
215--1x 216--1x
216"||pblendvbXrma",nil,nil,nil, 217"||pblendvbXrma",nil,nil,nil,
217"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm", 218"||blendvpsXrma","||blendvpdXrma","||permpsXrvm","||ptestXrm",
218nil,nil,nil,nil, 219"||broadcastssXrm","||broadcastsdXrm","||broadcastf128XrlXm",nil,
219"pabsbPrm","pabswPrm","pabsdPrm",nil, 220"pabsbPrm","pabswPrm","pabsdPrm",nil,
220--2x 221--2x
221"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm", 222"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm",
222"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil, 223"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil,
223"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm", 224"||pmuldqXrvm","||pcmpeqqXrvm","||$movntdqaXrm","||packusdwXrvm",
224nil,nil,nil,nil, 225"||maskmovpsXrvm","||maskmovpdXrvm","||maskmovpsXmvr","||maskmovpdXmvr",
225--3x 226--3x
226"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm", 227"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm",
227"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm", 228"||pmovzxwqXrm","||pmovzxdqXrm","||permdXrvm","||pcmpgtqXrvm",
228"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm", 229"||pminsbXrvm","||pminsdXrvm","||pminuwXrvm","||pminudXrvm",
229"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm", 230"||pmaxsbXrvm","||pmaxsdXrvm","||pmaxuwXrvm","||pmaxudXrvm",
230--4x 231--4x
231"||pmulddXrm","||phminposuwXrm", 232"||pmulddXrvm","||phminposuwXrm",nil,nil,
233nil,"||psrlvVSXrvm","||psravdXrvm","||psllvVSXrvm",
234--5x
235[0x58] = "||pbroadcastdXrlXm",[0x59] = "||pbroadcastqXrlXm",
236[0x5a] = "||broadcasti128XrlXm",
237--7x
238[0x78] = "||pbroadcastbXrlXm",[0x79] = "||pbroadcastwXrlXm",
239--8x
240[0x8c] = "||pmaskmovXrvVSm",
241[0x8e] = "||pmaskmovVSmXvr",
242--9x
243[0x96] = "||fmaddsub132pHXrvm",[0x97] = "||fmsubadd132pHXrvm",
244[0x98] = "||fmadd132pHXrvm",[0x99] = "||fmadd132sHXrvm",
245[0x9a] = "||fmsub132pHXrvm",[0x9b] = "||fmsub132sHXrvm",
246[0x9c] = "||fnmadd132pHXrvm",[0x9d] = "||fnmadd132sHXrvm",
247[0x9e] = "||fnmsub132pHXrvm",[0x9f] = "||fnmsub132sHXrvm",
248--Ax
249[0xa6] = "||fmaddsub213pHXrvm",[0xa7] = "||fmsubadd213pHXrvm",
250[0xa8] = "||fmadd213pHXrvm",[0xa9] = "||fmadd213sHXrvm",
251[0xaa] = "||fmsub213pHXrvm",[0xab] = "||fmsub213sHXrvm",
252[0xac] = "||fnmadd213pHXrvm",[0xad] = "||fnmadd213sHXrvm",
253[0xae] = "||fnmsub213pHXrvm",[0xaf] = "||fnmsub213sHXrvm",
254--Bx
255[0xb6] = "||fmaddsub231pHXrvm",[0xb7] = "||fmsubadd231pHXrvm",
256[0xb8] = "||fmadd231pHXrvm",[0xb9] = "||fmadd231sHXrvm",
257[0xba] = "||fmsub231pHXrvm",[0xbb] = "||fmsub231sHXrvm",
258[0xbc] = "||fnmadd231pHXrvm",[0xbd] = "||fnmadd231sHXrvm",
259[0xbe] = "||fnmsub231pHXrvm",[0xbf] = "||fnmsub231sHXrvm",
260--Dx
261[0xdc] = "||aesencXrvm", [0xdd] = "||aesenclastXrvm",
262[0xde] = "||aesdecXrvm", [0xdf] = "||aesdeclastXrvm",
232--Fx 263--Fx
233[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt", 264[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt",
265[0xf7] = "| sarxVrmv| shlxVrmv| shrxVrmv",
234}, 266},
235 267
236["3a"] = { -- [66] 0f 3a xx 268["3a"] = { -- [66] 0f 3a xx
237--0x 269--0x
238[0x00]=nil,nil,nil,nil,nil,nil,nil,nil, 270[0x00]="||permqXrmu","||permpdXrmu","||pblenddXrvmu",nil,
239"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu", 271"||permilpsXrmu","||permilpdXrmu","||perm2f128Xrvmu",nil,
240"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu", 272"||roundpsXrmu","||roundpdXrmu","||roundssXrvmu","||roundsdXrvmu",
273"||blendpsXrvmu","||blendpdXrvmu","||pblendwXrvmu","palignrPrvmu",
241--1x 274--1x
242nil,nil,nil,nil, 275nil,nil,nil,nil,
243"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru", 276"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru",
244nil,nil,nil,nil,nil,nil,nil,nil, 277"||insertf128XrvlXmu","||extractf128XlXmYru",nil,nil,
278nil,nil,nil,nil,
245--2x 279--2x
246"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil, 280"||pinsrbXrvVmu","||insertpsXrvmu","||pinsrXrvVmuS",nil,
281--3x
282[0x38] = "||inserti128Xrvmu",[0x39] = "||extracti128XlXmYru",
247--4x 283--4x
248[0x40] = "||dppsXrmu", 284[0x40] = "||dppsXrvmu",
249[0x41] = "||dppdXrmu", 285[0x41] = "||dppdXrvmu",
250[0x42] = "||mpsadbwXrmu", 286[0x42] = "||mpsadbwXrvmu",
287[0x44] = "||pclmulqdqXrvmu",
288[0x46] = "||perm2i128Xrvmu",
289[0x4a] = "||blendvpsXrvmb",[0x4b] = "||blendvpdXrvmb",
290[0x4c] = "||pblendvbXrvmb",
251--6x 291--6x
252[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu", 292[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu",
253[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu", 293[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu",
294[0xdf] = "||aeskeygenassistXrmu",
295--Fx
296[0xf0] = "||| rorxVrmu",
254}, 297},
255} 298}
256 299
@@ -354,17 +397,19 @@ local map_regs = {
354 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext! 397 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext!
355 X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", 398 X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
356 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" }, 399 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" },
400 Y = { "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
401 "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15" },
357} 402}
358local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" } 403local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" }
359 404
360-- Maps for size names. 405-- Maps for size names.
361local map_sz2n = { 406local map_sz2n = {
362 B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16, 407 B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16, Y = 32,
363} 408}
364local map_sz2prefix = { 409local map_sz2prefix = {
365 B = "byte", W = "word", D = "dword", 410 B = "byte", W = "word", D = "dword",
366 Q = "qword", 411 Q = "qword",
367 M = "qword", X = "xword", 412 M = "qword", X = "xword", Y = "yword",
368 F = "dword", G = "qword", -- No need for sizes/register names for these two. 413 F = "dword", G = "qword", -- No need for sizes/register names for these two.
369} 414}
370 415
@@ -387,10 +432,13 @@ local function putop(ctx, text, operands)
387 if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end 432 if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end
388 if ctx.rex then 433 if ctx.rex then
389 local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "").. 434 local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "")..
390 (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "") 435 (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "")..
391 if t ~= "" then text = "rex."..t.." "..text end 436 (ctx.vexl and "l" or "")
437 if ctx.vexv and ctx.vexv ~= 0 then t = t.."v"..ctx.vexv end
438 if t ~= "" then text = ctx.rex.."."..t.." "..gsub(text, "^ ", "")
439 elseif ctx.rex == "vex" then text = gsub("v"..text, "^v ", "") end
392 ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false 440 ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
393 ctx.rex = false 441 ctx.rex = false; ctx.vexl = false; ctx.vexv = false
394 end 442 end
395 if ctx.seg then 443 if ctx.seg then
396 local text2, n = gsub(text, "%[", "["..ctx.seg..":") 444 local text2, n = gsub(text, "%[", "["..ctx.seg..":")
@@ -405,6 +453,7 @@ local function putop(ctx, text, operands)
405 end 453 end
406 ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text)) 454 ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text))
407 ctx.mrm = false 455 ctx.mrm = false
456 ctx.vexv = false
408 ctx.start = pos 457 ctx.start = pos
409 ctx.imm = nil 458 ctx.imm = nil
410end 459end
@@ -413,7 +462,7 @@ end
413local function clearprefixes(ctx) 462local function clearprefixes(ctx)
414 ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false 463 ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false
415 ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false 464 ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
416 ctx.rex = false; ctx.a32 = false 465 ctx.rex = false; ctx.a32 = false; ctx.vexl = false
417end 466end
418 467
419-- Fallback for incomplete opcodes at the end. 468-- Fallback for incomplete opcodes at the end.
@@ -450,9 +499,9 @@ end
450-- Process pattern string and generate the operands. 499-- Process pattern string and generate the operands.
451local function putpat(ctx, name, pat) 500local function putpat(ctx, name, pat)
452 local operands, regs, sz, mode, sp, rm, sc, rx, sdisp 501 local operands, regs, sz, mode, sp, rm, sc, rx, sdisp
453 local code, pos, stop = ctx.code, ctx.pos, ctx.stop 502 local code, pos, stop, vexl = ctx.code, ctx.pos, ctx.stop, ctx.vexl
454 503
455 -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz 504 -- Chars used: 1DFGHIMPQRSTUVWXYabcdfgijlmoprstuvwxyz
456 for p in gmatch(pat, ".") do 505 for p in gmatch(pat, ".") do
457 local x = nil 506 local x = nil
458 if p == "V" or p == "U" then 507 if p == "V" or p == "U" then
@@ -467,12 +516,17 @@ local function putpat(ctx, name, pat)
467 elseif p == "B" then 516 elseif p == "B" then
468 sz = "B" 517 sz = "B"
469 regs = ctx.rex and map_regs.B64 or map_regs.B 518 regs = ctx.rex and map_regs.B64 or map_regs.B
470 elseif match(p, "[WDQMXFG]") then 519 elseif match(p, "[WDQMXYFG]") then
471 sz = p 520 sz = p
521 if sz == "X" and vexl then sz = "Y"; ctx.vexl = false end
472 regs = map_regs[sz] 522 regs = map_regs[sz]
473 elseif p == "P" then 523 elseif p == "P" then
474 sz = ctx.o16 and "X" or "M"; ctx.o16 = false 524 sz = ctx.o16 and "X" or "M"; ctx.o16 = false
525 if sz == "X" and vexl then sz = "Y"; ctx.vexl = false end
475 regs = map_regs[sz] 526 regs = map_regs[sz]
527 elseif p == "H" then
528 name = name..(ctx.rexw and "d" or "s")
529 ctx.rexw = false
476 elseif p == "S" then 530 elseif p == "S" then
477 name = name..lower(sz) 531 name = name..lower(sz)
478 elseif p == "s" then 532 elseif p == "s" then
@@ -484,6 +538,10 @@ local function putpat(ctx, name, pat)
484 local imm = getimm(ctx, pos, 1); if not imm then return end 538 local imm = getimm(ctx, pos, 1); if not imm then return end
485 x = format("0x%02x", imm) 539 x = format("0x%02x", imm)
486 pos = pos+1 540 pos = pos+1
541 elseif p == "b" then
542 local imm = getimm(ctx, pos, 1); if not imm then return end
543 x = regs[imm/16+1]
544 pos = pos+1
487 elseif p == "w" then 545 elseif p == "w" then
488 local imm = getimm(ctx, pos, 2); if not imm then return end 546 local imm = getimm(ctx, pos, 2); if not imm then return end
489 x = format("0x%x", imm) 547 x = format("0x%x", imm)
@@ -532,7 +590,7 @@ local function putpat(ctx, name, pat)
532 local lo = imm % 0x1000000 590 local lo = imm % 0x1000000
533 x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo) 591 x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo)
534 else 592 else
535 x = format("0x%08x", imm) 593 x = "0x"..tohex(imm)
536 end 594 end
537 elseif p == "R" then 595 elseif p == "R" then
538 local r = byte(code, pos-1, pos-1)%8 596 local r = byte(code, pos-1, pos-1)%8
@@ -616,8 +674,13 @@ local function putpat(ctx, name, pat)
616 else 674 else
617 x = "CR"..sp 675 x = "CR"..sp
618 end 676 end
677 elseif p == "v" then
678 if ctx.vexv then
679 x = regs[ctx.vexv+1]; ctx.vexv = false
680 end
619 elseif p == "y" then x = "DR"..sp 681 elseif p == "y" then x = "DR"..sp
620 elseif p == "z" then x = "TR"..sp 682 elseif p == "z" then x = "TR"..sp
683 elseif p == "l" then vexl = false
621 elseif p == "t" then 684 elseif p == "t" then
622 else 685 else
623 error("bad pattern `"..pat.."'") 686 error("bad pattern `"..pat.."'")
@@ -692,7 +755,8 @@ map_act = {
692 B = putpat, W = putpat, D = putpat, Q = putpat, 755 B = putpat, W = putpat, D = putpat, Q = putpat,
693 V = putpat, U = putpat, T = putpat, 756 V = putpat, U = putpat, T = putpat,
694 M = putpat, X = putpat, P = putpat, 757 M = putpat, X = putpat, P = putpat,
695 F = putpat, G = putpat, 758 F = putpat, G = putpat, Y = putpat,
759 H = putpat,
696 760
697 -- Collect prefixes. 761 -- Collect prefixes.
698 [":"] = function(ctx, name, pat) 762 [":"] = function(ctx, name, pat)
@@ -753,15 +817,68 @@ map_act = {
753 817
754 -- REX prefix. 818 -- REX prefix.
755 rex = function(ctx, name, pat) 819 rex = function(ctx, name, pat)
756 if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed. 820 if ctx.rex then return unknown(ctx) end -- Only 1 REX or VEX prefix allowed.
757 for p in gmatch(pat, ".") do ctx["rex"..p] = true end 821 for p in gmatch(pat, ".") do ctx["rex"..p] = true end
758 ctx.rex = true 822 ctx.rex = "rex"
823 end,
824
825 -- VEX prefix.
826 vex = function(ctx, name, pat)
827 if ctx.rex then return unknown(ctx) end -- Only 1 REX or VEX prefix allowed.
828 ctx.rex = "vex"
829 local pos = ctx.pos
830 if ctx.mrm then
831 ctx.mrm = nil
832 pos = pos-1
833 end
834 local b = byte(ctx.code, pos, pos)
835 if not b then return incomplete(ctx) end
836 pos = pos+1
837 if b < 128 then ctx.rexr = true end
838 local m = 1
839 if pat == "3" then
840 m = b%32; b = (b-m)/32
841 local nb = b%2; b = (b-nb)/2
842 if nb == 0 then ctx.rexb = true end
843 local nx = b%2
844 if nx == 0 then ctx.rexx = true end
845 b = byte(ctx.code, pos, pos)
846 if not b then return incomplete(ctx) end
847 pos = pos+1
848 if b >= 128 then ctx.rexw = true end
849 end
850 ctx.pos = pos
851 local map
852 if m == 1 then map = map_opc2
853 elseif m == 2 then map = map_opc3["38"]
854 elseif m == 3 then map = map_opc3["3a"]
855 else return unknown(ctx) end
856 local p = b%4; b = (b-p)/4
857 if p == 1 then ctx.o16 = "o16"
858 elseif p == 2 then ctx.rep = "rep"
859 elseif p == 3 then ctx.rep = "repne" end
860 local l = b%2; b = (b-l)/2
861 if l ~= 0 then ctx.vexl = true end
862 ctx.vexv = (-1-b)%16
863 return dispatchmap(ctx, map)
759 end, 864 end,
760 865
761 -- Special case for nop with REX prefix. 866 -- Special case for nop with REX prefix.
762 nop = function(ctx, name, pat) 867 nop = function(ctx, name, pat)
763 return dispatch(ctx, ctx.rex and pat or "nop") 868 return dispatch(ctx, ctx.rex and pat or "nop")
764 end, 869 end,
870
871 -- Special case for 0F 77.
872 emms = function(ctx, name, pat)
873 if ctx.rex ~= "vex" then
874 return putop(ctx, "emms")
875 elseif ctx.vexl then
876 ctx.vexl = false
877 return putop(ctx, "zeroall")
878 else
879 return putop(ctx, "zeroupper")
880 end
881 end,
765} 882}
766 883
767------------------------------------------------------------------------------ 884------------------------------------------------------------------------------
@@ -782,7 +899,7 @@ local function disass_block(ctx, ofs, len)
782end 899end
783 900
784-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len). 901-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
785local function create_(code, addr, out) 902local function create(code, addr, out)
786 local ctx = {} 903 local ctx = {}
787 ctx.code = code 904 ctx.code = code
788 ctx.addr = (addr or 0) - 1 905 ctx.addr = (addr or 0) - 1
@@ -796,8 +913,8 @@ local function create_(code, addr, out)
796 return ctx 913 return ctx
797end 914end
798 915
799local function create64_(code, addr, out) 916local function create64(code, addr, out)
800 local ctx = create_(code, addr, out) 917 local ctx = create(code, addr, out)
801 ctx.x64 = true 918 ctx.x64 = true
802 ctx.map1 = map_opc1_64 919 ctx.map1 = map_opc1_64
803 ctx.aregs = map_regs.Q 920 ctx.aregs = map_regs.Q
@@ -805,32 +922,32 @@ local function create64_(code, addr, out)
805end 922end
806 923
807-- Simple API: disassemble code (a string) at address and output via out. 924-- Simple API: disassemble code (a string) at address and output via out.
808local function disass_(code, addr, out) 925local function disass(code, addr, out)
809 create_(code, addr, out):disass() 926 create(code, addr, out):disass()
810end 927end
811 928
812local function disass64_(code, addr, out) 929local function disass64(code, addr, out)
813 create64_(code, addr, out):disass() 930 create64(code, addr, out):disass()
814end 931end
815 932
816-- Return register name for RID. 933-- Return register name for RID.
817local function regname_(r) 934local function regname(r)
818 if r < 8 then return map_regs.D[r+1] end 935 if r < 8 then return map_regs.D[r+1] end
819 return map_regs.X[r-7] 936 return map_regs.X[r-7]
820end 937end
821 938
822local function regname64_(r) 939local function regname64(r)
823 if r < 16 then return map_regs.Q[r+1] end 940 if r < 16 then return map_regs.Q[r+1] end
824 return map_regs.X[r-15] 941 return map_regs.X[r-15]
825end 942end
826 943
827-- Public module functions. 944-- Public module functions.
828module(...) 945return {
829 946 create = create,
830create = create_ 947 create64 = create64,
831create64 = create64_ 948 disass = disass,
832disass = disass_ 949 disass64 = disass64,
833disass64 = disass64_ 950 regname = regname,
834regname = regname_ 951 regname64 = regname64
835regname64 = regname64_ 952}
836 953
diff --git a/src/jit/dump.lua b/src/jit/dump.lua
index 6a2632c3..0cb38b58 100644
--- a/src/jit/dump.lua
+++ b/src/jit/dump.lua
@@ -55,7 +55,7 @@
55 55
56-- Cache some library functions and objects. 56-- Cache some library functions and objects.
57local jit = require("jit") 57local jit = require("jit")
58assert(jit.version_num == 20005, "LuaJIT core/library version mismatch") 58assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
59local jutil = require("jit.util") 59local jutil = require("jit.util")
60local vmdef = require("jit.vmdef") 60local vmdef = require("jit.vmdef")
61local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc 61local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc
@@ -63,7 +63,7 @@ local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek
63local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap 63local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap
64local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr 64local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr
65local bit = require("bit") 65local bit = require("bit")
66local band, shr = bit.band, bit.rshift 66local band, shr, tohex = bit.band, bit.rshift, bit.tohex
67local sub, gsub, format = string.sub, string.gsub, string.format 67local sub, gsub, format = string.sub, string.gsub, string.format
68local byte, rep = string.byte, string.rep 68local byte, rep = string.byte, string.rep
69local type, tostring = type, tostring 69local type, tostring = type, tostring
@@ -85,12 +85,13 @@ local nexitsym = 0
85local function fillsymtab_tr(tr, nexit) 85local function fillsymtab_tr(tr, nexit)
86 local t = {} 86 local t = {}
87 symtabmt.__index = t 87 symtabmt.__index = t
88 if jit.arch == "mips" or jit.arch == "mipsel" then 88 if jit.arch:sub(1, 4) == "mips" then
89 t[traceexitstub(tr, 0)] = "exit" 89 t[traceexitstub(tr, 0)] = "exit"
90 return 90 return
91 end 91 end
92 for i=0,nexit-1 do 92 for i=0,nexit-1 do
93 local addr = traceexitstub(tr, i) 93 local addr = traceexitstub(tr, i)
94 if addr < 0 then addr = addr + 2^32 end
94 t[addr] = tostring(i) 95 t[addr] = tostring(i)
95 end 96 end
96 local addr = traceexitstub(tr, nexit) 97 local addr = traceexitstub(tr, nexit)
@@ -104,7 +105,10 @@ local function fillsymtab(tr, nexit)
104 local ircall = vmdef.ircall 105 local ircall = vmdef.ircall
105 for i=0,#ircall do 106 for i=0,#ircall do
106 local addr = ircalladdr(i) 107 local addr = ircalladdr(i)
107 if addr ~= 0 then t[addr] = ircall[i] end 108 if addr ~= 0 then
109 if addr < 0 then addr = addr + 2^32 end
110 t[addr] = ircall[i]
111 end
108 end 112 end
109 end 113 end
110 if nexitsym == 1000000 then -- Per-trace exit stubs. 114 if nexitsym == 1000000 then -- Per-trace exit stubs.
@@ -118,6 +122,7 @@ local function fillsymtab(tr, nexit)
118 nexit = 1000000 122 nexit = 1000000
119 break 123 break
120 end 124 end
125 if addr < 0 then addr = addr + 2^32 end
121 t[addr] = tostring(i) 126 t[addr] = tostring(i)
122 end 127 end
123 nexitsym = nexit 128 nexitsym = nexit
@@ -136,6 +141,7 @@ local function dump_mcode(tr)
136 local mcode, addr, loop = tracemc(tr) 141 local mcode, addr, loop = tracemc(tr)
137 if not mcode then return end 142 if not mcode then return end
138 if not disass then disass = require("jit.dis_"..jit.arch) end 143 if not disass then disass = require("jit.dis_"..jit.arch) end
144 if addr < 0 then addr = addr + 2^32 end
139 out:write("---- TRACE ", tr, " mcode ", #mcode, "\n") 145 out:write("---- TRACE ", tr, " mcode ", #mcode, "\n")
140 local ctx = disass.create(mcode, addr, dumpwrite) 146 local ctx = disass.create(mcode, addr, dumpwrite)
141 ctx.hexdump = 0 147 ctx.hexdump = 0
@@ -270,8 +276,7 @@ local litname = {
270 ["CONV "] = setmetatable({}, { __index = function(t, mode) 276 ["CONV "] = setmetatable({}, { __index = function(t, mode)
271 local s = irtype[band(mode, 31)] 277 local s = irtype[band(mode, 31)]
272 s = irtype[band(shr(mode, 5), 31)].."."..s 278 s = irtype[band(shr(mode, 5), 31)].."."..s
273 if band(mode, 0x400) ~= 0 then s = s.." trunc" 279 if band(mode, 0x800) ~= 0 then s = s.." sext" end
274 elseif band(mode, 0x800) ~= 0 then s = s.." sext" end
275 local c = shr(mode, 14) 280 local c = shr(mode, 14)
276 if c == 2 then s = s.." index" elseif c == 3 then s = s.." check" end 281 if c == 2 then s = s.." index" elseif c == 3 then s = s.." check" end
277 t[mode] = s 282 t[mode] = s
@@ -280,6 +285,8 @@ local litname = {
280 ["FLOAD "] = vmdef.irfield, 285 ["FLOAD "] = vmdef.irfield,
281 ["FREF "] = vmdef.irfield, 286 ["FREF "] = vmdef.irfield,
282 ["FPMATH"] = vmdef.irfpm, 287 ["FPMATH"] = vmdef.irfpm,
288 ["BUFHDR"] = { [0] = "RESET", "APPEND" },
289 ["TOSTR "] = { [0] = "INT", "NUM", "CHAR" },
283} 290}
284 291
285local function ctlsub(c) 292local function ctlsub(c)
@@ -303,15 +310,17 @@ local function fmtfunc(func, pc)
303 end 310 end
304end 311end
305 312
306local function formatk(tr, idx) 313local function formatk(tr, idx, sn)
307 local k, t, slot = tracek(tr, idx) 314 local k, t, slot = tracek(tr, idx)
308 local tn = type(k) 315 local tn = type(k)
309 local s 316 local s
310 if tn == "number" then 317 if tn == "number" then
311 if k == 2^52+2^51 then 318 if band(sn or 0, 0x30000) ~= 0 then
319 s = band(sn, 0x20000) ~= 0 and "contpc" or "ftsz"
320 elseif k == 2^52+2^51 then
312 s = "bias" 321 s = "bias"
313 else 322 else
314 s = format("%+.14g", k) 323 s = format(0 < k and k < 0x1p-1026 and "%+a" or "%+.14g", k)
315 end 324 end
316 elseif tn == "string" then 325 elseif tn == "string" then
317 s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub)) 326 s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub))
@@ -329,6 +338,8 @@ local function formatk(tr, idx)
329 elseif t == 21 then -- int64_t 338 elseif t == 21 then -- int64_t
330 s = sub(tostring(k), 1, -3) 339 s = sub(tostring(k), 1, -3)
331 if sub(s, 1, 1) ~= "-" then s = "+"..s end 340 if sub(s, 1, 1) ~= "-" then s = "+"..s end
341 elseif sn == 0x1057fff then -- SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)
342 return "----" -- Special case for LJ_FR2 slot 1.
332 else 343 else
333 s = tostring(k) -- For primitives. 344 s = tostring(k) -- For primitives.
334 end 345 end
@@ -347,7 +358,7 @@ local function printsnap(tr, snap)
347 n = n + 1 358 n = n + 1
348 local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS 359 local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS
349 if ref < 0 then 360 if ref < 0 then
350 out:write(formatk(tr, ref)) 361 out:write(formatk(tr, ref, sn))
351 elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM 362 elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM
352 out:write(colorize(format("%04d/%04d", ref, ref+1), 14)) 363 out:write(colorize(format("%04d/%04d", ref, ref+1), 14))
353 else 364 else
@@ -545,7 +556,7 @@ local function dump_trace(what, tr, func, pc, otr, oex)
545 if what == "start" then 556 if what == "start" then
546 if dumpmode.H then out:write('<pre class="ljdump">\n') end 557 if dumpmode.H then out:write('<pre class="ljdump">\n') end
547 out:write("---- TRACE ", tr, " ", what) 558 out:write("---- TRACE ", tr, " ", what)
548 if otr then out:write(" ", otr, "/", oex) end 559 if otr then out:write(" ", otr, "/", oex == -1 and "stitch" or oex) end
549 out:write(" ", fmtfunc(func, pc), "\n") 560 out:write(" ", fmtfunc(func, pc), "\n")
550 elseif what == "stop" or what == "abort" then 561 elseif what == "stop" or what == "abort" then
551 out:write("---- TRACE ", tr, " ", what) 562 out:write("---- TRACE ", tr, " ", what)
@@ -608,7 +619,7 @@ local function dump_texit(tr, ex, ngpr, nfpr, ...)
608 end 619 end
609 else 620 else
610 for i=1,ngpr do 621 for i=1,ngpr do
611 out:write(format(" %08x", regs[i])) 622 out:write(" ", tohex(regs[i]))
612 if i % 8 == 0 then out:write("\n") end 623 if i % 8 == 0 then out:write("\n") end
613 end 624 end
614 end 625 end
@@ -693,9 +704,9 @@ local function dumpon(opt, outfile)
693end 704end
694 705
695-- Public module functions. 706-- Public module functions.
696module(...) 707return {
697 708 on = dumpon,
698on = dumpon 709 off = dumpoff,
699off = dumpoff 710 start = dumpon -- For -j command line option.
700start = dumpon -- For -j command line option. 711}
701 712
diff --git a/src/jit/p.lua b/src/jit/p.lua
new file mode 100644
index 00000000..ac3ec40a
--- /dev/null
+++ b/src/jit/p.lua
@@ -0,0 +1,311 @@
1----------------------------------------------------------------------------
2-- LuaJIT profiler.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7--
8-- This module is a simple command line interface to the built-in
9-- low-overhead profiler of LuaJIT.
10--
11-- The lower-level API of the profiler is accessible via the "jit.profile"
12-- module or the luaJIT_profile_* C API.
13--
14-- Example usage:
15--
16-- luajit -jp myapp.lua
17-- luajit -jp=s myapp.lua
18-- luajit -jp=-s myapp.lua
19-- luajit -jp=vl myapp.lua
20-- luajit -jp=G,profile.txt myapp.lua
21--
22-- The following dump features are available:
23--
24-- f Stack dump: function name, otherwise module:line. Default mode.
25-- F Stack dump: ditto, but always prepend module.
26-- l Stack dump: module:line.
27-- <number> stack dump depth (callee < caller). Default: 1.
28-- -<number> Inverse stack dump depth (caller > callee).
29-- s Split stack dump after first stack level. Implies abs(depth) >= 2.
30-- p Show full path for module names.
31-- v Show VM states. Can be combined with stack dumps, e.g. vf or fv.
32-- z Show zones. Can be combined with stack dumps, e.g. zf or fz.
33-- r Show raw sample counts. Default: show percentages.
34-- a Annotate excerpts from source code files.
35-- A Annotate complete source code files.
36-- G Produce raw output suitable for graphical tools (e.g. flame graphs).
37-- m<number> Minimum sample percentage to be shown. Default: 3.
38-- i<number> Sampling interval in milliseconds. Default: 10.
39--
40----------------------------------------------------------------------------
41
42-- Cache some library functions and objects.
43local jit = require("jit")
44assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
45local profile = require("jit.profile")
46local vmdef = require("jit.vmdef")
47local math = math
48local pairs, ipairs, tonumber, floor = pairs, ipairs, tonumber, math.floor
49local sort, format = table.sort, string.format
50local stdout = io.stdout
51local zone -- Load jit.zone module on demand.
52
53-- Output file handle.
54local out
55
56------------------------------------------------------------------------------
57
58local prof_ud
59local prof_states, prof_split, prof_min, prof_raw, prof_fmt, prof_depth
60local prof_ann, prof_count1, prof_count2, prof_samples
61
62local map_vmmode = {
63 N = "Compiled",
64 I = "Interpreted",
65 C = "C code",
66 G = "Garbage Collector",
67 J = "JIT Compiler",
68}
69
70-- Profiler callback.
71local function prof_cb(th, samples, vmmode)
72 prof_samples = prof_samples + samples
73 local key_stack, key_stack2, key_state
74 -- Collect keys for sample.
75 if prof_states then
76 if prof_states == "v" then
77 key_state = map_vmmode[vmmode] or vmmode
78 else
79 key_state = zone:get() or "(none)"
80 end
81 end
82 if prof_fmt then
83 key_stack = profile.dumpstack(th, prof_fmt, prof_depth)
84 key_stack = key_stack:gsub("%[builtin#(%d+)%]", function(x)
85 return vmdef.ffnames[tonumber(x)]
86 end)
87 if prof_split == 2 then
88 local k1, k2 = key_stack:match("(.-) [<>] (.*)")
89 if k2 then key_stack, key_stack2 = k1, k2 end
90 elseif prof_split == 3 then
91 key_stack2 = profile.dumpstack(th, "l", 1)
92 end
93 end
94 -- Order keys.
95 local k1, k2
96 if prof_split == 1 then
97 if key_state then
98 k1 = key_state
99 if key_stack then k2 = key_stack end
100 end
101 elseif key_stack then
102 k1 = key_stack
103 if key_stack2 then k2 = key_stack2 elseif key_state then k2 = key_state end
104 end
105 -- Coalesce samples in one or two levels.
106 if k1 then
107 local t1 = prof_count1
108 t1[k1] = (t1[k1] or 0) + samples
109 if k2 then
110 local t2 = prof_count2
111 local t3 = t2[k1]
112 if not t3 then t3 = {}; t2[k1] = t3 end
113 t3[k2] = (t3[k2] or 0) + samples
114 end
115 end
116end
117
118------------------------------------------------------------------------------
119
120-- Show top N list.
121local function prof_top(count1, count2, samples, indent)
122 local t, n = {}, 0
123 for k in pairs(count1) do
124 n = n + 1
125 t[n] = k
126 end
127 sort(t, function(a, b) return count1[a] > count1[b] end)
128 for i=1,n do
129 local k = t[i]
130 local v = count1[k]
131 local pct = floor(v*100/samples + 0.5)
132 if pct < prof_min then break end
133 if not prof_raw then
134 out:write(format("%s%2d%% %s\n", indent, pct, k))
135 elseif prof_raw == "r" then
136 out:write(format("%s%5d %s\n", indent, v, k))
137 else
138 out:write(format("%s %d\n", k, v))
139 end
140 if count2 then
141 local r = count2[k]
142 if r then
143 prof_top(r, nil, v, (prof_split == 3 or prof_split == 1) and " -- " or
144 (prof_depth < 0 and " -> " or " <- "))
145 end
146 end
147 end
148end
149
150-- Annotate source code
151local function prof_annotate(count1, samples)
152 local files = {}
153 local ms = 0
154 for k, v in pairs(count1) do
155 local pct = floor(v*100/samples + 0.5)
156 ms = math.max(ms, v)
157 if pct >= prof_min then
158 local file, line = k:match("^(.*):(%d+)$")
159 if not file then file = k; line = 0 end
160 local fl = files[file]
161 if not fl then fl = {}; files[file] = fl; files[#files+1] = file end
162 line = tonumber(line)
163 fl[line] = prof_raw and v or pct
164 end
165 end
166 sort(files)
167 local fmtv, fmtn = " %3d%% | %s\n", " | %s\n"
168 if prof_raw then
169 local n = math.max(5, math.ceil(math.log10(ms)))
170 fmtv = "%"..n.."d | %s\n"
171 fmtn = (" "):rep(n).." | %s\n"
172 end
173 local ann = prof_ann
174 for _, file in ipairs(files) do
175 local f0 = file:byte()
176 if f0 == 40 or f0 == 91 then
177 out:write(format("\n====== %s ======\n[Cannot annotate non-file]\n", file))
178 break
179 end
180 local fp, err = io.open(file)
181 if not fp then
182 out:write(format("====== ERROR: %s: %s\n", file, err))
183 break
184 end
185 out:write(format("\n====== %s ======\n", file))
186 local fl = files[file]
187 local n, show = 1, false
188 if ann ~= 0 then
189 for i=1,ann do
190 if fl[i] then show = true; out:write("@@ 1 @@\n"); break end
191 end
192 end
193 for line in fp:lines() do
194 if line:byte() == 27 then
195 out:write("[Cannot annotate bytecode file]\n")
196 break
197 end
198 local v = fl[n]
199 if ann ~= 0 then
200 local v2 = fl[n+ann]
201 if show then
202 if v2 then show = n+ann elseif v then show = n
203 elseif show+ann < n then show = false end
204 elseif v2 then
205 show = n+ann
206 out:write(format("@@ %d @@\n", n))
207 end
208 if not show then goto next end
209 end
210 if v then
211 out:write(format(fmtv, v, line))
212 else
213 out:write(format(fmtn, line))
214 end
215 ::next::
216 n = n + 1
217 end
218 fp:close()
219 end
220end
221
222------------------------------------------------------------------------------
223
224-- Finish profiling and dump result.
225local function prof_finish()
226 if prof_ud then
227 profile.stop()
228 local samples = prof_samples
229 if samples == 0 then
230 if prof_raw ~= true then out:write("[No samples collected]\n") end
231 return
232 end
233 if prof_ann then
234 prof_annotate(prof_count1, samples)
235 else
236 prof_top(prof_count1, prof_count2, samples, "")
237 end
238 prof_count1 = nil
239 prof_count2 = nil
240 prof_ud = nil
241 end
242end
243
244-- Start profiling.
245local function prof_start(mode)
246 local interval = ""
247 mode = mode:gsub("i%d*", function(s) interval = s; return "" end)
248 prof_min = 3
249 mode = mode:gsub("m(%d+)", function(s) prof_min = tonumber(s); return "" end)
250 prof_depth = 1
251 mode = mode:gsub("%-?%d+", function(s) prof_depth = tonumber(s); return "" end)
252 local m = {}
253 for c in mode:gmatch(".") do m[c] = c end
254 prof_states = m.z or m.v
255 if prof_states == "z" then zone = require("jit.zone") end
256 local scope = m.l or m.f or m.F or (prof_states and "" or "f")
257 local flags = (m.p or "")
258 prof_raw = m.r
259 if m.s then
260 prof_split = 2
261 if prof_depth == -1 or m["-"] then prof_depth = -2
262 elseif prof_depth == 1 then prof_depth = 2 end
263 elseif mode:find("[fF].*l") then
264 scope = "l"
265 prof_split = 3
266 else
267 prof_split = (scope == "" or mode:find("[zv].*[lfF]")) and 1 or 0
268 end
269 prof_ann = m.A and 0 or (m.a and 3)
270 if prof_ann then
271 scope = "l"
272 prof_fmt = "pl"
273 prof_split = 0
274 prof_depth = 1
275 elseif m.G and scope ~= "" then
276 prof_fmt = flags..scope.."Z;"
277 prof_depth = -100
278 prof_raw = true
279 prof_min = 0
280 elseif scope == "" then
281 prof_fmt = false
282 else
283 local sc = prof_split == 3 and m.f or m.F or scope
284 prof_fmt = flags..sc..(prof_depth >= 0 and "Z < " or "Z > ")
285 end
286 prof_count1 = {}
287 prof_count2 = {}
288 prof_samples = 0
289 profile.start(scope:lower()..interval, prof_cb)
290 prof_ud = newproxy(true)
291 getmetatable(prof_ud).__gc = prof_finish
292end
293
294------------------------------------------------------------------------------
295
296local function start(mode, outfile)
297 if not outfile then outfile = os.getenv("LUAJIT_PROFILEFILE") end
298 if outfile then
299 out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
300 else
301 out = stdout
302 end
303 prof_start(mode or "f")
304end
305
306-- Public module functions.
307return {
308 start = start, -- For -j command line option.
309 stop = prof_finish
310}
311
diff --git a/src/jit/v.lua b/src/jit/v.lua
index 9696f67f..e37466c6 100644
--- a/src/jit/v.lua
+++ b/src/jit/v.lua
@@ -59,7 +59,7 @@
59 59
60-- Cache some library functions and objects. 60-- Cache some library functions and objects.
61local jit = require("jit") 61local jit = require("jit")
62assert(jit.version_num == 20005, "LuaJIT core/library version mismatch") 62assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
63local jutil = require("jit.util") 63local jutil = require("jit.util")
64local vmdef = require("jit.vmdef") 64local vmdef = require("jit.vmdef")
65local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo 65local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo
@@ -99,7 +99,7 @@ end
99local function dump_trace(what, tr, func, pc, otr, oex) 99local function dump_trace(what, tr, func, pc, otr, oex)
100 if what == "start" then 100 if what == "start" then
101 startloc = fmtfunc(func, pc) 101 startloc = fmtfunc(func, pc)
102 startex = otr and "("..otr.."/"..oex..") " or "" 102 startex = otr and "("..otr.."/"..(oex == -1 and "stitch" or oex)..") " or ""
103 else 103 else
104 if what == "abort" then 104 if what == "abort" then
105 local loc = fmtfunc(func, pc) 105 local loc = fmtfunc(func, pc)
@@ -116,6 +116,9 @@ local function dump_trace(what, tr, func, pc, otr, oex)
116 if ltype == "interpreter" then 116 if ltype == "interpreter" then
117 out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n", 117 out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n",
118 tr, startex, startloc)) 118 tr, startex, startloc))
119 elseif ltype == "stitch" then
120 out:write(format("[TRACE %3s %s%s %s %s]\n",
121 tr, startex, startloc, ltype, fmtfunc(func, pc)))
119 elseif link == tr or link == 0 then 122 elseif link == tr or link == 0 then
120 out:write(format("[TRACE %3s %s%s %s]\n", 123 out:write(format("[TRACE %3s %s%s %s]\n",
121 tr, startex, startloc, ltype)) 124 tr, startex, startloc, ltype))
@@ -159,9 +162,9 @@ local function dumpon(outfile)
159end 162end
160 163
161-- Public module functions. 164-- Public module functions.
162module(...) 165return {
163 166 on = dumpon,
164on = dumpon 167 off = dumpoff,
165off = dumpoff 168 start = dumpon -- For -j command line option.
166start = dumpon -- For -j command line option. 169}
167 170
diff --git a/src/jit/zone.lua b/src/jit/zone.lua
new file mode 100644
index 00000000..a8b4f0ae
--- /dev/null
+++ b/src/jit/zone.lua
@@ -0,0 +1,45 @@
1----------------------------------------------------------------------------
2-- LuaJIT profiler zones.
3--
4-- Copyright (C) 2005-2020 Mike Pall. All rights reserved.
5-- Released under the MIT license. See Copyright Notice in luajit.h
6----------------------------------------------------------------------------
7--
8-- This module implements a simple hierarchical zone model.
9--
10-- Example usage:
11--
12-- local zone = require("jit.zone")
13-- zone("AI")
14-- ...
15-- zone("A*")
16-- ...
17-- print(zone:get()) --> "A*"
18-- ...
19-- zone()
20-- ...
21-- print(zone:get()) --> "AI"
22-- ...
23-- zone()
24--
25----------------------------------------------------------------------------
26
27local remove = table.remove
28
29return setmetatable({
30 flush = function(t)
31 for i=#t,1,-1 do t[i] = nil end
32 end,
33 get = function(t)
34 return t[#t]
35 end
36}, {
37 __call = function(t, zone)
38 if zone then
39 t[#t+1] = zone
40 else
41 return (assert(remove(t), "empty zone stack"))
42 end
43 end
44})
45
diff --git a/src/lauxlib.h b/src/lauxlib.h
index fed1491b..a44f0272 100644
--- a/src/lauxlib.h
+++ b/src/lauxlib.h
@@ -15,9 +15,6 @@
15#include "lua.h" 15#include "lua.h"
16 16
17 17
18#define luaL_getn(L,i) ((int)lua_objlen(L, i))
19#define luaL_setn(L,i,j) ((void)0) /* no op! */
20
21/* extra error code for `luaL_load' */ 18/* extra error code for `luaL_load' */
22#define LUA_ERRFILE (LUA_ERRERR+1) 19#define LUA_ERRFILE (LUA_ERRERR+1)
23 20
@@ -58,6 +55,10 @@ LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...);
58LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, 55LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def,
59 const char *const lst[]); 56 const char *const lst[]);
60 57
58/* pre-defined references */
59#define LUA_NOREF (-2)
60#define LUA_REFNIL (-1)
61
61LUALIB_API int (luaL_ref) (lua_State *L, int t); 62LUALIB_API int (luaL_ref) (lua_State *L, int t);
62LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); 63LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref);
63 64
@@ -84,6 +85,11 @@ LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz,
84 const char *name, const char *mode); 85 const char *name, const char *mode);
85LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, 86LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
86 int level); 87 int level);
88LUALIB_API void (luaL_setfuncs) (lua_State *L, const luaL_Reg *l, int nup);
89LUALIB_API void (luaL_pushmodule) (lua_State *L, const char *modname,
90 int sizehint);
91LUALIB_API void *(luaL_testudata) (lua_State *L, int ud, const char *tname);
92LUALIB_API void (luaL_setmetatable) (lua_State *L, const char *tname);
87 93
88 94
89/* 95/*
@@ -113,6 +119,11 @@ LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
113 119
114#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n))) 120#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n)))
115 121
122/* From Lua 5.2. */
123#define luaL_newlibtable(L, l) \
124 lua_createtable(L, 0, sizeof(l)/sizeof((l)[0]) - 1)
125#define luaL_newlib(L, l) (luaL_newlibtable(L, l), luaL_setfuncs(L, l, 0))
126
116/* 127/*
117** {====================================================== 128** {======================================================
118** Generic Buffer manipulation 129** Generic Buffer manipulation
@@ -147,21 +158,4 @@ LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
147 158
148/* }====================================================== */ 159/* }====================================================== */
149 160
150
151/* compatibility with ref system */
152
153/* pre-defined references */
154#define LUA_NOREF (-2)
155#define LUA_REFNIL (-1)
156
157#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \
158 (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0))
159
160#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref))
161
162#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref))
163
164
165#define luaL_reg luaL_Reg
166
167#endif 161#endif
diff --git a/src/lib_aux.c b/src/lib_aux.c
index f29ca848..35866f8d 100644
--- a/src/lib_aux.c
+++ b/src/lib_aux.c
@@ -107,38 +107,36 @@ LUALIB_API const char *luaL_findtable(lua_State *L, int idx,
107static int libsize(const luaL_Reg *l) 107static int libsize(const luaL_Reg *l)
108{ 108{
109 int size = 0; 109 int size = 0;
110 for (; l->name; l++) size++; 110 for (; l && l->name; l++) size++;
111 return size; 111 return size;
112} 112}
113 113
114LUALIB_API void luaL_pushmodule(lua_State *L, const char *modname, int sizehint)
115{
116 luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
117 lua_getfield(L, -1, modname);
118 if (!lua_istable(L, -1)) {
119 lua_pop(L, 1);
120 if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, sizehint) != NULL)
121 lj_err_callerv(L, LJ_ERR_BADMODN, modname);
122 lua_pushvalue(L, -1);
123 lua_setfield(L, -3, modname); /* _LOADED[modname] = new table. */
124 }
125 lua_remove(L, -2); /* Remove _LOADED table. */
126}
127
114LUALIB_API void luaL_openlib(lua_State *L, const char *libname, 128LUALIB_API void luaL_openlib(lua_State *L, const char *libname,
115 const luaL_Reg *l, int nup) 129 const luaL_Reg *l, int nup)
116{ 130{
117 lj_lib_checkfpu(L); 131 lj_lib_checkfpu(L);
118 if (libname) { 132 if (libname) {
119 int size = libsize(l); 133 luaL_pushmodule(L, libname, libsize(l));
120 /* check whether lib already exists */ 134 lua_insert(L, -(nup + 1)); /* Move module table below upvalues. */
121 luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
122 lua_getfield(L, -1, libname); /* get _LOADED[libname] */
123 if (!lua_istable(L, -1)) { /* not found? */
124 lua_pop(L, 1); /* remove previous result */
125 /* try global variable (and create one if it does not exist) */
126 if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
127 lj_err_callerv(L, LJ_ERR_BADMODN, libname);
128 lua_pushvalue(L, -1);
129 lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
130 }
131 lua_remove(L, -2); /* remove _LOADED table */
132 lua_insert(L, -(nup+1)); /* move library table to below upvalues */
133 } 135 }
134 for (; l->name; l++) { 136 if (l)
135 int i; 137 luaL_setfuncs(L, l, nup);
136 for (i = 0; i < nup; i++) /* copy upvalues to the top */ 138 else
137 lua_pushvalue(L, -nup); 139 lua_pop(L, nup); /* Remove upvalues. */
138 lua_pushcclosure(L, l->func, nup);
139 lua_setfield(L, -(nup+2), l->name);
140 }
141 lua_pop(L, nup); /* remove upvalues */
142} 140}
143 141
144LUALIB_API void luaL_register(lua_State *L, const char *libname, 142LUALIB_API void luaL_register(lua_State *L, const char *libname,
@@ -147,6 +145,19 @@ LUALIB_API void luaL_register(lua_State *L, const char *libname,
147 luaL_openlib(L, libname, l, 0); 145 luaL_openlib(L, libname, l, 0);
148} 146}
149 147
148LUALIB_API void luaL_setfuncs(lua_State *L, const luaL_Reg *l, int nup)
149{
150 luaL_checkstack(L, nup, "too many upvalues");
151 for (; l->name; l++) {
152 int i;
153 for (i = 0; i < nup; i++) /* Copy upvalues to the top. */
154 lua_pushvalue(L, -nup);
155 lua_pushcclosure(L, l->func, nup);
156 lua_setfield(L, -(nup + 2), l->name);
157 }
158 lua_pop(L, nup); /* Remove upvalues. */
159}
160
150LUALIB_API const char *luaL_gsub(lua_State *L, const char *s, 161LUALIB_API const char *luaL_gsub(lua_State *L, const char *s,
151 const char *p, const char *r) 162 const char *p, const char *r)
152{ 163{
@@ -207,8 +218,15 @@ LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B)
207 218
208LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l) 219LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l)
209{ 220{
210 while (l--) 221 if (l <= bufffree(B)) {
211 luaL_addchar(B, *s++); 222 memcpy(B->p, s, l);
223 B->p += l;
224 } else {
225 emptybuffer(B);
226 lua_pushlstring(B->L, s, l);
227 B->lvl++;
228 adjuststack(B);
229 }
212} 230}
213 231
214LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s) 232LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s)
@@ -302,7 +320,7 @@ static int panic(lua_State *L)
302 320
303#ifdef LUAJIT_USE_SYSMALLOC 321#ifdef LUAJIT_USE_SYSMALLOC
304 322
305#if LJ_64 && !defined(LUAJIT_USE_VALGRIND) 323#if LJ_64 && !LJ_GC64 && !defined(LUAJIT_USE_VALGRIND)
306#error "Must use builtin allocator for 64 bit target" 324#error "Must use builtin allocator for 64 bit target"
307#endif 325#endif
308 326
@@ -327,23 +345,19 @@ LUALIB_API lua_State *luaL_newstate(void)
327 345
328#else 346#else
329 347
330#include "lj_alloc.h"
331
332LUALIB_API lua_State *luaL_newstate(void) 348LUALIB_API lua_State *luaL_newstate(void)
333{ 349{
334 lua_State *L; 350 lua_State *L;
335 void *ud = lj_alloc_create(); 351#if LJ_64 && !LJ_GC64
336 if (ud == NULL) return NULL; 352 L = lj_state_newstate(LJ_ALLOCF_INTERNAL, NULL);
337#if LJ_64
338 L = lj_state_newstate(lj_alloc_f, ud);
339#else 353#else
340 L = lua_newstate(lj_alloc_f, ud); 354 L = lua_newstate(LJ_ALLOCF_INTERNAL, NULL);
341#endif 355#endif
342 if (L) G(L)->panic = panic; 356 if (L) G(L)->panic = panic;
343 return L; 357 return L;
344} 358}
345 359
346#if LJ_64 360#if LJ_64 && !LJ_GC64
347LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) 361LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
348{ 362{
349 UNUSED(f); UNUSED(ud); 363 UNUSED(f); UNUSED(ud);
diff --git a/src/lib_base.c b/src/lib_base.c
index 99f7b44a..eb604538 100644
--- a/src/lib_base.c
+++ b/src/lib_base.c
@@ -23,6 +23,7 @@
23#include "lj_tab.h" 23#include "lj_tab.h"
24#include "lj_meta.h" 24#include "lj_meta.h"
25#include "lj_state.h" 25#include "lj_state.h"
26#include "lj_frame.h"
26#if LJ_HASFFI 27#if LJ_HASFFI
27#include "lj_ctype.h" 28#include "lj_ctype.h"
28#include "lj_cconv.h" 29#include "lj_cconv.h"
@@ -32,6 +33,7 @@
32#include "lj_dispatch.h" 33#include "lj_dispatch.h"
33#include "lj_char.h" 34#include "lj_char.h"
34#include "lj_strscan.h" 35#include "lj_strscan.h"
36#include "lj_strfmt.h"
35#include "lj_lib.h" 37#include "lj_lib.h"
36 38
37/* -- Base library: checks ------------------------------------------------ */ 39/* -- Base library: checks ------------------------------------------------ */
@@ -40,13 +42,13 @@
40 42
41LJLIB_ASM(assert) LJLIB_REC(.) 43LJLIB_ASM(assert) LJLIB_REC(.)
42{ 44{
43 GCstr *s;
44 lj_lib_checkany(L, 1); 45 lj_lib_checkany(L, 1);
45 s = lj_lib_optstr(L, 2); 46 if (L->top == L->base+1)
46 if (s)
47 lj_err_callermsg(L, strdata(s));
48 else
49 lj_err_caller(L, LJ_ERR_ASSERT); 47 lj_err_caller(L, LJ_ERR_ASSERT);
48 else if (tvisstr(L->base+1) || tvisnumber(L->base+1))
49 lj_err_callermsg(L, strdata(lj_lib_checkstr(L, 2)));
50 else
51 lj_err_run(L);
50 return FFH_UNREACHABLE; 52 return FFH_UNREACHABLE;
51} 53}
52 54
@@ -86,10 +88,11 @@ static int ffh_pairs(lua_State *L, MMS mm)
86 cTValue *mo = lj_meta_lookup(L, o, mm); 88 cTValue *mo = lj_meta_lookup(L, o, mm);
87 if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) { 89 if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) {
88 L->top = o+1; /* Only keep one argument. */ 90 L->top = o+1; /* Only keep one argument. */
89 copyTV(L, L->base-1, mo); /* Replace callable. */ 91 copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */
90 return FFH_TAILCALL; 92 return FFH_TAILCALL;
91 } else { 93 } else {
92 if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE); 94 if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
95 if (LJ_FR2) { copyTV(L, o-1, o); o--; }
93 setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1))); 96 setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
94 if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0); 97 if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
95 return FFH_RES(3); 98 return FFH_RES(3);
@@ -100,7 +103,7 @@ static int ffh_pairs(lua_State *L, MMS mm)
100#endif 103#endif
101 104
102LJLIB_PUSH(lastcl) 105LJLIB_PUSH(lastcl)
103LJLIB_ASM(pairs) 106LJLIB_ASM(pairs) LJLIB_REC(xpairs 0)
104{ 107{
105 return ffh_pairs(L, MM_pairs); 108 return ffh_pairs(L, MM_pairs);
106} 109}
@@ -113,7 +116,7 @@ LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
113} 116}
114 117
115LJLIB_PUSH(lastcl) 118LJLIB_PUSH(lastcl)
116LJLIB_ASM(ipairs) LJLIB_REC(.) 119LJLIB_ASM(ipairs) LJLIB_REC(xpairs 1)
117{ 120{
118 return ffh_pairs(L, MM_ipairs); 121 return ffh_pairs(L, MM_ipairs);
119} 122}
@@ -131,11 +134,11 @@ LJLIB_ASM(setmetatable) LJLIB_REC(.)
131 lj_err_caller(L, LJ_ERR_PROTMT); 134 lj_err_caller(L, LJ_ERR_PROTMT);
132 setgcref(t->metatable, obj2gco(mt)); 135 setgcref(t->metatable, obj2gco(mt));
133 if (mt) { lj_gc_objbarriert(L, t, mt); } 136 if (mt) { lj_gc_objbarriert(L, t, mt); }
134 settabV(L, L->base-1, t); 137 settabV(L, L->base-1-LJ_FR2, t);
135 return FFH_RES(1); 138 return FFH_RES(1);
136} 139}
137 140
138LJLIB_CF(getfenv) 141LJLIB_CF(getfenv) LJLIB_REC(.)
139{ 142{
140 GCfunc *fn; 143 GCfunc *fn;
141 cTValue *o = L->base; 144 cTValue *o = L->base;
@@ -144,6 +147,7 @@ LJLIB_CF(getfenv)
144 o = lj_debug_frame(L, level, &level); 147 o = lj_debug_frame(L, level, &level);
145 if (o == NULL) 148 if (o == NULL)
146 lj_err_arg(L, 1, LJ_ERR_INVLVL); 149 lj_err_arg(L, 1, LJ_ERR_INVLVL);
150 if (LJ_FR2) o--;
147 } 151 }
148 fn = &gcval(o)->fn; 152 fn = &gcval(o)->fn;
149 settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env)); 153 settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env));
@@ -165,6 +169,7 @@ LJLIB_CF(setfenv)
165 o = lj_debug_frame(L, level, &level); 169 o = lj_debug_frame(L, level, &level);
166 if (o == NULL) 170 if (o == NULL)
167 lj_err_arg(L, 1, LJ_ERR_INVLVL); 171 lj_err_arg(L, 1, LJ_ERR_INVLVL);
172 if (LJ_FR2) o--;
168 } 173 }
169 fn = &gcval(o)->fn; 174 fn = &gcval(o)->fn;
170 if (!isluafunc(fn)) 175 if (!isluafunc(fn))
@@ -259,7 +264,7 @@ LJLIB_ASM(tonumber) LJLIB_REC(.)
259 if (base == 10) { 264 if (base == 10) {
260 TValue *o = lj_lib_checkany(L, 1); 265 TValue *o = lj_lib_checkany(L, 1);
261 if (lj_strscan_numberobj(o)) { 266 if (lj_strscan_numberobj(o)) {
262 copyTV(L, L->base-1, o); 267 copyTV(L, L->base-1-LJ_FR2, o);
263 return FFH_RES(1); 268 return FFH_RES(1);
264 } 269 }
265#if LJ_HASFFI 270#if LJ_HASFFI
@@ -272,11 +277,11 @@ LJLIB_ASM(tonumber) LJLIB_REC(.)
272 ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) { 277 ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) {
273 int32_t i; 278 int32_t i;
274 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0); 279 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0);
275 setintV(L->base-1, i); 280 setintV(L->base-1-LJ_FR2, i);
276 return FFH_RES(1); 281 return FFH_RES(1);
277 } 282 }
278 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE), 283 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE),
279 (uint8_t *)&(L->base-1)->n, o, 0); 284 (uint8_t *)&(L->base-1-LJ_FR2)->n, o, 0);
280 return FFH_RES(1); 285 return FFH_RES(1);
281 } 286 }
282 } 287 }
@@ -284,53 +289,46 @@ LJLIB_ASM(tonumber) LJLIB_REC(.)
284 } else { 289 } else {
285 const char *p = strdata(lj_lib_checkstr(L, 1)); 290 const char *p = strdata(lj_lib_checkstr(L, 1));
286 char *ep; 291 char *ep;
292 unsigned int neg = 0;
287 unsigned long ul; 293 unsigned long ul;
288 if (base < 2 || base > 36) 294 if (base < 2 || base > 36)
289 lj_err_arg(L, 2, LJ_ERR_BASERNG); 295 lj_err_arg(L, 2, LJ_ERR_BASERNG);
290 ul = strtoul(p, &ep, base); 296 while (lj_char_isspace((unsigned char)(*p))) p++;
291 if (p != ep) { 297 if (*p == '-') { p++; neg = 1; } else if (*p == '+') { p++; }
292 while (lj_char_isspace((unsigned char)(*ep))) ep++; 298 if (lj_char_isalnum((unsigned char)(*p))) {
293 if (*ep == '\0') { 299 ul = strtoul(p, &ep, base);
294 if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u)) 300 if (p != ep) {
295 setintV(L->base-1, (int32_t)ul); 301 while (lj_char_isspace((unsigned char)(*ep))) ep++;
296 else 302 if (*ep == '\0') {
297 setnumV(L->base-1, (lua_Number)ul); 303 if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u+neg)) {
298 return FFH_RES(1); 304 if (neg) ul = (unsigned long)-(long)ul;
305 setintV(L->base-1-LJ_FR2, (int32_t)ul);
306 } else {
307 lua_Number n = (lua_Number)ul;
308 if (neg) n = -n;
309 setnumV(L->base-1-LJ_FR2, n);
310 }
311 return FFH_RES(1);
312 }
299 } 313 }
300 } 314 }
301 } 315 }
302 setnilV(L->base-1); 316 setnilV(L->base-1-LJ_FR2);
303 return FFH_RES(1); 317 return FFH_RES(1);
304} 318}
305 319
306LJLIB_PUSH("nil")
307LJLIB_PUSH("false")
308LJLIB_PUSH("true")
309LJLIB_ASM(tostring) LJLIB_REC(.) 320LJLIB_ASM(tostring) LJLIB_REC(.)
310{ 321{
311 TValue *o = lj_lib_checkany(L, 1); 322 TValue *o = lj_lib_checkany(L, 1);
312 cTValue *mo; 323 cTValue *mo;
313 L->top = o+1; /* Only keep one argument. */ 324 L->top = o+1; /* Only keep one argument. */
314 if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { 325 if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
315 copyTV(L, L->base-1, mo); /* Replace callable. */ 326 copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */
316 return FFH_TAILCALL; 327 return FFH_TAILCALL;
317 } else {
318 GCstr *s;
319 if (tvisnumber(o)) {
320 s = lj_str_fromnumber(L, o);
321 } else if (tvispri(o)) {
322 s = strV(lj_lib_upvalue(L, -(int32_t)itype(o)));
323 } else {
324 if (tvisfunc(o) && isffunc(funcV(o)))
325 lua_pushfstring(L, "function: builtin#%d", funcV(o)->c.ffid);
326 else
327 lua_pushfstring(L, "%s: %p", lj_typename(o), lua_topointer(L, 1));
328 /* Note: lua_pushfstring calls the GC which may invalidate o. */
329 s = strV(L->top-1);
330 }
331 setstrV(L, L->base-1, s);
332 return FFH_RES(1);
333 } 328 }
329 lj_gc_check(L);
330 setstrV(L, L->base-1-LJ_FR2, lj_strfmt_obj(L, L->base));
331 return FFH_RES(1);
334} 332}
335 333
336/* -- Base library: throw and catch errors -------------------------------- */ 334/* -- Base library: throw and catch errors -------------------------------- */
@@ -359,7 +357,7 @@ LJLIB_ASM_(xpcall) LJLIB_REC(.)
359 357
360static int load_aux(lua_State *L, int status, int envarg) 358static int load_aux(lua_State *L, int status, int envarg)
361{ 359{
362 if (status == 0) { 360 if (status == LUA_OK) {
363 if (tvistab(L->base+envarg-1)) { 361 if (tvistab(L->base+envarg-1)) {
364 GCfunc *fn = funcV(L->top-1); 362 GCfunc *fn = funcV(L->top-1);
365 GCtab *t = tabV(L->base+envarg-1); 363 GCtab *t = tabV(L->base+envarg-1);
@@ -432,7 +430,7 @@ LJLIB_CF(dofile)
432 GCstr *fname = lj_lib_optstr(L, 1); 430 GCstr *fname = lj_lib_optstr(L, 1);
433 setnilV(L->top); 431 setnilV(L->top);
434 L->top = L->base+1; 432 L->top = L->base+1;
435 if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0) 433 if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != LUA_OK)
436 lua_error(L); 434 lua_error(L);
437 lua_call(L, 0, LUA_MULTRET); 435 lua_call(L, 0, LUA_MULTRET);
438 return (int)(L->top - L->base) - 1; 436 return (int)(L->top - L->base) - 1;
@@ -442,20 +440,20 @@ LJLIB_CF(dofile)
442 440
443LJLIB_CF(gcinfo) 441LJLIB_CF(gcinfo)
444{ 442{
445 setintV(L->top++, (G(L)->gc.total >> 10)); 443 setintV(L->top++, (int32_t)(G(L)->gc.total >> 10));
446 return 1; 444 return 1;
447} 445}
448 446
449LJLIB_CF(collectgarbage) 447LJLIB_CF(collectgarbage)
450{ 448{
451 int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */ 449 int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */
452 "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul"); 450 "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul\1\377\11isrunning");
453 int32_t data = lj_lib_optint(L, 2, 0); 451 int32_t data = lj_lib_optint(L, 2, 0);
454 if (opt == LUA_GCCOUNT) { 452 if (opt == LUA_GCCOUNT) {
455 setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0); 453 setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0);
456 } else { 454 } else {
457 int res = lua_gc(L, opt, data); 455 int res = lua_gc(L, opt, data);
458 if (opt == LUA_GCSTEP) 456 if (opt == LUA_GCSTEP || opt == LUA_GCISRUNNING)
459 setboolV(L->top, res); 457 setboolV(L->top, res);
460 else 458 else
461 setintV(L->top, res); 459 setintV(L->top, res);
@@ -507,23 +505,14 @@ LJLIB_CF(print)
507 tv = L->top-1; 505 tv = L->top-1;
508 } 506 }
509 shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring) 507 shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring)
510 && !gcrefu(basemt_it(G(L), LJ_TNUMX)); 508 && !gcrefu(basemt_it(G(L), LJ_TNUMX));
511 for (i = 0; i < nargs; i++) { 509 for (i = 0; i < nargs; i++) {
510 cTValue *o = &L->base[i];
512 const char *str; 511 const char *str;
513 size_t size; 512 size_t size;
514 cTValue *o = &L->base[i]; 513 MSize len;
515 if (shortcut && tvisstr(o)) { 514 if (shortcut && (str = lj_strfmt_wstrnum(L, o, &len)) != NULL) {
516 str = strVdata(o); 515 size = len;
517 size = strV(o)->len;
518 } else if (shortcut && tvisint(o)) {
519 char buf[LJ_STR_INTBUF];
520 char *p = lj_str_bufint(buf, intV(o));
521 size = (size_t)(buf+LJ_STR_INTBUF-p);
522 str = p;
523 } else if (shortcut && tvisnum(o)) {
524 char buf[LJ_STR_NUMBUF];
525 size = lj_str_bufnum(buf, o);
526 str = buf;
527 } else { 516 } else {
528 copyTV(L, L->top+1, o); 517 copyTV(L, L->top+1, o);
529 copyTV(L, L->top, L->top-1); 518 copyTV(L, L->top, L->top-1);
@@ -560,8 +549,8 @@ LJLIB_CF(coroutine_status)
560 co = threadV(L->base); 549 co = threadV(L->base);
561 if (co == L) s = "running"; 550 if (co == L) s = "running";
562 else if (co->status == LUA_YIELD) s = "suspended"; 551 else if (co->status == LUA_YIELD) s = "suspended";
563 else if (co->status != 0) s = "dead"; 552 else if (co->status != LUA_OK) s = "dead";
564 else if (co->base > tvref(co->stack)+1) s = "normal"; 553 else if (co->base > tvref(co->stack)+1+LJ_FR2) s = "normal";
565 else if (co->top == co->base) s = "dead"; 554 else if (co->top == co->base) s = "dead";
566 else s = "suspended"; 555 else s = "suspended";
567 lua_pushstring(L, s); 556 lua_pushstring(L, s);
@@ -581,6 +570,12 @@ LJLIB_CF(coroutine_running)
581#endif 570#endif
582} 571}
583 572
573LJLIB_CF(coroutine_isyieldable)
574{
575 setboolV(L->top++, cframe_canyield(L->cframe));
576 return 1;
577}
578
584LJLIB_CF(coroutine_create) 579LJLIB_CF(coroutine_create)
585{ 580{
586 lua_State *L1; 581 lua_State *L1;
@@ -600,11 +595,11 @@ LJLIB_ASM(coroutine_yield)
600static int ffh_resume(lua_State *L, lua_State *co, int wrap) 595static int ffh_resume(lua_State *L, lua_State *co, int wrap)
601{ 596{
602 if (co->cframe != NULL || co->status > LUA_YIELD || 597 if (co->cframe != NULL || co->status > LUA_YIELD ||
603 (co->status == 0 && co->top == co->base)) { 598 (co->status == LUA_OK && co->top == co->base)) {
604 ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD; 599 ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD;
605 if (wrap) lj_err_caller(L, em); 600 if (wrap) lj_err_caller(L, em);
606 setboolV(L->base-1, 0); 601 setboolV(L->base-1-LJ_FR2, 0);
607 setstrV(L, L->base, lj_err_str(L, em)); 602 setstrV(L, L->base-LJ_FR2, lj_err_str(L, em));
608 return FFH_RES(2); 603 return FFH_RES(2);
609 } 604 }
610 lj_state_growstack(co, (MSize)(L->top - L->base)); 605 lj_state_growstack(co, (MSize)(L->top - L->base));
@@ -645,9 +640,10 @@ static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
645 640
646LJLIB_CF(coroutine_wrap) 641LJLIB_CF(coroutine_wrap)
647{ 642{
643 GCfunc *fn;
648 lj_cf_coroutine_create(L); 644 lj_cf_coroutine_create(L);
649 lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1); 645 fn = lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
650 setpc_wrap_aux(L, funcV(L->top-1)); 646 setpc_wrap_aux(L, fn);
651 return 1; 647 return 1;
652} 648}
653 649
diff --git a/src/lib_bit.c b/src/lib_bit.c
index c374d7a0..c4911450 100644
--- a/src/lib_bit.c
+++ b/src/lib_bit.c
@@ -12,26 +12,99 @@
12 12
13#include "lj_obj.h" 13#include "lj_obj.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_str.h" 15#include "lj_buf.h"
16#include "lj_strscan.h"
17#include "lj_strfmt.h"
18#if LJ_HASFFI
19#include "lj_ctype.h"
20#include "lj_cdata.h"
21#include "lj_cconv.h"
22#include "lj_carith.h"
23#endif
24#include "lj_ff.h"
16#include "lj_lib.h" 25#include "lj_lib.h"
17 26
18/* ------------------------------------------------------------------------ */ 27/* ------------------------------------------------------------------------ */
19 28
20#define LJLIB_MODULE_bit 29#define LJLIB_MODULE_bit
21 30
22LJLIB_ASM(bit_tobit) LJLIB_REC(bit_unary IR_TOBIT) 31#if LJ_HASFFI
32static int bit_result64(lua_State *L, CTypeID id, uint64_t x)
23{ 33{
34 GCcdata *cd = lj_cdata_new_(L, id, 8);
35 *(uint64_t *)cdataptr(cd) = x;
36 setcdataV(L, L->base-1-LJ_FR2, cd);
37 return FFH_RES(1);
38}
39#else
40static int32_t bit_checkbit(lua_State *L, int narg)
41{
42 TValue *o = L->base + narg-1;
43 if (!(o < L->top && lj_strscan_numberobj(o)))
44 lj_err_argt(L, narg, LUA_TNUMBER);
45 if (LJ_LIKELY(tvisint(o))) {
46 return intV(o);
47 } else {
48 int32_t i = lj_num2bit(numV(o));
49 if (LJ_DUALNUM) setintV(o, i);
50 return i;
51 }
52}
53#endif
54
55LJLIB_ASM(bit_tobit) LJLIB_REC(bit_tobit)
56{
57#if LJ_HASFFI
58 CTypeID id = 0;
59 setintV(L->base-1-LJ_FR2, (int32_t)lj_carith_check64(L, 1, &id));
60 return FFH_RES(1);
61#else
62 lj_lib_checknumber(L, 1);
63 return FFH_RETRY;
64#endif
65}
66
67LJLIB_ASM(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
68{
69#if LJ_HASFFI
70 CTypeID id = 0;
71 uint64_t x = lj_carith_check64(L, 1, &id);
72 return id ? bit_result64(L, id, ~x) : FFH_RETRY;
73#else
24 lj_lib_checknumber(L, 1); 74 lj_lib_checknumber(L, 1);
25 return FFH_RETRY; 75 return FFH_RETRY;
76#endif
77}
78
79LJLIB_ASM(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
80{
81#if LJ_HASFFI
82 CTypeID id = 0;
83 uint64_t x = lj_carith_check64(L, 1, &id);
84 return id ? bit_result64(L, id, lj_bswap64(x)) : FFH_RETRY;
85#else
86 lj_lib_checknumber(L, 1);
87 return FFH_RETRY;
88#endif
26} 89}
27LJLIB_ASM_(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
28LJLIB_ASM_(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
29 90
30LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL) 91LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL)
31{ 92{
93#if LJ_HASFFI
94 CTypeID id = 0, id2 = 0;
95 uint64_t x = lj_carith_check64(L, 1, &id);
96 int32_t sh = (int32_t)lj_carith_check64(L, 2, &id2);
97 if (id) {
98 x = lj_carith_shift64(x, sh, curr_func(L)->c.ffid - (int)FF_bit_lshift);
99 return bit_result64(L, id, x);
100 }
101 if (id2) setintV(L->base+1, sh);
102 return FFH_RETRY;
103#else
32 lj_lib_checknumber(L, 1); 104 lj_lib_checknumber(L, 1);
33 lj_lib_checkbit(L, 2); 105 bit_checkbit(L, 2);
34 return FFH_RETRY; 106 return FFH_RETRY;
107#endif
35} 108}
36LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR) 109LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR)
37LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR) 110LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR)
@@ -40,25 +113,58 @@ LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR)
40 113
41LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND) 114LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND)
42{ 115{
116#if LJ_HASFFI
117 CTypeID id = 0;
118 TValue *o = L->base, *top = L->top;
119 int i = 0;
120 do { lj_carith_check64(L, ++i, &id); } while (++o < top);
121 if (id) {
122 CTState *cts = ctype_cts(L);
123 CType *ct = ctype_get(cts, id);
124 int op = curr_func(L)->c.ffid - (int)FF_bit_bor;
125 uint64_t x, y = op >= 0 ? 0 : ~(uint64_t)0;
126 o = L->base;
127 do {
128 lj_cconv_ct_tv(cts, ct, (uint8_t *)&x, o, 0);
129 if (op < 0) y &= x; else if (op == 0) y |= x; else y ^= x;
130 } while (++o < top);
131 return bit_result64(L, id, y);
132 }
133 return FFH_RETRY;
134#else
43 int i = 0; 135 int i = 0;
44 do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top); 136 do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
45 return FFH_RETRY; 137 return FFH_RETRY;
138#endif
46} 139}
47LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR) 140LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR)
48LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR) 141LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR)
49 142
50/* ------------------------------------------------------------------------ */ 143/* ------------------------------------------------------------------------ */
51 144
52LJLIB_CF(bit_tohex) 145LJLIB_CF(bit_tohex) LJLIB_REC(.)
53{ 146{
54 uint32_t b = (uint32_t)lj_lib_checkbit(L, 1); 147#if LJ_HASFFI
55 int32_t i, n = L->base+1 >= L->top ? 8 : lj_lib_checkbit(L, 2); 148 CTypeID id = 0, id2 = 0;
56 const char *hexdigits = "0123456789abcdef"; 149 uint64_t b = lj_carith_check64(L, 1, &id);
57 char buf[8]; 150 int32_t n = L->base+1>=L->top ? (id ? 16 : 8) :
58 if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; } 151 (int32_t)lj_carith_check64(L, 2, &id2);
59 if (n > 8) n = 8; 152#else
60 for (i = n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; } 153 uint32_t b = (uint32_t)bit_checkbit(L, 1);
61 lua_pushlstring(L, buf, (size_t)n); 154 int32_t n = L->base+1>=L->top ? 8 : bit_checkbit(L, 2);
155#endif
156 SBuf *sb = lj_buf_tmp_(L);
157 SFormat sf = (STRFMT_UINT|STRFMT_T_HEX);
158 if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; }
159 sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC);
160#if LJ_HASFFI
161 if (n < 16) b &= ((uint64_t)1 << 4*n)-1;
162#else
163 if (n < 8) b &= (1u << 4*n)-1;
164#endif
165 sb = lj_strfmt_putfxint(sb, sf, b);
166 setstrV(L, L->top-1, lj_buf_str(L, sb));
167 lj_gc_check(L);
62 return 1; 168 return 1;
63} 169}
64 170
diff --git a/src/lib_debug.c b/src/lib_debug.c
index 07262830..6737c462 100644
--- a/src/lib_debug.c
+++ b/src/lib_debug.c
@@ -29,7 +29,7 @@ LJLIB_CF(debug_getregistry)
29 return 1; 29 return 1;
30} 30}
31 31
32LJLIB_CF(debug_getmetatable) 32LJLIB_CF(debug_getmetatable) LJLIB_REC(.)
33{ 33{
34 lj_lib_checkany(L, 1); 34 lj_lib_checkany(L, 1);
35 if (!lua_getmetatable(L, 1)) { 35 if (!lua_getmetatable(L, 1)) {
@@ -283,13 +283,13 @@ LJLIB_CF(debug_setuservalue)
283 283
284/* ------------------------------------------------------------------------ */ 284/* ------------------------------------------------------------------------ */
285 285
286static const char KEY_HOOK = 'h'; 286#define KEY_HOOK ((void *)0x3004)
287 287
288static void hookf(lua_State *L, lua_Debug *ar) 288static void hookf(lua_State *L, lua_Debug *ar)
289{ 289{
290 static const char *const hooknames[] = 290 static const char *const hooknames[] =
291 {"call", "return", "line", "count", "tail return"}; 291 {"call", "return", "line", "count", "tail return"};
292 lua_pushlightuserdata(L, (void *)&KEY_HOOK); 292 lua_pushlightuserdata(L, KEY_HOOK);
293 lua_rawget(L, LUA_REGISTRYINDEX); 293 lua_rawget(L, LUA_REGISTRYINDEX);
294 if (lua_isfunction(L, -1)) { 294 if (lua_isfunction(L, -1)) {
295 lua_pushstring(L, hooknames[(int)ar->event]); 295 lua_pushstring(L, hooknames[(int)ar->event]);
@@ -334,7 +334,7 @@ LJLIB_CF(debug_sethook)
334 count = luaL_optint(L, arg+3, 0); 334 count = luaL_optint(L, arg+3, 0);
335 func = hookf; mask = makemask(smask, count); 335 func = hookf; mask = makemask(smask, count);
336 } 336 }
337 lua_pushlightuserdata(L, (void *)&KEY_HOOK); 337 lua_pushlightuserdata(L, KEY_HOOK);
338 lua_pushvalue(L, arg+1); 338 lua_pushvalue(L, arg+1);
339 lua_rawset(L, LUA_REGISTRYINDEX); 339 lua_rawset(L, LUA_REGISTRYINDEX);
340 lua_sethook(L, func, mask, count); 340 lua_sethook(L, func, mask, count);
@@ -349,7 +349,7 @@ LJLIB_CF(debug_gethook)
349 if (hook != NULL && hook != hookf) { /* external hook? */ 349 if (hook != NULL && hook != hookf) { /* external hook? */
350 lua_pushliteral(L, "external hook"); 350 lua_pushliteral(L, "external hook");
351 } else { 351 } else {
352 lua_pushlightuserdata(L, (void *)&KEY_HOOK); 352 lua_pushlightuserdata(L, KEY_HOOK);
353 lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */ 353 lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
354 } 354 }
355 lua_pushstring(L, unmakemask(mask, buff)); 355 lua_pushstring(L, unmakemask(mask, buff));
diff --git a/src/lib_ffi.c b/src/lib_ffi.c
index c7a26f56..c2f73ed5 100644
--- a/src/lib_ffi.c
+++ b/src/lib_ffi.c
@@ -29,6 +29,7 @@
29#include "lj_ccall.h" 29#include "lj_ccall.h"
30#include "lj_ccallback.h" 30#include "lj_ccallback.h"
31#include "lj_clib.h" 31#include "lj_clib.h"
32#include "lj_strfmt.h"
32#include "lj_ff.h" 33#include "lj_ff.h"
33#include "lj_lib.h" 34#include "lj_lib.h"
34 35
@@ -137,7 +138,7 @@ static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm)
137 } 138 }
138 } 139 }
139 copyTV(L, base, L->top); 140 copyTV(L, base, L->top);
140 tv = L->top-1; 141 tv = L->top-1-LJ_FR2;
141 } 142 }
142 return lj_meta_tailcall(L, tv); 143 return lj_meta_tailcall(L, tv);
143} 144}
@@ -318,7 +319,7 @@ LJLIB_CF(ffi_meta___tostring)
318 } 319 }
319 } 320 }
320 } 321 }
321 lj_str_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p); 322 lj_strfmt_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p);
322checkgc: 323checkgc:
323 lj_gc_check(L); 324 lj_gc_check(L);
324 return 1; 325 return 1;
@@ -504,10 +505,7 @@ LJLIB_CF(ffi_new) LJLIB_REC(.)
504 } 505 }
505 if (sz == CTSIZE_INVALID) 506 if (sz == CTSIZE_INVALID)
506 lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE); 507 lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE);
507 if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN) 508 cd = lj_cdata_newx(cts, id, sz, info);
508 cd = lj_cdata_new(cts, id, sz);
509 else
510 cd = lj_cdata_newv(cts, id, sz, ctype_align(info));
511 setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */ 509 setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */
512 lj_cconv_ct_init(cts, ct, sz, cdataptr(cd), 510 lj_cconv_ct_init(cts, ct, sz, cdataptr(cd),
513 o, (MSize)(L->top - o)); /* Initialize cdata. */ 511 o, (MSize)(L->top - o)); /* Initialize cdata. */
@@ -558,6 +556,31 @@ LJLIB_CF(ffi_typeof) LJLIB_REC(.)
558 return 1; 556 return 1;
559} 557}
560 558
559/* Internal and unsupported API. */
560LJLIB_CF(ffi_typeinfo)
561{
562 CTState *cts = ctype_cts(L);
563 CTypeID id = (CTypeID)ffi_checkint(L, 1);
564 if (id > 0 && id < cts->top) {
565 CType *ct = ctype_get(cts, id);
566 GCtab *t;
567 lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
568 t = tabV(L->top-1);
569 setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "info")), (int32_t)ct->info);
570 if (ct->size != CTSIZE_INVALID)
571 setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "size")), (int32_t)ct->size);
572 if (ct->sib)
573 setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "sib")), (int32_t)ct->sib);
574 if (gcref(ct->name)) {
575 GCstr *s = gco2str(gcref(ct->name));
576 setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "name")), s);
577 }
578 lj_gc_check(L);
579 return 1;
580 }
581 return 0;
582}
583
561LJLIB_CF(ffi_istype) LJLIB_REC(.) 584LJLIB_CF(ffi_istype) LJLIB_REC(.)
562{ 585{
563 CTState *cts = ctype_cts(L); 586 CTState *cts = ctype_cts(L);
@@ -697,44 +720,47 @@ LJLIB_CF(ffi_fill) LJLIB_REC(.)
697 return 0; 720 return 0;
698} 721}
699 722
700#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
701
702/* Test ABI string. */ 723/* Test ABI string. */
703LJLIB_CF(ffi_abi) LJLIB_REC(.) 724LJLIB_CF(ffi_abi) LJLIB_REC(.)
704{ 725{
705 GCstr *s = lj_lib_checkstr(L, 1); 726 GCstr *s = lj_lib_checkstr(L, 1);
706 int b = 0; 727 int b = lj_cparse_case(s,
707 switch (s->hash) {
708#if LJ_64 728#if LJ_64
709 case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */ 729 "\00564bit"
710#else 730#else
711 case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */ 731 "\00532bit"
712#endif 732#endif
713#if LJ_ARCH_HASFPU 733#if LJ_ARCH_HASFPU
714 case H_(e33ee463,e33ee463): b = 1; break; /* fpu */ 734 "\003fpu"
715#endif 735#endif
716#if LJ_ABI_SOFTFP 736#if LJ_ABI_SOFTFP
717 case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */ 737 "\006softfp"
718#else 738#else
719 case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */ 739 "\006hardfp"
720#endif 740#endif
721#if LJ_ABI_EABI 741#if LJ_ABI_EABI
722 case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */ 742 "\004eabi"
723#endif 743#endif
724#if LJ_ABI_WIN 744#if LJ_ABI_WIN
725 case H_(4ab624a8,4ab624a8): b = 1; break; /* win */ 745 "\003win"
726#endif 746#endif
727 case H_(3af93066,1f001464): b = 1; break; /* le/be */ 747#if LJ_TARGET_UWP
728 default: 748 "\003uwp"
729 break; 749#endif
730 } 750#if LJ_LE
751 "\002le"
752#else
753 "\002be"
754#endif
755#if LJ_GC64
756 "\004gc64"
757#endif
758 ) >= 0;
731 setboolV(L->top-1, b); 759 setboolV(L->top-1, b);
732 setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */ 760 setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
733 return 1; 761 return 1;
734} 762}
735 763
736#undef H_
737
738LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */ 764LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */
739 765
740LJLIB_CF(ffi_metatype) 766LJLIB_CF(ffi_metatype)
@@ -768,19 +794,11 @@ LJLIB_CF(ffi_gc) LJLIB_REC(.)
768 GCcdata *cd = ffi_checkcdata(L, 1); 794 GCcdata *cd = ffi_checkcdata(L, 1);
769 TValue *fin = lj_lib_checkany(L, 2); 795 TValue *fin = lj_lib_checkany(L, 2);
770 CTState *cts = ctype_cts(L); 796 CTState *cts = ctype_cts(L);
771 GCtab *t = cts->finalizer;
772 CType *ct = ctype_raw(cts, cd->ctypeid); 797 CType *ct = ctype_raw(cts, cd->ctypeid);
773 if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) || 798 if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) ||
774 ctype_isrefarray(ct->info))) 799 ctype_isrefarray(ct->info)))
775 lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE); 800 lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
776 if (gcref(t->metatable)) { /* Update finalizer table, if still enabled. */ 801 lj_cdata_setfin(L, cd, gcval(fin), itype(fin));
777 copyTV(L, lj_tab_set(L, t, L->base), fin);
778 lj_gc_anybarriert(L, t);
779 if (!tvisnil(fin))
780 cd->marked |= LJ_GC_CDATA_FIN;
781 else
782 cd->marked &= ~LJ_GC_CDATA_FIN;
783 }
784 L->top = L->base+1; /* Pass through the cdata object. */ 802 L->top = L->base+1; /* Pass through the cdata object. */
785 return 1; 803 return 1;
786} 804}
diff --git a/src/lib_io.c b/src/lib_io.c
index f13cf048..c889a6b0 100644
--- a/src/lib_io.c
+++ b/src/lib_io.c
@@ -19,8 +19,10 @@
19#include "lj_obj.h" 19#include "lj_obj.h"
20#include "lj_gc.h" 20#include "lj_gc.h"
21#include "lj_err.h" 21#include "lj_err.h"
22#include "lj_buf.h"
22#include "lj_str.h" 23#include "lj_str.h"
23#include "lj_state.h" 24#include "lj_state.h"
25#include "lj_strfmt.h"
24#include "lj_ff.h" 26#include "lj_ff.h"
25#include "lj_lib.h" 27#include "lj_lib.h"
26 28
@@ -84,7 +86,7 @@ static IOFileUD *io_file_open(lua_State *L, const char *mode)
84 IOFileUD *iof = io_file_new(L); 86 IOFileUD *iof = io_file_new(L);
85 iof->fp = fopen(fname, mode); 87 iof->fp = fopen(fname, mode);
86 if (iof->fp == NULL) 88 if (iof->fp == NULL)
87 luaL_argerror(L, 1, lj_str_pushf(L, "%s: %s", fname, strerror(errno))); 89 luaL_argerror(L, 1, lj_strfmt_pushf(L, "%s: %s", fname, strerror(errno)));
88 return iof; 90 return iof;
89} 91}
90 92
@@ -97,11 +99,8 @@ static int io_file_close(lua_State *L, IOFileUD *iof)
97 int stat = -1; 99 int stat = -1;
98#if LJ_TARGET_POSIX 100#if LJ_TARGET_POSIX
99 stat = pclose(iof->fp); 101 stat = pclose(iof->fp);
100#elif LJ_TARGET_WINDOWS 102#elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP
101 stat = _pclose(iof->fp); 103 stat = _pclose(iof->fp);
102#else
103 lua_assert(0);
104 return 0;
105#endif 104#endif
106#if LJ_52 105#if LJ_52
107 iof->fp = NULL; 106 iof->fp = NULL;
@@ -110,7 +109,8 @@ static int io_file_close(lua_State *L, IOFileUD *iof)
110 ok = (stat != -1); 109 ok = (stat != -1);
111#endif 110#endif
112 } else { 111 } else {
113 lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF); 112 lj_assertL((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF,
113 "close of unknown FILE* type");
114 setnilV(L->top++); 114 setnilV(L->top++);
115 lua_pushliteral(L, "cannot close standard file"); 115 lua_pushliteral(L, "cannot close standard file");
116 return 2; 116 return 2;
@@ -145,7 +145,7 @@ static int io_file_readline(lua_State *L, FILE *fp, MSize chop)
145 MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0; 145 MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0;
146 char *buf; 146 char *buf;
147 for (;;) { 147 for (;;) {
148 buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); 148 buf = lj_buf_tmp(L, m);
149 if (fgets(buf+n, m-n, fp) == NULL) break; 149 if (fgets(buf+n, m-n, fp) == NULL) break;
150 n += (MSize)strlen(buf+n); 150 n += (MSize)strlen(buf+n);
151 ok |= n; 151 ok |= n;
@@ -161,7 +161,7 @@ static void io_file_readall(lua_State *L, FILE *fp)
161{ 161{
162 MSize m, n; 162 MSize m, n;
163 for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) { 163 for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) {
164 char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); 164 char *buf = lj_buf_tmp(L, m);
165 n += (MSize)fread(buf+n, 1, m-n, fp); 165 n += (MSize)fread(buf+n, 1, m-n, fp);
166 if (n != m) { 166 if (n != m) {
167 setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); 167 setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n));
@@ -174,7 +174,7 @@ static void io_file_readall(lua_State *L, FILE *fp)
174static int io_file_readlen(lua_State *L, FILE *fp, MSize m) 174static int io_file_readlen(lua_State *L, FILE *fp, MSize m)
175{ 175{
176 if (m) { 176 if (m) {
177 char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); 177 char *buf = lj_buf_tmp(L, m);
178 MSize n = (MSize)fread(buf, 1, m, fp); 178 MSize n = (MSize)fread(buf, 1, m, fp);
179 setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); 179 setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n));
180 lj_gc_check(L); 180 lj_gc_check(L);
@@ -201,13 +201,12 @@ static int io_file_read(lua_State *L, FILE *fp, int start)
201 for (n = start; nargs-- && ok; n++) { 201 for (n = start; nargs-- && ok; n++) {
202 if (tvisstr(L->base+n)) { 202 if (tvisstr(L->base+n)) {
203 const char *p = strVdata(L->base+n); 203 const char *p = strVdata(L->base+n);
204 if (p[0] != '*') 204 if (p[0] == '*') p++;
205 lj_err_arg(L, n+1, LJ_ERR_INVOPT); 205 if (p[0] == 'n')
206 if (p[1] == 'n')
207 ok = io_file_readnum(L, fp); 206 ok = io_file_readnum(L, fp);
208 else if ((p[1] & ~0x20) == 'L') 207 else if ((p[0] & ~0x20) == 'L')
209 ok = io_file_readline(L, fp, (p[1] == 'l')); 208 ok = io_file_readline(L, fp, (p[0] == 'l'));
210 else if (p[1] == 'a') 209 else if (p[0] == 'a')
211 io_file_readall(L, fp); 210 io_file_readall(L, fp);
212 else 211 else
213 lj_err_arg(L, n+1, LJ_ERR_INVFMT); 212 lj_err_arg(L, n+1, LJ_ERR_INVFMT);
@@ -230,19 +229,11 @@ static int io_file_write(lua_State *L, FILE *fp, int start)
230 cTValue *tv; 229 cTValue *tv;
231 int status = 1; 230 int status = 1;
232 for (tv = L->base+start; tv < L->top; tv++) { 231 for (tv = L->base+start; tv < L->top; tv++) {
233 if (tvisstr(tv)) { 232 MSize len;
234 MSize len = strV(tv)->len; 233 const char *p = lj_strfmt_wstrnum(L, tv, &len);
235 status = status && (fwrite(strVdata(tv), 1, len, fp) == len); 234 if (!p)
236 } else if (tvisint(tv)) {
237 char buf[LJ_STR_INTBUF];
238 char *p = lj_str_bufint(buf, intV(tv));
239 size_t len = (size_t)(buf+LJ_STR_INTBUF-p);
240 status = status && (fwrite(p, 1, len, fp) == len);
241 } else if (tvisnum(tv)) {
242 status = status && (fprintf(fp, LUA_NUMBER_FMT, numV(tv)) > 0);
243 } else {
244 lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING); 235 lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING);
245 } 236 status = status && (fwrite(p, 1, len, fp) == len);
246 } 237 }
247 if (LJ_52 && status) { 238 if (LJ_52 && status) {
248 L->top = L->base+1; 239 L->top = L->base+1;
@@ -313,6 +304,14 @@ LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0)
313 return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL); 304 return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL);
314} 305}
315 306
307#if LJ_32 && defined(__ANDROID__) && __ANDROID_API__ < 24
308/* The Android NDK is such an unmatched marvel of engineering. */
309extern int fseeko32(FILE *, long int, int) __asm__("fseeko");
310extern long int ftello32(FILE *) __asm__("ftello");
311#define fseeko(fp, pos, whence) (fseeko32((fp), (pos), (whence)))
312#define ftello(fp) (ftello32((fp)))
313#endif
314
316LJLIB_CF(io_method_seek) 315LJLIB_CF(io_method_seek)
317{ 316{
318 FILE *fp = io_tofile(L)->fp; 317 FILE *fp = io_tofile(L)->fp;
@@ -413,7 +412,7 @@ LJLIB_CF(io_open)
413 412
414LJLIB_CF(io_popen) 413LJLIB_CF(io_popen)
415{ 414{
416#if LJ_TARGET_POSIX || LJ_TARGET_WINDOWS 415#if LJ_TARGET_POSIX || (LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP)
417 const char *fname = strdata(lj_lib_checkstr(L, 1)); 416 const char *fname = strdata(lj_lib_checkstr(L, 1));
418 GCstr *s = lj_lib_optstr(L, 2); 417 GCstr *s = lj_lib_optstr(L, 2);
419 const char *mode = s ? strdata(s) : "r"; 418 const char *mode = s ? strdata(s) : "r";
diff --git a/src/lib_jit.c b/src/lib_jit.c
index 6e98229e..21e01d3e 100644
--- a/src/lib_jit.c
+++ b/src/lib_jit.c
@@ -10,13 +10,17 @@
10#include "lauxlib.h" 10#include "lauxlib.h"
11#include "lualib.h" 11#include "lualib.h"
12 12
13#include "lj_arch.h"
14#include "lj_obj.h" 13#include "lj_obj.h"
14#include "lj_gc.h"
15#include "lj_err.h" 15#include "lj_err.h"
16#include "lj_debug.h" 16#include "lj_debug.h"
17#include "lj_str.h" 17#include "lj_str.h"
18#include "lj_tab.h" 18#include "lj_tab.h"
19#include "lj_state.h"
19#include "lj_bc.h" 20#include "lj_bc.h"
21#if LJ_HASFFI
22#include "lj_ctype.h"
23#endif
20#if LJ_HASJIT 24#if LJ_HASJIT
21#include "lj_ir.h" 25#include "lj_ir.h"
22#include "lj_jit.h" 26#include "lj_jit.h"
@@ -24,6 +28,7 @@
24#include "lj_iropt.h" 28#include "lj_iropt.h"
25#include "lj_target.h" 29#include "lj_target.h"
26#endif 30#endif
31#include "lj_trace.h"
27#include "lj_dispatch.h" 32#include "lj_dispatch.h"
28#include "lj_vm.h" 33#include "lj_vm.h"
29#include "lj_vmevent.h" 34#include "lj_vmevent.h"
@@ -99,8 +104,8 @@ LJLIB_CF(jit_status)
99 jit_State *J = L2J(L); 104 jit_State *J = L2J(L);
100 L->top = L->base; 105 L->top = L->base;
101 setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0); 106 setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0);
102 flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING); 107 flagbits_to_strings(L, J->flags, JIT_F_CPU, JIT_F_CPUSTRING);
103 flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING); 108 flagbits_to_strings(L, J->flags, JIT_F_OPT, JIT_F_OPTSTRING);
104 return (int)(L->top - L->base); 109 return (int)(L->top - L->base);
105#else 110#else
106 setboolV(L->top++, 0); 111 setboolV(L->top++, 0);
@@ -108,6 +113,13 @@ LJLIB_CF(jit_status)
108#endif 113#endif
109} 114}
110 115
116LJLIB_CF(jit_security)
117{
118 int idx = lj_lib_checkopt(L, 1, -1, LJ_SECURITY_MODESTRING);
119 setintV(L->top++, ((LJ_SECURITY_MODE >> (2*idx)) & 3));
120 return 1;
121}
122
111LJLIB_CF(jit_attach) 123LJLIB_CF(jit_attach)
112{ 124{
113#ifdef LUAJIT_DISABLE_VMEVENT 125#ifdef LUAJIT_DISABLE_VMEVENT
@@ -222,7 +234,7 @@ LJLIB_CF(jit_util_funcbc)
222 if (pc < pt->sizebc) { 234 if (pc < pt->sizebc) {
223 BCIns ins = proto_bc(pt)[pc]; 235 BCIns ins = proto_bc(pt)[pc];
224 BCOp op = bc_op(ins); 236 BCOp op = bc_op(ins);
225 lua_assert(op < BC__MAX); 237 lj_assertL(op < BC__MAX, "bad bytecode op %d", op);
226 setintV(L->top, ins); 238 setintV(L->top, ins);
227 setintV(L->top+1, lj_bc_mode[op]); 239 setintV(L->top+1, lj_bc_mode[op]);
228 L->top += 2; 240 L->top += 2;
@@ -280,7 +292,7 @@ static GCtrace *jit_checktrace(lua_State *L)
280/* Names of link types. ORDER LJ_TRLINK */ 292/* Names of link types. ORDER LJ_TRLINK */
281static const char *const jit_trlinkname[] = { 293static const char *const jit_trlinkname[] = {
282 "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion", 294 "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion",
283 "interpreter", "return" 295 "interpreter", "return", "stitch"
284}; 296};
285 297
286/* local info = jit.util.traceinfo(tr) */ 298/* local info = jit.util.traceinfo(tr) */
@@ -333,6 +345,13 @@ LJLIB_CF(jit_util_tracek)
333 slot = ir->op2; 345 slot = ir->op2;
334 ir = &T->ir[ir->op1]; 346 ir = &T->ir[ir->op1];
335 } 347 }
348#if LJ_HASFFI
349 if (ir->o == IR_KINT64 && !ctype_ctsG(G(L))) {
350 ptrdiff_t oldtop = savestack(L, L->top);
351 luaopen_ffi(L); /* Load FFI library on-demand. */
352 L->top = restorestack(L, oldtop);
353 }
354#endif
336 lj_ir_kvalue(L, L->top-2, ir); 355 lj_ir_kvalue(L, L->top-2, ir);
337 setintV(L->top-1, (int32_t)irt_type(ir->t)); 356 setintV(L->top-1, (int32_t)irt_type(ir->t));
338 if (slot == -1) 357 if (slot == -1)
@@ -417,6 +436,12 @@ LJLIB_CF(jit_util_ircalladdr)
417 436
418#include "lj_libdef.h" 437#include "lj_libdef.h"
419 438
439static int luaopen_jit_util(lua_State *L)
440{
441 LJ_LIB_REG(L, NULL, jit_util);
442 return 1;
443}
444
420/* -- jit.opt module ------------------------------------------------------ */ 445/* -- jit.opt module ------------------------------------------------------ */
421 446
422#if LJ_HASJIT 447#if LJ_HASJIT
@@ -453,7 +478,7 @@ static int jitopt_flag(jit_State *J, const char *str)
453 str += str[2] == '-' ? 3 : 2; 478 str += str[2] == '-' ? 3 : 2;
454 set = 0; 479 set = 0;
455 } 480 }
456 for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) { 481 for (opt = JIT_F_OPT; ; opt <<= 1) {
457 size_t len = *(const uint8_t *)lst; 482 size_t len = *(const uint8_t *)lst;
458 if (len == 0) 483 if (len == 0)
459 break; 484 break;
@@ -473,7 +498,7 @@ static int jitopt_param(jit_State *J, const char *str)
473 int i; 498 int i;
474 for (i = 0; i < JIT_P__MAX; i++) { 499 for (i = 0; i < JIT_P__MAX; i++) {
475 size_t len = *(const uint8_t *)lst; 500 size_t len = *(const uint8_t *)lst;
476 lua_assert(len != 0); 501 lj_assertJ(len != 0, "bad JIT_P_STRING");
477 if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { 502 if (strncmp(str, lst+1, len) == 0 && str[len] == '=') {
478 int32_t n = 0; 503 int32_t n = 0;
479 const char *p = &str[len+1]; 504 const char *p = &str[len+1];
@@ -514,6 +539,104 @@ LJLIB_CF(jit_opt_start)
514 539
515#endif 540#endif
516 541
542/* -- jit.profile module -------------------------------------------------- */
543
544#if LJ_HASPROFILE
545
546#define LJLIB_MODULE_jit_profile
547
548/* Not loaded by default, use: local profile = require("jit.profile") */
549
550static const char KEY_PROFILE_THREAD = 't';
551static const char KEY_PROFILE_FUNC = 'f';
552
553static void jit_profile_callback(lua_State *L2, lua_State *L, int samples,
554 int vmstate)
555{
556 TValue key;
557 cTValue *tv;
558 setlightudV(&key, (void *)&KEY_PROFILE_FUNC);
559 tv = lj_tab_get(L, tabV(registry(L)), &key);
560 if (tvisfunc(tv)) {
561 char vmst = (char)vmstate;
562 int status;
563 setfuncV(L2, L2->top++, funcV(tv));
564 setthreadV(L2, L2->top++, L);
565 setintV(L2->top++, samples);
566 setstrV(L2, L2->top++, lj_str_new(L2, &vmst, 1));
567 status = lua_pcall(L2, 3, 0, 0); /* callback(thread, samples, vmstate) */
568 if (status) {
569 if (G(L2)->panic) G(L2)->panic(L2);
570 exit(EXIT_FAILURE);
571 }
572 lj_trace_abort(G(L2));
573 }
574}
575
576/* profile.start(mode, cb) */
577LJLIB_CF(jit_profile_start)
578{
579 GCtab *registry = tabV(registry(L));
580 GCstr *mode = lj_lib_optstr(L, 1);
581 GCfunc *func = lj_lib_checkfunc(L, 2);
582 lua_State *L2 = lua_newthread(L); /* Thread that runs profiler callback. */
583 TValue key;
584 /* Anchor thread and function in registry. */
585 setlightudV(&key, (void *)&KEY_PROFILE_THREAD);
586 setthreadV(L, lj_tab_set(L, registry, &key), L2);
587 setlightudV(&key, (void *)&KEY_PROFILE_FUNC);
588 setfuncV(L, lj_tab_set(L, registry, &key), func);
589 lj_gc_anybarriert(L, registry);
590 luaJIT_profile_start(L, mode ? strdata(mode) : "",
591 (luaJIT_profile_callback)jit_profile_callback, L2);
592 return 0;
593}
594
595/* profile.stop() */
596LJLIB_CF(jit_profile_stop)
597{
598 GCtab *registry;
599 TValue key;
600 luaJIT_profile_stop(L);
601 registry = tabV(registry(L));
602 setlightudV(&key, (void *)&KEY_PROFILE_THREAD);
603 setnilV(lj_tab_set(L, registry, &key));
604 setlightudV(&key, (void *)&KEY_PROFILE_FUNC);
605 setnilV(lj_tab_set(L, registry, &key));
606 lj_gc_anybarriert(L, registry);
607 return 0;
608}
609
610/* dump = profile.dumpstack([thread,] fmt, depth) */
611LJLIB_CF(jit_profile_dumpstack)
612{
613 lua_State *L2 = L;
614 int arg = 0;
615 size_t len;
616 int depth;
617 GCstr *fmt;
618 const char *p;
619 if (L->top > L->base && tvisthread(L->base)) {
620 L2 = threadV(L->base);
621 arg = 1;
622 }
623 fmt = lj_lib_checkstr(L, arg+1);
624 depth = lj_lib_checkint(L, arg+2);
625 p = luaJIT_profile_dumpstack(L2, strdata(fmt), depth, &len);
626 lua_pushlstring(L, p, len);
627 return 1;
628}
629
630#include "lj_libdef.h"
631
632static int luaopen_jit_profile(lua_State *L)
633{
634 LJ_LIB_REG(L, NULL, jit_profile);
635 return 1;
636}
637
638#endif
639
517/* -- JIT compiler initialization ----------------------------------------- */ 640/* -- JIT compiler initialization ----------------------------------------- */
518 641
519#if LJ_HASJIT 642#if LJ_HASJIT
@@ -524,66 +647,41 @@ JIT_PARAMDEF(JIT_PARAMINIT)
524#undef JIT_PARAMINIT 647#undef JIT_PARAMINIT
525 0 648 0
526}; 649};
527#endif
528 650
529#if LJ_TARGET_ARM && LJ_TARGET_LINUX 651#if LJ_TARGET_ARM && LJ_TARGET_LINUX
530#include <sys/utsname.h> 652#include <sys/utsname.h>
531#endif 653#endif
532 654
533/* Arch-dependent CPU detection. */ 655/* Arch-dependent CPU feature detection. */
534static uint32_t jit_cpudetect(lua_State *L) 656static uint32_t jit_cpudetect(void)
535{ 657{
536 uint32_t flags = 0; 658 uint32_t flags = 0;
537#if LJ_TARGET_X86ORX64 659#if LJ_TARGET_X86ORX64
660
538 uint32_t vendor[4]; 661 uint32_t vendor[4];
539 uint32_t features[4]; 662 uint32_t features[4];
540 if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) { 663 if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) {
541#if !LJ_HASJIT
542#define JIT_F_CMOV 1
543#define JIT_F_SSE2 2
544#endif
545 flags |= ((features[3] >> 15)&1) * JIT_F_CMOV;
546 flags |= ((features[3] >> 26)&1) * JIT_F_SSE2;
547#if LJ_HASJIT
548 flags |= ((features[2] >> 0)&1) * JIT_F_SSE3; 664 flags |= ((features[2] >> 0)&1) * JIT_F_SSE3;
549 flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1; 665 flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1;
550 if (vendor[2] == 0x6c65746e) { /* Intel. */ 666 if (vendor[0] >= 7) {
551 if ((features[0] & 0x0ff00f00) == 0x00000f00) /* P4. */ 667 uint32_t xfeatures[4];
552 flags |= JIT_F_P4; /* Currently unused. */ 668 lj_vm_cpuid(7, xfeatures);
553 else if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */ 669 flags |= ((xfeatures[1] >> 8)&1) * JIT_F_BMI2;
554 flags |= JIT_F_LEA_AGU;
555 } else if (vendor[2] == 0x444d4163) { /* AMD. */
556 uint32_t fam = (features[0] & 0x0ff00f00);
557 if (fam == 0x00000f00) /* K8. */
558 flags |= JIT_F_SPLIT_XMM;
559 if (fam >= 0x00000f00) /* K8, K10. */
560 flags |= JIT_F_PREFER_IMUL;
561 } 670 }
562#endif
563 } 671 }
564 /* Check for required instruction set support on x86 (unnecessary on x64). */ 672 /* Don't bother checking for SSE2 -- the VM will crash before getting here. */
565#if LJ_TARGET_X86 673
566#if !defined(LUAJIT_CPU_NOCMOV)
567 if (!(flags & JIT_F_CMOV))
568 luaL_error(L, "CPU not supported");
569#endif
570#if defined(LUAJIT_CPU_SSE2)
571 if (!(flags & JIT_F_SSE2))
572 luaL_error(L, "CPU does not support SSE2 (recompile without -DLUAJIT_CPU_SSE2)");
573#endif
574#endif
575#elif LJ_TARGET_ARM 674#elif LJ_TARGET_ARM
576#if LJ_HASJIT 675
577 int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */ 676 int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */
578#if LJ_TARGET_LINUX 677#if LJ_TARGET_LINUX
579 if (ver < 70) { /* Runtime ARM CPU detection. */ 678 if (ver < 70) { /* Runtime ARM CPU detection. */
580 struct utsname ut; 679 struct utsname ut;
581 uname(&ut); 680 uname(&ut);
582 if (strncmp(ut.machine, "armv", 4) == 0) { 681 if (strncmp(ut.machine, "armv", 4) == 0) {
583 if (ut.machine[4] >= '7') 682 if (ut.machine[4] >= '8') ver = 80;
584 ver = 70; 683 else if (ut.machine[4] == '7') ver = 70;
585 else if (ut.machine[4] == '6') 684 else if (ut.machine[4] == '6') ver = 60;
586 ver = 60;
587 } 685 }
588 } 686 }
589#endif 687#endif
@@ -591,74 +689,77 @@ static uint32_t jit_cpudetect(lua_State *L)
591 ver >= 61 ? JIT_F_ARMV6T2_ : 689 ver >= 61 ? JIT_F_ARMV6T2_ :
592 ver >= 60 ? JIT_F_ARMV6_ : 0; 690 ver >= 60 ? JIT_F_ARMV6_ : 0;
593 flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2; 691 flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2;
594#endif 692
693#elif LJ_TARGET_ARM64
694
695 /* No optional CPU features to detect (for now). */
696
595#elif LJ_TARGET_PPC 697#elif LJ_TARGET_PPC
596#if LJ_HASJIT 698
597#if LJ_ARCH_SQRT 699#if LJ_ARCH_SQRT
598 flags |= JIT_F_SQRT; 700 flags |= JIT_F_SQRT;
599#endif 701#endif
600#if LJ_ARCH_ROUND 702#if LJ_ARCH_ROUND
601 flags |= JIT_F_ROUND; 703 flags |= JIT_F_ROUND;
602#endif 704#endif
603#endif 705
604#elif LJ_TARGET_PPCSPE
605 /* Nothing to do. */
606#elif LJ_TARGET_MIPS 706#elif LJ_TARGET_MIPS
607#if LJ_HASJIT 707
608 /* Compile-time MIPS CPU detection. */ 708 /* Compile-time MIPS CPU detection. */
609#if LJ_ARCH_VERSION >= 20 709#if LJ_ARCH_VERSION >= 20
610 flags |= JIT_F_MIPS32R2; 710 flags |= JIT_F_MIPSXXR2;
611#endif 711#endif
612 /* Runtime MIPS CPU detection. */ 712 /* Runtime MIPS CPU detection. */
613#if defined(__GNUC__) 713#if defined(__GNUC__)
614 if (!(flags & JIT_F_MIPS32R2)) { 714 if (!(flags & JIT_F_MIPSXXR2)) {
615 int x; 715 int x;
716#ifdef __mips16
717 x = 0; /* Runtime detection is difficult. Ensure optimal -march flags. */
718#else
616 /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */ 719 /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */
617 __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2"); 720 __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2");
618 if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */
619 }
620#endif 721#endif
722 if (x) flags |= JIT_F_MIPSXXR2; /* Either 0x80000000 (R2) or 0 (R1). */
723 }
621#endif 724#endif
725
622#else 726#else
623#error "Missing CPU detection for this architecture" 727#error "Missing CPU detection for this architecture"
624#endif 728#endif
625 UNUSED(L);
626 return flags; 729 return flags;
627} 730}
628 731
629/* Initialize JIT compiler. */ 732/* Initialize JIT compiler. */
630static void jit_init(lua_State *L) 733static void jit_init(lua_State *L)
631{ 734{
632 uint32_t flags = jit_cpudetect(L);
633#if LJ_HASJIT
634 jit_State *J = L2J(L); 735 jit_State *J = L2J(L);
635#if LJ_TARGET_X86 736 J->flags = jit_cpudetect() | JIT_F_ON | JIT_F_OPT_DEFAULT;
636 /* Silently turn off the JIT compiler on CPUs without SSE2. */
637 if ((flags & JIT_F_SSE2))
638#endif
639 J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT;
640 memcpy(J->param, jit_param_default, sizeof(J->param)); 737 memcpy(J->param, jit_param_default, sizeof(J->param));
641 lj_dispatch_update(G(L)); 738 lj_dispatch_update(G(L));
642#else
643 UNUSED(flags);
644#endif
645} 739}
740#endif
646 741
647LUALIB_API int luaopen_jit(lua_State *L) 742LUALIB_API int luaopen_jit(lua_State *L)
648{ 743{
744#if LJ_HASJIT
745 jit_init(L);
746#endif
649 lua_pushliteral(L, LJ_OS_NAME); 747 lua_pushliteral(L, LJ_OS_NAME);
650 lua_pushliteral(L, LJ_ARCH_NAME); 748 lua_pushliteral(L, LJ_ARCH_NAME);
651 lua_pushinteger(L, LUAJIT_VERSION_NUM); 749 lua_pushinteger(L, LUAJIT_VERSION_NUM);
652 lua_pushliteral(L, LUAJIT_VERSION); 750 lua_pushliteral(L, LUAJIT_VERSION);
653 LJ_LIB_REG(L, LUA_JITLIBNAME, jit); 751 LJ_LIB_REG(L, LUA_JITLIBNAME, jit);
752#if LJ_HASPROFILE
753 lj_lib_prereg(L, LUA_JITLIBNAME ".profile", luaopen_jit_profile,
754 tabref(L->env));
755#endif
654#ifndef LUAJIT_DISABLE_JITUTIL 756#ifndef LUAJIT_DISABLE_JITUTIL
655 LJ_LIB_REG(L, "jit.util", jit_util); 757 lj_lib_prereg(L, LUA_JITLIBNAME ".util", luaopen_jit_util, tabref(L->env));
656#endif 758#endif
657#if LJ_HASJIT 759#if LJ_HASJIT
658 LJ_LIB_REG(L, "jit.opt", jit_opt); 760 LJ_LIB_REG(L, "jit.opt", jit_opt);
659#endif 761#endif
660 L->top -= 2; 762 L->top -= 2;
661 jit_init(L);
662 return 1; 763 return 1;
663} 764}
664 765
diff --git a/src/lib_math.c b/src/lib_math.c
index 9d324d7e..95b7d460 100644
--- a/src/lib_math.c
+++ b/src/lib_math.c
@@ -15,6 +15,7 @@
15#include "lj_obj.h" 15#include "lj_obj.h"
16#include "lj_lib.h" 16#include "lj_lib.h"
17#include "lj_vm.h" 17#include "lj_vm.h"
18#include "lj_prng.h"
18 19
19/* ------------------------------------------------------------------------ */ 20/* ------------------------------------------------------------------------ */
20 21
@@ -33,25 +34,19 @@ LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT)
33 lj_lib_checknum(L, 1); 34 lj_lib_checknum(L, 1);
34 return FFH_RETRY; 35 return FFH_RETRY;
35} 36}
36LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10) 37LJLIB_ASM_(math_log10) LJLIB_REC(math_call IRCALL_log10)
37LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP) 38LJLIB_ASM_(math_exp) LJLIB_REC(math_call IRCALL_exp)
38LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN) 39LJLIB_ASM_(math_sin) LJLIB_REC(math_call IRCALL_sin)
39LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS) 40LJLIB_ASM_(math_cos) LJLIB_REC(math_call IRCALL_cos)
40LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN) 41LJLIB_ASM_(math_tan) LJLIB_REC(math_call IRCALL_tan)
41LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin) 42LJLIB_ASM_(math_asin) LJLIB_REC(math_call IRCALL_asin)
42LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos) 43LJLIB_ASM_(math_acos) LJLIB_REC(math_call IRCALL_acos)
43LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan) 44LJLIB_ASM_(math_atan) LJLIB_REC(math_call IRCALL_atan)
44LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh) 45LJLIB_ASM_(math_sinh) LJLIB_REC(math_call IRCALL_sinh)
45LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh) 46LJLIB_ASM_(math_cosh) LJLIB_REC(math_call IRCALL_cosh)
46LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh) 47LJLIB_ASM_(math_tanh) LJLIB_REC(math_call IRCALL_tanh)
47LJLIB_ASM_(math_frexp) 48LJLIB_ASM_(math_frexp)
48LJLIB_ASM_(math_modf) LJLIB_REC(.) 49LJLIB_ASM_(math_modf)
49
50LJLIB_PUSH(57.29577951308232)
51LJLIB_ASM_(math_deg) LJLIB_REC(math_degrad)
52
53LJLIB_PUSH(0.017453292519943295)
54LJLIB_ASM_(math_rad) LJLIB_REC(math_degrad)
55 50
56LJLIB_ASM(math_log) LJLIB_REC(math_log) 51LJLIB_ASM(math_log) LJLIB_REC(math_log)
57{ 52{
@@ -63,12 +58,15 @@ LJLIB_ASM(math_log) LJLIB_REC(math_log)
63#else 58#else
64 x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y); 59 x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y);
65#endif 60#endif
66 setnumV(L->base-1, x*y); /* Do NOT join the expression to x / y. */ 61 setnumV(L->base-1-LJ_FR2, x*y); /* Do NOT join the expression to x / y. */
67 return FFH_RES(1); 62 return FFH_RES(1);
68 } 63 }
69 return FFH_RETRY; 64 return FFH_RETRY;
70} 65}
71 66
67LJLIB_LUA(math_deg) /* function(x) return x * 57.29577951308232 end */
68LJLIB_LUA(math_rad) /* function(x) return x * 0.017453292519943295 end */
69
72LJLIB_ASM(math_atan2) LJLIB_REC(.) 70LJLIB_ASM(math_atan2) LJLIB_REC(.)
73{ 71{
74 lj_lib_checknum(L, 1); 72 lj_lib_checknum(L, 1);
@@ -108,34 +106,11 @@ LJLIB_PUSH(1e310) LJLIB_SET(huge)
108** Full-period ME-CF generator with L=64, J=4, k=223, N1=49. 106** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
109*/ 107*/
110 108
111/* PRNG state. */
112struct RandomState {
113 uint64_t gen[4]; /* State of the 4 LFSR generators. */
114 int valid; /* State is valid. */
115};
116
117/* Union needed for bit-pattern conversion between uint64_t and double. */ 109/* Union needed for bit-pattern conversion between uint64_t and double. */
118typedef union { uint64_t u64; double d; } U64double; 110typedef union { uint64_t u64; double d; } U64double;
119 111
120/* Update generator i and compute a running xor of all states. */ 112/* PRNG seeding function. */
121#define TW223_GEN(i, k, q, s) \ 113static void random_seed(PRNGState *rs, double d)
122 z = rs->gen[i]; \
123 z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
124 r ^= z; rs->gen[i] = z;
125
126/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */
127LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs)
128{
129 uint64_t z, r = 0;
130 TW223_GEN(0, 63, 31, 18)
131 TW223_GEN(1, 58, 19, 28)
132 TW223_GEN(2, 55, 24, 7)
133 TW223_GEN(3, 47, 21, 8)
134 return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
135}
136
137/* PRNG initialization function. */
138static void random_init(RandomState *rs, double d)
139{ 114{
140 uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */ 115 uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */
141 int i; 116 int i;
@@ -144,24 +119,22 @@ static void random_init(RandomState *rs, double d)
144 uint32_t m = 1u << (r&255); 119 uint32_t m = 1u << (r&255);
145 r >>= 8; 120 r >>= 8;
146 u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354; 121 u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354;
147 if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */ 122 if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of u[i] are non-zero. */
148 rs->gen[i] = u.u64; 123 rs->u[i] = u.u64;
149 } 124 }
150 rs->valid = 1;
151 for (i = 0; i < 10; i++) 125 for (i = 0; i < 10; i++)
152 lj_math_random_step(rs); 126 (void)lj_prng_u64(rs);
153} 127}
154 128
155/* PRNG extract function. */ 129/* PRNG extract function. */
156LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */ 130LJLIB_PUSH(top-2) /* Upvalue holds userdata with PRNGState. */
157LJLIB_CF(math_random) LJLIB_REC(.) 131LJLIB_CF(math_random) LJLIB_REC(.)
158{ 132{
159 int n = (int)(L->top - L->base); 133 int n = (int)(L->top - L->base);
160 RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); 134 PRNGState *rs = (PRNGState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
161 U64double u; 135 U64double u;
162 double d; 136 double d;
163 if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0); 137 u.u64 = lj_prng_u64d(rs);
164 u.u64 = lj_math_random_step(rs);
165 d = u.d - 1.0; 138 d = u.d - 1.0;
166 if (n > 0) { 139 if (n > 0) {
167#if LJ_DUALNUM 140#if LJ_DUALNUM
@@ -206,11 +179,11 @@ LJLIB_CF(math_random) LJLIB_REC(.)
206} 179}
207 180
208/* PRNG seed function. */ 181/* PRNG seed function. */
209LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */ 182LJLIB_PUSH(top-2) /* Upvalue holds userdata with PRNGState. */
210LJLIB_CF(math_randomseed) 183LJLIB_CF(math_randomseed)
211{ 184{
212 RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1)))); 185 PRNGState *rs = (PRNGState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
213 random_init(rs, lj_lib_checknum(L, 1)); 186 random_seed(rs, lj_lib_checknum(L, 1));
214 return 0; 187 return 0;
215} 188}
216 189
@@ -220,14 +193,9 @@ LJLIB_CF(math_randomseed)
220 193
221LUALIB_API int luaopen_math(lua_State *L) 194LUALIB_API int luaopen_math(lua_State *L)
222{ 195{
223 RandomState *rs; 196 PRNGState *rs = (PRNGState *)lua_newuserdata(L, sizeof(PRNGState));
224 rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState)); 197 lj_prng_seed_fixed(rs);
225 rs->valid = 0; /* Use lazy initialization to save some time on startup. */
226 LJ_LIB_REG(L, LUA_MATHLIBNAME, math); 198 LJ_LIB_REG(L, LUA_MATHLIBNAME, math);
227#if defined(LUA_COMPAT_MOD) && !LJ_52
228 lua_getfield(L, -1, "fmod");
229 lua_setfield(L, -2, "mod");
230#endif
231 return 1; 199 return 1;
232} 200}
233 201
diff --git a/src/lib_os.c b/src/lib_os.c
index a8e1708f..47893766 100644
--- a/src/lib_os.c
+++ b/src/lib_os.c
@@ -17,7 +17,10 @@
17#include "lualib.h" 17#include "lualib.h"
18 18
19#include "lj_obj.h" 19#include "lj_obj.h"
20#include "lj_gc.h"
20#include "lj_err.h" 21#include "lj_err.h"
22#include "lj_buf.h"
23#include "lj_str.h"
21#include "lj_lib.h" 24#include "lj_lib.h"
22 25
23#if LJ_TARGET_POSIX 26#if LJ_TARGET_POSIX
@@ -188,7 +191,7 @@ LJLIB_CF(os_date)
188#endif 191#endif
189 } 192 }
190 if (stm == NULL) { /* Invalid date? */ 193 if (stm == NULL) { /* Invalid date? */
191 setnilV(L->top-1); 194 setnilV(L->top++);
192 } else if (strcmp(s, "*t") == 0) { 195 } else if (strcmp(s, "*t") == 0) {
193 lua_createtable(L, 0, 9); /* 9 = number of fields */ 196 lua_createtable(L, 0, 9); /* 9 = number of fields */
194 setfield(L, "sec", stm->tm_sec); 197 setfield(L, "sec", stm->tm_sec);
@@ -200,23 +203,25 @@ LJLIB_CF(os_date)
200 setfield(L, "wday", stm->tm_wday+1); 203 setfield(L, "wday", stm->tm_wday+1);
201 setfield(L, "yday", stm->tm_yday+1); 204 setfield(L, "yday", stm->tm_yday+1);
202 setboolfield(L, "isdst", stm->tm_isdst); 205 setboolfield(L, "isdst", stm->tm_isdst);
203 } else { 206 } else if (*s) {
204 char cc[3]; 207 SBuf *sb = &G(L)->tmpbuf;
205 luaL_Buffer b; 208 MSize sz = 0, retry = 4;
206 cc[0] = '%'; cc[2] = '\0'; 209 const char *q;
207 luaL_buffinit(L, &b); 210 for (q = s; *q; q++)
208 for (; *s; s++) { 211 sz += (*q == '%') ? 30 : 1; /* Overflow doesn't matter. */
209 if (*s != '%' || *(s + 1) == '\0') { /* No conversion specifier? */ 212 setsbufL(sb, L);
210 luaL_addchar(&b, *s); 213 while (retry--) { /* Limit growth for invalid format or empty result. */
211 } else { 214 char *buf = lj_buf_need(sb, sz);
212 size_t reslen; 215 size_t len = strftime(buf, sbufsz(sb), s, stm);
213 char buff[200]; /* Should be big enough for any conversion result. */ 216 if (len) {
214 cc[1] = *(++s); 217 setstrV(L, L->top++, lj_str_new(L, buf, len));
215 reslen = strftime(buff, sizeof(buff), cc, stm); 218 lj_gc_check(L);
216 luaL_addlstring(&b, buff, reslen); 219 break;
217 } 220 }
221 sz += (sz|1);
218 } 222 }
219 luaL_pushresult(&b); 223 } else {
224 setstrV(L, L->top++, &G(L)->strempty);
220 } 225 }
221 return 1; 226 return 1;
222} 227}
diff --git a/src/lib_package.c b/src/lib_package.c
index a8bdcf17..5d8eb25d 100644
--- a/src/lib_package.c
+++ b/src/lib_package.c
@@ -76,6 +76,20 @@ static const char *ll_bcsym(void *lib, const char *sym)
76BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*); 76BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
77#endif 77#endif
78 78
79#if LJ_TARGET_UWP
80void *LJ_WIN_LOADLIBA(const char *path)
81{
82 DWORD err = GetLastError();
83 wchar_t wpath[256];
84 HANDLE lib = NULL;
85 if (MultiByteToWideChar(CP_ACP, 0, path, -1, wpath, 256) > 0) {
86 lib = LoadPackagedLibrary(wpath, 0);
87 }
88 SetLastError(err);
89 return lib;
90}
91#endif
92
79#undef setprogdir 93#undef setprogdir
80 94
81static void setprogdir(lua_State *L) 95static void setprogdir(lua_State *L)
@@ -96,9 +110,17 @@ static void setprogdir(lua_State *L)
96static void pusherror(lua_State *L) 110static void pusherror(lua_State *L)
97{ 111{
98 DWORD error = GetLastError(); 112 DWORD error = GetLastError();
113#if LJ_TARGET_XBOXONE
114 wchar_t wbuffer[128];
115 char buffer[128*2];
116 if (FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
117 NULL, error, 0, wbuffer, sizeof(wbuffer)/sizeof(wchar_t), NULL) &&
118 WideCharToMultiByte(CP_ACP, 0, wbuffer, 128, buffer, 128*2, NULL, NULL))
119#else
99 char buffer[128]; 120 char buffer[128];
100 if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, 121 if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
101 NULL, error, 0, buffer, sizeof(buffer), NULL)) 122 NULL, error, 0, buffer, sizeof(buffer), NULL))
123#endif
102 lua_pushstring(L, buffer); 124 lua_pushstring(L, buffer);
103 else 125 else
104 lua_pushfstring(L, "system error %d\n", error); 126 lua_pushfstring(L, "system error %d\n", error);
@@ -111,7 +133,7 @@ static void ll_unloadlib(void *lib)
111 133
112static void *ll_load(lua_State *L, const char *path, int gl) 134static void *ll_load(lua_State *L, const char *path, int gl)
113{ 135{
114 HINSTANCE lib = LoadLibraryA(path); 136 HINSTANCE lib = LJ_WIN_LOADLIBA(path);
115 if (lib == NULL) pusherror(L); 137 if (lib == NULL) pusherror(L);
116 UNUSED(gl); 138 UNUSED(gl);
117 return lib; 139 return lib;
@@ -124,17 +146,25 @@ static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
124 return f; 146 return f;
125} 147}
126 148
149#if LJ_TARGET_UWP
150EXTERN_C IMAGE_DOS_HEADER __ImageBase;
151#endif
152
127static const char *ll_bcsym(void *lib, const char *sym) 153static const char *ll_bcsym(void *lib, const char *sym)
128{ 154{
129 if (lib) { 155 if (lib) {
130 return (const char *)GetProcAddress((HINSTANCE)lib, sym); 156 return (const char *)GetProcAddress((HINSTANCE)lib, sym);
131 } else { 157 } else {
158#if LJ_TARGET_UWP
159 return (const char *)GetProcAddress((HINSTANCE)&__ImageBase, sym);
160#else
132 HINSTANCE h = GetModuleHandleA(NULL); 161 HINSTANCE h = GetModuleHandleA(NULL);
133 const char *p = (const char *)GetProcAddress(h, sym); 162 const char *p = (const char *)GetProcAddress(h, sym);
134 if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, 163 if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
135 (const char *)ll_bcsym, &h)) 164 (const char *)ll_bcsym, &h))
136 p = (const char *)GetProcAddress(h, sym); 165 p = (const char *)GetProcAddress(h, sym);
137 return p; 166 return p;
167#endif
138 } 168 }
139} 169}
140 170
@@ -185,8 +215,7 @@ static void **ll_register(lua_State *L, const char *path)
185 lua_pop(L, 1); 215 lua_pop(L, 1);
186 plib = (void **)lua_newuserdata(L, sizeof(void *)); 216 plib = (void **)lua_newuserdata(L, sizeof(void *));
187 *plib = NULL; 217 *plib = NULL;
188 luaL_getmetatable(L, "_LOADLIB"); 218 luaL_setmetatable(L, "_LOADLIB");
189 lua_setmetatable(L, -2);
190 lua_pushfstring(L, "LOADLIB: %s", path); 219 lua_pushfstring(L, "LOADLIB: %s", path);
191 lua_pushvalue(L, -2); 220 lua_pushvalue(L, -2);
192 lua_settable(L, LUA_REGISTRYINDEX); 221 lua_settable(L, LUA_REGISTRYINDEX);
@@ -391,8 +420,7 @@ static int lj_cf_package_loader_preload(lua_State *L)
391 420
392/* ------------------------------------------------------------------------ */ 421/* ------------------------------------------------------------------------ */
393 422
394static const int sentinel_ = 0; 423#define sentinel ((void *)0x4004)
395#define sentinel ((void *)&sentinel_)
396 424
397static int lj_cf_package_require(lua_State *L) 425static int lj_cf_package_require(lua_State *L)
398{ 426{
@@ -482,29 +510,19 @@ static void modinit(lua_State *L, const char *modname)
482static int lj_cf_package_module(lua_State *L) 510static int lj_cf_package_module(lua_State *L)
483{ 511{
484 const char *modname = luaL_checkstring(L, 1); 512 const char *modname = luaL_checkstring(L, 1);
485 int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ 513 int lastarg = (int)(L->top - L->base);
486 lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); 514 luaL_pushmodule(L, modname, 1);
487 lua_getfield(L, loaded, modname); /* get _LOADED[modname] */
488 if (!lua_istable(L, -1)) { /* not found? */
489 lua_pop(L, 1); /* remove previous result */
490 /* try global variable (and create one if it does not exist) */
491 if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL)
492 lj_err_callerv(L, LJ_ERR_BADMODN, modname);
493 lua_pushvalue(L, -1);
494 lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */
495 }
496 /* check whether table already has a _NAME field */
497 lua_getfield(L, -1, "_NAME"); 515 lua_getfield(L, -1, "_NAME");
498 if (!lua_isnil(L, -1)) { /* is table an initialized module? */ 516 if (!lua_isnil(L, -1)) { /* Module already initialized? */
499 lua_pop(L, 1); 517 lua_pop(L, 1);
500 } else { /* no; initialize it */ 518 } else {
501 lua_pop(L, 1); 519 lua_pop(L, 1);
502 modinit(L, modname); 520 modinit(L, modname);
503 } 521 }
504 lua_pushvalue(L, -1); 522 lua_pushvalue(L, -1);
505 setfenv(L); 523 setfenv(L);
506 dooptions(L, loaded - 1); 524 dooptions(L, lastarg);
507 return 0; 525 return LJ_52;
508} 526}
509 527
510static int lj_cf_package_seeall(lua_State *L) 528static int lj_cf_package_seeall(lua_State *L)
@@ -575,13 +593,16 @@ LUALIB_API int luaopen_package(lua_State *L)
575 lj_lib_pushcf(L, lj_cf_package_unloadlib, 1); 593 lj_lib_pushcf(L, lj_cf_package_unloadlib, 1);
576 lua_setfield(L, -2, "__gc"); 594 lua_setfield(L, -2, "__gc");
577 luaL_register(L, LUA_LOADLIBNAME, package_lib); 595 luaL_register(L, LUA_LOADLIBNAME, package_lib);
578 lua_pushvalue(L, -1); 596 lua_copy(L, -1, LUA_ENVIRONINDEX);
579 lua_replace(L, LUA_ENVIRONINDEX);
580 lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0); 597 lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0);
581 for (i = 0; package_loaders[i] != NULL; i++) { 598 for (i = 0; package_loaders[i] != NULL; i++) {
582 lj_lib_pushcf(L, package_loaders[i], 1); 599 lj_lib_pushcf(L, package_loaders[i], 1);
583 lua_rawseti(L, -2, i+1); 600 lua_rawseti(L, -2, i+1);
584 } 601 }
602#if LJ_52
603 lua_pushvalue(L, -1);
604 lua_setfield(L, -3, "searchers");
605#endif
585 lua_setfield(L, -2, "loaders"); 606 lua_setfield(L, -2, "loaders");
586 lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); 607 lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
587 noenv = lua_toboolean(L, -1); 608 noenv = lua_toboolean(L, -1);
diff --git a/src/lib_string.c b/src/lib_string.c
index d0b79160..0d9290bc 100644
--- a/src/lib_string.c
+++ b/src/lib_string.c
@@ -6,8 +6,6 @@
6** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h 6** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7*/ 7*/
8 8
9#include <stdio.h>
10
11#define lib_string_c 9#define lib_string_c
12#define LUA_LIB 10#define LUA_LIB
13 11
@@ -18,6 +16,7 @@
18#include "lj_obj.h" 16#include "lj_obj.h"
19#include "lj_gc.h" 17#include "lj_gc.h"
20#include "lj_err.h" 18#include "lj_err.h"
19#include "lj_buf.h"
21#include "lj_str.h" 20#include "lj_str.h"
22#include "lj_tab.h" 21#include "lj_tab.h"
23#include "lj_meta.h" 22#include "lj_meta.h"
@@ -25,17 +24,19 @@
25#include "lj_ff.h" 24#include "lj_ff.h"
26#include "lj_bcdump.h" 25#include "lj_bcdump.h"
27#include "lj_char.h" 26#include "lj_char.h"
27#include "lj_strfmt.h"
28#include "lj_lib.h" 28#include "lj_lib.h"
29 29
30/* ------------------------------------------------------------------------ */ 30/* ------------------------------------------------------------------------ */
31 31
32#define LJLIB_MODULE_string 32#define LJLIB_MODULE_string
33 33
34LJLIB_ASM(string_len) LJLIB_REC(.) 34LJLIB_LUA(string_len) /*
35{ 35 function(s)
36 lj_lib_checkstr(L, 1); 36 CHECK_str(s)
37 return FFH_RETRY; 37 return #s
38} 38 end
39*/
39 40
40LJLIB_ASM(string_byte) LJLIB_REC(string_range 0) 41LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
41{ 42{
@@ -57,21 +58,21 @@ LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
57 lj_state_checkstack(L, (MSize)n); 58 lj_state_checkstack(L, (MSize)n);
58 p = (const unsigned char *)strdata(s) + start; 59 p = (const unsigned char *)strdata(s) + start;
59 for (i = 0; i < n; i++) 60 for (i = 0; i < n; i++)
60 setintV(L->base + i-1, p[i]); 61 setintV(L->base + i-1-LJ_FR2, p[i]);
61 return FFH_RES(n); 62 return FFH_RES(n);
62} 63}
63 64
64LJLIB_ASM(string_char) 65LJLIB_ASM(string_char) LJLIB_REC(.)
65{ 66{
66 int i, nargs = (int)(L->top - L->base); 67 int i, nargs = (int)(L->top - L->base);
67 char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, (MSize)nargs); 68 char *buf = lj_buf_tmp(L, (MSize)nargs);
68 for (i = 1; i <= nargs; i++) { 69 for (i = 1; i <= nargs; i++) {
69 int32_t k = lj_lib_checkint(L, i); 70 int32_t k = lj_lib_checkint(L, i);
70 if (!checku8(k)) 71 if (!checku8(k))
71 lj_err_arg(L, i, LJ_ERR_BADVAL); 72 lj_err_arg(L, i, LJ_ERR_BADVAL);
72 buf[i-1] = (char)k; 73 buf[i-1] = (char)k;
73 } 74 }
74 setstrV(L, L->base-1, lj_str_new(L, buf, (size_t)nargs)); 75 setstrV(L, L->base-1-LJ_FR2, lj_str_new(L, buf, (size_t)nargs));
75 return FFH_RES(1); 76 return FFH_RES(1);
76} 77}
77 78
@@ -83,68 +84,38 @@ LJLIB_ASM(string_sub) LJLIB_REC(string_range 1)
83 return FFH_RETRY; 84 return FFH_RETRY;
84} 85}
85 86
86LJLIB_ASM(string_rep) 87LJLIB_CF(string_rep) LJLIB_REC(.)
87{ 88{
88 GCstr *s = lj_lib_checkstr(L, 1); 89 GCstr *s = lj_lib_checkstr(L, 1);
89 int32_t k = lj_lib_checkint(L, 2); 90 int32_t rep = lj_lib_checkint(L, 2);
90 GCstr *sep = lj_lib_optstr(L, 3); 91 GCstr *sep = lj_lib_optstr(L, 3);
91 int32_t len = (int32_t)s->len; 92 SBuf *sb = lj_buf_tmp_(L);
92 global_State *g = G(L); 93 if (sep && rep > 1) {
93 int64_t tlen; 94 GCstr *s2 = lj_buf_cat2str(L, sep, s);
94 const char *src; 95 lj_buf_reset(sb);
95 char *buf; 96 lj_buf_putstr(sb, s);
96 if (k <= 0) { 97 s = s2;
97 empty: 98 rep--;
98 setstrV(L, L->base-1, &g->strempty);
99 return FFH_RES(1);
100 }
101 if (sep) {
102 tlen = (int64_t)len + sep->len;
103 if (tlen > LJ_MAX_STR)
104 lj_err_caller(L, LJ_ERR_STROV);
105 tlen *= k;
106 if (tlen > LJ_MAX_STR)
107 lj_err_caller(L, LJ_ERR_STROV);
108 } else {
109 tlen = (int64_t)k * len;
110 if (tlen > LJ_MAX_STR)
111 lj_err_caller(L, LJ_ERR_STROV);
112 }
113 if (tlen == 0) goto empty;
114 buf = lj_str_needbuf(L, &g->tmpbuf, (MSize)tlen);
115 src = strdata(s);
116 if (sep) {
117 tlen -= sep->len; /* Ignore trailing separator. */
118 if (k > 1) { /* Paste one string and one separator. */
119 int32_t i;
120 i = 0; while (i < len) *buf++ = src[i++];
121 src = strdata(sep); len = sep->len;
122 i = 0; while (i < len) *buf++ = src[i++];
123 src = g->tmpbuf.buf; len += s->len; k--; /* Now copy that k-1 times. */
124 }
125 } 99 }
126 do { 100 sb = lj_buf_putstr_rep(sb, s, rep);
127 int32_t i = 0; 101 setstrV(L, L->top-1, lj_buf_str(L, sb));
128 do { *buf++ = src[i++]; } while (i < len); 102 lj_gc_check(L);
129 } while (--k > 0); 103 return 1;
130 setstrV(L, L->base-1, lj_str_new(L, g->tmpbuf.buf, (size_t)tlen));
131 return FFH_RES(1);
132} 104}
133 105
134LJLIB_ASM(string_reverse) 106LJLIB_ASM(string_reverse) LJLIB_REC(string_op IRCALL_lj_buf_putstr_reverse)
135{ 107{
136 GCstr *s = lj_lib_checkstr(L, 1); 108 lj_lib_checkstr(L, 1);
137 lj_str_needbuf(L, &G(L)->tmpbuf, s->len);
138 return FFH_RETRY; 109 return FFH_RETRY;
139} 110}
140LJLIB_ASM_(string_lower) 111LJLIB_ASM_(string_lower) LJLIB_REC(string_op IRCALL_lj_buf_putstr_lower)
141LJLIB_ASM_(string_upper) 112LJLIB_ASM_(string_upper) LJLIB_REC(string_op IRCALL_lj_buf_putstr_upper)
142 113
143/* ------------------------------------------------------------------------ */ 114/* ------------------------------------------------------------------------ */
144 115
145static int writer_buf(lua_State *L, const void *p, size_t size, void *b) 116static int writer_buf(lua_State *L, const void *p, size_t size, void *sb)
146{ 117{
147 luaL_addlstring((luaL_Buffer *)b, (const char *)p, size); 118 lj_buf_putmem((SBuf *)sb, p, (MSize)size);
148 UNUSED(L); 119 UNUSED(L);
149 return 0; 120 return 0;
150} 121}
@@ -153,19 +124,19 @@ LJLIB_CF(string_dump)
153{ 124{
154 GCfunc *fn = lj_lib_checkfunc(L, 1); 125 GCfunc *fn = lj_lib_checkfunc(L, 1);
155 int strip = L->base+1 < L->top && tvistruecond(L->base+1); 126 int strip = L->base+1 < L->top && tvistruecond(L->base+1);
156 luaL_Buffer b; 127 SBuf *sb = lj_buf_tmp_(L); /* Assumes lj_bcwrite() doesn't use tmpbuf. */
157 L->top = L->base+1; 128 L->top = L->base+1;
158 luaL_buffinit(L, &b); 129 if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, sb, strip))
159 if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, &b, strip))
160 lj_err_caller(L, LJ_ERR_STRDUMP); 130 lj_err_caller(L, LJ_ERR_STRDUMP);
161 luaL_pushresult(&b); 131 setstrV(L, L->top-1, lj_buf_str(L, sb));
132 lj_gc_check(L);
162 return 1; 133 return 1;
163} 134}
164 135
165/* ------------------------------------------------------------------------ */ 136/* ------------------------------------------------------------------------ */
166 137
167/* macro to `unsign' a character */ 138/* macro to `unsign' a character */
168#define uchar(c) ((unsigned char)(c)) 139#define uchar(c) ((unsigned char)(c))
169 140
170#define CAP_UNFINISHED (-1) 141#define CAP_UNFINISHED (-1)
171#define CAP_POSITION (-2) 142#define CAP_POSITION (-2)
@@ -183,7 +154,6 @@ typedef struct MatchState {
183} MatchState; 154} MatchState;
184 155
185#define L_ESC '%' 156#define L_ESC '%'
186#define SPECIALS "^$*+?.([%-"
187 157
188static int check_capture(MatchState *ms, int l) 158static int check_capture(MatchState *ms, int l)
189{ 159{
@@ -450,30 +420,6 @@ static const char *match(MatchState *ms, const char *s, const char *p)
450 return s; 420 return s;
451} 421}
452 422
453static const char *lmemfind(const char *s1, size_t l1,
454 const char *s2, size_t l2)
455{
456 if (l2 == 0) {
457 return s1; /* empty strings are everywhere */
458 } else if (l2 > l1) {
459 return NULL; /* avoids a negative `l1' */
460 } else {
461 const char *init; /* to search for a `*s2' inside `s1' */
462 l2--; /* 1st char will be checked by `memchr' */
463 l1 = l1-l2; /* `s2' cannot be found after that */
464 while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
465 init++; /* 1st char is already checked */
466 if (memcmp(init, s2+1, l2) == 0) {
467 return init-1;
468 } else { /* correct `l1' and `s1' to try again */
469 l1 -= (size_t)(init-s1);
470 s1 = init;
471 }
472 }
473 return NULL; /* not found */
474 }
475}
476
477static void push_onecapture(MatchState *ms, int i, const char *s, const char *e) 423static void push_onecapture(MatchState *ms, int i, const char *s, const char *e)
478{ 424{
479 if (i >= ms->level) { 425 if (i >= ms->level) {
@@ -501,64 +447,60 @@ static int push_captures(MatchState *ms, const char *s, const char *e)
501 return nlevels; /* number of strings pushed */ 447 return nlevels; /* number of strings pushed */
502} 448}
503 449
504static ptrdiff_t posrelat(ptrdiff_t pos, size_t len)
505{
506 /* relative string position: negative means back from end */
507 if (pos < 0) pos += (ptrdiff_t)len + 1;
508 return (pos >= 0) ? pos : 0;
509}
510
511static int str_find_aux(lua_State *L, int find) 450static int str_find_aux(lua_State *L, int find)
512{ 451{
513 size_t l1, l2; 452 GCstr *s = lj_lib_checkstr(L, 1);
514 const char *s = luaL_checklstring(L, 1, &l1); 453 GCstr *p = lj_lib_checkstr(L, 2);
515 const char *p = luaL_checklstring(L, 2, &l2); 454 int32_t start = lj_lib_optint(L, 3, 1);
516 ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1; 455 MSize st;
517 if (init < 0) { 456 if (start < 0) start += (int32_t)s->len; else start--;
518 init = 0; 457 if (start < 0) start = 0;
519 } else if ((size_t)(init) > l1) { 458 st = (MSize)start;
459 if (st > s->len) {
520#if LJ_52 460#if LJ_52
521 setnilV(L->top-1); 461 setnilV(L->top-1);
522 return 1; 462 return 1;
523#else 463#else
524 init = (ptrdiff_t)l1; 464 st = s->len;
525#endif 465#endif
526 } 466 }
527 if (find && (lua_toboolean(L, 4) || /* explicit request? */ 467 if (find && ((L->base+3 < L->top && tvistruecond(L->base+3)) ||
528 strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */ 468 !lj_str_haspattern(p))) { /* Search for fixed string. */
529 /* do a plain search */ 469 const char *q = lj_str_find(strdata(s)+st, strdata(p), s->len-st, p->len);
530 const char *s2 = lmemfind(s+init, l1-(size_t)init, p, l2); 470 if (q) {
531 if (s2) { 471 setintV(L->top-2, (int32_t)(q-strdata(s)) + 1);
532 lua_pushinteger(L, s2-s+1); 472 setintV(L->top-1, (int32_t)(q-strdata(s)) + (int32_t)p->len);
533 lua_pushinteger(L, s2-s+(ptrdiff_t)l2);
534 return 2; 473 return 2;
535 } 474 }
536 } else { 475 } else { /* Search for pattern. */
537 MatchState ms; 476 MatchState ms;
538 int anchor = (*p == '^') ? (p++, 1) : 0; 477 const char *pstr = strdata(p);
539 const char *s1=s+init; 478 const char *sstr = strdata(s) + st;
479 int anchor = 0;
480 if (*pstr == '^') { pstr++; anchor = 1; }
540 ms.L = L; 481 ms.L = L;
541 ms.src_init = s; 482 ms.src_init = strdata(s);
542 ms.src_end = s+l1; 483 ms.src_end = strdata(s) + s->len;
543 do { 484 do { /* Loop through string and try to match the pattern. */
544 const char *res; 485 const char *q;
545 ms.level = ms.depth = 0; 486 ms.level = ms.depth = 0;
546 if ((res=match(&ms, s1, p)) != NULL) { 487 q = match(&ms, sstr, pstr);
488 if (q) {
547 if (find) { 489 if (find) {
548 lua_pushinteger(L, s1-s+1); /* start */ 490 setintV(L->top++, (int32_t)(sstr-(strdata(s)-1)));
549 lua_pushinteger(L, res-s); /* end */ 491 setintV(L->top++, (int32_t)(q-strdata(s)));
550 return push_captures(&ms, NULL, 0) + 2; 492 return push_captures(&ms, NULL, NULL) + 2;
551 } else { 493 } else {
552 return push_captures(&ms, s1, res); 494 return push_captures(&ms, sstr, q);
553 } 495 }
554 } 496 }
555 } while (s1++ < ms.src_end && !anchor); 497 } while (sstr++ < ms.src_end && !anchor);
556 } 498 }
557 lua_pushnil(L); /* not found */ 499 setnilV(L->top-1); /* Not found. */
558 return 1; 500 return 1;
559} 501}
560 502
561LJLIB_CF(string_find) 503LJLIB_CF(string_find) LJLIB_REC(.)
562{ 504{
563 return str_find_aux(L, 1); 505 return str_find_aux(L, 1);
564} 506}
@@ -698,222 +640,91 @@ LJLIB_CF(string_gsub)
698 640
699/* ------------------------------------------------------------------------ */ 641/* ------------------------------------------------------------------------ */
700 642
701/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */ 643/* Emulate tostring() inline. */
702#define MAX_FMTITEM 512 644static GCstr *string_fmt_tostring(lua_State *L, int arg, int retry)
703/* valid flags in a format specification */
704#define FMT_FLAGS "-+ #0"
705/*
706** maximum size of each format specification (such as '%-099.99d')
707** (+10 accounts for %99.99x plus margin of error)
708*/
709#define MAX_FMTSPEC (sizeof(FMT_FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
710
711static void addquoted(lua_State *L, luaL_Buffer *b, int arg)
712{
713 GCstr *str = lj_lib_checkstr(L, arg);
714 int32_t len = (int32_t)str->len;
715 const char *s = strdata(str);
716 luaL_addchar(b, '"');
717 while (len--) {
718 uint32_t c = uchar(*s);
719 if (c == '"' || c == '\\' || c == '\n') {
720 luaL_addchar(b, '\\');
721 } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */
722 uint32_t d;
723 luaL_addchar(b, '\\');
724 if (c >= 100 || lj_char_isdigit(uchar(s[1]))) {
725 luaL_addchar(b, '0'+(c >= 100)); if (c >= 100) c -= 100;
726 goto tens;
727 } else if (c >= 10) {
728 tens:
729 d = (c * 205) >> 11; c -= d * 10; luaL_addchar(b, '0'+d);
730 }
731 c += '0';
732 }
733 luaL_addchar(b, c);
734 s++;
735 }
736 luaL_addchar(b, '"');
737}
738
739static const char *scanformat(lua_State *L, const char *strfrmt, char *form)
740{
741 const char *p = strfrmt;
742 while (*p != '\0' && strchr(FMT_FLAGS, *p) != NULL) p++; /* skip flags */
743 if ((size_t)(p - strfrmt) >= sizeof(FMT_FLAGS))
744 lj_err_caller(L, LJ_ERR_STRFMTR);
745 if (lj_char_isdigit(uchar(*p))) p++; /* skip width */
746 if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
747 if (*p == '.') {
748 p++;
749 if (lj_char_isdigit(uchar(*p))) p++; /* skip precision */
750 if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
751 }
752 if (lj_char_isdigit(uchar(*p)))
753 lj_err_caller(L, LJ_ERR_STRFMTW);
754 *(form++) = '%';
755 strncpy(form, strfrmt, (size_t)(p - strfrmt + 1));
756 form += p - strfrmt + 1;
757 *form = '\0';
758 return p;
759}
760
761static void addintlen(char *form)
762{
763 size_t l = strlen(form);
764 char spec = form[l - 1];
765 strcpy(form + l - 1, LUA_INTFRMLEN);
766 form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
767 form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
768}
769
770static unsigned LUA_INTFRM_T num2intfrm(lua_State *L, int arg)
771{
772 if (sizeof(LUA_INTFRM_T) == 4) {
773 return (LUA_INTFRM_T)lj_lib_checkbit(L, arg);
774 } else {
775 cTValue *o;
776 lj_lib_checknumber(L, arg);
777 o = L->base+arg-1;
778 if (tvisint(o))
779 return (LUA_INTFRM_T)intV(o);
780 else
781 return (LUA_INTFRM_T)numV(o);
782 }
783}
784
785static unsigned LUA_INTFRM_T num2uintfrm(lua_State *L, int arg)
786{
787 if (sizeof(LUA_INTFRM_T) == 4) {
788 return (unsigned LUA_INTFRM_T)lj_lib_checkbit(L, arg);
789 } else {
790 cTValue *o;
791 lj_lib_checknumber(L, arg);
792 o = L->base+arg-1;
793 if (tvisint(o))
794 return (unsigned LUA_INTFRM_T)intV(o);
795 else if ((int32_t)o->u32.hi < 0)
796 return (unsigned LUA_INTFRM_T)(LUA_INTFRM_T)numV(o);
797 else
798 return (unsigned LUA_INTFRM_T)numV(o);
799 }
800}
801
802static GCstr *meta_tostring(lua_State *L, int arg)
803{ 645{
804 TValue *o = L->base+arg-1; 646 TValue *o = L->base+arg-1;
805 cTValue *mo; 647 cTValue *mo;
806 lua_assert(o < L->top); /* Caller already checks for existence. */ 648 lj_assertL(o < L->top, "bad usage"); /* Caller already checks for existence. */
807 if (LJ_LIKELY(tvisstr(o))) 649 if (LJ_LIKELY(tvisstr(o)))
808 return strV(o); 650 return strV(o);
809 if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { 651 if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
810 copyTV(L, L->top++, mo); 652 copyTV(L, L->top++, mo);
811 copyTV(L, L->top++, o); 653 copyTV(L, L->top++, o);
812 lua_call(L, 1, 1); 654 lua_call(L, 1, 1);
813 L->top--; 655 copyTV(L, L->base+arg-1, --L->top);
814 if (tvisstr(L->top)) 656 return NULL; /* Buffer may be overwritten, retry. */
815 return strV(L->top);
816 o = L->base+arg-1;
817 copyTV(L, o, L->top);
818 }
819 if (tvisnumber(o)) {
820 return lj_str_fromnumber(L, o);
821 } else if (tvisnil(o)) {
822 return lj_str_newlit(L, "nil");
823 } else if (tvisfalse(o)) {
824 return lj_str_newlit(L, "false");
825 } else if (tvistrue(o)) {
826 return lj_str_newlit(L, "true");
827 } else {
828 if (tvisfunc(o) && isffunc(funcV(o)))
829 lj_str_pushf(L, "function: builtin#%d", funcV(o)->c.ffid);
830 else
831 lj_str_pushf(L, "%s: %p", lj_typename(o), lua_topointer(L, arg));
832 L->top--;
833 return strV(L->top);
834 } 657 }
835} 658 return lj_strfmt_obj(L, o);
836 659}
837LJLIB_CF(string_format) 660
838{ 661LJLIB_CF(string_format) LJLIB_REC(.)
839 int arg = 1, top = (int)(L->top - L->base); 662{
840 GCstr *fmt = lj_lib_checkstr(L, arg); 663 int arg, top = (int)(L->top - L->base);
841 const char *strfrmt = strdata(fmt); 664 GCstr *fmt;
842 const char *strfrmt_end = strfrmt + fmt->len; 665 SBuf *sb;
843 luaL_Buffer b; 666 FormatState fs;
844 luaL_buffinit(L, &b); 667 SFormat sf;
845 while (strfrmt < strfrmt_end) { 668 int retry = 0;
846 if (*strfrmt != L_ESC) { 669again:
847 luaL_addchar(&b, *strfrmt++); 670 arg = 1;
848 } else if (*++strfrmt == L_ESC) { 671 sb = lj_buf_tmp_(L);
849 luaL_addchar(&b, *strfrmt++); /* %% */ 672 fmt = lj_lib_checkstr(L, arg);
850 } else { /* format item */ 673 lj_strfmt_init(&fs, strdata(fmt), fmt->len);
851 char form[MAX_FMTSPEC]; /* to store the format (`%...') */ 674 while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) {
852 char buff[MAX_FMTITEM]; /* to store the formatted item */ 675 if (sf == STRFMT_LIT) {
853 int n = 0; 676 lj_buf_putmem(sb, fs.str, fs.len);
677 } else if (sf == STRFMT_ERR) {
678 lj_err_callerv(L, LJ_ERR_STRFMT, strdata(lj_str_new(L, fs.str, fs.len)));
679 } else {
854 if (++arg > top) 680 if (++arg > top)
855 luaL_argerror(L, arg, lj_obj_typename[0]); 681 luaL_argerror(L, arg, lj_obj_typename[0]);
856 strfrmt = scanformat(L, strfrmt, form); 682 switch (STRFMT_TYPE(sf)) {
857 switch (*strfrmt++) { 683 case STRFMT_INT:
858 case 'c': 684 if (tvisint(L->base+arg-1)) {
859 n = sprintf(buff, form, lj_lib_checkint(L, arg)); 685 int32_t k = intV(L->base+arg-1);
686 if (sf == STRFMT_INT)
687 lj_strfmt_putint(sb, k); /* Shortcut for plain %d. */
688 else
689 lj_strfmt_putfxint(sb, sf, k);
690 } else {
691 lj_strfmt_putfnum_int(sb, sf, lj_lib_checknum(L, arg));
692 }
860 break; 693 break;
861 case 'd': case 'i': 694 case STRFMT_UINT:
862 addintlen(form); 695 if (tvisint(L->base+arg-1))
863 n = sprintf(buff, form, num2intfrm(L, arg)); 696 lj_strfmt_putfxint(sb, sf, intV(L->base+arg-1));
697 else
698 lj_strfmt_putfnum_uint(sb, sf, lj_lib_checknum(L, arg));
864 break; 699 break;
865 case 'o': case 'u': case 'x': case 'X': 700 case STRFMT_NUM:
866 addintlen(form); 701 lj_strfmt_putfnum(sb, sf, lj_lib_checknum(L, arg));
867 n = sprintf(buff, form, num2uintfrm(L, arg));
868 break; 702 break;
869 case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': { 703 case STRFMT_STR: {
870 TValue tv; 704 GCstr *str = string_fmt_tostring(L, arg, retry);
871 tv.n = lj_lib_checknum(L, arg); 705 if (str == NULL)
872 if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) { 706 retry = 1;
873 /* Canonicalize output of non-finite values. */ 707 else if ((sf & STRFMT_T_QUOTED))
874 char *p, nbuf[LJ_STR_NUMBUF]; 708 lj_strfmt_putquoted(sb, str); /* No formatting. */
875 size_t len = lj_str_bufnum(nbuf, &tv); 709 else
876 if (strfrmt[-1] < 'a') { 710 lj_strfmt_putfstr(sb, sf, str);
877 nbuf[len-3] = nbuf[len-3] - 0x20;
878 nbuf[len-2] = nbuf[len-2] - 0x20;
879 nbuf[len-1] = nbuf[len-1] - 0x20;
880 }
881 nbuf[len] = '\0';
882 for (p = form; *p < 'A' && *p != '.'; p++) ;
883 *p++ = 's'; *p = '\0';
884 n = sprintf(buff, form, nbuf);
885 break;
886 }
887 n = sprintf(buff, form, (double)tv.n);
888 break; 711 break;
889 } 712 }
890 case 'q': 713 case STRFMT_CHAR:
891 addquoted(L, &b, arg); 714 lj_strfmt_putfchar(sb, sf, lj_lib_checkint(L, arg));
892 continue; 715 break;
893 case 'p': 716 case STRFMT_PTR: /* No formatting. */
894 lj_str_pushf(L, "%p", lua_topointer(L, arg)); 717 lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1));
895 luaL_addvalue(&b);
896 continue;
897 case 's': {
898 GCstr *str = meta_tostring(L, arg);
899 if (!strchr(form, '.') && str->len >= 100) {
900 /* no precision and string is too long to be formatted;
901 keep original string */
902 setstrV(L, L->top++, str);
903 luaL_addvalue(&b);
904 continue;
905 }
906 n = sprintf(buff, form, strdata(str));
907 break; 718 break;
908 }
909 default: 719 default:
910 lj_err_callerv(L, LJ_ERR_STRFMTO, *(strfrmt -1)); 720 lj_assertL(0, "bad string format type");
911 break; 721 break;
912 } 722 }
913 luaL_addlstring(&b, buff, n);
914 } 723 }
915 } 724 }
916 luaL_pushresult(&b); 725 if (retry++ == 1) goto again;
726 setstrV(L, L->top-1, lj_buf_str(L, sb));
727 lj_gc_check(L);
917 return 1; 728 return 1;
918} 729}
919 730
@@ -926,10 +737,6 @@ LUALIB_API int luaopen_string(lua_State *L)
926 GCtab *mt; 737 GCtab *mt;
927 global_State *g; 738 global_State *g;
928 LJ_LIB_REG(L, LUA_STRLIBNAME, string); 739 LJ_LIB_REG(L, LUA_STRLIBNAME, string);
929#if defined(LUA_COMPAT_GFIND) && !LJ_52
930 lua_getfield(L, -1, "gmatch");
931 lua_setfield(L, -2, "gfind");
932#endif
933 mt = lj_tab_new(L, 0, 1); 740 mt = lj_tab_new(L, 0, 1);
934 /* NOBARRIER: basemt is a GC root. */ 741 /* NOBARRIER: basemt is a GC root. */
935 g = G(L); 742 g = G(L);
diff --git a/src/lib_table.c b/src/lib_table.c
index 9842513b..4e612146 100644
--- a/src/lib_table.c
+++ b/src/lib_table.c
@@ -16,57 +16,43 @@
16#include "lj_obj.h" 16#include "lj_obj.h"
17#include "lj_gc.h" 17#include "lj_gc.h"
18#include "lj_err.h" 18#include "lj_err.h"
19#include "lj_buf.h"
19#include "lj_tab.h" 20#include "lj_tab.h"
21#include "lj_ff.h"
20#include "lj_lib.h" 22#include "lj_lib.h"
21 23
22/* ------------------------------------------------------------------------ */ 24/* ------------------------------------------------------------------------ */
23 25
24#define LJLIB_MODULE_table 26#define LJLIB_MODULE_table
25 27
26LJLIB_CF(table_foreachi) 28LJLIB_LUA(table_foreachi) /*
27{ 29 function(t, f)
28 GCtab *t = lj_lib_checktab(L, 1); 30 CHECK_tab(t)
29 GCfunc *func = lj_lib_checkfunc(L, 2); 31 CHECK_func(f)
30 MSize i, n = lj_tab_len(t); 32 for i=1,#t do
31 for (i = 1; i <= n; i++) { 33 local r = f(i, t[i])
32 cTValue *val; 34 if r ~= nil then return r end
33 setfuncV(L, L->top, func); 35 end
34 setintV(L->top+1, i); 36 end
35 val = lj_tab_getint(t, (int32_t)i); 37*/
36 if (val) { copyTV(L, L->top+2, val); } else { setnilV(L->top+2); }
37 L->top += 3;
38 lua_call(L, 2, 1);
39 if (!tvisnil(L->top-1))
40 return 1;
41 L->top--;
42 }
43 return 0;
44}
45 38
46LJLIB_CF(table_foreach) 39LJLIB_LUA(table_foreach) /*
47{ 40 function(t, f)
48 GCtab *t = lj_lib_checktab(L, 1); 41 CHECK_tab(t)
49 GCfunc *func = lj_lib_checkfunc(L, 2); 42 CHECK_func(f)
50 L->top = L->base+3; 43 for k, v in PAIRS(t) do
51 setnilV(L->top-1); 44 local r = f(k, v)
52 while (lj_tab_next(L, t, L->top-1)) { 45 if r ~= nil then return r end
53 copyTV(L, L->top+2, L->top); 46 end
54 copyTV(L, L->top+1, L->top-1); 47 end
55 setfuncV(L, L->top, func); 48*/
56 L->top += 3;
57 lua_call(L, 2, 1);
58 if (!tvisnil(L->top-1))
59 return 1;
60 L->top--;
61 }
62 return 0;
63}
64 49
65LJLIB_ASM(table_getn) LJLIB_REC(.) 50LJLIB_LUA(table_getn) /*
66{ 51 function(t)
67 lj_lib_checktab(L, 1); 52 CHECK_tab(t)
68 return FFH_UNREACHABLE; 53 return #t
69} 54 end
55*/
70 56
71LJLIB_CF(table_maxn) 57LJLIB_CF(table_maxn)
72{ 58{
@@ -119,52 +105,67 @@ LJLIB_CF(table_insert) LJLIB_REC(.)
119 return 0; 105 return 0;
120} 106}
121 107
122LJLIB_CF(table_remove) LJLIB_REC(.) 108LJLIB_LUA(table_remove) /*
123{ 109 function(t, pos)
124 GCtab *t = lj_lib_checktab(L, 1); 110 CHECK_tab(t)
125 int32_t e = (int32_t)lj_tab_len(t); 111 local len = #t
126 int32_t pos = lj_lib_optint(L, 2, e); 112 if pos == nil then
127 if (!(1 <= pos && pos <= e)) /* Nothing to remove? */ 113 if len ~= 0 then
128 return 0; 114 local old = t[len]
129 lua_rawgeti(L, 1, pos); /* Get previous value. */ 115 t[len] = nil
130 /* NOBARRIER: This just moves existing elements around. */ 116 return old
131 for (; pos < e; pos++) { 117 end
132 cTValue *src = lj_tab_getint(t, pos+1); 118 else
133 TValue *dst = lj_tab_setint(L, t, pos); 119 CHECK_int(pos)
134 if (src) { 120 if pos >= 1 and pos <= len then
135 copyTV(L, dst, src); 121 local old = t[pos]
136 } else { 122 for i=pos+1,len do
137 setnilV(dst); 123 t[i-1] = t[i]
138 } 124 end
139 } 125 t[len] = nil
140 setnilV(lj_tab_setint(L, t, e)); /* Remove (last) value. */ 126 return old
141 return 1; /* Return previous value. */ 127 end
142} 128 end
129 end
130*/
131
132LJLIB_LUA(table_move) /*
133 function(a1, f, e, t, a2)
134 CHECK_tab(a1)
135 CHECK_int(f)
136 CHECK_int(e)
137 CHECK_int(t)
138 if a2 == nil then a2 = a1 end
139 CHECK_tab(a2)
140 if e >= f then
141 local d = t - f
142 if t > e or t <= f or a2 ~= a1 then
143 for i=f,e do a2[i+d] = a1[i] end
144 else
145 for i=e,f,-1 do a2[i+d] = a1[i] end
146 end
147 end
148 return a2
149 end
150*/
143 151
144LJLIB_CF(table_concat) 152LJLIB_CF(table_concat) LJLIB_REC(.)
145{ 153{
146 luaL_Buffer b;
147 GCtab *t = lj_lib_checktab(L, 1); 154 GCtab *t = lj_lib_checktab(L, 1);
148 GCstr *sep = lj_lib_optstr(L, 2); 155 GCstr *sep = lj_lib_optstr(L, 2);
149 MSize seplen = sep ? sep->len : 0;
150 int32_t i = lj_lib_optint(L, 3, 1); 156 int32_t i = lj_lib_optint(L, 3, 1);
151 int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ? 157 int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ?
152 lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t); 158 lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t);
153 luaL_buffinit(L, &b); 159 SBuf *sb = lj_buf_tmp_(L);
154 if (i <= e) { 160 SBuf *sbx = lj_buf_puttab(sb, t, sep, i, e);
155 for (;;) { 161 if (LJ_UNLIKELY(!sbx)) { /* Error: bad element type. */
156 cTValue *o; 162 int32_t idx = (int32_t)(intptr_t)sbufP(sb);
157 lua_rawgeti(L, 1, i); 163 cTValue *o = lj_tab_getint(t, idx);
158 o = L->top-1; 164 lj_err_callerv(L, LJ_ERR_TABCAT,
159 if (!(tvisstr(o) || tvisnumber(o))) 165 lj_obj_itypename[o ? itypemap(o) : ~LJ_TNIL], idx);
160 lj_err_callerv(L, LJ_ERR_TABCAT, lj_typename(o), i);
161 luaL_addvalue(&b);
162 if (i++ == e) break;
163 if (seplen)
164 luaL_addlstring(&b, strdata(sep), seplen);
165 }
166 } 166 }
167 luaL_pushresult(&b); 167 setstrV(L, L->top-1, lj_buf_str(L, sbx));
168 lj_gc_check(L);
168 return 1; 169 return 1;
169} 170}
170 171
@@ -284,6 +285,30 @@ LJLIB_CF(table_pack)
284} 285}
285#endif 286#endif
286 287
288LJLIB_NOREG LJLIB_CF(table_new) LJLIB_REC(.)
289{
290 int32_t a = lj_lib_checkint(L, 1);
291 int32_t h = lj_lib_checkint(L, 2);
292 lua_createtable(L, a, h);
293 return 1;
294}
295
296LJLIB_NOREG LJLIB_CF(table_clear) LJLIB_REC(.)
297{
298 lj_tab_clear(lj_lib_checktab(L, 1));
299 return 0;
300}
301
302static int luaopen_table_new(lua_State *L)
303{
304 return lj_lib_postreg(L, lj_cf_table_new, FF_table_new, "new");
305}
306
307static int luaopen_table_clear(lua_State *L)
308{
309 return lj_lib_postreg(L, lj_cf_table_clear, FF_table_clear, "clear");
310}
311
287/* ------------------------------------------------------------------------ */ 312/* ------------------------------------------------------------------------ */
288 313
289#include "lj_libdef.h" 314#include "lj_libdef.h"
@@ -295,6 +320,8 @@ LUALIB_API int luaopen_table(lua_State *L)
295 lua_getglobal(L, "unpack"); 320 lua_getglobal(L, "unpack");
296 lua_setfield(L, -2, "unpack"); 321 lua_setfield(L, -2, "unpack");
297#endif 322#endif
323 lj_lib_prereg(L, LUA_TABLIBNAME ".new", luaopen_table_new, tabV(L->top-1));
324 lj_lib_prereg(L, LUA_TABLIBNAME ".clear", luaopen_table_clear, tabV(L->top-1));
298 return 1; 325 return 1;
299} 326}
300 327
diff --git a/src/lj.supp b/src/lj.supp
deleted file mode 100644
index 217f7c89..00000000
--- a/src/lj.supp
+++ /dev/null
@@ -1,41 +0,0 @@
1# Valgrind suppression file for LuaJIT 2.0.
2{
3 Optimized string compare
4 Memcheck:Addr4
5 fun:lj_str_cmp
6}
7{
8 Optimized string compare
9 Memcheck:Addr1
10 fun:lj_str_cmp
11}
12{
13 Optimized string compare
14 Memcheck:Addr4
15 fun:lj_str_new
16}
17{
18 Optimized string compare
19 Memcheck:Addr1
20 fun:lj_str_new
21}
22{
23 Optimized string compare
24 Memcheck:Cond
25 fun:lj_str_new
26}
27{
28 Optimized string compare
29 Memcheck:Addr4
30 fun:str_fastcmp
31}
32{
33 Optimized string compare
34 Memcheck:Addr1
35 fun:str_fastcmp
36}
37{
38 Optimized string compare
39 Memcheck:Cond
40 fun:str_fastcmp
41}
diff --git a/src/lj_alloc.c b/src/lj_alloc.c
index dc64dca9..5de60b82 100644
--- a/src/lj_alloc.c
+++ b/src/lj_alloc.c
@@ -31,6 +31,7 @@
31#include "lj_def.h" 31#include "lj_def.h"
32#include "lj_arch.h" 32#include "lj_arch.h"
33#include "lj_alloc.h" 33#include "lj_alloc.h"
34#include "lj_prng.h"
34 35
35#ifndef LUAJIT_USE_SYSMALLOC 36#ifndef LUAJIT_USE_SYSMALLOC
36 37
@@ -72,15 +73,58 @@
72 73
73#define IS_DIRECT_BIT (SIZE_T_ONE) 74#define IS_DIRECT_BIT (SIZE_T_ONE)
74 75
76
77/* Determine system-specific block allocation method. */
75#if LJ_TARGET_WINDOWS 78#if LJ_TARGET_WINDOWS
76 79
77#define WIN32_LEAN_AND_MEAN 80#define WIN32_LEAN_AND_MEAN
78#include <windows.h> 81#include <windows.h>
79 82
83#define LJ_ALLOC_VIRTUALALLOC 1
84
85#if LJ_64 && !LJ_GC64
86#define LJ_ALLOC_NTAVM 1
87#endif
88
89#else
90
91#include <errno.h>
92/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
93#include <sys/mman.h>
94
95#define LJ_ALLOC_MMAP 1
96
80#if LJ_64 97#if LJ_64
81 98
99#define LJ_ALLOC_MMAP_PROBE 1
100
101#if LJ_GC64
102#define LJ_ALLOC_MBITS 47 /* 128 TB in LJ_GC64 mode. */
103#elif LJ_TARGET_X64 && LJ_HASJIT
104/* Due to limitations in the x64 compiler backend. */
105#define LJ_ALLOC_MBITS 31 /* 2 GB on x64 with !LJ_GC64. */
106#else
107#define LJ_ALLOC_MBITS 32 /* 4 GB on other archs with !LJ_GC64. */
108#endif
109
110#endif
111
112#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
113#define LJ_ALLOC_MMAP32 1
114#endif
115
116#if LJ_TARGET_LINUX
117#define LJ_ALLOC_MREMAP 1
118#endif
119
120#endif
121
122
123#if LJ_ALLOC_VIRTUALALLOC
124
125#if LJ_ALLOC_NTAVM
82/* Undocumented, but hey, that's what we all love so much about Windows. */ 126/* Undocumented, but hey, that's what we all love so much about Windows. */
83typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits, 127typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG_PTR zbits,
84 size_t *size, ULONG alloctype, ULONG prot); 128 size_t *size, ULONG alloctype, ULONG prot);
85static PNTAVM ntavm; 129static PNTAVM ntavm;
86 130
@@ -89,14 +133,15 @@ static PNTAVM ntavm;
89*/ 133*/
90#define NTAVM_ZEROBITS 1 134#define NTAVM_ZEROBITS 1
91 135
92static void INIT_MMAP(void) 136static void init_mmap(void)
93{ 137{
94 ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"), 138 ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
95 "NtAllocateVirtualMemory"); 139 "NtAllocateVirtualMemory");
96} 140}
141#define INIT_MMAP() init_mmap()
97 142
98/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */ 143/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
99static LJ_AINLINE void *CALL_MMAP(size_t size) 144static void *mmap_plain(size_t size)
100{ 145{
101 DWORD olderr = GetLastError(); 146 DWORD olderr = GetLastError();
102 void *ptr = NULL; 147 void *ptr = NULL;
@@ -107,7 +152,7 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
107} 152}
108 153
109/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ 154/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
110static LJ_AINLINE void *DIRECT_MMAP(size_t size) 155static void *direct_mmap(size_t size)
111{ 156{
112 DWORD olderr = GetLastError(); 157 DWORD olderr = GetLastError();
113 void *ptr = NULL; 158 void *ptr = NULL;
@@ -119,31 +164,32 @@ static LJ_AINLINE void *DIRECT_MMAP(size_t size)
119 164
120#else 165#else
121 166
122#define INIT_MMAP() ((void)0)
123
124/* Win32 MMAP via VirtualAlloc */ 167/* Win32 MMAP via VirtualAlloc */
125static LJ_AINLINE void *CALL_MMAP(size_t size) 168static void *mmap_plain(size_t size)
126{ 169{
127 DWORD olderr = GetLastError(); 170 DWORD olderr = GetLastError();
128 void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); 171 void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
129 SetLastError(olderr); 172 SetLastError(olderr);
130 return ptr ? ptr : MFAIL; 173 return ptr ? ptr : MFAIL;
131} 174}
132 175
133/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ 176/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
134static LJ_AINLINE void *DIRECT_MMAP(size_t size) 177static void *direct_mmap(size_t size)
135{ 178{
136 DWORD olderr = GetLastError(); 179 DWORD olderr = GetLastError();
137 void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, 180 void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
138 PAGE_READWRITE); 181 PAGE_READWRITE);
139 SetLastError(olderr); 182 SetLastError(olderr);
140 return ptr ? ptr : MFAIL; 183 return ptr ? ptr : MFAIL;
141} 184}
142 185
143#endif 186#endif
144 187
188#define CALL_MMAP(prng, size) mmap_plain(size)
189#define DIRECT_MMAP(prng, size) direct_mmap(size)
190
145/* This function supports releasing coalesed segments */ 191/* This function supports releasing coalesed segments */
146static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) 192static int CALL_MUNMAP(void *ptr, size_t size)
147{ 193{
148 DWORD olderr = GetLastError(); 194 DWORD olderr = GetLastError();
149 MEMORY_BASIC_INFORMATION minfo; 195 MEMORY_BASIC_INFORMATION minfo;
@@ -163,10 +209,7 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
163 return 0; 209 return 0;
164} 210}
165 211
166#else 212#elif LJ_ALLOC_MMAP
167
168#include <errno.h>
169#include <sys/mman.h>
170 213
171#define MMAP_PROT (PROT_READ|PROT_WRITE) 214#define MMAP_PROT (PROT_READ|PROT_WRITE)
172#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) 215#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
@@ -174,105 +217,134 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
174#endif 217#endif
175#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) 218#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
176 219
177#if LJ_64 220#if LJ_ALLOC_MMAP_PROBE
178/* 64 bit mode needs special support for allocating memory in the lower 2GB. */
179 221
180#if defined(MAP_32BIT) 222#ifdef MAP_TRYFIXED
181 223#define MMAP_FLAGS_PROBE (MMAP_FLAGS|MAP_TRYFIXED)
182#if defined(__sun__)
183#define MMAP_REGION_START ((uintptr_t)0x1000)
184#else 224#else
185/* Actually this only gives us max. 1GB in current Linux kernels. */ 225#define MMAP_FLAGS_PROBE MMAP_FLAGS
186#define MMAP_REGION_START ((uintptr_t)0)
187#endif 226#endif
188 227
189static LJ_AINLINE void *CALL_MMAP(size_t size) 228#define LJ_ALLOC_MMAP_PROBE_MAX 30
229#define LJ_ALLOC_MMAP_PROBE_LINEAR 5
230
231#define LJ_ALLOC_MMAP_PROBE_LOWER ((uintptr_t)0x4000)
232
233static void *mmap_probe(PRNGState *rs, size_t size)
190{ 234{
235 /* Hint for next allocation. Doesn't need to be thread-safe. */
236 static uintptr_t hint_addr = 0;
191 int olderr = errno; 237 int olderr = errno;
192 void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0); 238 int retry;
239 for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
240 void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
241 uintptr_t addr = (uintptr_t)p;
242 if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
243 ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
244 /* We got a suitable address. Bump the hint address. */
245 hint_addr = addr + size;
246 errno = olderr;
247 return p;
248 }
249 if (p != MFAIL) {
250 munmap(p, size);
251 } else if (errno == ENOMEM) {
252 return MFAIL;
253 }
254 if (hint_addr) {
255 /* First, try linear probing. */
256 if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
257 hint_addr += 0x1000000;
258 if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
259 hint_addr = 0;
260 continue;
261 } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
262 /* Next, try a no-hint probe to get back an ASLR address. */
263 hint_addr = 0;
264 continue;
265 }
266 }
267 /* Finally, try pseudo-random probing. */
268 do {
269 hint_addr = lj_prng_u64(rs) & (((uintptr_t)1<<LJ_ALLOC_MBITS)-LJ_PAGESIZE);
270 } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
271 }
193 errno = olderr; 272 errno = olderr;
194 return ptr; 273 return MFAIL;
195} 274}
196 275
197#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || LJ_TARGET_CYGWIN 276#endif
277
278#if LJ_ALLOC_MMAP32
198 279
199/* OSX and FreeBSD mmap() use a naive first-fit linear search. 280#if LJ_TARGET_SOLARIS
200** That's perfect for us. Except that -pagezero_size must be set for OSX, 281#define LJ_ALLOC_MMAP32_START ((uintptr_t)0x1000)
201** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
202** to be reduced to 250MB on FreeBSD.
203*/
204#if LJ_TARGET_OSX || defined(__DragonFly__)
205#define MMAP_REGION_START ((uintptr_t)0x10000)
206#elif LJ_TARGET_PS4
207#define MMAP_REGION_START ((uintptr_t)0x4000)
208#else 282#else
209#define MMAP_REGION_START ((uintptr_t)0x10000000) 283#define LJ_ALLOC_MMAP32_START ((uintptr_t)0)
210#endif 284#endif
211#define MMAP_REGION_END ((uintptr_t)0x80000000)
212 285
213#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 286#if LJ_ALLOC_MMAP_PROBE
214#include <sys/resource.h> 287static void *mmap_map32(PRNGState *rs, size_t size)
288#else
289static void *mmap_map32(size_t size)
215#endif 290#endif
216
217static LJ_AINLINE void *CALL_MMAP(size_t size)
218{ 291{
219 int olderr = errno; 292#if LJ_ALLOC_MMAP_PROBE
220 /* Hint for next allocation. Doesn't need to be thread-safe. */ 293 static int fallback = 0;
221 static uintptr_t alloc_hint = MMAP_REGION_START; 294 if (fallback)
222 int retry = 0; 295 return mmap_probe(rs, size);
223#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
224 static int rlimit_modified = 0;
225 if (LJ_UNLIKELY(rlimit_modified == 0)) {
226 struct rlimit rlim;
227 rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
228 setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. */
229 rlimit_modified = 1;
230 }
231#endif 296#endif
232 for (;;) { 297 {
233 void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0); 298 int olderr = errno;
234 if ((uintptr_t)p >= MMAP_REGION_START && 299 void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
235 (uintptr_t)p + size < MMAP_REGION_END) { 300 errno = olderr;
236 alloc_hint = (uintptr_t)p + size; 301 /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
237 errno = olderr; 302#if LJ_ALLOC_MMAP_PROBE
238 return p; 303 if (ptr == MFAIL) {
304 fallback = 1;
305 return mmap_probe(rs, size);
239 } 306 }
240 if (p != CMFAIL) munmap(p, size);
241#if defined(__sun__) || defined(__DragonFly__)
242 alloc_hint += 0x1000000; /* Need near-exhaustive linear scan. */
243 if (alloc_hint + size < MMAP_REGION_END) continue;
244#endif 307#endif
245 if (retry) break; 308 return ptr;
246 retry = 1;
247 alloc_hint = MMAP_REGION_START;
248 } 309 }
249 errno = olderr;
250 return CMFAIL;
251} 310}
252 311
253#else
254
255#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"
256
257#endif 312#endif
258 313
314#if LJ_ALLOC_MMAP32
315#if LJ_ALLOC_MMAP_PROBE
316#define CALL_MMAP(prng, size) mmap_map32(prng, size)
259#else 317#else
260 318#define CALL_MMAP(prng, size) mmap_map32(size)
261/* 32 bit mode is easy. */ 319#endif
262static LJ_AINLINE void *CALL_MMAP(size_t size) 320#elif LJ_ALLOC_MMAP_PROBE
321#define CALL_MMAP(prng, size) mmap_probe(prng, size)
322#else
323static void *mmap_plain(size_t size)
263{ 324{
264 int olderr = errno; 325 int olderr = errno;
265 void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0); 326 void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
266 errno = olderr; 327 errno = olderr;
267 return ptr; 328 return ptr;
268} 329}
269 330#define CALL_MMAP(prng, size) mmap_plain(size)
270#endif 331#endif
271 332
272#define INIT_MMAP() ((void)0) 333#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
273#define DIRECT_MMAP(s) CALL_MMAP(s) 334
335#include <sys/resource.h>
274 336
275static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size) 337static void init_mmap(void)
338{
339 struct rlimit rlim;
340 rlim.rlim_cur = rlim.rlim_max = 0x10000;
341 setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail later. */
342}
343#define INIT_MMAP() init_mmap()
344
345#endif
346
347static int CALL_MUNMAP(void *ptr, size_t size)
276{ 348{
277 int olderr = errno; 349 int olderr = errno;
278 int ret = munmap(ptr, size); 350 int ret = munmap(ptr, size);
@@ -280,10 +352,9 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
280 return ret; 352 return ret;
281} 353}
282 354
283#if LJ_TARGET_LINUX 355#if LJ_ALLOC_MREMAP
284/* Need to define _GNU_SOURCE to get the mremap prototype. */ 356/* Need to define _GNU_SOURCE to get the mremap prototype. */
285static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, 357static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
286 int flags)
287{ 358{
288 int olderr = errno; 359 int olderr = errno;
289 ptr = mremap(ptr, osz, nsz, flags); 360 ptr = mremap(ptr, osz, nsz, flags);
@@ -294,7 +365,7 @@ static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
294#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv)) 365#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
295#define CALL_MREMAP_NOMOVE 0 366#define CALL_MREMAP_NOMOVE 0
296#define CALL_MREMAP_MAYMOVE 1 367#define CALL_MREMAP_MAYMOVE 1
297#if LJ_64 368#if LJ_64 && !LJ_GC64
298#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE 369#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
299#else 370#else
300#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE 371#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
@@ -303,6 +374,15 @@ static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
303 374
304#endif 375#endif
305 376
377
378#ifndef INIT_MMAP
379#define INIT_MMAP() ((void)0)
380#endif
381
382#ifndef DIRECT_MMAP
383#define DIRECT_MMAP(prng, s) CALL_MMAP(prng, s)
384#endif
385
306#ifndef CALL_MREMAP 386#ifndef CALL_MREMAP
307#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL) 387#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
308#endif 388#endif
@@ -459,6 +539,7 @@ struct malloc_state {
459 mchunkptr smallbins[(NSMALLBINS+1)*2]; 539 mchunkptr smallbins[(NSMALLBINS+1)*2];
460 tbinptr treebins[NTREEBINS]; 540 tbinptr treebins[NTREEBINS];
461 msegment seg; 541 msegment seg;
542 PRNGState *prng;
462}; 543};
463 544
464typedef struct malloc_state *mstate; 545typedef struct malloc_state *mstate;
@@ -516,7 +597,7 @@ static int has_segment_link(mstate m, msegmentptr ss)
516 noncontiguous segments are added. 597 noncontiguous segments are added.
517*/ 598*/
518#define TOP_FOOT_SIZE\ 599#define TOP_FOOT_SIZE\
519 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) 600 (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
520 601
521/* ---------------------------- Indexing Bins ---------------------------- */ 602/* ---------------------------- Indexing Bins ---------------------------- */
522 603
@@ -741,11 +822,11 @@ static int has_segment_link(mstate m, msegmentptr ss)
741 822
742/* ----------------------- Direct-mmapping chunks ----------------------- */ 823/* ----------------------- Direct-mmapping chunks ----------------------- */
743 824
744static void *direct_alloc(size_t nb) 825static void *direct_alloc(mstate m, size_t nb)
745{ 826{
746 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); 827 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
747 if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */ 828 if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */
748 char *mm = (char *)(DIRECT_MMAP(mmsize)); 829 char *mm = (char *)(DIRECT_MMAP(m->prng, mmsize));
749 if (mm != CMFAIL) { 830 if (mm != CMFAIL) {
750 size_t offset = align_offset(chunk2mem(mm)); 831 size_t offset = align_offset(chunk2mem(mm));
751 size_t psize = mmsize - offset - DIRECT_FOOT_PAD; 832 size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
@@ -757,6 +838,7 @@ static void *direct_alloc(size_t nb)
757 return chunk2mem(p); 838 return chunk2mem(p);
758 } 839 }
759 } 840 }
841 UNUSED(m);
760 return NULL; 842 return NULL;
761} 843}
762 844
@@ -905,7 +987,7 @@ static void *alloc_sys(mstate m, size_t nb)
905 987
906 /* Directly map large chunks */ 988 /* Directly map large chunks */
907 if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) { 989 if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
908 void *mem = direct_alloc(nb); 990 void *mem = direct_alloc(m, nb);
909 if (mem != 0) 991 if (mem != 0)
910 return mem; 992 return mem;
911 } 993 }
@@ -914,7 +996,7 @@ static void *alloc_sys(mstate m, size_t nb)
914 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; 996 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
915 size_t rsize = granularity_align(req); 997 size_t rsize = granularity_align(req);
916 if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */ 998 if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
917 char *mp = (char *)(CALL_MMAP(rsize)); 999 char *mp = (char *)(CALL_MMAP(m->prng, rsize));
918 if (mp != CMFAIL) { 1000 if (mp != CMFAIL) {
919 tbase = mp; 1001 tbase = mp;
920 tsize = rsize; 1002 tsize = rsize;
@@ -1141,12 +1223,13 @@ static void *tmalloc_small(mstate m, size_t nb)
1141 1223
1142/* ----------------------------------------------------------------------- */ 1224/* ----------------------------------------------------------------------- */
1143 1225
1144void *lj_alloc_create(void) 1226void *lj_alloc_create(PRNGState *rs)
1145{ 1227{
1146 size_t tsize = DEFAULT_GRANULARITY; 1228 size_t tsize = DEFAULT_GRANULARITY;
1147 char *tbase; 1229 char *tbase;
1148 INIT_MMAP(); 1230 INIT_MMAP();
1149 tbase = (char *)(CALL_MMAP(tsize)); 1231 UNUSED(rs);
1232 tbase = (char *)(CALL_MMAP(rs, tsize));
1150 if (tbase != CMFAIL) { 1233 if (tbase != CMFAIL) {
1151 size_t msize = pad_request(sizeof(struct malloc_state)); 1234 size_t msize = pad_request(sizeof(struct malloc_state));
1152 mchunkptr mn; 1235 mchunkptr mn;
@@ -1165,6 +1248,12 @@ void *lj_alloc_create(void)
1165 return NULL; 1248 return NULL;
1166} 1249}
1167 1250
1251void lj_alloc_setprng(void *msp, PRNGState *rs)
1252{
1253 mstate ms = (mstate)msp;
1254 ms->prng = rs;
1255}
1256
1168void lj_alloc_destroy(void *msp) 1257void lj_alloc_destroy(void *msp)
1169{ 1258{
1170 mstate ms = (mstate)msp; 1259 mstate ms = (mstate)msp;
diff --git a/src/lj_alloc.h b/src/lj_alloc.h
index f87a7cf3..669f50b7 100644
--- a/src/lj_alloc.h
+++ b/src/lj_alloc.h
@@ -9,7 +9,8 @@
9#include "lj_def.h" 9#include "lj_def.h"
10 10
11#ifndef LUAJIT_USE_SYSMALLOC 11#ifndef LUAJIT_USE_SYSMALLOC
12LJ_FUNC void *lj_alloc_create(void); 12LJ_FUNC void *lj_alloc_create(PRNGState *rs);
13LJ_FUNC void lj_alloc_setprng(void *msp, PRNGState *rs);
13LJ_FUNC void lj_alloc_destroy(void *msp); 14LJ_FUNC void lj_alloc_destroy(void *msp);
14LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize); 15LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
15#endif 16#endif
diff --git a/src/lj_api.c b/src/lj_api.c
index 1a34a774..f1cfebbc 100644
--- a/src/lj_api.c
+++ b/src/lj_api.c
@@ -24,11 +24,12 @@
24#include "lj_trace.h" 24#include "lj_trace.h"
25#include "lj_vm.h" 25#include "lj_vm.h"
26#include "lj_strscan.h" 26#include "lj_strscan.h"
27#include "lj_strfmt.h"
27 28
28/* -- Common helper functions --------------------------------------------- */ 29/* -- Common helper functions --------------------------------------------- */
29 30
30#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) 31#define lj_checkapi_slot(idx) \
31#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L)) 32 lj_checkapi((idx) <= (L->top - L->base), "stack slot %d out of range", (idx))
32 33
33static TValue *index2adr(lua_State *L, int idx) 34static TValue *index2adr(lua_State *L, int idx)
34{ 35{
@@ -36,7 +37,8 @@ static TValue *index2adr(lua_State *L, int idx)
36 TValue *o = L->base + (idx - 1); 37 TValue *o = L->base + (idx - 1);
37 return o < L->top ? o : niltv(L); 38 return o < L->top ? o : niltv(L);
38 } else if (idx > LUA_REGISTRYINDEX) { 39 } else if (idx > LUA_REGISTRYINDEX) {
39 api_check(L, idx != 0 && -idx <= L->top - L->base); 40 lj_checkapi(idx != 0 && -idx <= L->top - L->base,
41 "bad stack slot %d", idx);
40 return L->top + idx; 42 return L->top + idx;
41 } else if (idx == LUA_GLOBALSINDEX) { 43 } else if (idx == LUA_GLOBALSINDEX) {
42 TValue *o = &G(L)->tmptv; 44 TValue *o = &G(L)->tmptv;
@@ -46,7 +48,8 @@ static TValue *index2adr(lua_State *L, int idx)
46 return registry(L); 48 return registry(L);
47 } else { 49 } else {
48 GCfunc *fn = curr_func(L); 50 GCfunc *fn = curr_func(L);
49 api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn)); 51 lj_checkapi(fn->c.gct == ~LJ_TFUNC && !isluafunc(fn),
52 "calling frame is not a C function");
50 if (idx == LUA_ENVIRONINDEX) { 53 if (idx == LUA_ENVIRONINDEX) {
51 TValue *o = &G(L)->tmptv; 54 TValue *o = &G(L)->tmptv;
52 settabV(L, o, tabref(fn->c.env)); 55 settabV(L, o, tabref(fn->c.env));
@@ -58,13 +61,27 @@ static TValue *index2adr(lua_State *L, int idx)
58 } 61 }
59} 62}
60 63
61static TValue *stkindex2adr(lua_State *L, int idx) 64static LJ_AINLINE TValue *index2adr_check(lua_State *L, int idx)
65{
66 TValue *o = index2adr(L, idx);
67 lj_checkapi(o != niltv(L), "invalid stack slot %d", idx);
68 return o;
69}
70
71static TValue *index2adr_stack(lua_State *L, int idx)
62{ 72{
63 if (idx > 0) { 73 if (idx > 0) {
64 TValue *o = L->base + (idx - 1); 74 TValue *o = L->base + (idx - 1);
75 if (o < L->top) {
76 return o;
77 } else {
78 lj_checkapi(0, "invalid stack slot %d", idx);
79 return niltv(L);
80 }
65 return o < L->top ? o : niltv(L); 81 return o < L->top ? o : niltv(L);
66 } else { 82 } else {
67 api_check(L, idx != 0 && -idx <= L->top - L->base); 83 lj_checkapi(idx != 0 && -idx <= L->top - L->base,
84 "invalid stack slot %d", idx);
68 return L->top + idx; 85 return L->top + idx;
69 } 86 }
70} 87}
@@ -98,17 +115,24 @@ LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg)
98 lj_err_callerv(L, LJ_ERR_STKOVM, msg); 115 lj_err_callerv(L, LJ_ERR_STKOVM, msg);
99} 116}
100 117
101LUA_API void lua_xmove(lua_State *from, lua_State *to, int n) 118LUA_API void lua_xmove(lua_State *L, lua_State *to, int n)
102{ 119{
103 TValue *f, *t; 120 TValue *f, *t;
104 if (from == to) return; 121 if (L == to) return;
105 api_checknelems(from, n); 122 lj_checkapi_slot(n);
106 api_check(from, G(from) == G(to)); 123 lj_checkapi(G(L) == G(to), "move across global states");
107 lj_state_checkstack(to, (MSize)n); 124 lj_state_checkstack(to, (MSize)n);
108 f = from->top; 125 f = L->top;
109 t = to->top = to->top + n; 126 t = to->top = to->top + n;
110 while (--n >= 0) copyTV(to, --t, --f); 127 while (--n >= 0) copyTV(to, --t, --f);
111 from->top = f; 128 L->top = f;
129}
130
131LUA_API const lua_Number *lua_version(lua_State *L)
132{
133 static const lua_Number version = LUA_VERSION_NUM;
134 UNUSED(L);
135 return &version;
112} 136}
113 137
114/* -- Stack manipulation -------------------------------------------------- */ 138/* -- Stack manipulation -------------------------------------------------- */
@@ -121,7 +145,7 @@ LUA_API int lua_gettop(lua_State *L)
121LUA_API void lua_settop(lua_State *L, int idx) 145LUA_API void lua_settop(lua_State *L, int idx)
122{ 146{
123 if (idx >= 0) { 147 if (idx >= 0) {
124 api_check(L, idx <= tvref(L->maxstack) - L->base); 148 lj_checkapi(idx <= tvref(L->maxstack) - L->base, "bad stack slot %d", idx);
125 if (L->base + idx > L->top) { 149 if (L->base + idx > L->top) {
126 if (L->base + idx >= tvref(L->maxstack)) 150 if (L->base + idx >= tvref(L->maxstack))
127 lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); 151 lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base));
@@ -130,51 +154,58 @@ LUA_API void lua_settop(lua_State *L, int idx)
130 L->top = L->base + idx; 154 L->top = L->base + idx;
131 } 155 }
132 } else { 156 } else {
133 api_check(L, -(idx+1) <= (L->top - L->base)); 157 lj_checkapi(-(idx+1) <= (L->top - L->base), "bad stack slot %d", idx);
134 L->top += idx+1; /* Shrinks top (idx < 0). */ 158 L->top += idx+1; /* Shrinks top (idx < 0). */
135 } 159 }
136} 160}
137 161
138LUA_API void lua_remove(lua_State *L, int idx) 162LUA_API void lua_remove(lua_State *L, int idx)
139{ 163{
140 TValue *p = stkindex2adr(L, idx); 164 TValue *p = index2adr_stack(L, idx);
141 api_checkvalidindex(L, p);
142 while (++p < L->top) copyTV(L, p-1, p); 165 while (++p < L->top) copyTV(L, p-1, p);
143 L->top--; 166 L->top--;
144} 167}
145 168
146LUA_API void lua_insert(lua_State *L, int idx) 169LUA_API void lua_insert(lua_State *L, int idx)
147{ 170{
148 TValue *q, *p = stkindex2adr(L, idx); 171 TValue *q, *p = index2adr_stack(L, idx);
149 api_checkvalidindex(L, p);
150 for (q = L->top; q > p; q--) copyTV(L, q, q-1); 172 for (q = L->top; q > p; q--) copyTV(L, q, q-1);
151 copyTV(L, p, L->top); 173 copyTV(L, p, L->top);
152} 174}
153 175
154LUA_API void lua_replace(lua_State *L, int idx) 176static void copy_slot(lua_State *L, TValue *f, int idx)
155{ 177{
156 api_checknelems(L, 1);
157 if (idx == LUA_GLOBALSINDEX) { 178 if (idx == LUA_GLOBALSINDEX) {
158 api_check(L, tvistab(L->top-1)); 179 lj_checkapi(tvistab(f), "stack slot %d is not a table", idx);
159 /* NOBARRIER: A thread (i.e. L) is never black. */ 180 /* NOBARRIER: A thread (i.e. L) is never black. */
160 setgcref(L->env, obj2gco(tabV(L->top-1))); 181 setgcref(L->env, obj2gco(tabV(f)));
161 } else if (idx == LUA_ENVIRONINDEX) { 182 } else if (idx == LUA_ENVIRONINDEX) {
162 GCfunc *fn = curr_func(L); 183 GCfunc *fn = curr_func(L);
163 if (fn->c.gct != ~LJ_TFUNC) 184 if (fn->c.gct != ~LJ_TFUNC)
164 lj_err_msg(L, LJ_ERR_NOENV); 185 lj_err_msg(L, LJ_ERR_NOENV);
165 api_check(L, tvistab(L->top-1)); 186 lj_checkapi(tvistab(f), "stack slot %d is not a table", idx);
166 setgcref(fn->c.env, obj2gco(tabV(L->top-1))); 187 setgcref(fn->c.env, obj2gco(tabV(f)));
167 lj_gc_barrier(L, fn, L->top-1); 188 lj_gc_barrier(L, fn, f);
168 } else { 189 } else {
169 TValue *o = index2adr(L, idx); 190 TValue *o = index2adr_check(L, idx);
170 api_checkvalidindex(L, o); 191 copyTV(L, o, f);
171 copyTV(L, o, L->top-1);
172 if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ 192 if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */
173 lj_gc_barrier(L, curr_func(L), L->top-1); 193 lj_gc_barrier(L, curr_func(L), f);
174 } 194 }
195}
196
197LUA_API void lua_replace(lua_State *L, int idx)
198{
199 lj_checkapi_slot(1);
200 copy_slot(L, L->top - 1, idx);
175 L->top--; 201 L->top--;
176} 202}
177 203
204LUA_API void lua_copy(lua_State *L, int fromidx, int toidx)
205{
206 copy_slot(L, index2adr(L, fromidx), toidx);
207}
208
178LUA_API void lua_pushvalue(lua_State *L, int idx) 209LUA_API void lua_pushvalue(lua_State *L, int idx)
179{ 210{
180 copyTV(L, L->top, index2adr(L, idx)); 211 copyTV(L, L->top, index2adr(L, idx));
@@ -188,7 +219,7 @@ LUA_API int lua_type(lua_State *L, int idx)
188 cTValue *o = index2adr(L, idx); 219 cTValue *o = index2adr(L, idx);
189 if (tvisnumber(o)) { 220 if (tvisnumber(o)) {
190 return LUA_TNUMBER; 221 return LUA_TNUMBER;
191#if LJ_64 222#if LJ_64 && !LJ_GC64
192 } else if (tvislightud(o)) { 223 } else if (tvislightud(o)) {
193 return LUA_TLIGHTUSERDATA; 224 return LUA_TLIGHTUSERDATA;
194#endif 225#endif
@@ -201,7 +232,7 @@ LUA_API int lua_type(lua_State *L, int idx)
201#else 232#else
202 int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); 233 int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u);
203#endif 234#endif
204 lua_assert(tt != LUA_TNIL || tvisnil(o)); 235 lj_assertL(tt != LUA_TNIL || tvisnil(o), "bad tag conversion");
205 return tt; 236 return tt;
206 } 237 }
207} 238}
@@ -268,7 +299,7 @@ LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
268 return 0; 299 return 0;
269 } else if (tvispri(o1)) { 300 } else if (tvispri(o1)) {
270 return o1 != niltv(L) && o2 != niltv(L); 301 return o1 != niltv(L) && o2 != niltv(L);
271#if LJ_64 302#if LJ_64 && !LJ_GC64
272 } else if (tvislightud(o1)) { 303 } else if (tvislightud(o1)) {
273 return o1->u64 == o2->u64; 304 return o1->u64 == o2->u64;
274#endif 305#endif
@@ -283,8 +314,8 @@ LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
283 } else { 314 } else {
284 L->top = base+2; 315 L->top = base+2;
285 lj_vm_call(L, base, 1+1); 316 lj_vm_call(L, base, 1+1);
286 L->top -= 2; 317 L->top -= 2+LJ_FR2;
287 return tvistruecond(L->top+1); 318 return tvistruecond(L->top+1+LJ_FR2);
288 } 319 }
289 } 320 }
290} 321}
@@ -306,8 +337,8 @@ LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2)
306 } else { 337 } else {
307 L->top = base+2; 338 L->top = base+2;
308 lj_vm_call(L, base, 1+1); 339 lj_vm_call(L, base, 1+1);
309 L->top -= 2; 340 L->top -= 2+LJ_FR2;
310 return tvistruecond(L->top+1); 341 return tvistruecond(L->top+1+LJ_FR2);
311 } 342 }
312 } 343 }
313} 344}
@@ -324,6 +355,22 @@ LUA_API lua_Number lua_tonumber(lua_State *L, int idx)
324 return 0; 355 return 0;
325} 356}
326 357
358LUA_API lua_Number lua_tonumberx(lua_State *L, int idx, int *ok)
359{
360 cTValue *o = index2adr(L, idx);
361 TValue tmp;
362 if (LJ_LIKELY(tvisnumber(o))) {
363 if (ok) *ok = 1;
364 return numberVnum(o);
365 } else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp)) {
366 if (ok) *ok = 1;
367 return numV(&tmp);
368 } else {
369 if (ok) *ok = 0;
370 return 0;
371 }
372}
373
327LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx) 374LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx)
328{ 375{
329 cTValue *o = index2adr(L, idx); 376 cTValue *o = index2adr(L, idx);
@@ -361,9 +408,38 @@ LUA_API lua_Integer lua_tointeger(lua_State *L, int idx)
361 if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) 408 if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
362 return 0; 409 return 0;
363 if (tvisint(&tmp)) 410 if (tvisint(&tmp))
364 return (lua_Integer)intV(&tmp); 411 return intV(&tmp);
412 n = numV(&tmp);
413 }
414#if LJ_64
415 return (lua_Integer)n;
416#else
417 return lj_num2int(n);
418#endif
419}
420
421LUA_API lua_Integer lua_tointegerx(lua_State *L, int idx, int *ok)
422{
423 cTValue *o = index2adr(L, idx);
424 TValue tmp;
425 lua_Number n;
426 if (LJ_LIKELY(tvisint(o))) {
427 if (ok) *ok = 1;
428 return intV(o);
429 } else if (LJ_LIKELY(tvisnum(o))) {
430 n = numV(o);
431 } else {
432 if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) {
433 if (ok) *ok = 0;
434 return 0;
435 }
436 if (tvisint(&tmp)) {
437 if (ok) *ok = 1;
438 return intV(&tmp);
439 }
365 n = numV(&tmp); 440 n = numV(&tmp);
366 } 441 }
442 if (ok) *ok = 1;
367#if LJ_64 443#if LJ_64
368 return (lua_Integer)n; 444 return (lua_Integer)n;
369#else 445#else
@@ -434,7 +510,7 @@ LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len)
434 } else if (tvisnumber(o)) { 510 } else if (tvisnumber(o)) {
435 lj_gc_check(L); 511 lj_gc_check(L);
436 o = index2adr(L, idx); /* GC may move the stack. */ 512 o = index2adr(L, idx); /* GC may move the stack. */
437 s = lj_str_fromnumber(L, o); 513 s = lj_strfmt_number(L, o);
438 setstrV(L, o, s); 514 setstrV(L, o, s);
439 } else { 515 } else {
440 if (len != NULL) *len = 0; 516 if (len != NULL) *len = 0;
@@ -453,7 +529,7 @@ LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len)
453 } else if (tvisnumber(o)) { 529 } else if (tvisnumber(o)) {
454 lj_gc_check(L); 530 lj_gc_check(L);
455 o = index2adr(L, idx); /* GC may move the stack. */ 531 o = index2adr(L, idx); /* GC may move the stack. */
456 s = lj_str_fromnumber(L, o); 532 s = lj_strfmt_number(L, o);
457 setstrV(L, o, s); 533 setstrV(L, o, s);
458 } else { 534 } else {
459 lj_err_argt(L, idx, LUA_TSTRING); 535 lj_err_argt(L, idx, LUA_TSTRING);
@@ -475,7 +551,7 @@ LUALIB_API const char *luaL_optlstring(lua_State *L, int idx,
475 } else if (tvisnumber(o)) { 551 } else if (tvisnumber(o)) {
476 lj_gc_check(L); 552 lj_gc_check(L);
477 o = index2adr(L, idx); /* GC may move the stack. */ 553 o = index2adr(L, idx); /* GC may move the stack. */
478 s = lj_str_fromnumber(L, o); 554 s = lj_strfmt_number(L, o);
479 setstrV(L, o, s); 555 setstrV(L, o, s);
480 } else { 556 } else {
481 lj_err_argt(L, idx, LUA_TSTRING); 557 lj_err_argt(L, idx, LUA_TSTRING);
@@ -507,7 +583,7 @@ LUA_API size_t lua_objlen(lua_State *L, int idx)
507 } else if (tvisudata(o)) { 583 } else if (tvisudata(o)) {
508 return udataV(o)->len; 584 return udataV(o)->len;
509 } else if (tvisnumber(o)) { 585 } else if (tvisnumber(o)) {
510 GCstr *s = lj_str_fromnumber(L, o); 586 GCstr *s = lj_strfmt_number(L, o);
511 setstrV(L, o, s); 587 setstrV(L, o, s);
512 return s->len; 588 return s->len;
513 } else { 589 } else {
@@ -545,17 +621,7 @@ LUA_API lua_State *lua_tothread(lua_State *L, int idx)
545 621
546LUA_API const void *lua_topointer(lua_State *L, int idx) 622LUA_API const void *lua_topointer(lua_State *L, int idx)
547{ 623{
548 cTValue *o = index2adr(L, idx); 624 return lj_obj_ptr(index2adr(L, idx));
549 if (tvisudata(o))
550 return uddata(udataV(o));
551 else if (tvislightud(o))
552 return lightudV(o);
553 else if (tviscdata(o))
554 return cdataptr(cdataV(o));
555 else if (tvisgcv(o))
556 return gcV(o);
557 else
558 return NULL;
559} 625}
560 626
561/* -- Stack setters (object creation) ------------------------------------- */ 627/* -- Stack setters (object creation) ------------------------------------- */
@@ -606,7 +672,7 @@ LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt,
606 va_list argp) 672 va_list argp)
607{ 673{
608 lj_gc_check(L); 674 lj_gc_check(L);
609 return lj_str_pushvf(L, fmt, argp); 675 return lj_strfmt_pushvf(L, fmt, argp);
610} 676}
611 677
612LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...) 678LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
@@ -615,7 +681,7 @@ LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
615 va_list argp; 681 va_list argp;
616 lj_gc_check(L); 682 lj_gc_check(L);
617 va_start(argp, fmt); 683 va_start(argp, fmt);
618 ret = lj_str_pushvf(L, fmt, argp); 684 ret = lj_strfmt_pushvf(L, fmt, argp);
619 va_end(argp); 685 va_end(argp);
620 return ret; 686 return ret;
621} 687}
@@ -624,14 +690,14 @@ LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n)
624{ 690{
625 GCfunc *fn; 691 GCfunc *fn;
626 lj_gc_check(L); 692 lj_gc_check(L);
627 api_checknelems(L, n); 693 lj_checkapi_slot(n);
628 fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); 694 fn = lj_func_newC(L, (MSize)n, getcurrenv(L));
629 fn->c.f = f; 695 fn->c.f = f;
630 L->top -= n; 696 L->top -= n;
631 while (n--) 697 while (n--)
632 copyTV(L, &fn->c.upvalue[n], L->top+n); 698 copyTV(L, &fn->c.upvalue[n], L->top+n);
633 setfuncV(L, L->top, fn); 699 setfuncV(L, L->top, fn);
634 lua_assert(iswhite(obj2gco(fn))); 700 lj_assertL(iswhite(obj2gco(fn)), "new GC object is not white");
635 incr_top(L); 701 incr_top(L);
636} 702}
637 703
@@ -649,10 +715,8 @@ LUA_API void lua_pushlightuserdata(lua_State *L, void *p)
649 715
650LUA_API void lua_createtable(lua_State *L, int narray, int nrec) 716LUA_API void lua_createtable(lua_State *L, int narray, int nrec)
651{ 717{
652 GCtab *t;
653 lj_gc_check(L); 718 lj_gc_check(L);
654 t = lj_tab_new(L, (uint32_t)(narray > 0 ? narray+1 : 0), hsize2hbits(nrec)); 719 settabV(L, L->top, lj_tab_new_ah(L, narray, nrec));
655 settabV(L, L->top, t);
656 incr_top(L); 720 incr_top(L);
657} 721}
658 722
@@ -703,7 +767,7 @@ LUA_API void *lua_newuserdata(lua_State *L, size_t size)
703 767
704LUA_API void lua_concat(lua_State *L, int n) 768LUA_API void lua_concat(lua_State *L, int n)
705{ 769{
706 api_checknelems(L, n); 770 lj_checkapi_slot(n);
707 if (n >= 2) { 771 if (n >= 2) {
708 n--; 772 n--;
709 do { 773 do {
@@ -715,8 +779,8 @@ LUA_API void lua_concat(lua_State *L, int n)
715 n -= (int)(L->top - top); 779 n -= (int)(L->top - top);
716 L->top = top+2; 780 L->top = top+2;
717 lj_vm_call(L, top, 1+1); 781 lj_vm_call(L, top, 1+1);
718 L->top--; 782 L->top -= 1+LJ_FR2;
719 copyTV(L, L->top-1, L->top); 783 copyTV(L, L->top-1, L->top+LJ_FR2);
720 } while (--n > 0); 784 } while (--n > 0);
721 } else if (n == 0) { /* Push empty string. */ 785 } else if (n == 0) { /* Push empty string. */
722 setstrV(L, L->top, &G(L)->strempty); 786 setstrV(L, L->top, &G(L)->strempty);
@@ -729,30 +793,28 @@ LUA_API void lua_concat(lua_State *L, int n)
729 793
730LUA_API void lua_gettable(lua_State *L, int idx) 794LUA_API void lua_gettable(lua_State *L, int idx)
731{ 795{
732 cTValue *v, *t = index2adr(L, idx); 796 cTValue *t = index2adr_check(L, idx);
733 api_checkvalidindex(L, t); 797 cTValue *v = lj_meta_tget(L, t, L->top-1);
734 v = lj_meta_tget(L, t, L->top-1);
735 if (v == NULL) { 798 if (v == NULL) {
736 L->top += 2; 799 L->top += 2;
737 lj_vm_call(L, L->top-2, 1+1); 800 lj_vm_call(L, L->top-2, 1+1);
738 L->top -= 2; 801 L->top -= 2+LJ_FR2;
739 v = L->top+1; 802 v = L->top+1+LJ_FR2;
740 } 803 }
741 copyTV(L, L->top-1, v); 804 copyTV(L, L->top-1, v);
742} 805}
743 806
744LUA_API void lua_getfield(lua_State *L, int idx, const char *k) 807LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
745{ 808{
746 cTValue *v, *t = index2adr(L, idx); 809 cTValue *v, *t = index2adr_check(L, idx);
747 TValue key; 810 TValue key;
748 api_checkvalidindex(L, t);
749 setstrV(L, &key, lj_str_newz(L, k)); 811 setstrV(L, &key, lj_str_newz(L, k));
750 v = lj_meta_tget(L, t, &key); 812 v = lj_meta_tget(L, t, &key);
751 if (v == NULL) { 813 if (v == NULL) {
752 L->top += 2; 814 L->top += 2;
753 lj_vm_call(L, L->top-2, 1+1); 815 lj_vm_call(L, L->top-2, 1+1);
754 L->top -= 2; 816 L->top -= 2+LJ_FR2;
755 v = L->top+1; 817 v = L->top+1+LJ_FR2;
756 } 818 }
757 copyTV(L, L->top, v); 819 copyTV(L, L->top, v);
758 incr_top(L); 820 incr_top(L);
@@ -761,14 +823,14 @@ LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
761LUA_API void lua_rawget(lua_State *L, int idx) 823LUA_API void lua_rawget(lua_State *L, int idx)
762{ 824{
763 cTValue *t = index2adr(L, idx); 825 cTValue *t = index2adr(L, idx);
764 api_check(L, tvistab(t)); 826 lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
765 copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); 827 copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1));
766} 828}
767 829
768LUA_API void lua_rawgeti(lua_State *L, int idx, int n) 830LUA_API void lua_rawgeti(lua_State *L, int idx, int n)
769{ 831{
770 cTValue *v, *t = index2adr(L, idx); 832 cTValue *v, *t = index2adr(L, idx);
771 api_check(L, tvistab(t)); 833 lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
772 v = lj_tab_getint(tabV(t), n); 834 v = lj_tab_getint(tabV(t), n);
773 if (v) { 835 if (v) {
774 copyTV(L, L->top, v); 836 copyTV(L, L->top, v);
@@ -810,8 +872,7 @@ LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field)
810 872
811LUA_API void lua_getfenv(lua_State *L, int idx) 873LUA_API void lua_getfenv(lua_State *L, int idx)
812{ 874{
813 cTValue *o = index2adr(L, idx); 875 cTValue *o = index2adr_check(L, idx);
814 api_checkvalidindex(L, o);
815 if (tvisfunc(o)) { 876 if (tvisfunc(o)) {
816 settabV(L, L->top, tabref(funcV(o)->c.env)); 877 settabV(L, L->top, tabref(funcV(o)->c.env));
817 } else if (tvisudata(o)) { 878 } else if (tvisudata(o)) {
@@ -828,7 +889,7 @@ LUA_API int lua_next(lua_State *L, int idx)
828{ 889{
829 cTValue *t = index2adr(L, idx); 890 cTValue *t = index2adr(L, idx);
830 int more; 891 int more;
831 api_check(L, tvistab(t)); 892 lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
832 more = lj_tab_next(L, tabV(t), L->top-1); 893 more = lj_tab_next(L, tabV(t), L->top-1);
833 if (more) { 894 if (more) {
834 incr_top(L); /* Return new key and value slot. */ 895 incr_top(L); /* Return new key and value slot. */
@@ -854,7 +915,7 @@ LUA_API void *lua_upvalueid(lua_State *L, int idx, int n)
854{ 915{
855 GCfunc *fn = funcV(index2adr(L, idx)); 916 GCfunc *fn = funcV(index2adr(L, idx));
856 n--; 917 n--;
857 api_check(L, (uint32_t)n < fn->l.nupvalues); 918 lj_checkapi((uint32_t)n < fn->l.nupvalues, "bad upvalue %d", n);
858 return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) : 919 return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
859 (void *)&fn->c.upvalue[n]; 920 (void *)&fn->c.upvalue[n];
860} 921}
@@ -864,13 +925,15 @@ LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2)
864 GCfunc *fn1 = funcV(index2adr(L, idx1)); 925 GCfunc *fn1 = funcV(index2adr(L, idx1));
865 GCfunc *fn2 = funcV(index2adr(L, idx2)); 926 GCfunc *fn2 = funcV(index2adr(L, idx2));
866 n1--; n2--; 927 n1--; n2--;
867 api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues); 928 lj_checkapi(isluafunc(fn1), "stack slot %d is not a Lua function", idx1);
868 api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues); 929 lj_checkapi(isluafunc(fn2), "stack slot %d is not a Lua function", idx2);
930 lj_checkapi((uint32_t)n1 < fn1->l.nupvalues, "bad upvalue %d", n1+1);
931 lj_checkapi((uint32_t)n2 < fn2->l.nupvalues, "bad upvalue %d", n2+1);
869 setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]); 932 setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]);
870 lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1])); 933 lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1]));
871} 934}
872 935
873LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname) 936LUALIB_API void *luaL_testudata(lua_State *L, int idx, const char *tname)
874{ 937{
875 cTValue *o = index2adr(L, idx); 938 cTValue *o = index2adr(L, idx);
876 if (tvisudata(o)) { 939 if (tvisudata(o)) {
@@ -879,8 +942,14 @@ LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
879 if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable)) 942 if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable))
880 return uddata(ud); 943 return uddata(ud);
881 } 944 }
882 lj_err_argtype(L, idx, tname); 945 return NULL; /* value is not a userdata with a metatable */
883 return NULL; /* unreachable */ 946}
947
948LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
949{
950 void *p = luaL_testudata(L, idx, tname);
951 if (!p) lj_err_argtype(L, idx, tname);
952 return p;
884} 953}
885 954
886/* -- Object setters ------------------------------------------------------ */ 955/* -- Object setters ------------------------------------------------------ */
@@ -888,19 +957,19 @@ LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
888LUA_API void lua_settable(lua_State *L, int idx) 957LUA_API void lua_settable(lua_State *L, int idx)
889{ 958{
890 TValue *o; 959 TValue *o;
891 cTValue *t = index2adr(L, idx); 960 cTValue *t = index2adr_check(L, idx);
892 api_checknelems(L, 2); 961 lj_checkapi_slot(2);
893 api_checkvalidindex(L, t);
894 o = lj_meta_tset(L, t, L->top-2); 962 o = lj_meta_tset(L, t, L->top-2);
895 if (o) { 963 if (o) {
896 /* NOBARRIER: lj_meta_tset ensures the table is not black. */ 964 /* NOBARRIER: lj_meta_tset ensures the table is not black. */
897 copyTV(L, o, L->top-1);
898 L->top -= 2; 965 L->top -= 2;
966 copyTV(L, o, L->top+1);
899 } else { 967 } else {
900 L->top += 3; 968 TValue *base = L->top;
901 copyTV(L, L->top-1, L->top-6); 969 copyTV(L, base+2, base-3-2*LJ_FR2);
902 lj_vm_call(L, L->top-3, 0+1); 970 L->top = base+3;
903 L->top -= 3; 971 lj_vm_call(L, base, 0+1);
972 L->top -= 3+LJ_FR2;
904 } 973 }
905} 974}
906 975
@@ -908,20 +977,19 @@ LUA_API void lua_setfield(lua_State *L, int idx, const char *k)
908{ 977{
909 TValue *o; 978 TValue *o;
910 TValue key; 979 TValue key;
911 cTValue *t = index2adr(L, idx); 980 cTValue *t = index2adr_check(L, idx);
912 api_checknelems(L, 1); 981 lj_checkapi_slot(1);
913 api_checkvalidindex(L, t);
914 setstrV(L, &key, lj_str_newz(L, k)); 982 setstrV(L, &key, lj_str_newz(L, k));
915 o = lj_meta_tset(L, t, &key); 983 o = lj_meta_tset(L, t, &key);
916 if (o) { 984 if (o) {
917 L->top--;
918 /* NOBARRIER: lj_meta_tset ensures the table is not black. */ 985 /* NOBARRIER: lj_meta_tset ensures the table is not black. */
919 copyTV(L, o, L->top); 986 copyTV(L, o, --L->top);
920 } else { 987 } else {
921 L->top += 3; 988 TValue *base = L->top;
922 copyTV(L, L->top-1, L->top-6); 989 copyTV(L, base+2, base-3-2*LJ_FR2);
923 lj_vm_call(L, L->top-3, 0+1); 990 L->top = base+3;
924 L->top -= 2; 991 lj_vm_call(L, base, 0+1);
992 L->top -= 2+LJ_FR2;
925 } 993 }
926} 994}
927 995
@@ -929,7 +997,7 @@ LUA_API void lua_rawset(lua_State *L, int idx)
929{ 997{
930 GCtab *t = tabV(index2adr(L, idx)); 998 GCtab *t = tabV(index2adr(L, idx));
931 TValue *dst, *key; 999 TValue *dst, *key;
932 api_checknelems(L, 2); 1000 lj_checkapi_slot(2);
933 key = L->top-2; 1001 key = L->top-2;
934 dst = lj_tab_set(L, t, key); 1002 dst = lj_tab_set(L, t, key);
935 copyTV(L, dst, key+1); 1003 copyTV(L, dst, key+1);
@@ -941,7 +1009,7 @@ LUA_API void lua_rawseti(lua_State *L, int idx, int n)
941{ 1009{
942 GCtab *t = tabV(index2adr(L, idx)); 1010 GCtab *t = tabV(index2adr(L, idx));
943 TValue *dst, *src; 1011 TValue *dst, *src;
944 api_checknelems(L, 1); 1012 lj_checkapi_slot(1);
945 dst = lj_tab_setint(L, t, n); 1013 dst = lj_tab_setint(L, t, n);
946 src = L->top-1; 1014 src = L->top-1;
947 copyTV(L, dst, src); 1015 copyTV(L, dst, src);
@@ -953,13 +1021,12 @@ LUA_API int lua_setmetatable(lua_State *L, int idx)
953{ 1021{
954 global_State *g; 1022 global_State *g;
955 GCtab *mt; 1023 GCtab *mt;
956 cTValue *o = index2adr(L, idx); 1024 cTValue *o = index2adr_check(L, idx);
957 api_checknelems(L, 1); 1025 lj_checkapi_slot(1);
958 api_checkvalidindex(L, o);
959 if (tvisnil(L->top-1)) { 1026 if (tvisnil(L->top-1)) {
960 mt = NULL; 1027 mt = NULL;
961 } else { 1028 } else {
962 api_check(L, tvistab(L->top-1)); 1029 lj_checkapi(tvistab(L->top-1), "top stack slot is not a table");
963 mt = tabV(L->top-1); 1030 mt = tabV(L->top-1);
964 } 1031 }
965 g = G(L); 1032 g = G(L);
@@ -988,13 +1055,18 @@ LUA_API int lua_setmetatable(lua_State *L, int idx)
988 return 1; 1055 return 1;
989} 1056}
990 1057
1058LUALIB_API void luaL_setmetatable(lua_State *L, const char *tname)
1059{
1060 lua_getfield(L, LUA_REGISTRYINDEX, tname);
1061 lua_setmetatable(L, -2);
1062}
1063
991LUA_API int lua_setfenv(lua_State *L, int idx) 1064LUA_API int lua_setfenv(lua_State *L, int idx)
992{ 1065{
993 cTValue *o = index2adr(L, idx); 1066 cTValue *o = index2adr_check(L, idx);
994 GCtab *t; 1067 GCtab *t;
995 api_checknelems(L, 1); 1068 lj_checkapi_slot(1);
996 api_checkvalidindex(L, o); 1069 lj_checkapi(tvistab(L->top-1), "top stack slot is not a table");
997 api_check(L, tvistab(L->top-1));
998 t = tabV(L->top-1); 1070 t = tabV(L->top-1);
999 if (tvisfunc(o)) { 1071 if (tvisfunc(o)) {
1000 setgcref(funcV(o)->c.env, obj2gco(t)); 1072 setgcref(funcV(o)->c.env, obj2gco(t));
@@ -1017,7 +1089,7 @@ LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
1017 TValue *val; 1089 TValue *val;
1018 GCobj *o; 1090 GCobj *o;
1019 const char *name; 1091 const char *name;
1020 api_checknelems(L, 1); 1092 lj_checkapi_slot(1);
1021 name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val, &o); 1093 name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val, &o);
1022 if (name) { 1094 if (name) {
1023 L->top--; 1095 L->top--;
@@ -1029,11 +1101,25 @@ LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
1029 1101
1030/* -- Calls --------------------------------------------------------------- */ 1102/* -- Calls --------------------------------------------------------------- */
1031 1103
1104#if LJ_FR2
1105static TValue *api_call_base(lua_State *L, int nargs)
1106{
1107 TValue *o = L->top, *base = o - nargs;
1108 L->top = o+1;
1109 for (; o > base; o--) copyTV(L, o, o-1);
1110 setnilV(o);
1111 return o+1;
1112}
1113#else
1114#define api_call_base(L, nargs) (L->top - (nargs))
1115#endif
1116
1032LUA_API void lua_call(lua_State *L, int nargs, int nresults) 1117LUA_API void lua_call(lua_State *L, int nargs, int nresults)
1033{ 1118{
1034 api_check(L, L->status == 0 || L->status == LUA_ERRERR); 1119 lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
1035 api_checknelems(L, nargs+1); 1120 "thread called in wrong state %d", L->status);
1036 lj_vm_call(L, L->top - nargs, nresults+1); 1121 lj_checkapi_slot(nargs+1);
1122 lj_vm_call(L, api_call_base(L, nargs), nresults+1);
1037} 1123}
1038 1124
1039LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc) 1125LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
@@ -1042,16 +1128,16 @@ LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
1042 uint8_t oldh = hook_save(g); 1128 uint8_t oldh = hook_save(g);
1043 ptrdiff_t ef; 1129 ptrdiff_t ef;
1044 int status; 1130 int status;
1045 api_check(L, L->status == 0 || L->status == LUA_ERRERR); 1131 lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
1046 api_checknelems(L, nargs+1); 1132 "thread called in wrong state %d", L->status);
1133 lj_checkapi_slot(nargs+1);
1047 if (errfunc == 0) { 1134 if (errfunc == 0) {
1048 ef = 0; 1135 ef = 0;
1049 } else { 1136 } else {
1050 cTValue *o = stkindex2adr(L, errfunc); 1137 cTValue *o = index2adr_stack(L, errfunc);
1051 api_checkvalidindex(L, o);
1052 ef = savestack(L, o); 1138 ef = savestack(L, o);
1053 } 1139 }
1054 status = lj_vm_pcall(L, L->top - nargs, nresults+1, ef); 1140 status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef);
1055 if (status) hook_restore(g, oldh); 1141 if (status) hook_restore(g, oldh);
1056 return status; 1142 return status;
1057} 1143}
@@ -1059,12 +1145,14 @@ LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
1059static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud) 1145static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud)
1060{ 1146{
1061 GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L)); 1147 GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L));
1148 TValue *top = L->top;
1062 fn->c.f = func; 1149 fn->c.f = func;
1063 setfuncV(L, L->top, fn); 1150 setfuncV(L, top++, fn);
1064 setlightudV(L->top+1, checklightudptr(L, ud)); 1151 if (LJ_FR2) setnilV(top++);
1152 setlightudV(top++, checklightudptr(L, ud));
1065 cframe_nres(L->cframe) = 1+0; /* Zero results. */ 1153 cframe_nres(L->cframe) = 1+0; /* Zero results. */
1066 L->top += 2; 1154 L->top = top;
1067 return L->top-1; /* Now call the newly allocated C function. */ 1155 return top-1; /* Now call the newly allocated C function. */
1068} 1156}
1069 1157
1070LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud) 1158LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
@@ -1072,7 +1160,8 @@ LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
1072 global_State *g = G(L); 1160 global_State *g = G(L);
1073 uint8_t oldh = hook_save(g); 1161 uint8_t oldh = hook_save(g);
1074 int status; 1162 int status;
1075 api_check(L, L->status == 0 || L->status == LUA_ERRERR); 1163 lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
1164 "thread called in wrong state %d", L->status);
1076 status = lj_vm_cpcall(L, func, ud, cpcall); 1165 status = lj_vm_cpcall(L, func, ud, cpcall);
1077 if (status) hook_restore(g, oldh); 1166 if (status) hook_restore(g, oldh);
1078 return status; 1167 return status;
@@ -1081,10 +1170,11 @@ LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
1081LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field) 1170LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
1082{ 1171{
1083 if (luaL_getmetafield(L, idx, field)) { 1172 if (luaL_getmetafield(L, idx, field)) {
1084 TValue *base = L->top--; 1173 TValue *top = L->top--;
1085 copyTV(L, base, index2adr(L, idx)); 1174 if (LJ_FR2) setnilV(top++);
1086 L->top = base+1; 1175 copyTV(L, top++, index2adr(L, idx));
1087 lj_vm_call(L, base, 1+1); 1176 L->top = top;
1177 lj_vm_call(L, top-1, 1+1);
1088 return 1; 1178 return 1;
1089 } 1179 }
1090 return 0; 1180 return 0;
@@ -1092,6 +1182,11 @@ LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
1092 1182
1093/* -- Coroutine yield and resume ------------------------------------------ */ 1183/* -- Coroutine yield and resume ------------------------------------------ */
1094 1184
1185LUA_API int lua_isyieldable(lua_State *L)
1186{
1187 return cframe_canyield(L->cframe);
1188}
1189
1095LUA_API int lua_yield(lua_State *L, int nresults) 1190LUA_API int lua_yield(lua_State *L, int nresults)
1096{ 1191{
1097 void *cf = L->cframe; 1192 void *cf = L->cframe;
@@ -1111,12 +1206,14 @@ LUA_API int lua_yield(lua_State *L, int nresults)
1111 } else { /* Yield from hook: add a pseudo-frame. */ 1206 } else { /* Yield from hook: add a pseudo-frame. */
1112 TValue *top = L->top; 1207 TValue *top = L->top;
1113 hook_leave(g); 1208 hook_leave(g);
1114 top->u64 = cframe_multres(cf); 1209 (top++)->u64 = cframe_multres(cf);
1115 setcont(top+1, lj_cont_hook); 1210 setcont(top, lj_cont_hook);
1116 setframe_pc(top+1, cframe_pc(cf)-1); 1211 if (LJ_FR2) top++;
1117 setframe_gc(top+2, obj2gco(L)); 1212 setframe_pc(top, cframe_pc(cf)-1);
1118 setframe_ftsz(top+2, (int)((char *)(top+3)-(char *)L->base)+FRAME_CONT); 1213 if (LJ_FR2) top++;
1119 L->top = L->base = top+3; 1214 setframe_gc(top, obj2gco(L), LJ_TTHREAD);
1215 setframe_ftsz(top, ((char *)(top+1)-(char *)L->base)+FRAME_CONT);
1216 L->top = L->base = top+1;
1120#if LJ_TARGET_X64 1217#if LJ_TARGET_X64
1121 lj_err_throw(L, LUA_YIELD); 1218 lj_err_throw(L, LUA_YIELD);
1122#else 1219#else
@@ -1133,7 +1230,9 @@ LUA_API int lua_yield(lua_State *L, int nresults)
1133LUA_API int lua_resume(lua_State *L, int nargs) 1230LUA_API int lua_resume(lua_State *L, int nargs)
1134{ 1231{
1135 if (L->cframe == NULL && L->status <= LUA_YIELD) 1232 if (L->cframe == NULL && L->status <= LUA_YIELD)
1136 return lj_vm_resume(L, L->top - nargs, 0, 0); 1233 return lj_vm_resume(L,
1234 L->status == LUA_OK ? api_call_base(L, nargs) : L->top - nargs,
1235 0, 0);
1137 L->top = L->base; 1236 L->top = L->base;
1138 setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP)); 1237 setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP));
1139 incr_top(L); 1238 incr_top(L);
@@ -1163,7 +1262,7 @@ LUA_API int lua_gc(lua_State *L, int what, int data)
1163 res = (int)(g->gc.total & 0x3ff); 1262 res = (int)(g->gc.total & 0x3ff);
1164 break; 1263 break;
1165 case LUA_GCSTEP: { 1264 case LUA_GCSTEP: {
1166 MSize a = (MSize)data << 10; 1265 GCSize a = (GCSize)data << 10;
1167 g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0; 1266 g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0;
1168 while (g->gc.total >= g->gc.threshold) 1267 while (g->gc.total >= g->gc.threshold)
1169 if (lj_gc_step(L) > 0) { 1268 if (lj_gc_step(L) > 0) {
@@ -1180,6 +1279,9 @@ LUA_API int lua_gc(lua_State *L, int what, int data)
1180 res = (int)(g->gc.stepmul); 1279 res = (int)(g->gc.stepmul);
1181 g->gc.stepmul = (MSize)data; 1280 g->gc.stepmul = (MSize)data;
1182 break; 1281 break;
1282 case LUA_GCISRUNNING:
1283 res = (g->gc.threshold != LJ_MAX_MEM);
1284 break;
1183 default: 1285 default:
1184 res = -1; /* Invalid option. */ 1286 res = -1; /* Invalid option. */
1185 } 1287 }
diff --git a/src/lj_arch.h b/src/lj_arch.h
index 320ccf97..baa7b663 100644
--- a/src/lj_arch.h
+++ b/src/lj_arch.h
@@ -8,6 +8,8 @@
8 8
9#include "lua.h" 9#include "lua.h"
10 10
11/* -- Target definitions -------------------------------------------------- */
12
11/* Target endianess. */ 13/* Target endianess. */
12#define LUAJIT_LE 0 14#define LUAJIT_LE 0
13#define LUAJIT_BE 1 15#define LUAJIT_BE 1
@@ -19,12 +21,16 @@
19#define LUAJIT_ARCH_x64 2 21#define LUAJIT_ARCH_x64 2
20#define LUAJIT_ARCH_ARM 3 22#define LUAJIT_ARCH_ARM 3
21#define LUAJIT_ARCH_arm 3 23#define LUAJIT_ARCH_arm 3
22#define LUAJIT_ARCH_PPC 4 24#define LUAJIT_ARCH_ARM64 4
23#define LUAJIT_ARCH_ppc 4 25#define LUAJIT_ARCH_arm64 4
24#define LUAJIT_ARCH_PPCSPE 5 26#define LUAJIT_ARCH_PPC 5
25#define LUAJIT_ARCH_ppcspe 5 27#define LUAJIT_ARCH_ppc 5
26#define LUAJIT_ARCH_MIPS 6 28#define LUAJIT_ARCH_MIPS 6
27#define LUAJIT_ARCH_mips 6 29#define LUAJIT_ARCH_mips 6
30#define LUAJIT_ARCH_MIPS32 6
31#define LUAJIT_ARCH_mips32 6
32#define LUAJIT_ARCH_MIPS64 7
33#define LUAJIT_ARCH_mips64 7
28 34
29/* Target OS. */ 35/* Target OS. */
30#define LUAJIT_OS_OTHER 0 36#define LUAJIT_OS_OTHER 0
@@ -34,6 +40,14 @@
34#define LUAJIT_OS_BSD 4 40#define LUAJIT_OS_BSD 4
35#define LUAJIT_OS_POSIX 5 41#define LUAJIT_OS_POSIX 5
36 42
43/* Number mode. */
44#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */
45#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */
46#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */
47#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */
48
49/* -- Target detection ---------------------------------------------------- */
50
37/* Select native target if no target defined. */ 51/* Select native target if no target defined. */
38#ifndef LUAJIT_TARGET 52#ifndef LUAJIT_TARGET
39 53
@@ -43,14 +57,14 @@
43#define LUAJIT_TARGET LUAJIT_ARCH_X64 57#define LUAJIT_TARGET LUAJIT_ARCH_X64
44#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM) 58#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM)
45#define LUAJIT_TARGET LUAJIT_ARCH_ARM 59#define LUAJIT_TARGET LUAJIT_ARCH_ARM
60#elif defined(__aarch64__)
61#define LUAJIT_TARGET LUAJIT_ARCH_ARM64
46#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC) 62#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC)
47#ifdef __NO_FPRS__
48#define LUAJIT_TARGET LUAJIT_ARCH_PPCSPE
49#else
50#define LUAJIT_TARGET LUAJIT_ARCH_PPC 63#define LUAJIT_TARGET LUAJIT_ARCH_PPC
51#endif 64#elif defined(__mips64__) || defined(__mips64) || defined(__MIPS64__) || defined(__MIPS64)
65#define LUAJIT_TARGET LUAJIT_ARCH_MIPS64
52#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS) 66#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS)
53#define LUAJIT_TARGET LUAJIT_ARCH_MIPS 67#define LUAJIT_TARGET LUAJIT_ARCH_MIPS32
54#else 68#else
55#error "No support for this architecture (yet)" 69#error "No support for this architecture (yet)"
56#endif 70#endif
@@ -65,12 +79,16 @@
65#elif defined(__linux__) 79#elif defined(__linux__)
66#define LUAJIT_OS LUAJIT_OS_LINUX 80#define LUAJIT_OS LUAJIT_OS_LINUX
67#elif defined(__MACH__) && defined(__APPLE__) 81#elif defined(__MACH__) && defined(__APPLE__)
82#include "TargetConditionals.h"
68#define LUAJIT_OS LUAJIT_OS_OSX 83#define LUAJIT_OS LUAJIT_OS_OSX
69#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \ 84#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
70 defined(__NetBSD__) || defined(__OpenBSD__) || \ 85 defined(__NetBSD__) || defined(__OpenBSD__) || \
71 defined(__DragonFly__)) && !defined(__ORBIS__) 86 defined(__DragonFly__)) && !defined(__ORBIS__)
72#define LUAJIT_OS LUAJIT_OS_BSD 87#define LUAJIT_OS LUAJIT_OS_BSD
73#elif (defined(__sun__) && defined(__svr4__)) 88#elif (defined(__sun__) && defined(__svr4__))
89#define LJ_TARGET_SOLARIS 1
90#define LUAJIT_OS LUAJIT_OS_POSIX
91#elif defined(__HAIKU__)
74#define LUAJIT_OS LUAJIT_OS_POSIX 92#define LUAJIT_OS LUAJIT_OS_POSIX
75#elif defined(__CYGWIN__) 93#elif defined(__CYGWIN__)
76#define LJ_TARGET_CYGWIN 1 94#define LJ_TARGET_CYGWIN 1
@@ -99,10 +117,16 @@
99#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS) 117#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS)
100#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX) 118#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX)
101#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX) 119#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX)
102#define LJ_TARGET_IOS (LJ_TARGET_OSX && LUAJIT_TARGET == LUAJIT_ARCH_ARM) 120#define LJ_TARGET_BSD (LUAJIT_OS == LUAJIT_OS_BSD)
103#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS) 121#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS)
104#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX 122#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX
105 123
124#if TARGET_OS_IPHONE
125#define LJ_TARGET_IOS 1
126#else
127#define LJ_TARGET_IOS 0
128#endif
129
106#ifdef __CELLOS_LV2__ 130#ifdef __CELLOS_LV2__
107#define LJ_TARGET_PS3 1 131#define LJ_TARGET_PS3 1
108#define LJ_TARGET_CONSOLE 1 132#define LJ_TARGET_CONSOLE 1
@@ -125,10 +149,20 @@
125#define LJ_TARGET_CONSOLE 1 149#define LJ_TARGET_CONSOLE 1
126#endif 150#endif
127 151
128#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */ 152#ifdef _DURANGO
129#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */ 153#define LJ_TARGET_XBOXONE 1
130#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */ 154#define LJ_TARGET_CONSOLE 1
131#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */ 155#define LJ_TARGET_GC64 1
156#endif
157
158#ifdef _UWP
159#define LJ_TARGET_UWP 1
160#if LUAJIT_TARGET == LUAJIT_ARCH_X64
161#define LJ_TARGET_GC64 1
162#endif
163#endif
164
165/* -- Arch-specific settings ---------------------------------------------- */
132 166
133/* Set target architecture properties. */ 167/* Set target architecture properties. */
134#if LUAJIT_TARGET == LUAJIT_ARCH_X86 168#if LUAJIT_TARGET == LUAJIT_ARCH_X86
@@ -167,6 +201,9 @@
167#define LJ_TARGET_MASKROT 1 201#define LJ_TARGET_MASKROT 1
168#define LJ_TARGET_UNALIGNED 1 202#define LJ_TARGET_UNALIGNED 1
169#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL 203#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
204#ifndef LUAJIT_DISABLE_GC64
205#define LJ_TARGET_GC64 1
206#endif
170 207
171#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM 208#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM
172 209
@@ -188,34 +225,96 @@
188#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ 225#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
189#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL 226#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
190 227
191#if __ARM_ARCH____ARM_ARCH_8__ || __ARM_ARCH_8A__ 228#if __ARM_ARCH == 8 || __ARM_ARCH_8__ || __ARM_ARCH_8A__
192#define LJ_ARCH_VERSION 80 229#define LJ_ARCH_VERSION 80
193#elif __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__ 230#elif __ARM_ARCH == 7 || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__
194#define LJ_ARCH_VERSION 70 231#define LJ_ARCH_VERSION 70
195#elif __ARM_ARCH_6T2__ 232#elif __ARM_ARCH_6T2__
196#define LJ_ARCH_VERSION 61 233#define LJ_ARCH_VERSION 61
197#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__ 234#elif __ARM_ARCH == 6 || __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__
198#define LJ_ARCH_VERSION 60 235#define LJ_ARCH_VERSION 60
199#else 236#else
200#define LJ_ARCH_VERSION 50 237#define LJ_ARCH_VERSION 50
201#endif 238#endif
202 239
240#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM64
241
242#define LJ_ARCH_BITS 64
243#if defined(__AARCH64EB__)
244#define LJ_ARCH_NAME "arm64be"
245#define LJ_ARCH_ENDIAN LUAJIT_BE
246#else
247#define LJ_ARCH_NAME "arm64"
248#define LJ_ARCH_ENDIAN LUAJIT_LE
249#endif
250#define LJ_TARGET_ARM64 1
251#define LJ_TARGET_EHRETREG 0
252#define LJ_TARGET_JUMPRANGE 27 /* +-2^27 = +-128MB */
253#define LJ_TARGET_MASKSHIFT 1
254#define LJ_TARGET_MASKROT 1
255#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
256#define LJ_TARGET_GC64 1
257#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
258
259#define LJ_ARCH_VERSION 80
260
203#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC 261#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC
204 262
205#define LJ_ARCH_NAME "ppc" 263#ifndef LJ_ARCH_ENDIAN
264#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
265#define LJ_ARCH_ENDIAN LUAJIT_LE
266#else
267#define LJ_ARCH_ENDIAN LUAJIT_BE
268#endif
269#endif
270
206#if _LP64 271#if _LP64
207#define LJ_ARCH_BITS 64 272#define LJ_ARCH_BITS 64
273#if LJ_ARCH_ENDIAN == LUAJIT_LE
274#define LJ_ARCH_NAME "ppc64le"
275#else
276#define LJ_ARCH_NAME "ppc64"
277#endif
208#else 278#else
209#define LJ_ARCH_BITS 32 279#define LJ_ARCH_BITS 32
280#define LJ_ARCH_NAME "ppc"
281
282#if !defined(LJ_ARCH_HASFPU)
283#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
284#define LJ_ARCH_HASFPU 0
285#else
286#define LJ_ARCH_HASFPU 1
210#endif 287#endif
211#define LJ_ARCH_ENDIAN LUAJIT_BE 288#endif
289
290#if !defined(LJ_ABI_SOFTFP)
291#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
292#define LJ_ABI_SOFTFP 1
293#else
294#define LJ_ABI_SOFTFP 0
295#endif
296#endif
297#endif
298
299#if LJ_ABI_SOFTFP
300#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
301#else
302#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE
303#endif
304
212#define LJ_TARGET_PPC 1 305#define LJ_TARGET_PPC 1
213#define LJ_TARGET_EHRETREG 3 306#define LJ_TARGET_EHRETREG 3
214#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */ 307#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
215#define LJ_TARGET_MASKSHIFT 0 308#define LJ_TARGET_MASKSHIFT 0
216#define LJ_TARGET_MASKROT 1 309#define LJ_TARGET_MASKROT 1
217#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */ 310#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
218#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE 311
312#if LJ_TARGET_CONSOLE
313#define LJ_ARCH_PPC32ON64 1
314#define LJ_ARCH_NOFFI 1
315#elif LJ_ARCH_BITS == 64
316#error "No support for PPC64"
317#endif
219 318
220#if _ARCH_PWR7 319#if _ARCH_PWR7
221#define LJ_ARCH_VERSION 70 320#define LJ_ARCH_VERSION 70
@@ -230,10 +329,6 @@
230#else 329#else
231#define LJ_ARCH_VERSION 0 330#define LJ_ARCH_VERSION 0
232#endif 331#endif
233#if __PPC64__ || __powerpc64__ || LJ_TARGET_CONSOLE
234#define LJ_ARCH_PPC64 1
235#define LJ_ARCH_NOFFI 1
236#endif
237#if _ARCH_PPCSQ 332#if _ARCH_PPCSQ
238#define LJ_ARCH_SQRT 1 333#define LJ_ARCH_SQRT 1
239#endif 334#endif
@@ -247,44 +342,79 @@
247#define LJ_ARCH_XENON 1 342#define LJ_ARCH_XENON 1
248#endif 343#endif
249 344
250#elif LUAJIT_TARGET == LUAJIT_ARCH_PPCSPE 345#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS32 || LUAJIT_TARGET == LUAJIT_ARCH_MIPS64
251
252#define LJ_ARCH_NAME "ppcspe"
253#define LJ_ARCH_BITS 32
254#define LJ_ARCH_ENDIAN LUAJIT_BE
255#ifndef LJ_ABI_SOFTFP
256#define LJ_ABI_SOFTFP 1
257#endif
258#define LJ_ABI_EABI 1
259#define LJ_TARGET_PPCSPE 1
260#define LJ_TARGET_EHRETREG 3
261#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
262#define LJ_TARGET_MASKSHIFT 0
263#define LJ_TARGET_MASKROT 1
264#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
265#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE
266#define LJ_ARCH_NOFFI 1 /* NYI: comparisons, calls. */
267#define LJ_ARCH_NOJIT 1
268
269#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS
270 346
271#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) 347#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL)
348#if __mips_isa_rev >= 6
349#define LJ_TARGET_MIPSR6 1
350#define LJ_TARGET_UNALIGNED 1
351#endif
352#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
353#if LJ_TARGET_MIPSR6
354#define LJ_ARCH_NAME "mips32r6el"
355#else
272#define LJ_ARCH_NAME "mipsel" 356#define LJ_ARCH_NAME "mipsel"
357#endif
358#else
359#if LJ_TARGET_MIPSR6
360#define LJ_ARCH_NAME "mips64r6el"
361#else
362#define LJ_ARCH_NAME "mips64el"
363#endif
364#endif
273#define LJ_ARCH_ENDIAN LUAJIT_LE 365#define LJ_ARCH_ENDIAN LUAJIT_LE
274#else 366#else
367#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
368#if LJ_TARGET_MIPSR6
369#define LJ_ARCH_NAME "mips32r6"
370#else
275#define LJ_ARCH_NAME "mips" 371#define LJ_ARCH_NAME "mips"
372#endif
373#else
374#if LJ_TARGET_MIPSR6
375#define LJ_ARCH_NAME "mips64r6"
376#else
377#define LJ_ARCH_NAME "mips64"
378#endif
379#endif
276#define LJ_ARCH_ENDIAN LUAJIT_BE 380#define LJ_ARCH_ENDIAN LUAJIT_BE
277#endif 381#endif
382
383#if !defined(LJ_ARCH_HASFPU)
384#ifdef __mips_soft_float
385#define LJ_ARCH_HASFPU 0
386#else
387#define LJ_ARCH_HASFPU 1
388#endif
389#endif
390
391#if !defined(LJ_ABI_SOFTFP)
392#ifdef __mips_soft_float
393#define LJ_ABI_SOFTFP 1
394#else
395#define LJ_ABI_SOFTFP 0
396#endif
397#endif
398
399#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
278#define LJ_ARCH_BITS 32 400#define LJ_ARCH_BITS 32
401#define LJ_TARGET_MIPS32 1
402#else
403#define LJ_ARCH_BITS 64
404#define LJ_TARGET_MIPS64 1
405#define LJ_TARGET_GC64 1
406#endif
279#define LJ_TARGET_MIPS 1 407#define LJ_TARGET_MIPS 1
280#define LJ_TARGET_EHRETREG 4 408#define LJ_TARGET_EHRETREG 4
281#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */ 409#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */
282#define LJ_TARGET_MASKSHIFT 1 410#define LJ_TARGET_MASKSHIFT 1
283#define LJ_TARGET_MASKROT 1 411#define LJ_TARGET_MASKROT 1
284#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */ 412#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
285#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE 413#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
286 414
287#if _MIPS_ARCH_MIPS32R2 415#if LJ_TARGET_MIPSR6
416#define LJ_ARCH_VERSION 60
417#elif _MIPS_ARCH_MIPS32R2 || _MIPS_ARCH_MIPS64R2
288#define LJ_ARCH_VERSION 20 418#define LJ_ARCH_VERSION 20
289#else 419#else
290#define LJ_ARCH_VERSION 10 420#define LJ_ARCH_VERSION 10
@@ -294,9 +424,7 @@
294#error "No target architecture defined" 424#error "No target architecture defined"
295#endif 425#endif
296 426
297#ifndef LJ_PAGESIZE 427/* -- Checks for requirements --------------------------------------------- */
298#define LJ_PAGESIZE 4096
299#endif
300 428
301/* Check for minimum required compiler versions. */ 429/* Check for minimum required compiler versions. */
302#if defined(__GNUC__) 430#if defined(__GNUC__)
@@ -312,6 +440,16 @@
312#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2) 440#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2)
313#error "Need at least GCC 4.2 or newer" 441#error "Need at least GCC 4.2 or newer"
314#endif 442#endif
443#elif LJ_TARGET_ARM64
444#if __clang__
445#if ((__clang_major__ < 3) || ((__clang_major__ == 3) && __clang_minor__ < 5)) && !defined(__NX_TOOLCHAIN_MAJOR__)
446#error "Need at least Clang 3.5 or newer"
447#endif
448#else
449#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 8)
450#error "Need at least GCC 4.8 or newer"
451#endif
452#endif
315#elif !LJ_TARGET_PS3 453#elif !LJ_TARGET_PS3
316#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3) 454#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3)
317#error "Need at least GCC 4.3 or newer" 455#error "Need at least GCC 4.3 or newer"
@@ -335,25 +473,34 @@
335#if !(__ARM_EABI__ || LJ_TARGET_IOS) 473#if !(__ARM_EABI__ || LJ_TARGET_IOS)
336#error "Only ARM EABI or iOS 3.0+ ABI is supported" 474#error "Only ARM EABI or iOS 3.0+ ABI is supported"
337#endif 475#endif
338#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE 476#elif LJ_TARGET_ARM64
339#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE) 477#if defined(_ILP32)
340#error "No support for PowerPC CPUs without double-precision FPU" 478#error "No support for ILP32 model on ARM64"
341#endif 479#endif
480#elif LJ_TARGET_PPC
342#if defined(_LITTLE_ENDIAN) && (!defined(_BYTE_ORDER) || (_BYTE_ORDER == _LITTLE_ENDIAN)) 481#if defined(_LITTLE_ENDIAN) && (!defined(_BYTE_ORDER) || (_BYTE_ORDER == _LITTLE_ENDIAN))
343#error "No support for little-endian PowerPC" 482#error "No support for little-endian PPC32"
344#endif 483#endif
345#if defined(_LP64) 484#if defined(__NO_FPRS__) && !defined(_SOFT_FLOAT)
346#error "No support for PowerPC 64 bit mode" 485#error "No support for PPC/e500 anymore (use LuaJIT 2.0)"
347#endif 486#endif
348#elif LJ_TARGET_MIPS 487#elif LJ_TARGET_MIPS32
349#if defined(__mips_soft_float) 488#if !((defined(_MIPS_SIM_ABI32) && _MIPS_SIM == _MIPS_SIM_ABI32) || (defined(_ABIO32) && _MIPS_SIM == _ABIO32))
350#error "No support for MIPS CPUs without FPU" 489#error "Only o32 ABI supported for MIPS32"
351#endif 490#endif
352#if defined(_LP64) 491#if LJ_TARGET_MIPSR6
353#error "No support for MIPS64" 492/* Not that useful, since most available r6 CPUs are 64 bit. */
493#error "No support for MIPS32R6"
354#endif 494#endif
495#elif LJ_TARGET_MIPS64
496#if !((defined(_MIPS_SIM_ABI64) && _MIPS_SIM == _MIPS_SIM_ABI64) || (defined(_ABI64) && _MIPS_SIM == _ABI64))
497/* MIPS32ON64 aka n32 ABI support might be desirable, but difficult. */
498#error "Only n64 ABI supported for MIPS64"
355#endif 499#endif
356#endif 500#endif
501#endif
502
503/* -- Derived defines ----------------------------------------------------- */
357 504
358/* Enable or disable the dual-number mode for the VM. */ 505/* Enable or disable the dual-number mode for the VM. */
359#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \ 506#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \
@@ -376,6 +523,20 @@
376#endif 523#endif
377#endif 524#endif
378 525
526/* 64 bit GC references. */
527#if LJ_TARGET_GC64
528#define LJ_GC64 1
529#else
530#define LJ_GC64 0
531#endif
532
533/* 2-slot frame info. */
534#if LJ_GC64
535#define LJ_FR2 1
536#else
537#define LJ_FR2 0
538#endif
539
379/* Disable or enable the JIT compiler. */ 540/* Disable or enable the JIT compiler. */
380#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT) 541#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT)
381#define LJ_HASJIT 0 542#define LJ_HASJIT 0
@@ -390,6 +551,21 @@
390#define LJ_HASFFI 1 551#define LJ_HASFFI 1
391#endif 552#endif
392 553
554#if defined(LUAJIT_DISABLE_PROFILE)
555#define LJ_HASPROFILE 0
556#elif LJ_TARGET_POSIX
557#define LJ_HASPROFILE 1
558#define LJ_PROFILE_SIGPROF 1
559#elif LJ_TARGET_PS3
560#define LJ_HASPROFILE 1
561#define LJ_PROFILE_PTHREAD 1
562#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOX360
563#define LJ_HASPROFILE 1
564#define LJ_PROFILE_WTHREAD 1
565#else
566#define LJ_HASPROFILE 0
567#endif
568
393#ifndef LJ_ARCH_HASFPU 569#ifndef LJ_ARCH_HASFPU
394#define LJ_ARCH_HASFPU 1 570#define LJ_ARCH_HASFPU 1
395#endif 571#endif
@@ -397,6 +573,7 @@
397#define LJ_ABI_SOFTFP 0 573#define LJ_ABI_SOFTFP 0
398#endif 574#endif
399#define LJ_SOFTFP (!LJ_ARCH_HASFPU) 575#define LJ_SOFTFP (!LJ_ARCH_HASFPU)
576#define LJ_SOFTFP32 (LJ_SOFTFP && LJ_32)
400 577
401#if LJ_ARCH_ENDIAN == LUAJIT_BE 578#if LJ_ARCH_ENDIAN == LUAJIT_BE
402#define LJ_LE 0 579#define LJ_LE 0
@@ -422,12 +599,13 @@
422#define LJ_TARGET_UNALIGNED 0 599#define LJ_TARGET_UNALIGNED 0
423#endif 600#endif
424 601
425/* Various workarounds for embedded operating systems. */ 602#ifndef LJ_PAGESIZE
426#if (defined(__ANDROID__) && !defined(LJ_TARGET_X86ORX64)) || defined(__symbian__) || LJ_TARGET_XBOX360 603#define LJ_PAGESIZE 4096
427#define LUAJIT_NO_LOG2
428#endif 604#endif
429#if defined(__symbian__) 605
430#define LUAJIT_NO_EXP2 606/* Various workarounds for embedded operating systems or weak C runtimes. */
607#if defined(__ANDROID__) || defined(__symbian__) || LJ_TARGET_XBOX360 || LJ_TARGET_WINDOWS
608#define LUAJIT_NO_LOG2
431#endif 609#endif
432#if LJ_TARGET_CONSOLE || (LJ_TARGET_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0) 610#if LJ_TARGET_CONSOLE || (LJ_TARGET_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0)
433#define LJ_NO_SYSTEM 1 611#define LJ_NO_SYSTEM 1
@@ -442,6 +620,18 @@
442#define LJ_NO_UNWIND 1 620#define LJ_NO_UNWIND 1
443#endif 621#endif
444 622
623#if LJ_TARGET_WINDOWS
624#if LJ_TARGET_UWP
625#define LJ_WIN_VALLOC VirtualAllocFromApp
626#define LJ_WIN_VPROTECT VirtualProtectFromApp
627extern void *LJ_WIN_LOADLIBA(const char *path);
628#else
629#define LJ_WIN_VALLOC VirtualAlloc
630#define LJ_WIN_VPROTECT VirtualProtect
631#define LJ_WIN_LOADLIBA(path) LoadLibraryExA((path), NULL, 0)
632#endif
633#endif
634
445/* Compatibility with Lua 5.1 vs. 5.2. */ 635/* Compatibility with Lua 5.1 vs. 5.2. */
446#ifdef LUAJIT_ENABLE_LUA52COMPAT 636#ifdef LUAJIT_ENABLE_LUA52COMPAT
447#define LJ_52 1 637#define LJ_52 1
@@ -449,4 +639,46 @@
449#define LJ_52 0 639#define LJ_52 0
450#endif 640#endif
451 641
642/* -- VM security --------------------------------------------------------- */
643
644/* Don't make any changes here. Instead build with:
645** make "XCFLAGS=-DLUAJIT_SECURITY_flag=value"
646**
647** Important note to distro maintainers: DO NOT change the defaults for a
648** regular distro build -- neither upwards, nor downwards!
649** These build-time configurable security flags are intended for embedders
650** who may have specific needs wrt. security vs. performance.
651*/
652
653/* Security defaults. */
654#ifndef LUAJIT_SECURITY_PRNG
655/* PRNG init: 0 = fixed/insecure, 1 = secure from OS. */
656#define LUAJIT_SECURITY_PRNG 1
657#endif
658
659#ifndef LUAJIT_SECURITY_STRHASH
660/* String hash: 0 = sparse only, 1 = sparse + dense. */
661#define LUAJIT_SECURITY_STRHASH 1
662#endif
663
664#ifndef LUAJIT_SECURITY_STRID
665/* String IDs: 0 = linear, 1 = reseed < 255, 2 = reseed < 15, 3 = random. */
666#define LUAJIT_SECURITY_STRID 1
667#endif
668
669#ifndef LUAJIT_SECURITY_MCODE
670/* Machine code page protection: 0 = insecure RWX, 1 = secure RW^X. */
671#define LUAJIT_SECURITY_MCODE 1
672#endif
673
674#define LJ_SECURITY_MODE \
675 ( 0u \
676 | ((LUAJIT_SECURITY_PRNG & 3) << 0) \
677 | ((LUAJIT_SECURITY_STRHASH & 3) << 2) \
678 | ((LUAJIT_SECURITY_STRID & 3) << 4) \
679 | ((LUAJIT_SECURITY_MCODE & 3) << 6) \
680 )
681#define LJ_SECURITY_MODESTRING \
682 "\004prng\007strhash\005strid\005mcode"
683
452#endif 684#endif
diff --git a/src/lj_asm.c b/src/lj_asm.c
index 9b17421e..cc7841c0 100644
--- a/src/lj_asm.c
+++ b/src/lj_asm.c
@@ -90,12 +90,18 @@ typedef struct ASMState {
90 MCode *realign; /* Realign loop if not NULL. */ 90 MCode *realign; /* Realign loop if not NULL. */
91 91
92#ifdef RID_NUM_KREF 92#ifdef RID_NUM_KREF
93 int32_t krefk[RID_NUM_KREF]; 93 intptr_t krefk[RID_NUM_KREF];
94#endif 94#endif
95 IRRef1 phireg[RID_MAX]; /* PHI register references. */ 95 IRRef1 phireg[RID_MAX]; /* PHI register references. */
96 uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */ 96 uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */
97} ASMState; 97} ASMState;
98 98
99#ifdef LUA_USE_ASSERT
100#define lj_assertA(c, ...) lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
101#else
102#define lj_assertA(c, ...) ((void)as)
103#endif
104
99#define IR(ref) (&as->ir[(ref)]) 105#define IR(ref) (&as->ir[(ref)])
100 106
101#define ASMREF_TMP1 REF_TRUE /* Temp. register. */ 107#define ASMREF_TMP1 REF_TRUE /* Temp. register. */
@@ -127,9 +133,8 @@ static LJ_AINLINE void checkmclim(ASMState *as)
127#ifdef LUA_USE_ASSERT 133#ifdef LUA_USE_ASSERT
128 if (as->mcp + MCLIM_REDZONE < as->mcp_prev) { 134 if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
129 IRIns *ir = IR(as->curins+1); 135 IRIns *ir = IR(as->curins+1);
130 fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp, 136 lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp,
131 as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS); 137 as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
132 lua_assert(0);
133 } 138 }
134#endif 139#endif
135 if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as); 140 if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
@@ -143,7 +148,7 @@ static LJ_AINLINE void checkmclim(ASMState *as)
143#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref))) 148#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
144#define ra_krefk(as, ref) (as->krefk[(ref)]) 149#define ra_krefk(as, ref) (as->krefk[(ref)])
145 150
146static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k) 151static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
147{ 152{
148 IRRef ref = (IRRef)(r - RID_MIN_KREF); 153 IRRef ref = (IRRef)(r - RID_MIN_KREF);
149 as->krefk[ref] = k; 154 as->krefk[ref] = k;
@@ -170,6 +175,8 @@ IRFLDEF(FLOFS)
170#include "lj_emit_x86.h" 175#include "lj_emit_x86.h"
171#elif LJ_TARGET_ARM 176#elif LJ_TARGET_ARM
172#include "lj_emit_arm.h" 177#include "lj_emit_arm.h"
178#elif LJ_TARGET_ARM64
179#include "lj_emit_arm64.h"
173#elif LJ_TARGET_PPC 180#elif LJ_TARGET_PPC
174#include "lj_emit_ppc.h" 181#include "lj_emit_ppc.h"
175#elif LJ_TARGET_MIPS 182#elif LJ_TARGET_MIPS
@@ -178,6 +185,12 @@ IRFLDEF(FLOFS)
178#error "Missing instruction emitter for target CPU" 185#error "Missing instruction emitter for target CPU"
179#endif 186#endif
180 187
188/* Generic load/store of register from/to stack slot. */
189#define emit_spload(as, ir, r, ofs) \
190 emit_loadofs(as, ir, (r), RID_SP, (ofs))
191#define emit_spstore(as, ir, r, ofs) \
192 emit_storeofs(as, ir, (r), RID_SP, (ofs))
193
181/* -- Register allocator debugging ---------------------------------------- */ 194/* -- Register allocator debugging ---------------------------------------- */
182 195
183/* #define LUAJIT_DEBUG_RA */ 196/* #define LUAJIT_DEBUG_RA */
@@ -235,7 +248,7 @@ static void ra_dprintf(ASMState *as, const char *fmt, ...)
235 *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; 248 *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
236 } else { 249 } else {
237 *p++ = '?'; 250 *p++ = '?';
238 lua_assert(0); 251 lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
239 } 252 }
240 } else if (e[1] == 'f' || e[1] == 'i') { 253 } else if (e[1] == 'f' || e[1] == 'i') {
241 IRRef ref; 254 IRRef ref;
@@ -253,7 +266,7 @@ static void ra_dprintf(ASMState *as, const char *fmt, ...)
253 } else if (e[1] == 'x') { 266 } else if (e[1] == 'x') {
254 p += sprintf(p, "%08x", va_arg(argp, int32_t)); 267 p += sprintf(p, "%08x", va_arg(argp, int32_t));
255 } else { 268 } else {
256 lua_assert(0); 269 lj_assertA(0, "bad debug format code");
257 } 270 }
258 fmt = e+2; 271 fmt = e+2;
259 } 272 }
@@ -312,37 +325,51 @@ static Reg ra_rematk(ASMState *as, IRRef ref)
312 Reg r; 325 Reg r;
313 if (ra_iskref(ref)) { 326 if (ra_iskref(ref)) {
314 r = ra_krefreg(ref); 327 r = ra_krefreg(ref);
315 lua_assert(!rset_test(as->freeset, r)); 328 lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
316 ra_free(as, r); 329 ra_free(as, r);
317 ra_modified(as, r); 330 ra_modified(as, r);
331#if LJ_64
332 emit_loadu64(as, r, ra_krefk(as, ref));
333#else
318 emit_loadi(as, r, ra_krefk(as, ref)); 334 emit_loadi(as, r, ra_krefk(as, ref));
335#endif
319 return r; 336 return r;
320 } 337 }
321 ir = IR(ref); 338 ir = IR(ref);
322 r = ir->r; 339 r = ir->r;
323 lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); 340 lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
341 lj_assertA(!ra_hasspill(ir->s),
342 "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
324 ra_free(as, r); 343 ra_free(as, r);
325 ra_modified(as, r); 344 ra_modified(as, r);
326 ir->r = RID_INIT; /* Do not keep any hint. */ 345 ir->r = RID_INIT; /* Do not keep any hint. */
327 RA_DBGX((as, "remat $i $r", ir, r)); 346 RA_DBGX((as, "remat $i $r", ir, r));
328#if !LJ_SOFTFP 347#if !LJ_SOFTFP32
329 if (ir->o == IR_KNUM) { 348 if (ir->o == IR_KNUM) {
330 emit_loadn(as, r, ir_knum(ir)); 349 emit_loadk64(as, r, ir);
331 } else 350 } else
332#endif 351#endif
333 if (emit_canremat(REF_BASE) && ir->o == IR_BASE) { 352 if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
334 ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ 353 ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */
335 emit_getgl(as, r, jit_base); 354 emit_getgl(as, r, jit_base);
336 } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { 355 } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
337 lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */ 356 /* REF_NIL stores ASMREF_L register. */
338 emit_getgl(as, r, jit_L); 357 lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
358 emit_getgl(as, r, cur_L);
339#if LJ_64 359#if LJ_64
340 } else if (ir->o == IR_KINT64) { 360 } else if (ir->o == IR_KINT64) {
341 emit_loadu64(as, r, ir_kint64(ir)->u64); 361 emit_loadu64(as, r, ir_kint64(ir)->u64);
362#if LJ_GC64
363 } else if (ir->o == IR_KGC) {
364 emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
365 } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
366 emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
367#endif
342#endif 368#endif
343 } else { 369 } else {
344 lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || 370 lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
345 ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); 371 ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
372 "rematk of bad IR op %d", ir->o);
346 emit_loadi(as, r, ir->i); 373 emit_loadi(as, r, ir->i);
347 } 374 }
348 return r; 375 return r;
@@ -352,7 +379,8 @@ static Reg ra_rematk(ASMState *as, IRRef ref)
352static int32_t ra_spill(ASMState *as, IRIns *ir) 379static int32_t ra_spill(ASMState *as, IRIns *ir)
353{ 380{
354 int32_t slot = ir->s; 381 int32_t slot = ir->s;
355 lua_assert(ir >= as->ir + REF_TRUE); 382 lj_assertA(ir >= as->ir + REF_TRUE,
383 "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
356 if (!ra_hasspill(slot)) { 384 if (!ra_hasspill(slot)) {
357 if (irt_is64(ir->t)) { 385 if (irt_is64(ir->t)) {
358 slot = as->evenspill; 386 slot = as->evenspill;
@@ -377,7 +405,9 @@ static Reg ra_releasetmp(ASMState *as, IRRef ref)
377{ 405{
378 IRIns *ir = IR(ref); 406 IRIns *ir = IR(ref);
379 Reg r = ir->r; 407 Reg r = ir->r;
380 lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); 408 lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
409 lj_assertA(!ra_hasspill(ir->s),
410 "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
381 ra_free(as, r); 411 ra_free(as, r);
382 ra_modified(as, r); 412 ra_modified(as, r);
383 ir->r = RID_INIT; 413 ir->r = RID_INIT;
@@ -393,7 +423,7 @@ static Reg ra_restore(ASMState *as, IRRef ref)
393 IRIns *ir = IR(ref); 423 IRIns *ir = IR(ref);
394 int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ 424 int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */
395 Reg r = ir->r; 425 Reg r = ir->r;
396 lua_assert(ra_hasreg(r)); 426 lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
397 ra_sethint(ir->r, r); /* Keep hint. */ 427 ra_sethint(ir->r, r); /* Keep hint. */
398 ra_free(as, r); 428 ra_free(as, r);
399 if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ 429 if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */
@@ -422,14 +452,15 @@ static Reg ra_evict(ASMState *as, RegSet allow)
422{ 452{
423 IRRef ref; 453 IRRef ref;
424 RegCost cost = ~(RegCost)0; 454 RegCost cost = ~(RegCost)0;
425 lua_assert(allow != RSET_EMPTY); 455 lj_assertA(allow != RSET_EMPTY, "evict from empty set");
426 if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { 456 if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
427 GPRDEF(MINCOST) 457 GPRDEF(MINCOST)
428 } else { 458 } else {
429 FPRDEF(MINCOST) 459 FPRDEF(MINCOST)
430 } 460 }
431 ref = regcost_ref(cost); 461 ref = regcost_ref(cost);
432 lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins)); 462 lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
463 "evict of out-of-range IR %04d", ref - REF_BIAS);
433 /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ 464 /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
434 if (!irref_isk(ref) && (as->weakset & allow)) { 465 if (!irref_isk(ref) && (as->weakset & allow)) {
435 IRIns *ir = IR(ref); 466 IRIns *ir = IR(ref);
@@ -511,7 +542,7 @@ static void ra_evictk(ASMState *as)
511 542
512#ifdef RID_NUM_KREF 543#ifdef RID_NUM_KREF
513/* Allocate a register for a constant. */ 544/* Allocate a register for a constant. */
514static Reg ra_allock(ASMState *as, int32_t k, RegSet allow) 545static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
515{ 546{
516 /* First try to find a register which already holds the same constant. */ 547 /* First try to find a register which already holds the same constant. */
517 RegSet pick, work = ~as->freeset & RSET_GPR; 548 RegSet pick, work = ~as->freeset & RSET_GPR;
@@ -520,9 +551,31 @@ static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
520 IRRef ref; 551 IRRef ref;
521 r = rset_pickbot(work); 552 r = rset_pickbot(work);
522 ref = regcost_ref(as->cost[r]); 553 ref = regcost_ref(as->cost[r]);
554#if LJ_64
555 if (ref < ASMREF_L) {
556 if (ra_iskref(ref)) {
557 if (k == ra_krefk(as, ref))
558 return r;
559 } else {
560 IRIns *ir = IR(ref);
561 if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
562#if LJ_GC64
563 (ir->o == IR_KINT && k == ir->i) ||
564 (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
565 ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
566 k == (intptr_t)ir_kptr(ir))
567#else
568 (ir->o != IR_KINT64 && k == ir->i)
569#endif
570 )
571 return r;
572 }
573 }
574#else
523 if (ref < ASMREF_L && 575 if (ref < ASMREF_L &&
524 k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i)) 576 k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
525 return r; 577 return r;
578#endif
526 rset_clear(work, r); 579 rset_clear(work, r);
527 } 580 }
528 pick = as->freeset & allow; 581 pick = as->freeset & allow;
@@ -542,7 +595,7 @@ static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
542} 595}
543 596
544/* Allocate a specific register for a constant. */ 597/* Allocate a specific register for a constant. */
545static void ra_allockreg(ASMState *as, int32_t k, Reg r) 598static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
546{ 599{
547 Reg kr = ra_allock(as, k, RID2RSET(r)); 600 Reg kr = ra_allock(as, k, RID2RSET(r));
548 if (kr != r) { 601 if (kr != r) {
@@ -565,7 +618,8 @@ static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
565 IRIns *ir = IR(ref); 618 IRIns *ir = IR(ref);
566 RegSet pick = as->freeset & allow; 619 RegSet pick = as->freeset & allow;
567 Reg r; 620 Reg r;
568 lua_assert(ra_noreg(ir->r)); 621 lj_assertA(ra_noreg(ir->r),
622 "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
569 if (pick) { 623 if (pick) {
570 /* First check register hint from propagation or PHI. */ 624 /* First check register hint from propagation or PHI. */
571 if (ra_hashint(ir->r)) { 625 if (ra_hashint(ir->r)) {
@@ -612,15 +666,27 @@ static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
612 return r; 666 return r;
613} 667}
614 668
669/* Add a register rename to the IR. */
670static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
671{
672 IRRef ren;
673 lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
674 ren = tref_ref(lj_ir_emit(as->J));
675 as->J->cur.ir[ren].r = (uint8_t)down;
676 as->J->cur.ir[ren].s = SPS_NONE;
677}
678
615/* Rename register allocation and emit move. */ 679/* Rename register allocation and emit move. */
616static void ra_rename(ASMState *as, Reg down, Reg up) 680static void ra_rename(ASMState *as, Reg down, Reg up)
617{ 681{
618 IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]); 682 IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
619 IRIns *ir = IR(ref); 683 IRIns *ir = IR(ref);
620 ir->r = (uint8_t)up; 684 ir->r = (uint8_t)up;
621 as->cost[down] = 0; 685 as->cost[down] = 0;
622 lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR)); 686 lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
623 lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up)); 687 "rename between GPR/FPR %d and %d", down, up);
688 lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
689 lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
624 ra_free(as, down); /* 'down' is free ... */ 690 ra_free(as, down); /* 'down' is free ... */
625 ra_modified(as, down); 691 ra_modified(as, down);
626 rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ 692 rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */
@@ -628,11 +694,7 @@ static void ra_rename(ASMState *as, Reg down, Reg up)
628 RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up)); 694 RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
629 emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */ 695 emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */
630 if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */ 696 if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */
631 lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno); 697 ra_addrename(as, down, ref, as->snapno);
632 ren = tref_ref(lj_ir_emit(as->J));
633 as->ir = as->T->ir; /* The IR may have been reallocated. */
634 IR(ren)->r = (uint8_t)down;
635 IR(ren)->s = SPS_NONE;
636 } 698 }
637} 699}
638 700
@@ -665,7 +727,7 @@ static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
665{ 727{
666 Reg dest = ra_dest(as, ir, RID2RSET(r)); 728 Reg dest = ra_dest(as, ir, RID2RSET(r));
667 if (dest != r) { 729 if (dest != r) {
668 lua_assert(rset_test(as->freeset, r)); 730 lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
669 ra_modified(as, r); 731 ra_modified(as, r);
670 emit_movrr(as, ir, dest, r); 732 emit_movrr(as, ir, dest, r);
671 } 733 }
@@ -682,20 +744,25 @@ static void ra_left(ASMState *as, Reg dest, IRRef lref)
682 if (ra_noreg(left)) { 744 if (ra_noreg(left)) {
683 if (irref_isk(lref)) { 745 if (irref_isk(lref)) {
684 if (ir->o == IR_KNUM) { 746 if (ir->o == IR_KNUM) {
685 cTValue *tv = ir_knum(ir);
686 /* FP remat needs a load except for +0. Still better than eviction. */ 747 /* FP remat needs a load except for +0. Still better than eviction. */
687 if (tvispzero(tv) || !(as->freeset & RSET_FPR)) { 748 if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
688 emit_loadn(as, dest, tv); 749 emit_loadk64(as, dest, ir);
689 return; 750 return;
690 } 751 }
691#if LJ_64 752#if LJ_64
692 } else if (ir->o == IR_KINT64) { 753 } else if (ir->o == IR_KINT64) {
693 emit_loadu64(as, dest, ir_kint64(ir)->u64); 754 emit_loadk64(as, dest, ir);
755 return;
756#if LJ_GC64
757 } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
758 emit_loadk64(as, dest, ir);
694 return; 759 return;
695#endif 760#endif
696 } else { 761#endif
697 lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || 762 } else if (ir->o != IR_KPRI) {
698 ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); 763 lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
764 ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
765 "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
699 emit_loadi(as, dest, ir->i); 766 emit_loadi(as, dest, ir->i);
700 return; 767 return;
701 } 768 }
@@ -837,11 +904,14 @@ static void asm_snap_alloc1(ASMState *as, IRRef ref)
837#endif 904#endif
838 { /* Allocate stored values for TNEW, TDUP and CNEW. */ 905 { /* Allocate stored values for TNEW, TDUP and CNEW. */
839 IRIns *irs; 906 IRIns *irs;
840 lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW); 907 lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
908 "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
841 for (irs = IR(as->snapref-1); irs > ir; irs--) 909 for (irs = IR(as->snapref-1); irs > ir; irs--)
842 if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) { 910 if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
843 lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || 911 lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
844 irs->o == IR_FSTORE || irs->o == IR_XSTORE); 912 irs->o == IR_FSTORE || irs->o == IR_XSTORE,
913 "sunk store IR %04d has bad op %d",
914 (int)(irs - as->ir) - REF_BIAS, irs->o);
845 asm_snap_alloc1(as, irs->op2); 915 asm_snap_alloc1(as, irs->op2);
846 if (LJ_32 && (irs+1)->o == IR_HIOP) 916 if (LJ_32 && (irs+1)->o == IR_HIOP)
847 asm_snap_alloc1(as, (irs+1)->op2); 917 asm_snap_alloc1(as, (irs+1)->op2);
@@ -888,7 +958,9 @@ static void asm_snap_alloc(ASMState *as)
888 if (!irref_isk(ref)) { 958 if (!irref_isk(ref)) {
889 asm_snap_alloc1(as, ref); 959 asm_snap_alloc1(as, ref);
890 if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { 960 if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
891 lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP); 961 lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
962 "snap %d[%d] points to bad SOFTFP IR %04d",
963 as->snapno, n, ref - REF_BIAS);
892 asm_snap_alloc1(as, ref+1); 964 asm_snap_alloc1(as, ref+1);
893 } 965 }
894 } 966 }
@@ -934,7 +1006,7 @@ static void asm_snap_prep(ASMState *as)
934 } else { 1006 } else {
935 /* Process any renames above the highwater mark. */ 1007 /* Process any renames above the highwater mark. */
936 for (; as->snaprename < as->T->nins; as->snaprename++) { 1008 for (; as->snaprename < as->T->nins; as->snaprename++) {
937 IRIns *ir = IR(as->snaprename); 1009 IRIns *ir = &as->T->ir[as->snaprename];
938 if (asm_snap_checkrename(as, ir->op1)) 1010 if (asm_snap_checkrename(as, ir->op1))
939 ir->op2 = REF_BIAS-1; /* Kill rename. */ 1011 ir->op2 = REF_BIAS-1; /* Kill rename. */
940 } 1012 }
@@ -943,44 +1015,6 @@ static void asm_snap_prep(ASMState *as)
943 1015
944/* -- Miscellaneous helpers ----------------------------------------------- */ 1016/* -- Miscellaneous helpers ----------------------------------------------- */
945 1017
946/* Collect arguments from CALL* and CARG instructions. */
947static void asm_collectargs(ASMState *as, IRIns *ir,
948 const CCallInfo *ci, IRRef *args)
949{
950 uint32_t n = CCI_NARGS(ci);
951 lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */
952 if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
953 while (n-- > 1) {
954 ir = IR(ir->op1);
955 lua_assert(ir->o == IR_CARG);
956 args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
957 }
958 args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
959 lua_assert(IR(ir->op1)->o != IR_CARG);
960}
961
962/* Reconstruct CCallInfo flags for CALLX*. */
963static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
964{
965 uint32_t nargs = 0;
966 if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
967 IRIns *ira = IR(ir->op1);
968 nargs++;
969 while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
970 }
971#if LJ_HASFFI
972 if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
973 CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
974 CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
975 nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
976#if LJ_TARGET_X86
977 nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
978#endif
979 }
980#endif
981 return (nargs | (ir->t.irt << CCI_OTSHIFT));
982}
983
984/* Calculate stack adjustment. */ 1018/* Calculate stack adjustment. */
985static int32_t asm_stack_adjust(ASMState *as) 1019static int32_t asm_stack_adjust(ASMState *as)
986{ 1020{
@@ -990,21 +1024,26 @@ static int32_t asm_stack_adjust(ASMState *as)
990} 1024}
991 1025
992/* Must match with hash*() in lj_tab.c. */ 1026/* Must match with hash*() in lj_tab.c. */
993static uint32_t ir_khash(IRIns *ir) 1027static uint32_t ir_khash(ASMState *as, IRIns *ir)
994{ 1028{
995 uint32_t lo, hi; 1029 uint32_t lo, hi;
1030 UNUSED(as);
996 if (irt_isstr(ir->t)) { 1031 if (irt_isstr(ir->t)) {
997 return ir_kstr(ir)->hash; 1032 return ir_kstr(ir)->sid;
998 } else if (irt_isnum(ir->t)) { 1033 } else if (irt_isnum(ir->t)) {
999 lo = ir_knum(ir)->u32.lo; 1034 lo = ir_knum(ir)->u32.lo;
1000 hi = ir_knum(ir)->u32.hi << 1; 1035 hi = ir_knum(ir)->u32.hi << 1;
1001 } else if (irt_ispri(ir->t)) { 1036 } else if (irt_ispri(ir->t)) {
1002 lua_assert(!irt_isnil(ir->t)); 1037 lj_assertA(!irt_isnil(ir->t), "hash of nil key");
1003 return irt_type(ir->t)-IRT_FALSE; 1038 return irt_type(ir->t)-IRT_FALSE;
1004 } else { 1039 } else {
1005 lua_assert(irt_isgcv(ir->t)); 1040 lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
1006 lo = u32ptr(ir_kgc(ir)); 1041 lo = u32ptr(ir_kgc(ir));
1042#if LJ_GC64
1043 hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
1044#else
1007 hi = lo + HASH_BIAS; 1045 hi = lo + HASH_BIAS;
1046#endif
1008 } 1047 }
1009 return hashrot(lo, hi); 1048 return hashrot(lo, hi);
1010} 1049}
@@ -1065,6 +1104,237 @@ static void asm_gcstep(ASMState *as, IRIns *ir)
1065 as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */ 1104 as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */
1066} 1105}
1067 1106
1107/* -- Buffer operations --------------------------------------------------- */
1108
1109static void asm_tvptr(ASMState *as, Reg dest, IRRef ref);
1110
1111static void asm_bufhdr(ASMState *as, IRIns *ir)
1112{
1113 Reg sb = ra_dest(as, ir, RSET_GPR);
1114 if ((ir->op2 & IRBUFHDR_APPEND)) {
1115 /* Rematerialize const buffer pointer instead of likely spill. */
1116 IRIns *irp = IR(ir->op1);
1117 if (!(ra_hasreg(irp->r) || irp == ir-1 ||
1118 (irp == ir-2 && !ra_used(ir-1)))) {
1119 while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND)))
1120 irp = IR(irp->op1);
1121 if (irref_isk(irp->op1)) {
1122 ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
1123 ir = irp;
1124 }
1125 }
1126 } else {
1127 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
1128 /* Passing ir isn't strictly correct, but it's an IRT_PGC, too. */
1129 emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p));
1130 emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b));
1131 }
1132#if LJ_TARGET_X86ORX64
1133 ra_left(as, sb, ir->op1);
1134#else
1135 ra_leftov(as, sb, ir->op1);
1136#endif
1137}
1138
1139static void asm_bufput(ASMState *as, IRIns *ir)
1140{
1141 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
1142 IRRef args[3];
1143 IRIns *irs;
1144 int kchar = -129;
1145 args[0] = ir->op1; /* SBuf * */
1146 args[1] = ir->op2; /* GCstr * */
1147 irs = IR(ir->op2);
1148 lj_assertA(irt_isstr(irs->t),
1149 "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
1150 if (irs->o == IR_KGC) {
1151 GCstr *s = ir_kstr(irs);
1152 if (s->len == 1) { /* Optimize put of single-char string constant. */
1153 kchar = (int8_t)strdata(s)[0]; /* Signed! */
1154 args[1] = ASMREF_TMP1; /* int, truncated to char */
1155 ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1156 }
1157 } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
1158 if (irs->o == IR_TOSTR) { /* Fuse number to string conversions. */
1159 if (irs->op2 == IRTOSTR_NUM) {
1160 args[1] = ASMREF_TMP1; /* TValue * */
1161 ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
1162 } else {
1163 lj_assertA(irt_isinteger(IR(irs->op1)->t),
1164 "TOSTR of non-numeric IR %04d", irs->op1);
1165 args[1] = irs->op1; /* int */
1166 if (irs->op2 == IRTOSTR_INT)
1167 ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
1168 else
1169 ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1170 }
1171 } else if (irs->o == IR_SNEW) { /* Fuse string allocation. */
1172 args[1] = irs->op1; /* const void * */
1173 args[2] = irs->op2; /* MSize */
1174 ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
1175 }
1176 }
1177 asm_setupresult(as, ir, ci); /* SBuf * */
1178 asm_gencall(as, ci, args);
1179 if (args[1] == ASMREF_TMP1) {
1180 Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
1181 if (kchar == -129)
1182 asm_tvptr(as, tmp, irs->op1);
1183 else
1184 ra_allockreg(as, kchar, tmp);
1185 }
1186}
1187
1188static void asm_bufstr(ASMState *as, IRIns *ir)
1189{
1190 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
1191 IRRef args[1];
1192 args[0] = ir->op1; /* SBuf *sb */
1193 as->gcsteps++;
1194 asm_setupresult(as, ir, ci); /* GCstr * */
1195 asm_gencall(as, ci, args);
1196}
1197
1198/* -- Type conversions ---------------------------------------------------- */
1199
1200static void asm_tostr(ASMState *as, IRIns *ir)
1201{
1202 const CCallInfo *ci;
1203 IRRef args[2];
1204 args[0] = ASMREF_L;
1205 as->gcsteps++;
1206 if (ir->op2 == IRTOSTR_NUM) {
1207 args[1] = ASMREF_TMP1; /* cTValue * */
1208 ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
1209 } else {
1210 args[1] = ir->op1; /* int32_t k */
1211 if (ir->op2 == IRTOSTR_INT)
1212 ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
1213 else
1214 ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
1215 }
1216 asm_setupresult(as, ir, ci); /* GCstr * */
1217 asm_gencall(as, ci, args);
1218 if (ir->op2 == IRTOSTR_NUM)
1219 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
1220}
1221
1222#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
1223static void asm_conv64(ASMState *as, IRIns *ir)
1224{
1225 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
1226 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
1227 IRCallID id;
1228 IRRef args[2];
1229 lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
1230 "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
1231 args[LJ_BE] = (ir-1)->op1;
1232 args[LJ_LE] = ir->op1;
1233 if (st == IRT_NUM || st == IRT_FLOAT) {
1234 id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
1235 ir--;
1236 } else {
1237 id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
1238 }
1239 {
1240#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
1241 CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
1242 cim.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
1243#else
1244 const CCallInfo *ci = &lj_ir_callinfo[id];
1245#endif
1246 asm_setupresult(as, ir, ci);
1247 asm_gencall(as, ci, args);
1248 }
1249}
1250#endif
1251
1252/* -- Memory references --------------------------------------------------- */
1253
1254static void asm_newref(ASMState *as, IRIns *ir)
1255{
1256 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
1257 IRRef args[3];
1258 if (ir->r == RID_SINK)
1259 return;
1260 args[0] = ASMREF_L; /* lua_State *L */
1261 args[1] = ir->op1; /* GCtab *t */
1262 args[2] = ASMREF_TMP1; /* cTValue *key */
1263 asm_setupresult(as, ir, ci); /* TValue * */
1264 asm_gencall(as, ci, args);
1265 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
1266}
1267
1268static void asm_lref(ASMState *as, IRIns *ir)
1269{
1270 Reg r = ra_dest(as, ir, RSET_GPR);
1271#if LJ_TARGET_X86ORX64
1272 ra_left(as, r, ASMREF_L);
1273#else
1274 ra_leftov(as, r, ASMREF_L);
1275#endif
1276}
1277
1278/* -- Calls --------------------------------------------------------------- */
1279
1280/* Collect arguments from CALL* and CARG instructions. */
1281static void asm_collectargs(ASMState *as, IRIns *ir,
1282 const CCallInfo *ci, IRRef *args)
1283{
1284 uint32_t n = CCI_XNARGS(ci);
1285 /* Account for split args. */
1286 lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
1287 if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
1288 while (n-- > 1) {
1289 ir = IR(ir->op1);
1290 lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
1291 args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
1292 }
1293 args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
1294 lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
1295}
1296
1297/* Reconstruct CCallInfo flags for CALLX*. */
1298static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
1299{
1300 uint32_t nargs = 0;
1301 if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
1302 IRIns *ira = IR(ir->op1);
1303 nargs++;
1304 while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
1305 }
1306#if LJ_HASFFI
1307 if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
1308 CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
1309 CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
1310 nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
1311#if LJ_TARGET_X86
1312 nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
1313#endif
1314 }
1315#endif
1316 return (nargs | (ir->t.irt << CCI_OTSHIFT));
1317}
1318
1319static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
1320{
1321 const CCallInfo *ci = &lj_ir_callinfo[id];
1322 IRRef args[2];
1323 args[0] = ir->op1;
1324 args[1] = ir->op2;
1325 asm_setupresult(as, ir, ci);
1326 asm_gencall(as, ci, args);
1327}
1328
1329static void asm_call(ASMState *as, IRIns *ir)
1330{
1331 IRRef args[CCI_NARGS_MAX];
1332 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
1333 asm_collectargs(as, ir, ci, args);
1334 asm_setupresult(as, ir, ci);
1335 asm_gencall(as, ci, args);
1336}
1337
1068/* -- PHI and loop handling ----------------------------------------------- */ 1338/* -- PHI and loop handling ----------------------------------------------- */
1069 1339
1070/* Break a PHI cycle by renaming to a free register (evict if needed). */ 1340/* Break a PHI cycle by renaming to a free register (evict if needed). */
@@ -1250,12 +1520,7 @@ static void asm_phi_fixup(ASMState *as)
1250 irt_clearmark(ir->t); 1520 irt_clearmark(ir->t);
1251 /* Left PHI gained a spill slot before the loop? */ 1521 /* Left PHI gained a spill slot before the loop? */
1252 if (ra_hasspill(ir->s)) { 1522 if (ra_hasspill(ir->s)) {
1253 IRRef ren; 1523 ra_addrename(as, r, lref, as->loopsnapno);
1254 lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
1255 ren = tref_ref(lj_ir_emit(as->J));
1256 as->ir = as->T->ir; /* The IR may have been reallocated. */
1257 IR(ren)->r = (uint8_t)r;
1258 IR(ren)->s = SPS_NONE;
1259 } 1524 }
1260 } 1525 }
1261 rset_clear(work, r); 1526 rset_clear(work, r);
@@ -1330,6 +1595,8 @@ static void asm_loop(ASMState *as)
1330#include "lj_asm_x86.h" 1595#include "lj_asm_x86.h"
1331#elif LJ_TARGET_ARM 1596#elif LJ_TARGET_ARM
1332#include "lj_asm_arm.h" 1597#include "lj_asm_arm.h"
1598#elif LJ_TARGET_ARM64
1599#include "lj_asm_arm64.h"
1333#elif LJ_TARGET_PPC 1600#elif LJ_TARGET_PPC
1334#include "lj_asm_ppc.h" 1601#include "lj_asm_ppc.h"
1335#elif LJ_TARGET_MIPS 1602#elif LJ_TARGET_MIPS
@@ -1338,6 +1605,203 @@ static void asm_loop(ASMState *as)
1338#error "Missing assembler for target CPU" 1605#error "Missing assembler for target CPU"
1339#endif 1606#endif
1340 1607
1608/* -- Common instruction helpers ------------------------------------------ */
1609
1610#if !LJ_SOFTFP32
1611#if !LJ_TARGET_X86ORX64
1612#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)
1613#define asm_fppowi(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi)
1614#endif
1615
1616static void asm_pow(ASMState *as, IRIns *ir)
1617{
1618#if LJ_64 && LJ_HASFFI
1619 if (!irt_isnum(ir->t))
1620 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
1621 IRCALL_lj_carith_powu64);
1622 else
1623#endif
1624 if (irt_isnum(IR(ir->op2)->t))
1625 asm_callid(as, ir, IRCALL_pow);
1626 else
1627 asm_fppowi(as, ir);
1628}
1629
1630static void asm_div(ASMState *as, IRIns *ir)
1631{
1632#if LJ_64 && LJ_HASFFI
1633 if (!irt_isnum(ir->t))
1634 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
1635 IRCALL_lj_carith_divu64);
1636 else
1637#endif
1638 asm_fpdiv(as, ir);
1639}
1640#endif
1641
1642static void asm_mod(ASMState *as, IRIns *ir)
1643{
1644#if LJ_64 && LJ_HASFFI
1645 if (!irt_isint(ir->t))
1646 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
1647 IRCALL_lj_carith_modu64);
1648 else
1649#endif
1650 asm_callid(as, ir, IRCALL_lj_vm_modi);
1651}
1652
1653static void asm_fuseequal(ASMState *as, IRIns *ir)
1654{
1655 /* Fuse HREF + EQ/NE. */
1656 if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
1657 as->curins--;
1658 asm_href(as, ir-1, (IROp)ir->o);
1659 } else {
1660 asm_equal(as, ir);
1661 }
1662}
1663
1664static void asm_alen(ASMState *as, IRIns *ir)
1665{
1666 asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
1667 IRCALL_lj_tab_len_hint);
1668}
1669
1670/* -- Instruction dispatch ------------------------------------------------ */
1671
1672/* Assemble a single instruction. */
1673static void asm_ir(ASMState *as, IRIns *ir)
1674{
1675 switch ((IROp)ir->o) {
1676 /* Miscellaneous ops. */
1677 case IR_LOOP: asm_loop(as); break;
1678 case IR_NOP: case IR_XBAR:
1679 lj_assertA(!ra_used(ir),
1680 "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
1681 break;
1682 case IR_USE:
1683 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1684 case IR_PHI: asm_phi(as, ir); break;
1685 case IR_HIOP: asm_hiop(as, ir); break;
1686 case IR_GCSTEP: asm_gcstep(as, ir); break;
1687 case IR_PROF: asm_prof(as, ir); break;
1688
1689 /* Guarded assertions. */
1690 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1691 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1692 case IR_ABC:
1693 asm_comp(as, ir);
1694 break;
1695 case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;
1696
1697 case IR_RETF: asm_retf(as, ir); break;
1698
1699 /* Bit ops. */
1700 case IR_BNOT: asm_bnot(as, ir); break;
1701 case IR_BSWAP: asm_bswap(as, ir); break;
1702 case IR_BAND: asm_band(as, ir); break;
1703 case IR_BOR: asm_bor(as, ir); break;
1704 case IR_BXOR: asm_bxor(as, ir); break;
1705 case IR_BSHL: asm_bshl(as, ir); break;
1706 case IR_BSHR: asm_bshr(as, ir); break;
1707 case IR_BSAR: asm_bsar(as, ir); break;
1708 case IR_BROL: asm_brol(as, ir); break;
1709 case IR_BROR: asm_bror(as, ir); break;
1710
1711 /* Arithmetic ops. */
1712 case IR_ADD: asm_add(as, ir); break;
1713 case IR_SUB: asm_sub(as, ir); break;
1714 case IR_MUL: asm_mul(as, ir); break;
1715 case IR_MOD: asm_mod(as, ir); break;
1716 case IR_NEG: asm_neg(as, ir); break;
1717#if LJ_SOFTFP32
1718 case IR_DIV: case IR_POW: case IR_ABS:
1719 case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
1720 /* Unused for LJ_SOFTFP32. */
1721 lj_assertA(0, "IR %04d with unused op %d",
1722 (int)(ir - as->ir) - REF_BIAS, ir->o);
1723 break;
1724#else
1725 case IR_DIV: asm_div(as, ir); break;
1726 case IR_POW: asm_pow(as, ir); break;
1727 case IR_ABS: asm_abs(as, ir); break;
1728 case IR_LDEXP: asm_ldexp(as, ir); break;
1729 case IR_FPMATH: asm_fpmath(as, ir); break;
1730 case IR_TOBIT: asm_tobit(as, ir); break;
1731#endif
1732 case IR_MIN: asm_min(as, ir); break;
1733 case IR_MAX: asm_max(as, ir); break;
1734
1735 /* Overflow-checking arithmetic ops. */
1736 case IR_ADDOV: asm_addov(as, ir); break;
1737 case IR_SUBOV: asm_subov(as, ir); break;
1738 case IR_MULOV: asm_mulov(as, ir); break;
1739
1740 /* Memory references. */
1741 case IR_AREF: asm_aref(as, ir); break;
1742 case IR_HREF: asm_href(as, ir, 0); break;
1743 case IR_HREFK: asm_hrefk(as, ir); break;
1744 case IR_NEWREF: asm_newref(as, ir); break;
1745 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
1746 case IR_FREF: asm_fref(as, ir); break;
1747 case IR_STRREF: asm_strref(as, ir); break;
1748 case IR_LREF: asm_lref(as, ir); break;
1749
1750 /* Loads and stores. */
1751 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1752 asm_ahuvload(as, ir);
1753 break;
1754 case IR_FLOAD: asm_fload(as, ir); break;
1755 case IR_XLOAD: asm_xload(as, ir); break;
1756 case IR_SLOAD: asm_sload(as, ir); break;
1757 case IR_ALEN: asm_alen(as, ir); break;
1758
1759 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
1760 case IR_FSTORE: asm_fstore(as, ir); break;
1761 case IR_XSTORE: asm_xstore(as, ir); break;
1762
1763 /* Allocations. */
1764 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
1765 case IR_TNEW: asm_tnew(as, ir); break;
1766 case IR_TDUP: asm_tdup(as, ir); break;
1767 case IR_CNEW: case IR_CNEWI:
1768#if LJ_HASFFI
1769 asm_cnew(as, ir);
1770#else
1771 lj_assertA(0, "IR %04d with unused op %d",
1772 (int)(ir - as->ir) - REF_BIAS, ir->o);
1773#endif
1774 break;
1775
1776 /* Buffer operations. */
1777 case IR_BUFHDR: asm_bufhdr(as, ir); break;
1778 case IR_BUFPUT: asm_bufput(as, ir); break;
1779 case IR_BUFSTR: asm_bufstr(as, ir); break;
1780
1781 /* Write barriers. */
1782 case IR_TBAR: asm_tbar(as, ir); break;
1783 case IR_OBAR: asm_obar(as, ir); break;
1784
1785 /* Type conversions. */
1786 case IR_CONV: asm_conv(as, ir); break;
1787 case IR_TOSTR: asm_tostr(as, ir); break;
1788 case IR_STRTO: asm_strto(as, ir); break;
1789
1790 /* Calls. */
1791 case IR_CALLA:
1792 as->gcsteps++;
1793 /* fallthrough */
1794 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
1795 case IR_CALLXS: asm_callx(as, ir); break;
1796 case IR_CARG: break;
1797
1798 default:
1799 setintV(&as->J->errinfo, ir->o);
1800 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
1801 break;
1802 }
1803}
1804
1341/* -- Head of trace ------------------------------------------------------- */ 1805/* -- Head of trace ------------------------------------------------------- */
1342 1806
1343/* Head of a root trace. */ 1807/* Head of a root trace. */
@@ -1383,8 +1847,10 @@ static void asm_head_side(ASMState *as)
1383 for (i = as->stopins; i > REF_BASE; i--) { 1847 for (i = as->stopins; i > REF_BASE; i--) {
1384 IRIns *ir = IR(i); 1848 IRIns *ir = IR(i);
1385 RegSP rs; 1849 RegSP rs;
1386 lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || 1850 lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
1387 (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL); 1851 (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
1852 "IR %04d has bad parent op %d",
1853 (int)(ir - as->ir) - REF_BIAS, ir->o);
1388 rs = as->parentmap[i - REF_FIRST]; 1854 rs = as->parentmap[i - REF_FIRST];
1389 if (ra_hasreg(ir->r)) { 1855 if (ra_hasreg(ir->r)) {
1390 rset_clear(allow, ir->r); 1856 rset_clear(allow, ir->r);
@@ -1536,7 +2002,7 @@ static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
1536 SnapEntry sn = map[n-1]; 2002 SnapEntry sn = map[n-1];
1537 if ((sn & SNAP_FRAME)) { 2003 if ((sn & SNAP_FRAME)) {
1538 *gotframe = 1; 2004 *gotframe = 1;
1539 return snap_slot(sn); 2005 return snap_slot(sn) - LJ_FR2;
1540 } 2006 }
1541 } 2007 }
1542 return 0; 2008 return 0;
@@ -1556,19 +2022,23 @@ static void asm_tail_link(ASMState *as)
1556 2022
1557 if (as->T->link == 0) { 2023 if (as->T->link == 0) {
1558 /* Setup fixed registers for exit to interpreter. */ 2024 /* Setup fixed registers for exit to interpreter. */
1559 const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]); 2025 const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
1560 int32_t mres; 2026 int32_t mres;
1561 if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */ 2027 if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
1562 BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins; 2028 BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
1563 if (bc_isret(bc_op(*retpc))) 2029 if (bc_isret(bc_op(*retpc)))
1564 pc = retpc; 2030 pc = retpc;
1565 } 2031 }
2032#if LJ_GC64
2033 emit_loadu64(as, RID_LPC, u64ptr(pc));
2034#else
1566 ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH); 2035 ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
1567 ra_allockreg(as, i32ptr(pc), RID_LPC); 2036 ra_allockreg(as, i32ptr(pc), RID_LPC);
1568 mres = (int32_t)(snap->nslots - baseslot); 2037#endif
2038 mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
1569 switch (bc_op(*pc)) { 2039 switch (bc_op(*pc)) {
1570 case BC_CALLM: case BC_CALLMT: 2040 case BC_CALLM: case BC_CALLMT:
1571 mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break; 2041 mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
1572 case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break; 2042 case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
1573 case BC_TSETM: mres -= (int32_t)bc_a(*pc); break; 2043 case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
1574 default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break; 2044 default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
@@ -1580,6 +2050,11 @@ static void asm_tail_link(ASMState *as)
1580 } 2050 }
1581 emit_addptr(as, RID_BASE, 8*(int32_t)baseslot); 2051 emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
1582 2052
2053 if (as->J->ktrace) { /* Patch ktrace slot with the final GCtrace pointer. */
2054 setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
2055 IR(as->J->ktrace)->o = IR_KGC;
2056 }
2057
1583 /* Sync the interpreter state with the on-trace state. */ 2058 /* Sync the interpreter state with the on-trace state. */
1584 asm_stack_restore(as, snap); 2059 asm_stack_restore(as, snap);
1585 2060
@@ -1605,17 +2080,23 @@ static void asm_setup_regsp(ASMState *as)
1605 ra_setup(as); 2080 ra_setup(as);
1606 2081
1607 /* Clear reg/sp for constants. */ 2082 /* Clear reg/sp for constants. */
1608 for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) 2083 for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
1609 ir->prev = REGSP_INIT; 2084 ir->prev = REGSP_INIT;
2085 if (irt_is64(ir->t) && ir->o != IR_KNULL) {
2086#if LJ_GC64
2087 /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
2088 ir->i = 0; /* Will become non-zero only for RIP-relative addresses. */
2089#else
2090 /* Make life easier for backends by putting address of constant in i. */
2091 ir->i = (int32_t)(intptr_t)(ir+1);
2092#endif
2093 ir++;
2094 }
2095 }
1610 2096
1611 /* REF_BASE is used for implicit references to the BASE register. */ 2097 /* REF_BASE is used for implicit references to the BASE register. */
1612 lastir->prev = REGSP_HINT(RID_BASE); 2098 lastir->prev = REGSP_HINT(RID_BASE);
1613 2099
1614 ir = IR(nins-1);
1615 if (ir->o == IR_RENAME) {
1616 do { ir--; nins--; } while (ir->o == IR_RENAME);
1617 T->nins = nins; /* Remove any renames left over from ASM restart. */
1618 }
1619 as->snaprename = nins; 2100 as->snaprename = nins;
1620 as->snapref = nins; 2101 as->snapref = nins;
1621 as->snapno = T->nsnap; 2102 as->snapno = T->nsnap;
@@ -1628,7 +2109,7 @@ static void asm_setup_regsp(ASMState *as)
1628 ir = IR(REF_FIRST); 2109 ir = IR(REF_FIRST);
1629 if (as->parent) { 2110 if (as->parent) {
1630 uint16_t *p; 2111 uint16_t *p;
1631 lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir); 2112 lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
1632 if (lastir - ir > LJ_MAX_JSLOTS) 2113 if (lastir - ir > LJ_MAX_JSLOTS)
1633 lj_trace_err(as->J, LJ_TRERR_NYICOAL); 2114 lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1634 as->stopins = (IRRef)((lastir-1) - as->ir); 2115 as->stopins = (IRRef)((lastir-1) - as->ir);
@@ -1676,7 +2157,7 @@ static void asm_setup_regsp(ASMState *as)
1676 as->modset |= RSET_SCRATCH; 2157 as->modset |= RSET_SCRATCH;
1677 continue; 2158 continue;
1678 } 2159 }
1679 case IR_CALLN: case IR_CALLL: case IR_CALLS: { 2160 case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: {
1680 const CCallInfo *ci = &lj_ir_callinfo[ir->op2]; 2161 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
1681 ir->prev = asm_setup_call_slots(as, ir, ci); 2162 ir->prev = asm_setup_call_slots(as, ir, ci);
1682 if (inloop) 2163 if (inloop)
@@ -1701,8 +2182,8 @@ static void asm_setup_regsp(ASMState *as)
1701 ir->prev = REGSP_HINT(RID_FPRET); 2182 ir->prev = REGSP_HINT(RID_FPRET);
1702 continue; 2183 continue;
1703 } 2184 }
1704 /* fallthrough */
1705#endif 2185#endif
2186 /* fallthrough */
1706 case IR_CALLN: case IR_CALLXS: 2187 case IR_CALLN: case IR_CALLXS:
1707#if LJ_SOFTFP 2188#if LJ_SOFTFP
1708 case IR_MIN: case IR_MAX: 2189 case IR_MIN: case IR_MAX:
@@ -1721,11 +2202,23 @@ static void asm_setup_regsp(ASMState *as)
1721#endif 2202#endif
1722 /* fallthrough */ 2203 /* fallthrough */
1723 /* C calls evict all scratch regs and return results in RID_RET. */ 2204 /* C calls evict all scratch regs and return results in RID_RET. */
1724 case IR_SNEW: case IR_XSNEW: case IR_NEWREF: 2205 case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
1725 if (REGARG_NUMGPR < 3 && as->evenspill < 3) 2206 if (REGARG_NUMGPR < 3 && as->evenspill < 3)
1726 as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */ 2207 as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
2208#if LJ_TARGET_X86 && LJ_HASFFI
2209 if (0) {
2210 case IR_CNEW:
2211 if (ir->op2 != REF_NIL && as->evenspill < 4)
2212 as->evenspill = 4; /* lj_cdata_newv needs 4 args. */
2213 }
1727 /* fallthrough */ 2214 /* fallthrough */
1728 case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR: 2215#else
2216 /* fallthrough */
2217 case IR_CNEW:
2218#endif
2219 /* fallthrough */
2220 case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
2221 case IR_BUFSTR:
1729 ir->prev = REGSP_HINT(RID_RET); 2222 ir->prev = REGSP_HINT(RID_RET);
1730 if (inloop) 2223 if (inloop)
1731 as->modset = RSET_SCRATCH; 2224 as->modset = RSET_SCRATCH;
@@ -1734,21 +2227,26 @@ static void asm_setup_regsp(ASMState *as)
1734 if (inloop) 2227 if (inloop)
1735 as->modset = RSET_SCRATCH; 2228 as->modset = RSET_SCRATCH;
1736 break; 2229 break;
1737#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP 2230#if !LJ_SOFTFP
1738 case IR_ATAN2: case IR_LDEXP: 2231#if !LJ_TARGET_X86ORX64
2232 case IR_LDEXP:
2233#endif
1739#endif 2234#endif
2235 /* fallthrough */
1740 case IR_POW: 2236 case IR_POW:
1741 if (!LJ_SOFTFP && irt_isnum(ir->t)) { 2237 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1742#if LJ_TARGET_X86ORX64
1743 ir->prev = REGSP_HINT(RID_XMM0);
1744 if (inloop) 2238 if (inloop)
1745 as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); 2239 as->modset |= RSET_SCRATCH;
2240#if LJ_TARGET_X86
2241 if (irt_isnum(IR(ir->op2)->t)) {
2242 if (as->evenspill < 4) /* Leave room to call pow(). */
2243 as->evenspill = 4;
2244 }
2245 break;
1746#else 2246#else
1747 ir->prev = REGSP_HINT(RID_FPRET); 2247 ir->prev = REGSP_HINT(RID_FPRET);
1748 if (inloop)
1749 as->modset |= RSET_SCRATCH;
1750#endif
1751 continue; 2248 continue;
2249#endif
1752 } 2250 }
1753 /* fallthrough */ /* for integer POW */ 2251 /* fallthrough */ /* for integer POW */
1754 case IR_DIV: case IR_MOD: 2252 case IR_DIV: case IR_MOD:
@@ -1761,31 +2259,31 @@ static void asm_setup_regsp(ASMState *as)
1761 break; 2259 break;
1762 case IR_FPMATH: 2260 case IR_FPMATH:
1763#if LJ_TARGET_X86ORX64 2261#if LJ_TARGET_X86ORX64
1764 if (ir->op2 == IRFPM_EXP2) { /* May be joined to lj_vm_pow_sse. */ 2262 if (ir->op2 <= IRFPM_TRUNC) {
1765 ir->prev = REGSP_HINT(RID_XMM0); 2263 if (!(as->flags & JIT_F_SSE4_1)) {
1766#if !LJ_64 2264 ir->prev = REGSP_HINT(RID_XMM0);
1767 if (as->evenspill < 4) /* Leave room for 16 byte scratch area. */ 2265 if (inloop)
1768 as->evenspill = 4; 2266 as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
1769#endif 2267 continue;
1770 if (inloop) 2268 }
1771 as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); 2269 break;
1772 continue;
1773 } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
1774 ir->prev = REGSP_HINT(RID_XMM0);
1775 if (inloop)
1776 as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
1777 continue;
1778 } 2270 }
2271#endif
2272 if (inloop)
2273 as->modset |= RSET_SCRATCH;
2274#if LJ_TARGET_X86
1779 break; 2275 break;
1780#else 2276#else
1781 ir->prev = REGSP_HINT(RID_FPRET); 2277 ir->prev = REGSP_HINT(RID_FPRET);
1782 if (inloop)
1783 as->modset |= RSET_SCRATCH;
1784 continue; 2278 continue;
1785#endif 2279#endif
1786#if LJ_TARGET_X86ORX64 2280#if LJ_TARGET_X86ORX64
1787 /* Non-constant shift counts need to be in RID_ECX on x86/x64. */ 2281 /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
1788 case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR: 2282 case IR_BSHL: case IR_BSHR: case IR_BSAR:
2283 if ((as->flags & JIT_F_BMI2)) /* Except if BMI2 is available. */
2284 break;
2285 /* fallthrough */
2286 case IR_BROL: case IR_BROR:
1789 if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) { 2287 if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
1790 IR(ir->op2)->r = REGSP_HINT(RID_ECX); 2288 IR(ir->op2)->r = REGSP_HINT(RID_ECX);
1791 if (inloop) 2289 if (inloop)
@@ -1831,14 +2329,25 @@ void lj_asm_trace(jit_State *J, GCtrace *T)
1831 ASMState *as = &as_; 2329 ASMState *as = &as_;
1832 MCode *origtop; 2330 MCode *origtop;
1833 2331
2332 /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
2333 {
2334 IRRef nins = T->nins;
2335 IRIns *ir = &T->ir[nins-1];
2336 if (ir->o == IR_NOP || ir->o == IR_RENAME) {
2337 do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
2338 T->nins = nins;
2339 }
2340 }
2341
1834 /* Ensure an initialized instruction beyond the last one for HIOP checks. */ 2342 /* Ensure an initialized instruction beyond the last one for HIOP checks. */
1835 J->cur.nins = lj_ir_nextins(J); 2343 /* This also allows one RENAME to be added without reallocating curfinal. */
1836 J->cur.ir[J->cur.nins].o = IR_NOP; 2344 as->orignins = lj_ir_nextins(J);
2345 J->cur.ir[as->orignins].o = IR_NOP;
1837 2346
1838 /* Setup initial state. Copy some fields to reduce indirections. */ 2347 /* Setup initial state. Copy some fields to reduce indirections. */
1839 as->J = J; 2348 as->J = J;
1840 as->T = T; 2349 as->T = T;
1841 as->ir = T->ir; 2350 J->curfinal = lj_trace_alloc(J->L, T); /* This copies the IR, too. */
1842 as->flags = J->flags; 2351 as->flags = J->flags;
1843 as->loopref = J->loopref; 2352 as->loopref = J->loopref;
1844 as->realign = NULL; 2353 as->realign = NULL;
@@ -1851,12 +2360,41 @@ void lj_asm_trace(jit_State *J, GCtrace *T)
1851 as->mclim = as->mcbot + MCLIM_REDZONE; 2360 as->mclim = as->mcbot + MCLIM_REDZONE;
1852 asm_setup_target(as); 2361 asm_setup_target(as);
1853 2362
1854 do { 2363 /*
2364 ** This is a loop, because the MCode may have to be (re-)assembled
2365 ** multiple times:
2366 **
2367 ** 1. as->realign is set (and the assembly aborted), if the arch-specific
2368 ** backend wants the MCode to be aligned differently.
2369 **
2370 ** This is currently only the case on x86/x64, where small loops get
2371 ** an aligned loop body plus a short branch. Not much effort is wasted,
2372 ** because the abort happens very quickly and only once.
2373 **
2374 ** 2. The IR is immovable, since the MCode embeds pointers to various
2375 ** constants inside the IR. But RENAMEs may need to be added to the IR
2376 ** during assembly, which might grow and reallocate the IR. We check
2377 ** at the end if the IR (in J->cur.ir) has actually grown, resize the
2378 ** copy (in J->curfinal.ir) and try again.
2379 **
2380 ** 95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
2381 ** 2 RENAMEs and only 0.5% have more than that. That's why we opt to
2382 ** always have one spare slot in the IR (see above), which means we
2383 ** have to redo the assembly for only ~2% of all traces.
2384 **
2385 ** Very, very rarely, this needs to be done repeatedly, since the
2386 ** location of constants inside the IR (actually, reachability from
2387 ** a global pointer) may affect register allocation and thus the
2388 ** number of RENAMEs.
2389 */
2390 for (;;) {
1855 as->mcp = as->mctop; 2391 as->mcp = as->mctop;
1856#ifdef LUA_USE_ASSERT 2392#ifdef LUA_USE_ASSERT
1857 as->mcp_prev = as->mcp; 2393 as->mcp_prev = as->mcp;
1858#endif 2394#endif
1859 as->curins = T->nins; 2395 as->ir = J->curfinal->ir; /* Use the copied IR. */
2396 as->curins = J->cur.nins = as->orignins;
2397
1860 RA_DBG_START(); 2398 RA_DBG_START();
1861 RA_DBGX((as, "===== STOP =====")); 2399 RA_DBGX((as, "===== STOP ====="));
1862 2400
@@ -1875,7 +2413,10 @@ void lj_asm_trace(jit_State *J, GCtrace *T)
1875 /* Assemble a trace in linear backwards order. */ 2413 /* Assemble a trace in linear backwards order. */
1876 for (as->curins--; as->curins > as->stopins; as->curins--) { 2414 for (as->curins--; as->curins > as->stopins; as->curins--) {
1877 IRIns *ir = IR(as->curins); 2415 IRIns *ir = IR(as->curins);
1878 lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */ 2416 /* 64 bit types handled by SPLIT for 32 bit archs. */
2417 lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
2418 "IR %04d has unsplit 64 bit type",
2419 (int)(ir - as->ir) - REF_BIAS);
1879 if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) 2420 if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
1880 continue; /* Dead-code elimination can be soooo easy. */ 2421 continue; /* Dead-code elimination can be soooo easy. */
1881 if (irt_isguard(ir->t)) 2422 if (irt_isguard(ir->t))
@@ -1884,22 +2425,40 @@ void lj_asm_trace(jit_State *J, GCtrace *T)
1884 checkmclim(as); 2425 checkmclim(as);
1885 asm_ir(as, ir); 2426 asm_ir(as, ir);
1886 } 2427 }
1887 } while (as->realign); /* Retry in case the MCode needs to be realigned. */
1888 2428
1889 /* Emit head of trace. */ 2429 if (as->realign && J->curfinal->nins >= T->nins)
1890 RA_DBG_REF(); 2430 continue; /* Retry in case only the MCode needs to be realigned. */
1891 checkmclim(as); 2431
1892 if (as->gcsteps > 0) { 2432 /* Emit head of trace. */
1893 as->curins = as->T->snap[0].ref; 2433 RA_DBG_REF();
1894 asm_snap_prep(as); /* The GC check is a guard. */ 2434 checkmclim(as);
1895 asm_gc_check(as); 2435 if (as->gcsteps > 0) {
2436 as->curins = as->T->snap[0].ref;
2437 asm_snap_prep(as); /* The GC check is a guard. */
2438 asm_gc_check(as);
2439 as->curins = as->stopins;
2440 }
2441 ra_evictk(as);
2442 if (as->parent)
2443 asm_head_side(as);
2444 else
2445 asm_head_root(as);
2446 asm_phi_fixup(as);
2447
2448 if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */
2449 lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
2450 memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
2451 (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */
2452 T->nins = J->curfinal->nins;
2453 break; /* Done. */
2454 }
2455
2456 /* Otherwise try again with a bigger IR. */
2457 lj_trace_free(J2G(J), J->curfinal);
2458 J->curfinal = NULL; /* In case lj_trace_alloc() OOMs. */
2459 J->curfinal = lj_trace_alloc(J->L, T);
2460 as->realign = NULL;
1896 } 2461 }
1897 ra_evictk(as);
1898 if (as->parent)
1899 asm_head_side(as);
1900 else
1901 asm_head_root(as);
1902 asm_phi_fixup(as);
1903 2462
1904 RA_DBGX((as, "===== START ====")); 2463 RA_DBGX((as, "===== START ===="));
1905 RA_DBG_FLUSH(); 2464 RA_DBG_FLUSH();
@@ -1912,6 +2471,9 @@ void lj_asm_trace(jit_State *J, GCtrace *T)
1912 if (!as->loopref) 2471 if (!as->loopref)
1913 asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */ 2472 asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
1914 T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp); 2473 T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
2474#if LJ_TARGET_MCODE_FIXUP
2475 asm_mcode_fixup(T->mcode, T->szmcode);
2476#endif
1915 lj_mcode_sync(T->mcode, origtop); 2477 lj_mcode_sync(T->mcode, origtop);
1916} 2478}
1917 2479
diff --git a/src/lj_asm_arm.h b/src/lj_asm_arm.h
index 087530b2..e7d2bf17 100644
--- a/src/lj_asm_arm.h
+++ b/src/lj_asm_arm.h
@@ -41,7 +41,7 @@ static Reg ra_scratchpair(ASMState *as, RegSet allow)
41 } 41 }
42 } 42 }
43 } 43 }
44 lua_assert(rset_test(RSET_GPREVEN, r)); 44 lj_assertA(rset_test(RSET_GPREVEN, r), "odd reg %d", r);
45 ra_modified(as, r); 45 ra_modified(as, r);
46 ra_modified(as, r+1); 46 ra_modified(as, r+1);
47 RA_DBGX((as, "scratchpair $r $r", r, r+1)); 47 RA_DBGX((as, "scratchpair $r $r", r, r+1));
@@ -269,7 +269,7 @@ static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
269 return; 269 return;
270 } 270 }
271 } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) { 271 } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
272 lua_assert(ofs == 0); 272 lj_assertA(ofs == 0, "bad usage");
273 ofs = (int32_t)sizeof(GCstr); 273 ofs = (int32_t)sizeof(GCstr);
274 if (irref_isk(ir->op2)) { 274 if (irref_isk(ir->op2)) {
275 ofs += IR(ir->op2)->i; 275 ofs += IR(ir->op2)->i;
@@ -338,7 +338,7 @@ static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
338/* Generate a call to a C function. */ 338/* Generate a call to a C function. */
339static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) 339static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
340{ 340{
341 uint32_t n, nargs = CCI_NARGS(ci); 341 uint32_t n, nargs = CCI_XNARGS(ci);
342 int32_t ofs = 0; 342 int32_t ofs = 0;
343#if LJ_SOFTFP 343#if LJ_SOFTFP
344 Reg gpr = REGARG_FIRSTGPR; 344 Reg gpr = REGARG_FIRSTGPR;
@@ -389,9 +389,11 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
389 as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); 389 as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
390 if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u; 390 if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
391 if (gpr <= REGARG_LASTGPR) { 391 if (gpr <= REGARG_LASTGPR) {
392 lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ 392 lj_assertA(rset_test(as->freeset, gpr),
393 "reg %d not free", gpr); /* Must have been evicted. */
393 if (irt_isnum(ir->t)) { 394 if (irt_isnum(ir->t)) {
394 lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */ 395 lj_assertA(rset_test(as->freeset, gpr+1),
396 "reg %d not free", gpr+1); /* Ditto. */
395 emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15)); 397 emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
396 gpr += 2; 398 gpr += 2;
397 } else { 399 } else {
@@ -408,7 +410,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
408#endif 410#endif
409 { 411 {
410 if (gpr <= REGARG_LASTGPR) { 412 if (gpr <= REGARG_LASTGPR) {
411 lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ 413 lj_assertA(rset_test(as->freeset, gpr),
414 "reg %d not free", gpr); /* Must have been evicted. */
412 if (ref) ra_leftov(as, gpr, ref); 415 if (ref) ra_leftov(as, gpr, ref);
413 gpr++; 416 gpr++;
414 } else { 417 } else {
@@ -433,7 +436,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
433 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ 436 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
434 ra_evictset(as, drop); /* Evictions must be performed first. */ 437 ra_evictset(as, drop); /* Evictions must be performed first. */
435 if (ra_used(ir)) { 438 if (ra_used(ir)) {
436 lua_assert(!irt_ispri(ir->t)); 439 lj_assertA(!irt_ispri(ir->t), "PRI dest");
437 if (!LJ_SOFTFP && irt_isfp(ir->t)) { 440 if (!LJ_SOFTFP && irt_isfp(ir->t)) {
438 if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) { 441 if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
439 Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); 442 Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
@@ -453,15 +456,6 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
453 UNUSED(ci); 456 UNUSED(ci);
454} 457}
455 458
456static void asm_call(ASMState *as, IRIns *ir)
457{
458 IRRef args[CCI_NARGS_MAX];
459 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
460 asm_collectargs(as, ir, ci, args);
461 asm_setupresult(as, ir, ci);
462 asm_gencall(as, ci, args);
463}
464
465static void asm_callx(ASMState *as, IRIns *ir) 459static void asm_callx(ASMState *as, IRIns *ir)
466{ 460{
467 IRRef args[CCI_NARGS_MAX*2]; 461 IRRef args[CCI_NARGS_MAX*2];
@@ -490,7 +484,7 @@ static void asm_retf(ASMState *as, IRIns *ir)
490{ 484{
491 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); 485 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
492 void *pc = ir_kptr(IR(ir->op2)); 486 void *pc = ir_kptr(IR(ir->op2));
493 int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); 487 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
494 as->topslot -= (BCReg)delta; 488 as->topslot -= (BCReg)delta;
495 if ((int32_t)as->topslot < 0) as->topslot = 0; 489 if ((int32_t)as->topslot < 0) as->topslot = 0;
496 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ 490 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
@@ -539,13 +533,17 @@ static void asm_conv(ASMState *as, IRIns *ir)
539#endif 533#endif
540 IRRef lref = ir->op1; 534 IRRef lref = ir->op1;
541 /* 64 bit integer conversions are handled by SPLIT. */ 535 /* 64 bit integer conversions are handled by SPLIT. */
542 lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64)); 536 lj_assertA(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64),
537 "IR %04d has unsplit 64 bit type",
538 (int)(ir - as->ir) - REF_BIAS);
543#if LJ_SOFTFP 539#if LJ_SOFTFP
544 /* FP conversions are handled by SPLIT. */ 540 /* FP conversions are handled by SPLIT. */
545 lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); 541 lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
542 "IR %04d has FP type",
543 (int)(ir - as->ir) - REF_BIAS);
546 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ 544 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
547#else 545#else
548 lua_assert(irt_type(ir->t) != st); 546 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
549 if (irt_isfp(ir->t)) { 547 if (irt_isfp(ir->t)) {
550 Reg dest = ra_dest(as, ir, RSET_FPR); 548 Reg dest = ra_dest(as, ir, RSET_FPR);
551 if (stfp) { /* FP to FP conversion. */ 549 if (stfp) { /* FP to FP conversion. */
@@ -562,7 +560,8 @@ static void asm_conv(ASMState *as, IRIns *ir)
562 } else if (stfp) { /* FP to integer conversion. */ 560 } else if (stfp) { /* FP to integer conversion. */
563 if (irt_isguard(ir->t)) { 561 if (irt_isguard(ir->t)) {
564 /* Checked conversions are only supported from number to int. */ 562 /* Checked conversions are only supported from number to int. */
565 lua_assert(irt_isint(ir->t) && st == IRT_NUM); 563 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
564 "bad type for checked CONV");
566 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); 565 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
567 } else { 566 } else {
568 Reg left = ra_alloc1(as, lref, RSET_FPR); 567 Reg left = ra_alloc1(as, lref, RSET_FPR);
@@ -581,7 +580,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
581 Reg dest = ra_dest(as, ir, RSET_GPR); 580 Reg dest = ra_dest(as, ir, RSET_GPR);
582 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ 581 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
583 Reg left = ra_alloc1(as, lref, RSET_GPR); 582 Reg left = ra_alloc1(as, lref, RSET_GPR);
584 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); 583 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
585 if ((as->flags & JIT_F_ARMV6)) { 584 if ((as->flags & JIT_F_ARMV6)) {
586 ARMIns ai = st == IRT_I8 ? ARMI_SXTB : 585 ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
587 st == IRT_U8 ? ARMI_UXTB : 586 st == IRT_U8 ? ARMI_UXTB :
@@ -601,31 +600,6 @@ static void asm_conv(ASMState *as, IRIns *ir)
601 } 600 }
602} 601}
603 602
604#if !LJ_SOFTFP && LJ_HASFFI
605static void asm_conv64(ASMState *as, IRIns *ir)
606{
607 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
608 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
609 IRCallID id;
610 CCallInfo ci;
611 IRRef args[2];
612 args[0] = (ir-1)->op1;
613 args[1] = ir->op1;
614 if (st == IRT_NUM || st == IRT_FLOAT) {
615 id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
616 ir--;
617 } else {
618 id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
619 }
620 ci = lj_ir_callinfo[id];
621#if !LJ_ABI_SOFTFP
622 ci.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
623#endif
624 asm_setupresult(as, ir, &ci);
625 asm_gencall(as, &ci, args);
626}
627#endif
628
629static void asm_strto(ASMState *as, IRIns *ir) 603static void asm_strto(ASMState *as, IRIns *ir)
630{ 604{
631 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; 605 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
@@ -689,6 +663,8 @@ static void asm_strto(ASMState *as, IRIns *ir)
689 emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR); 663 emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
690} 664}
691 665
666/* -- Memory references --------------------------------------------------- */
667
692/* Get pointer to TValue. */ 668/* Get pointer to TValue. */
693static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) 669static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
694{ 670{
@@ -699,7 +675,7 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
699 ra_allockreg(as, i32ptr(ir_knum(ir)), dest); 675 ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
700 } else { 676 } else {
701#if LJ_SOFTFP 677#if LJ_SOFTFP
702 lua_assert(0); 678 lj_assertA(0, "unsplit FP op");
703#else 679#else
704 /* Otherwise force a spill and use the spill slot. */ 680 /* Otherwise force a spill and use the spill slot. */
705 emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR); 681 emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
@@ -714,7 +690,7 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
714 Reg src = ra_alloc1(as, ref, allow); 690 Reg src = ra_alloc1(as, ref, allow);
715 emit_lso(as, ARMI_STR, src, RID_SP, 0); 691 emit_lso(as, ARMI_STR, src, RID_SP, 0);
716 } 692 }
717 if ((ir+1)->o == IR_HIOP) 693 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
718 type = ra_alloc1(as, ref+1, allow); 694 type = ra_alloc1(as, ref+1, allow);
719 else 695 else
720 type = ra_allock(as, irt_toitype(ir->t), allow); 696 type = ra_allock(as, irt_toitype(ir->t), allow);
@@ -722,27 +698,6 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
722 } 698 }
723} 699}
724 700
725static void asm_tostr(ASMState *as, IRIns *ir)
726{
727 IRRef args[2];
728 args[0] = ASMREF_L;
729 as->gcsteps++;
730 if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
731 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
732 args[1] = ASMREF_TMP1; /* const lua_Number * */
733 asm_setupresult(as, ir, ci); /* GCstr * */
734 asm_gencall(as, ci, args);
735 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
736 } else {
737 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
738 args[1] = ir->op1; /* int32_t k */
739 asm_setupresult(as, ir, ci); /* GCstr * */
740 asm_gencall(as, ci, args);
741 }
742}
743
744/* -- Memory references --------------------------------------------------- */
745
746static void asm_aref(ASMState *as, IRIns *ir) 701static void asm_aref(ASMState *as, IRIns *ir)
747{ 702{
748 Reg dest = ra_dest(as, ir, RSET_GPR); 703 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -864,16 +819,16 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
864 *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); 819 *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
865 820
866 /* Load main position relative to tab->node into dest. */ 821 /* Load main position relative to tab->node into dest. */
867 khash = irref_isk(refkey) ? ir_khash(irkey) : 1; 822 khash = irref_isk(refkey) ? ir_khash(as, irkey) : 1;
868 if (khash == 0) { 823 if (khash == 0) {
869 emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); 824 emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
870 } else { 825 } else {
871 emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp); 826 emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
872 emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp); 827 emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
873 if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */ 828 if (irt_isstr(kt)) { /* Fetch of str->sid is cheaper than ra_allock. */
874 emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP); 829 emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
875 emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); 830 emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
876 emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash)); 831 emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, sid));
877 emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask)); 832 emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
878 } else if (irref_isk(refkey)) { 833 } else if (irref_isk(refkey)) {
879 emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash, 834 emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
@@ -920,7 +875,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
920 Reg node = ra_alloc1(as, ir->op1, RSET_GPR); 875 Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
921 Reg key = RID_NONE, type = RID_TMP, idx = node; 876 Reg key = RID_NONE, type = RID_TMP, idx = node;
922 RegSet allow = rset_exclude(RSET_GPR, node); 877 RegSet allow = rset_exclude(RSET_GPR, node);
923 lua_assert(ofs % sizeof(Node) == 0); 878 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
924 if (ofs > 4095) { 879 if (ofs > 4095) {
925 idx = dest; 880 idx = dest;
926 rset_clear(allow, dest); 881 rset_clear(allow, dest);
@@ -960,20 +915,6 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
960 emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR); 915 emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
961} 916}
962 917
963static void asm_newref(ASMState *as, IRIns *ir)
964{
965 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
966 IRRef args[3];
967 if (ir->r == RID_SINK)
968 return;
969 args[0] = ASMREF_L; /* lua_State *L */
970 args[1] = ir->op1; /* GCtab *t */
971 args[2] = ASMREF_TMP1; /* cTValue *key */
972 asm_setupresult(as, ir, ci); /* TValue * */
973 asm_gencall(as, ci, args);
974 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
975}
976
977static void asm_uref(ASMState *as, IRIns *ir) 918static void asm_uref(ASMState *as, IRIns *ir)
978{ 919{
979 Reg dest = ra_dest(as, ir, RSET_GPR); 920 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1001,7 +942,7 @@ static void asm_uref(ASMState *as, IRIns *ir)
1001static void asm_fref(ASMState *as, IRIns *ir) 942static void asm_fref(ASMState *as, IRIns *ir)
1002{ 943{
1003 UNUSED(as); UNUSED(ir); 944 UNUSED(as); UNUSED(ir);
1004 lua_assert(!ra_used(ir)); 945 lj_assertA(!ra_used(ir), "unfused FREF");
1005} 946}
1006 947
1007static void asm_strref(ASMState *as, IRIns *ir) 948static void asm_strref(ASMState *as, IRIns *ir)
@@ -1038,25 +979,27 @@ static void asm_strref(ASMState *as, IRIns *ir)
1038 979
1039/* -- Loads and stores ---------------------------------------------------- */ 980/* -- Loads and stores ---------------------------------------------------- */
1040 981
1041static ARMIns asm_fxloadins(IRIns *ir) 982static ARMIns asm_fxloadins(ASMState *as, IRIns *ir)
1042{ 983{
984 UNUSED(as);
1043 switch (irt_type(ir->t)) { 985 switch (irt_type(ir->t)) {
1044 case IRT_I8: return ARMI_LDRSB; 986 case IRT_I8: return ARMI_LDRSB;
1045 case IRT_U8: return ARMI_LDRB; 987 case IRT_U8: return ARMI_LDRB;
1046 case IRT_I16: return ARMI_LDRSH; 988 case IRT_I16: return ARMI_LDRSH;
1047 case IRT_U16: return ARMI_LDRH; 989 case IRT_U16: return ARMI_LDRH;
1048 case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D; 990 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VLDR_D;
1049 case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; /* fallthrough */ 991 case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; /* fallthrough */
1050 default: return ARMI_LDR; 992 default: return ARMI_LDR;
1051 } 993 }
1052} 994}
1053 995
1054static ARMIns asm_fxstoreins(IRIns *ir) 996static ARMIns asm_fxstoreins(ASMState *as, IRIns *ir)
1055{ 997{
998 UNUSED(as);
1056 switch (irt_type(ir->t)) { 999 switch (irt_type(ir->t)) {
1057 case IRT_I8: case IRT_U8: return ARMI_STRB; 1000 case IRT_I8: case IRT_U8: return ARMI_STRB;
1058 case IRT_I16: case IRT_U16: return ARMI_STRH; 1001 case IRT_I16: case IRT_U16: return ARMI_STRH;
1059 case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D; 1002 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VSTR_D;
1060 case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; /* fallthrough */ 1003 case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; /* fallthrough */
1061 default: return ARMI_STR; 1004 default: return ARMI_STR;
1062 } 1005 }
@@ -1065,17 +1008,23 @@ static ARMIns asm_fxstoreins(IRIns *ir)
1065static void asm_fload(ASMState *as, IRIns *ir) 1008static void asm_fload(ASMState *as, IRIns *ir)
1066{ 1009{
1067 Reg dest = ra_dest(as, ir, RSET_GPR); 1010 Reg dest = ra_dest(as, ir, RSET_GPR);
1068 Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); 1011 ARMIns ai = asm_fxloadins(as, ir);
1069 ARMIns ai = asm_fxloadins(ir); 1012 Reg idx;
1070 int32_t ofs; 1013 int32_t ofs;
1071 if (ir->op2 == IRFL_TAB_ARRAY) { 1014 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
1072 ofs = asm_fuseabase(as, ir->op1); 1015 idx = ra_allock(as, (int32_t)(ir->op2<<2) + (int32_t)J2GG(as->J), RSET_GPR);
1073 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ 1016 ofs = 0;
1074 emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx); 1017 } else {
1075 return; 1018 idx = ra_alloc1(as, ir->op1, RSET_GPR);
1019 if (ir->op2 == IRFL_TAB_ARRAY) {
1020 ofs = asm_fuseabase(as, ir->op1);
1021 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
1022 emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
1023 return;
1024 }
1076 } 1025 }
1026 ofs = field_ofs[ir->op2];
1077 } 1027 }
1078 ofs = field_ofs[ir->op2];
1079 if ((ai & 0x04000000)) 1028 if ((ai & 0x04000000))
1080 emit_lso(as, ai, dest, idx, ofs); 1029 emit_lso(as, ai, dest, idx, ofs);
1081 else 1030 else
@@ -1089,7 +1038,7 @@ static void asm_fstore(ASMState *as, IRIns *ir)
1089 IRIns *irf = IR(ir->op1); 1038 IRIns *irf = IR(ir->op1);
1090 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); 1039 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
1091 int32_t ofs = field_ofs[irf->op2]; 1040 int32_t ofs = field_ofs[irf->op2];
1092 ARMIns ai = asm_fxstoreins(ir); 1041 ARMIns ai = asm_fxstoreins(as, ir);
1093 if ((ai & 0x04000000)) 1042 if ((ai & 0x04000000))
1094 emit_lso(as, ai, src, idx, ofs); 1043 emit_lso(as, ai, src, idx, ofs);
1095 else 1044 else
@@ -1101,20 +1050,22 @@ static void asm_xload(ASMState *as, IRIns *ir)
1101{ 1050{
1102 Reg dest = ra_dest(as, ir, 1051 Reg dest = ra_dest(as, ir,
1103 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); 1052 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
1104 lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); 1053 lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
1105 asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); 1054 asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
1106} 1055}
1107 1056
1108static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs) 1057static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
1109{ 1058{
1110 if (ir->r != RID_SINK) { 1059 if (ir->r != RID_SINK) {
1111 Reg src = ra_alloc1(as, ir->op2, 1060 Reg src = ra_alloc1(as, ir->op2,
1112 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); 1061 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
1113 asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, 1062 asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
1114 rset_exclude(RSET_GPR, src), ofs); 1063 rset_exclude(RSET_GPR, src), ofs);
1115 } 1064 }
1116} 1065}
1117 1066
1067#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
1068
1118static void asm_ahuvload(ASMState *as, IRIns *ir) 1069static void asm_ahuvload(ASMState *as, IRIns *ir)
1119{ 1070{
1120 int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); 1071 int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
@@ -1127,8 +1078,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
1127 rset_clear(allow, type); 1078 rset_clear(allow, type);
1128 } 1079 }
1129 if (ra_used(ir)) { 1080 if (ra_used(ir)) {
1130 lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || 1081 lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
1131 irt_isint(ir->t) || irt_isaddr(ir->t)); 1082 irt_isint(ir->t) || irt_isaddr(ir->t),
1083 "bad load type %d", irt_type(ir->t));
1132 dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); 1084 dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
1133 rset_clear(allow, dest); 1085 rset_clear(allow, dest);
1134 } 1086 }
@@ -1194,10 +1146,13 @@ static void asm_sload(ASMState *as, IRIns *ir)
1194 IRType t = hiop ? IRT_NUM : irt_type(ir->t); 1146 IRType t = hiop ? IRT_NUM : irt_type(ir->t);
1195 Reg dest = RID_NONE, type = RID_NONE, base; 1147 Reg dest = RID_NONE, type = RID_NONE, base;
1196 RegSet allow = RSET_GPR; 1148 RegSet allow = RSET_GPR;
1197 lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ 1149 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1198 lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); 1150 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1151 lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1152 "inconsistent SLOAD variant");
1199#if LJ_SOFTFP 1153#if LJ_SOFTFP
1200 lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ 1154 lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
1155 "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
1201 if (hiop && ra_used(ir+1)) { 1156 if (hiop && ra_used(ir+1)) {
1202 type = ra_dest(as, ir+1, allow); 1157 type = ra_dest(as, ir+1, allow);
1203 rset_clear(allow, type); 1158 rset_clear(allow, type);
@@ -1213,8 +1168,9 @@ static void asm_sload(ASMState *as, IRIns *ir)
1213 Reg tmp = RID_NONE; 1168 Reg tmp = RID_NONE;
1214 if ((ir->op2 & IRSLOAD_CONVERT)) 1169 if ((ir->op2 & IRSLOAD_CONVERT))
1215 tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR); 1170 tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
1216 lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || 1171 lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
1217 irt_isint(ir->t) || irt_isaddr(ir->t)); 1172 irt_isint(ir->t) || irt_isaddr(ir->t),
1173 "bad SLOAD type %d", irt_type(ir->t));
1218 dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); 1174 dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
1219 rset_clear(allow, dest); 1175 rset_clear(allow, dest);
1220 base = ra_alloc1(as, REF_BASE, allow); 1176 base = ra_alloc1(as, REF_BASE, allow);
@@ -1272,19 +1228,17 @@ dotypecheck:
1272static void asm_cnew(ASMState *as, IRIns *ir) 1228static void asm_cnew(ASMState *as, IRIns *ir)
1273{ 1229{
1274 CTState *cts = ctype_ctsG(J2G(as->J)); 1230 CTState *cts = ctype_ctsG(J2G(as->J));
1275 CTypeID ctypeid = (CTypeID)IR(ir->op1)->i; 1231 CTypeID id = (CTypeID)IR(ir->op1)->i;
1276 CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? 1232 CTSize sz;
1277 lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i; 1233 CTInfo info = lj_ctype_info(cts, id, &sz);
1278 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; 1234 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1279 IRRef args[2]; 1235 IRRef args[4];
1280 RegSet allow = (RSET_GPR & ~RSET_SCRATCH); 1236 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1281 RegSet drop = RSET_SCRATCH; 1237 RegSet drop = RSET_SCRATCH;
1282 lua_assert(sz != CTSIZE_INVALID); 1238 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1239 "bad CNEW/CNEWI operands");
1283 1240
1284 args[0] = ASMREF_L; /* lua_State *L */
1285 args[1] = ASMREF_TMP1; /* MSize size */
1286 as->gcsteps++; 1241 as->gcsteps++;
1287
1288 if (ra_hasreg(ir->r)) 1242 if (ra_hasreg(ir->r))
1289 rset_clear(drop, ir->r); /* Dest reg handled below. */ 1243 rset_clear(drop, ir->r); /* Dest reg handled below. */
1290 ra_evictset(as, drop); 1244 ra_evictset(as, drop);
@@ -1294,10 +1248,10 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1294 /* Initialize immutable cdata object. */ 1248 /* Initialize immutable cdata object. */
1295 if (ir->o == IR_CNEWI) { 1249 if (ir->o == IR_CNEWI) {
1296 int32_t ofs = sizeof(GCcdata); 1250 int32_t ofs = sizeof(GCcdata);
1297 lua_assert(sz == 4 || sz == 8); 1251 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1298 if (sz == 8) { 1252 if (sz == 8) {
1299 ofs += 4; ir++; 1253 ofs += 4; ir++;
1300 lua_assert(ir->o == IR_HIOP); 1254 lj_assertA(ir->o == IR_HIOP, "expected HIOP for CNEWI");
1301 } 1255 }
1302 for (;;) { 1256 for (;;) {
1303 Reg r = ra_alloc1(as, ir->op2, allow); 1257 Reg r = ra_alloc1(as, ir->op2, allow);
@@ -1306,22 +1260,32 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1306 if (ofs == sizeof(GCcdata)) break; 1260 if (ofs == sizeof(GCcdata)) break;
1307 ofs -= 4; ir--; 1261 ofs -= 4; ir--;
1308 } 1262 }
1263 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1264 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1265 args[0] = ASMREF_L; /* lua_State *L */
1266 args[1] = ir->op1; /* CTypeID id */
1267 args[2] = ir->op2; /* CTSize sz */
1268 args[3] = ASMREF_TMP1; /* CTSize align */
1269 asm_gencall(as, ci, args);
1270 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1271 return;
1309 } 1272 }
1273
1310 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ 1274 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
1311 { 1275 {
1312 uint32_t k = emit_isk12(ARMI_MOV, ctypeid); 1276 uint32_t k = emit_isk12(ARMI_MOV, id);
1313 Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow); 1277 Reg r = k ? RID_R1 : ra_allock(as, id, allow);
1314 emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct)); 1278 emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
1315 emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid)); 1279 emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
1316 emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP); 1280 emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
1317 if (k) emit_d(as, ARMI_MOV^k, RID_R1); 1281 if (k) emit_d(as, ARMI_MOV^k, RID_R1);
1318 } 1282 }
1283 args[0] = ASMREF_L; /* lua_State *L */
1284 args[1] = ASMREF_TMP1; /* MSize size */
1319 asm_gencall(as, ci, args); 1285 asm_gencall(as, ci, args);
1320 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), 1286 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
1321 ra_releasetmp(as, ASMREF_TMP1)); 1287 ra_releasetmp(as, ASMREF_TMP1));
1322} 1288}
1323#else
1324#define asm_cnew(as, ir) ((void)0)
1325#endif 1289#endif
1326 1290
1327/* -- Write barriers ------------------------------------------------------ */ 1291/* -- Write barriers ------------------------------------------------------ */
@@ -1353,7 +1317,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
1353 MCLabel l_end; 1317 MCLabel l_end;
1354 Reg obj, val, tmp; 1318 Reg obj, val, tmp;
1355 /* No need for other object barriers (yet). */ 1319 /* No need for other object barriers (yet). */
1356 lua_assert(IR(ir->op1)->o == IR_UREFC); 1320 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1357 ra_evictset(as, RSET_SCRATCH); 1321 ra_evictset(as, RSET_SCRATCH);
1358 l_end = emit_label(as); 1322 l_end = emit_label(as);
1359 args[0] = ASMREF_TMP1; /* global_State *g */ 1323 args[0] = ASMREF_TMP1; /* global_State *g */
@@ -1392,23 +1356,36 @@ static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
1392 emit_dm(as, ai, (dest & 15), (left & 15)); 1356 emit_dm(as, ai, (dest & 15), (left & 15));
1393} 1357}
1394 1358
1395static int asm_fpjoin_pow(ASMState *as, IRIns *ir) 1359static void asm_callround(ASMState *as, IRIns *ir, int id)
1396{ 1360{
1397 IRIns *irp = IR(ir->op1); 1361 /* The modified regs must match with the *.dasc implementation. */
1398 if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { 1362 RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
1399 IRIns *irpp = IR(irp->op1); 1363 RID2RSET(RID_R3)|RID2RSET(RID_R12);
1400 if (irpp == ir-2 && irpp->o == IR_FPMATH && 1364 RegSet of;
1401 irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { 1365 Reg dest, src;
1402 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow]; 1366 ra_evictset(as, drop);
1403 IRRef args[2]; 1367 dest = ra_dest(as, ir, RSET_FPR);
1404 args[0] = irpp->op1; 1368 emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
1405 args[1] = irp->op2; 1369 emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
1406 asm_setupresult(as, ir, ci); 1370 id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
1407 asm_gencall(as, ci, args); 1371 (void *)lj_vm_trunc_sf);
1408 return 1; 1372 /* Workaround to protect argument GPRs from being used for remat. */
1409 } 1373 of = as->freeset;
1410 } 1374 as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
1411 return 0; 1375 as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
1376 src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
1377 as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
1378 emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
1379}
1380
1381static void asm_fpmath(ASMState *as, IRIns *ir)
1382{
1383 if (ir->op2 <= IRFPM_TRUNC)
1384 asm_callround(as, ir, ir->op2);
1385 else if (ir->op2 == IRFPM_SQRT)
1386 asm_fpunary(as, ir, ARMI_VSQRT_D);
1387 else
1388 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1412} 1389}
1413#endif 1390#endif
1414 1391
@@ -1474,19 +1451,6 @@ static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
1474 asm_intop(as, ir, asm_drop_cmp0(as, ai)); 1451 asm_intop(as, ir, asm_drop_cmp0(as, ai));
1475} 1452}
1476 1453
1477static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
1478{
1479 ai = asm_drop_cmp0(as, ai);
1480 if (ir->op2 == 0) {
1481 Reg dest = ra_dest(as, ir, RSET_GPR);
1482 uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
1483 emit_d(as, ai^m, dest);
1484 } else {
1485 /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
1486 asm_intop(as, ir, ai);
1487 }
1488}
1489
1490static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai) 1454static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
1491{ 1455{
1492 Reg dest = ra_dest(as, ir, RSET_GPR); 1456 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1552,6 +1516,15 @@ static void asm_mul(ASMState *as, IRIns *ir)
1552 asm_intmul(as, ir); 1516 asm_intmul(as, ir);
1553} 1517}
1554 1518
1519#define asm_addov(as, ir) asm_add(as, ir)
1520#define asm_subov(as, ir) asm_sub(as, ir)
1521#define asm_mulov(as, ir) asm_mul(as, ir)
1522
1523#if !LJ_SOFTFP
1524#define asm_fpdiv(as, ir) asm_fparith(as, ir, ARMI_VDIV_D)
1525#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D)
1526#endif
1527
1555static void asm_neg(ASMState *as, IRIns *ir) 1528static void asm_neg(ASMState *as, IRIns *ir)
1556{ 1529{
1557#if !LJ_SOFTFP 1530#if !LJ_SOFTFP
@@ -1563,41 +1536,22 @@ static void asm_neg(ASMState *as, IRIns *ir)
1563 asm_intneg(as, ir, ARMI_RSB); 1536 asm_intneg(as, ir, ARMI_RSB);
1564} 1537}
1565 1538
1566static void asm_callid(ASMState *as, IRIns *ir, IRCallID id) 1539static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
1567{ 1540{
1568 const CCallInfo *ci = &lj_ir_callinfo[id]; 1541 ai = asm_drop_cmp0(as, ai);
1569 IRRef args[2]; 1542 if (ir->op2 == 0) {
1570 args[0] = ir->op1; 1543 Reg dest = ra_dest(as, ir, RSET_GPR);
1571 args[1] = ir->op2; 1544 uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
1572 asm_setupresult(as, ir, ci); 1545 emit_d(as, ai^m, dest);
1573 asm_gencall(as, ci, args); 1546 } else {
1547 /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
1548 asm_intop(as, ir, ai);
1549 }
1574} 1550}
1575 1551
1576#if !LJ_SOFTFP 1552#define asm_bnot(as, ir) asm_bitop(as, ir, ARMI_MVN)
1577static void asm_callround(ASMState *as, IRIns *ir, int id)
1578{
1579 /* The modified regs must match with the *.dasc implementation. */
1580 RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
1581 RID2RSET(RID_R3)|RID2RSET(RID_R12);
1582 RegSet of;
1583 Reg dest, src;
1584 ra_evictset(as, drop);
1585 dest = ra_dest(as, ir, RSET_FPR);
1586 emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
1587 emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
1588 id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
1589 (void *)lj_vm_trunc_sf);
1590 /* Workaround to protect argument GPRs from being used for remat. */
1591 of = as->freeset;
1592 as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
1593 as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
1594 src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
1595 as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
1596 emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
1597}
1598#endif
1599 1553
1600static void asm_bitswap(ASMState *as, IRIns *ir) 1554static void asm_bswap(ASMState *as, IRIns *ir)
1601{ 1555{
1602 Reg dest = ra_dest(as, ir, RSET_GPR); 1556 Reg dest = ra_dest(as, ir, RSET_GPR);
1603 Reg left = ra_alloc1(as, ir->op1, RSET_GPR); 1557 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
@@ -1614,6 +1568,10 @@ static void asm_bitswap(ASMState *as, IRIns *ir)
1614 } 1568 }
1615} 1569}
1616 1570
1571#define asm_band(as, ir) asm_bitop(as, ir, ARMI_AND)
1572#define asm_bor(as, ir) asm_bitop(as, ir, ARMI_ORR)
1573#define asm_bxor(as, ir) asm_bitop(as, ir, ARMI_EOR)
1574
1617static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh) 1575static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
1618{ 1576{
1619 if (irref_isk(ir->op2)) { /* Constant shifts. */ 1577 if (irref_isk(ir->op2)) { /* Constant shifts. */
@@ -1631,6 +1589,12 @@ static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
1631 } 1589 }
1632} 1590}
1633 1591
1592#define asm_bshl(as, ir) asm_bitshift(as, ir, ARMSH_LSL)
1593#define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR)
1594#define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR)
1595#define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR)
1596#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
1597
1634static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) 1598static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
1635{ 1599{
1636 uint32_t kcmp = 0, kmov = 0; 1600 uint32_t kcmp = 0, kmov = 0;
@@ -1704,6 +1668,9 @@ static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
1704 asm_intmin_max(as, ir, cc); 1668 asm_intmin_max(as, ir, cc);
1705} 1669}
1706 1670
1671#define asm_min(as, ir) asm_min_max(as, ir, CC_GT, CC_PL)
1672#define asm_max(as, ir) asm_min_max(as, ir, CC_LT, CC_LE)
1673
1707/* -- Comparisons --------------------------------------------------------- */ 1674/* -- Comparisons --------------------------------------------------------- */
1708 1675
1709/* Map of comparisons to flags. ORDER IR. */ 1676/* Map of comparisons to flags. ORDER IR. */
@@ -1777,7 +1744,8 @@ static void asm_intcomp(ASMState *as, IRIns *ir)
1777 Reg left; 1744 Reg left;
1778 uint32_t m; 1745 uint32_t m;
1779 int cmpprev0 = 0; 1746 int cmpprev0 = 0;
1780 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); 1747 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
1748 "bad comparison data type %d", irt_type(ir->t));
1781 if (asm_swapops(as, lref, rref)) { 1749 if (asm_swapops(as, lref, rref)) {
1782 Reg tmp = lref; lref = rref; rref = tmp; 1750 Reg tmp = lref; lref = rref; rref = tmp;
1783 if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ 1751 if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
@@ -1819,6 +1787,18 @@ notst:
1819 as->flagmcp = as->mcp; /* Allow elimination of the compare. */ 1787 as->flagmcp = as->mcp; /* Allow elimination of the compare. */
1820} 1788}
1821 1789
1790static void asm_comp(ASMState *as, IRIns *ir)
1791{
1792#if !LJ_SOFTFP
1793 if (irt_isnum(ir->t))
1794 asm_fpcomp(as, ir);
1795 else
1796#endif
1797 asm_intcomp(as, ir);
1798}
1799
1800#define asm_equal(as, ir) asm_comp(as, ir)
1801
1822#if LJ_HASFFI 1802#if LJ_HASFFI
1823/* 64 bit integer comparisons. */ 1803/* 64 bit integer comparisons. */
1824static void asm_int64comp(ASMState *as, IRIns *ir) 1804static void asm_int64comp(ASMState *as, IRIns *ir)
@@ -1882,7 +1862,7 @@ static void asm_hiop(ASMState *as, IRIns *ir)
1882 } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) { 1862 } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
1883 as->curins--; /* Always skip the loword min/max. */ 1863 as->curins--; /* Always skip the loword min/max. */
1884 if (uselo || usehi) 1864 if (uselo || usehi)
1885 asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO); 1865 asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_PL : CC_LE);
1886 return; 1866 return;
1887#elif LJ_HASFFI 1867#elif LJ_HASFFI
1888 } else if ((ir-1)->o == IR_CONV) { 1868 } else if ((ir-1)->o == IR_CONV) {
@@ -1893,7 +1873,7 @@ static void asm_hiop(ASMState *as, IRIns *ir)
1893#endif 1873#endif
1894 } else if ((ir-1)->o == IR_XSTORE) { 1874 } else if ((ir-1)->o == IR_XSTORE) {
1895 if ((ir-1)->r != RID_SINK) 1875 if ((ir-1)->r != RID_SINK)
1896 asm_xstore(as, ir, 4); 1876 asm_xstore_(as, ir, 4);
1897 return; 1877 return;
1898 } 1878 }
1899 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ 1879 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
@@ -1934,13 +1914,24 @@ static void asm_hiop(ASMState *as, IRIns *ir)
1934 case IR_CNEWI: 1914 case IR_CNEWI:
1935 /* Nothing to do here. Handled by lo op itself. */ 1915 /* Nothing to do here. Handled by lo op itself. */
1936 break; 1916 break;
1937 default: lua_assert(0); break; 1917 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
1938 } 1918 }
1939#else 1919#else
1940 UNUSED(as); UNUSED(ir); lua_assert(0); 1920 /* Unused without SOFTFP or FFI. */
1921 UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
1941#endif 1922#endif
1942} 1923}
1943 1924
1925/* -- Profiling ----------------------------------------------------------- */
1926
1927static void asm_prof(ASMState *as, IRIns *ir)
1928{
1929 UNUSED(ir);
1930 asm_guardcc(as, CC_NE);
1931 emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP);
1932 emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
1933}
1934
1944/* -- Stack handling ------------------------------------------------------ */ 1935/* -- Stack handling ------------------------------------------------------ */
1945 1936
1946/* Check Lua stack size for overflow. Use exit handler as fallback. */ 1937/* Check Lua stack size for overflow. Use exit handler as fallback. */
@@ -1952,7 +1943,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
1952 if (irp) { 1943 if (irp) {
1953 if (!ra_hasspill(irp->s)) { 1944 if (!ra_hasspill(irp->s)) {
1954 pbase = irp->r; 1945 pbase = irp->r;
1955 lua_assert(ra_hasreg(pbase)); 1946 lj_assertA(ra_hasreg(pbase), "base reg lost");
1956 } else if (allow) { 1947 } else if (allow) {
1957 pbase = rset_pickbot(allow); 1948 pbase = rset_pickbot(allow);
1958 } else { 1949 } else {
@@ -1964,13 +1955,13 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
1964 } 1955 }
1965 emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); 1956 emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
1966 k = emit_isk12(0, (int32_t)(8*topslot)); 1957 k = emit_isk12(0, (int32_t)(8*topslot));
1967 lua_assert(k); 1958 lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
1968 emit_n(as, ARMI_CMP^k, RID_TMP); 1959 emit_n(as, ARMI_CMP^k, RID_TMP);
1969 emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); 1960 emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
1970 emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, 1961 emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
1971 (int32_t)offsetof(lua_State, maxstack)); 1962 (int32_t)offsetof(lua_State, maxstack));
1972 if (irp) { /* Must not spill arbitrary registers in head of side trace. */ 1963 if (irp) { /* Must not spill arbitrary registers in head of side trace. */
1973 int32_t i = i32ptr(&J2G(as->J)->jit_L); 1964 int32_t i = i32ptr(&J2G(as->J)->cur_L);
1974 if (ra_hasspill(irp->s)) 1965 if (ra_hasspill(irp->s))
1975 emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s)); 1966 emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
1976 emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095)); 1967 emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
@@ -1978,7 +1969,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
1978 emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */ 1969 emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
1979 emit_loadi(as, RID_TMP, (i & ~4095)); 1970 emit_loadi(as, RID_TMP, (i & ~4095));
1980 } else { 1971 } else {
1981 emit_getgl(as, RID_TMP, jit_L); 1972 emit_getgl(as, RID_TMP, cur_L);
1982 } 1973 }
1983} 1974}
1984 1975
@@ -2001,7 +1992,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
2001#if LJ_SOFTFP 1992#if LJ_SOFTFP
2002 RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); 1993 RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
2003 Reg tmp; 1994 Reg tmp;
2004 lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ 1995 /* LJ_SOFTFP: must be a number constant. */
1996 lj_assertA(irref_isk(ref), "unsplit FP op");
2005 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, 1997 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
2006 rset_exclude(RSET_GPREVEN, RID_BASE)); 1998 rset_exclude(RSET_GPREVEN, RID_BASE));
2007 emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); 1999 emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
@@ -2015,7 +2007,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
2015 } else { 2007 } else {
2016 RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); 2008 RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
2017 Reg type; 2009 Reg type;
2018 lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); 2010 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
2011 "restore of IR type %d", irt_type(ir->t));
2019 if (!irt_ispri(ir->t)) { 2012 if (!irt_ispri(ir->t)) {
2020 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); 2013 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
2021 emit_lso(as, ARMI_STR, src, RID_BASE, ofs); 2014 emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
@@ -2035,7 +2028,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
2035 } 2028 }
2036 checkmclim(as); 2029 checkmclim(as);
2037 } 2030 }
2038 lua_assert(map + nent == flinks); 2031 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
2039} 2032}
2040 2033
2041/* -- GC handling --------------------------------------------------------- */ 2034/* -- GC handling --------------------------------------------------------- */
@@ -2087,13 +2080,13 @@ static void asm_loop_fixup(ASMState *as)
2087 2080
2088/* -- Head of trace ------------------------------------------------------- */ 2081/* -- Head of trace ------------------------------------------------------- */
2089 2082
2090/* Reload L register from g->jit_L. */ 2083/* Reload L register from g->cur_L. */
2091static void asm_head_lreg(ASMState *as) 2084static void asm_head_lreg(ASMState *as)
2092{ 2085{
2093 IRIns *ir = IR(ASMREF_L); 2086 IRIns *ir = IR(ASMREF_L);
2094 if (ra_used(ir)) { 2087 if (ra_used(ir)) {
2095 Reg r = ra_dest(as, ir, RSET_GPR); 2088 Reg r = ra_dest(as, ir, RSET_GPR);
2096 emit_getgl(as, r, jit_L); 2089 emit_getgl(as, r, cur_L);
2097 ra_evictk(as); 2090 ra_evictk(as);
2098 } 2091 }
2099} 2092}
@@ -2121,7 +2114,7 @@ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
2121 rset_clear(allow, ra_dest(as, ir, allow)); 2114 rset_clear(allow, ra_dest(as, ir, allow));
2122 } else { 2115 } else {
2123 Reg r = irp->r; 2116 Reg r = irp->r;
2124 lua_assert(ra_hasreg(r)); 2117 lj_assertA(ra_hasreg(r), "base reg lost");
2125 rset_clear(allow, r); 2118 rset_clear(allow, r);
2126 if (r != ir->r && !rset_test(as->freeset, r)) 2119 if (r != ir->r && !rset_test(as->freeset, r))
2127 ra_restore(as, regcost_ref(as->cost[r])); 2120 ra_restore(as, regcost_ref(as->cost[r]));
@@ -2143,7 +2136,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2143 } else { 2136 } else {
2144 /* Patch stack adjustment. */ 2137 /* Patch stack adjustment. */
2145 uint32_t k = emit_isk12(ARMI_ADD, spadj); 2138 uint32_t k = emit_isk12(ARMI_ADD, spadj);
2146 lua_assert(k); 2139 lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
2147 p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); 2140 p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
2148 } 2141 }
2149 /* Patch exit branch. */ 2142 /* Patch exit branch. */
@@ -2164,143 +2157,13 @@ static void asm_tail_prep(ASMState *as)
2164 *p = 0; /* Prevent load/store merging. */ 2157 *p = 0; /* Prevent load/store merging. */
2165} 2158}
2166 2159
2167/* -- Instruction dispatch ------------------------------------------------ */
2168
2169/* Assemble a single instruction. */
2170static void asm_ir(ASMState *as, IRIns *ir)
2171{
2172 switch ((IROp)ir->o) {
2173 /* Miscellaneous ops. */
2174 case IR_LOOP: asm_loop(as); break;
2175 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
2176 case IR_USE:
2177 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
2178 case IR_PHI: asm_phi(as, ir); break;
2179 case IR_HIOP: asm_hiop(as, ir); break;
2180 case IR_GCSTEP: asm_gcstep(as, ir); break;
2181
2182 /* Guarded assertions. */
2183 case IR_EQ: case IR_NE:
2184 if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
2185 as->curins--;
2186 asm_href(as, ir-1, (IROp)ir->o);
2187 break;
2188 }
2189 /* fallthrough */
2190 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
2191 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
2192 case IR_ABC:
2193#if !LJ_SOFTFP
2194 if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
2195#endif
2196 asm_intcomp(as, ir);
2197 break;
2198
2199 case IR_RETF: asm_retf(as, ir); break;
2200
2201 /* Bit ops. */
2202 case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
2203 case IR_BSWAP: asm_bitswap(as, ir); break;
2204
2205 case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
2206 case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
2207 case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
2208
2209 case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
2210 case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
2211 case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
2212 case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
2213 case IR_BROL: lua_assert(0); break;
2214
2215 /* Arithmetic ops. */
2216 case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
2217 case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
2218 case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
2219 case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
2220 case IR_NEG: asm_neg(as, ir); break;
2221
2222#if LJ_SOFTFP
2223 case IR_DIV: case IR_POW: case IR_ABS:
2224 case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
2225 lua_assert(0); /* Unused for LJ_SOFTFP. */
2226 break;
2227#else
2228 case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
2229 case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
2230 case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
2231 case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
2232 case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
2233 case IR_FPMATH:
2234 if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
2235 break;
2236 if (ir->op2 <= IRFPM_TRUNC)
2237 asm_callround(as, ir, ir->op2);
2238 else if (ir->op2 == IRFPM_SQRT)
2239 asm_fpunary(as, ir, ARMI_VSQRT_D);
2240 else
2241 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
2242 break;
2243 case IR_TOBIT: asm_tobit(as, ir); break;
2244#endif
2245
2246 case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
2247 case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
2248
2249 /* Memory references. */
2250 case IR_AREF: asm_aref(as, ir); break;
2251 case IR_HREF: asm_href(as, ir, 0); break;
2252 case IR_HREFK: asm_hrefk(as, ir); break;
2253 case IR_NEWREF: asm_newref(as, ir); break;
2254 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
2255 case IR_FREF: asm_fref(as, ir); break;
2256 case IR_STRREF: asm_strref(as, ir); break;
2257
2258 /* Loads and stores. */
2259 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2260 asm_ahuvload(as, ir);
2261 break;
2262 case IR_FLOAD: asm_fload(as, ir); break;
2263 case IR_XLOAD: asm_xload(as, ir); break;
2264 case IR_SLOAD: asm_sload(as, ir); break;
2265
2266 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
2267 case IR_FSTORE: asm_fstore(as, ir); break;
2268 case IR_XSTORE: asm_xstore(as, ir, 0); break;
2269
2270 /* Allocations. */
2271 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
2272 case IR_TNEW: asm_tnew(as, ir); break;
2273 case IR_TDUP: asm_tdup(as, ir); break;
2274 case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
2275
2276 /* Write barriers. */
2277 case IR_TBAR: asm_tbar(as, ir); break;
2278 case IR_OBAR: asm_obar(as, ir); break;
2279
2280 /* Type conversions. */
2281 case IR_CONV: asm_conv(as, ir); break;
2282 case IR_TOSTR: asm_tostr(as, ir); break;
2283 case IR_STRTO: asm_strto(as, ir); break;
2284
2285 /* Calls. */
2286 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
2287 case IR_CALLXS: asm_callx(as, ir); break;
2288 case IR_CARG: break;
2289
2290 default:
2291 setintV(&as->J->errinfo, ir->o);
2292 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
2293 break;
2294 }
2295}
2296
2297/* -- Trace setup --------------------------------------------------------- */ 2160/* -- Trace setup --------------------------------------------------------- */
2298 2161
2299/* Ensure there are enough stack slots for call arguments. */ 2162/* Ensure there are enough stack slots for call arguments. */
2300static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) 2163static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2301{ 2164{
2302 IRRef args[CCI_NARGS_MAX*2]; 2165 IRRef args[CCI_NARGS_MAX*2];
2303 uint32_t i, nargs = (int)CCI_NARGS(ci); 2166 uint32_t i, nargs = CCI_XNARGS(ci);
2304 int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0; 2167 int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
2305 asm_collectargs(as, ir, ci, args); 2168 asm_collectargs(as, ir, ci, args);
2306 for (i = 0; i < nargs; i++) { 2169 for (i = 0; i < nargs; i++) {
@@ -2355,7 +2218,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
2355 if (!cstart) cstart = p; 2218 if (!cstart) cstart = p;
2356 } 2219 }
2357 } 2220 }
2358 lua_assert(cstart != NULL); 2221 lj_assertJ(cstart != NULL, "exit stub %d not found", exitno);
2359 lj_mcode_sync(cstart, cend); 2222 lj_mcode_sync(cstart, cend);
2360 lj_mcode_patch(J, mcarea, 1); 2223 lj_mcode_patch(J, mcarea, 1);
2361} 2224}
diff --git a/src/lj_asm_arm64.h b/src/lj_asm_arm64.h
new file mode 100644
index 00000000..b1fd3acc
--- /dev/null
+++ b/src/lj_asm_arm64.h
@@ -0,0 +1,2018 @@
1/*
2** ARM64 IR assembler (SSA IR -> machine code).
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4**
5** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
6** Sponsored by Cisco Systems, Inc.
7*/
8
9/* -- Register allocator extensions --------------------------------------- */
10
11/* Allocate a register with a hint. */
12static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
13{
14 Reg r = IR(ref)->r;
15 if (ra_noreg(r)) {
16 if (!ra_hashint(r) && !iscrossref(as, ref))
17 ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
18 r = ra_allocref(as, ref, allow);
19 }
20 ra_noweak(as, r);
21 return r;
22}
23
24/* Allocate two source registers for three-operand instructions. */
25static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
26{
27 IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
28 Reg left = irl->r, right = irr->r;
29 if (ra_hasreg(left)) {
30 ra_noweak(as, left);
31 if (ra_noreg(right))
32 right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
33 else
34 ra_noweak(as, right);
35 } else if (ra_hasreg(right)) {
36 ra_noweak(as, right);
37 left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
38 } else if (ra_hashint(right)) {
39 right = ra_allocref(as, ir->op2, allow);
40 left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
41 } else {
42 left = ra_allocref(as, ir->op1, allow);
43 right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
44 }
45 return left | (right << 8);
46}
47
48/* -- Guard handling ------------------------------------------------------ */
49
50/* Setup all needed exit stubs. */
51static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
52{
53 ExitNo i;
54 MCode *mxp = as->mctop;
55 if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
56 asm_mclimit(as);
57 /* 1: str lr,[sp]; bl ->vm_exit_handler; movz w0,traceno; bl <1; bl <1; ... */
58 for (i = nexits-1; (int32_t)i >= 0; i--)
59 *--mxp = A64I_LE(A64I_BL | A64F_S26(-3-i));
60 *--mxp = A64I_LE(A64I_MOVZw | A64F_U16(as->T->traceno));
61 mxp--;
62 *mxp = A64I_LE(A64I_BL | A64F_S26(((MCode *)(void *)lj_vm_exit_handler-mxp)));
63 *--mxp = A64I_LE(A64I_STRx | A64F_D(RID_LR) | A64F_N(RID_SP));
64 as->mctop = mxp;
65}
66
67static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
68{
69 /* Keep this in-sync with exitstub_trace_addr(). */
70 return as->mctop + exitno + 3;
71}
72
73/* Emit conditional branch to exit for guard. */
74static void asm_guardcc(ASMState *as, A64CC cc)
75{
76 MCode *target = asm_exitstub_addr(as, as->snapno);
77 MCode *p = as->mcp;
78 if (LJ_UNLIKELY(p == as->invmcp)) {
79 as->loopinv = 1;
80 *p = A64I_B | A64F_S26(target-p);
81 emit_cond_branch(as, cc^1, p-1);
82 return;
83 }
84 emit_cond_branch(as, cc, target);
85}
86
87/* Emit test and branch instruction to exit for guard. */
88static void asm_guardtnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit)
89{
90 MCode *target = asm_exitstub_addr(as, as->snapno);
91 MCode *p = as->mcp;
92 if (LJ_UNLIKELY(p == as->invmcp)) {
93 as->loopinv = 1;
94 *p = A64I_B | A64F_S26(target-p);
95 emit_tnb(as, ai^0x01000000u, r, bit, p-1);
96 return;
97 }
98 emit_tnb(as, ai, r, bit, target);
99}
100
101/* Emit compare and branch instruction to exit for guard. */
102static void asm_guardcnb(ASMState *as, A64Ins ai, Reg r)
103{
104 MCode *target = asm_exitstub_addr(as, as->snapno);
105 MCode *p = as->mcp;
106 if (LJ_UNLIKELY(p == as->invmcp)) {
107 as->loopinv = 1;
108 *p = A64I_B | A64F_S26(target-p);
109 emit_cnb(as, ai^0x01000000u, r, p-1);
110 return;
111 }
112 emit_cnb(as, ai, r, target);
113}
114
115/* -- Operand fusion ------------------------------------------------------ */
116
117/* Limit linear search to this distance. Avoids O(n^2) behavior. */
118#define CONFLICT_SEARCH_LIM 31
119
120static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
121{
122 if (irref_isk(ref)) {
123 IRIns *ir = IR(ref);
124 if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
125 *k = ir->i;
126 return 1;
127 } else if (checki32((int64_t)ir_k64(ir)->u64)) {
128 *k = (int32_t)ir_k64(ir)->u64;
129 return 1;
130 }
131 }
132 return 0;
133}
134
135/* Check if there's no conflicting instruction between curins and ref. */
136static int noconflict(ASMState *as, IRRef ref, IROp conflict)
137{
138 IRIns *ir = as->ir;
139 IRRef i = as->curins;
140 if (i > ref + CONFLICT_SEARCH_LIM)
141 return 0; /* Give up, ref is too far away. */
142 while (--i > ref)
143 if (ir[i].o == conflict)
144 return 0; /* Conflict found. */
145 return 1; /* Ok, no conflict. */
146}
147
148/* Fuse the array base of colocated arrays. */
149static int32_t asm_fuseabase(ASMState *as, IRRef ref)
150{
151 IRIns *ir = IR(ref);
152 if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
153 !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
154 return (int32_t)sizeof(GCtab);
155 return 0;
156}
157
158#define FUSE_REG 0x40000000
159
160/* Fuse array/hash/upvalue reference into register+offset operand. */
161static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
162 A64Ins ins)
163{
164 IRIns *ir = IR(ref);
165 if (ra_noreg(ir->r)) {
166 if (ir->o == IR_AREF) {
167 if (mayfuse(as, ref)) {
168 if (irref_isk(ir->op2)) {
169 IRRef tab = IR(ir->op1)->op1;
170 int32_t ofs = asm_fuseabase(as, tab);
171 IRRef refa = ofs ? tab : ir->op1;
172 ofs += 8*IR(ir->op2)->i;
173 if (emit_checkofs(ins, ofs)) {
174 *ofsp = ofs;
175 return ra_alloc1(as, refa, allow);
176 }
177 } else {
178 Reg base = ra_alloc1(as, ir->op1, allow);
179 *ofsp = FUSE_REG|ra_alloc1(as, ir->op2, rset_exclude(allow, base));
180 return base;
181 }
182 }
183 } else if (ir->o == IR_HREFK) {
184 if (mayfuse(as, ref)) {
185 int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
186 if (emit_checkofs(ins, ofs)) {
187 *ofsp = ofs;
188 return ra_alloc1(as, ir->op1, allow);
189 }
190 }
191 } else if (ir->o == IR_UREFC) {
192 if (irref_isk(ir->op1)) {
193 GCfunc *fn = ir_kfunc(IR(ir->op1));
194 GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
195 int64_t ofs = glofs(as, &uv->tv);
196 if (emit_checkofs(ins, ofs)) {
197 *ofsp = (int32_t)ofs;
198 return RID_GL;
199 }
200 }
201 }
202 }
203 *ofsp = 0;
204 return ra_alloc1(as, ref, allow);
205}
206
207/* Fuse m operand into arithmetic/logic instructions. */
208static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow)
209{
210 IRIns *ir = IR(ref);
211 if (ra_hasreg(ir->r)) {
212 ra_noweak(as, ir->r);
213 return A64F_M(ir->r);
214 } else if (irref_isk(ref)) {
215 uint32_t m;
216 int64_t k = get_k64val(as, ref);
217 if ((ai & 0x1f000000) == 0x0a000000)
218 m = emit_isk13(k, irt_is64(ir->t));
219 else
220 m = emit_isk12(k);
221 if (m)
222 return m;
223 } else if (mayfuse(as, ref)) {
224 if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR && irref_isk(ir->op2)) ||
225 (ir->o == IR_ADD && ir->op1 == ir->op2)) {
226 A64Shift sh = ir->o == IR_BSHR ? A64SH_LSR :
227 ir->o == IR_BSAR ? A64SH_ASR : A64SH_LSL;
228 int shift = ir->o == IR_ADD ? 1 :
229 (IR(ir->op2)->i & (irt_is64(ir->t) ? 63 : 31));
230 IRIns *irl = IR(ir->op1);
231 if (sh == A64SH_LSL &&
232 irl->o == IR_CONV &&
233 irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
234 shift <= 4 &&
235 canfuse(as, irl)) {
236 Reg m = ra_alloc1(as, irl->op1, allow);
237 return A64F_M(m) | A64F_EXSH(A64EX_SXTW, shift);
238 } else {
239 Reg m = ra_alloc1(as, ir->op1, allow);
240 return A64F_M(m) | A64F_SH(sh, shift);
241 }
242 } else if (ir->o == IR_CONV &&
243 ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)) {
244 Reg m = ra_alloc1(as, ir->op1, allow);
245 return A64F_M(m) | A64F_EX(A64EX_SXTW);
246 }
247 }
248 return A64F_M(ra_allocref(as, ref, allow));
249}
250
251/* Fuse XLOAD/XSTORE reference into load/store operand. */
252static void asm_fusexref(ASMState *as, A64Ins ai, Reg rd, IRRef ref,
253 RegSet allow)
254{
255 IRIns *ir = IR(ref);
256 Reg base;
257 int32_t ofs = 0;
258 if (ra_noreg(ir->r) && canfuse(as, ir)) {
259 if (ir->o == IR_ADD) {
260 if (asm_isk32(as, ir->op2, &ofs) && emit_checkofs(ai, ofs)) {
261 ref = ir->op1;
262 } else {
263 Reg rn, rm;
264 IRRef lref = ir->op1, rref = ir->op2;
265 IRIns *irl = IR(lref);
266 if (mayfuse(as, irl->op1)) {
267 unsigned int shift = 4;
268 if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
269 shift = (IR(irl->op2)->i & 63);
270 } else if (irl->o == IR_ADD && irl->op1 == irl->op2) {
271 shift = 1;
272 }
273 if ((ai >> 30) == shift) {
274 lref = irl->op1;
275 irl = IR(lref);
276 ai |= A64I_LS_SH;
277 }
278 }
279 if (irl->o == IR_CONV &&
280 irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
281 canfuse(as, irl)) {
282 lref = irl->op1;
283 ai |= A64I_LS_SXTWx;
284 } else {
285 ai |= A64I_LS_LSLx;
286 }
287 rm = ra_alloc1(as, lref, allow);
288 rn = ra_alloc1(as, rref, rset_exclude(allow, rm));
289 emit_dnm(as, (ai^A64I_LS_R), (rd & 31), rn, rm);
290 return;
291 }
292 } else if (ir->o == IR_STRREF) {
293 if (asm_isk32(as, ir->op2, &ofs)) {
294 ref = ir->op1;
295 } else if (asm_isk32(as, ir->op1, &ofs)) {
296 ref = ir->op2;
297 } else {
298 Reg refk = irref_isk(ir->op1) ? ir->op1 : ir->op2;
299 Reg refv = irref_isk(ir->op1) ? ir->op2 : ir->op1;
300 Reg rn = ra_alloc1(as, refv, allow);
301 IRIns *irr = IR(refk);
302 uint32_t m;
303 if (irr+1 == ir && !ra_used(irr) &&
304 irr->o == IR_ADD && irref_isk(irr->op2)) {
305 ofs = sizeof(GCstr) + IR(irr->op2)->i;
306 if (emit_checkofs(ai, ofs)) {
307 Reg rm = ra_alloc1(as, irr->op1, rset_exclude(allow, rn));
308 m = A64F_M(rm) | A64F_EX(A64EX_SXTW);
309 goto skipopm;
310 }
311 }
312 m = asm_fuseopm(as, 0, refk, rset_exclude(allow, rn));
313 ofs = sizeof(GCstr);
314 skipopm:
315 emit_lso(as, ai, rd, rd, ofs);
316 emit_dn(as, A64I_ADDx^m, rd, rn);
317 return;
318 }
319 ofs += sizeof(GCstr);
320 if (!emit_checkofs(ai, ofs)) {
321 Reg rn = ra_alloc1(as, ref, allow);
322 Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
323 emit_dnm(as, (ai^A64I_LS_R)|A64I_LS_UXTWx, rd, rn, rm);
324 return;
325 }
326 }
327 }
328 base = ra_alloc1(as, ref, allow);
329 emit_lso(as, ai, (rd & 31), base, ofs);
330}
331
332/* Fuse FP multiply-add/sub. */
333static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air)
334{
335 IRRef lref = ir->op1, rref = ir->op2;
336 IRIns *irm;
337 if (lref != rref &&
338 ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
339 ra_noreg(irm->r)) ||
340 (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
341 (rref = lref, ai = air, ra_noreg(irm->r))))) {
342 Reg dest = ra_dest(as, ir, RSET_FPR);
343 Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
344 Reg left = ra_alloc2(as, irm,
345 rset_exclude(rset_exclude(RSET_FPR, dest), add));
346 Reg right = (left >> 8); left &= 255;
347 emit_dnma(as, ai, (dest & 31), (left & 31), (right & 31), (add & 31));
348 return 1;
349 }
350 return 0;
351}
352
353/* Fuse BAND + BSHL/BSHR into UBFM. */
354static int asm_fuseandshift(ASMState *as, IRIns *ir)
355{
356 IRIns *irl = IR(ir->op1);
357 lj_assertA(ir->o == IR_BAND, "bad usage");
358 if (canfuse(as, irl) && irref_isk(ir->op2)) {
359 uint64_t mask = get_k64val(as, ir->op2);
360 if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) {
361 int32_t shmask = irt_is64(irl->t) ? 63 : 31;
362 int32_t shift = (IR(irl->op2)->i & shmask);
363 int32_t imms = shift;
364 if (irl->o == IR_BSHL) {
365 mask >>= shift;
366 shift = (shmask-shift+1) & shmask;
367 imms = 0;
368 }
369 if (mask && !((mask+1) & mask)) { /* Contiguous 1-bits at the bottom. */
370 Reg dest = ra_dest(as, ir, RSET_GPR);
371 Reg left = ra_alloc1(as, irl->op1, RSET_GPR);
372 A64Ins ai = shmask == 63 ? A64I_UBFMx : A64I_UBFMw;
373 imms += 63 - emit_clz64(mask);
374 if (imms > shmask) imms = shmask;
375 emit_dn(as, ai | A64F_IMMS(imms) | A64F_IMMR(shift), dest, left);
376 return 1;
377 }
378 }
379 }
380 return 0;
381}
382
383/* Fuse BOR(BSHL, BSHR) into EXTR/ROR. */
384static int asm_fuseorshift(ASMState *as, IRIns *ir)
385{
386 IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
387 lj_assertA(ir->o == IR_BOR, "bad usage");
388 if (canfuse(as, irl) && canfuse(as, irr) &&
389 ((irl->o == IR_BSHR && irr->o == IR_BSHL) ||
390 (irl->o == IR_BSHL && irr->o == IR_BSHR))) {
391 if (irref_isk(irl->op2) && irref_isk(irr->op2)) {
392 IRRef lref = irl->op1, rref = irr->op1;
393 uint32_t lshift = IR(irl->op2)->i, rshift = IR(irr->op2)->i;
394 if (irl->o == IR_BSHR) { /* BSHR needs to be the right operand. */
395 uint32_t tmp2;
396 IRRef tmp1 = lref; lref = rref; rref = tmp1;
397 tmp2 = lshift; lshift = rshift; rshift = tmp2;
398 }
399 if (rshift + lshift == (irt_is64(ir->t) ? 64 : 32)) {
400 A64Ins ai = irt_is64(ir->t) ? A64I_EXTRx : A64I_EXTRw;
401 Reg dest = ra_dest(as, ir, RSET_GPR);
402 Reg left = ra_alloc1(as, lref, RSET_GPR);
403 Reg right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
404 emit_dnm(as, ai | A64F_IMMS(rshift), dest, left, right);
405 return 1;
406 }
407 }
408 }
409 return 0;
410}
411
412/* -- Calls --------------------------------------------------------------- */
413
414/* Generate a call to a C function. */
415static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
416{
417 uint32_t n, nargs = CCI_XNARGS(ci);
418 int32_t ofs = 0;
419 Reg gpr, fpr = REGARG_FIRSTFPR;
420 if ((void *)ci->func)
421 emit_call(as, (void *)ci->func);
422 for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
423 as->cost[gpr] = REGCOST(~0u, ASMREF_L);
424 gpr = REGARG_FIRSTGPR;
425 for (n = 0; n < nargs; n++) { /* Setup args. */
426 IRRef ref = args[n];
427 IRIns *ir = IR(ref);
428 if (ref) {
429 if (irt_isfp(ir->t)) {
430 if (fpr <= REGARG_LASTFPR) {
431 lj_assertA(rset_test(as->freeset, fpr),
432 "reg %d not free", fpr); /* Must have been evicted. */
433 ra_leftov(as, fpr, ref);
434 fpr++;
435 } else {
436 Reg r = ra_alloc1(as, ref, RSET_FPR);
437 emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isnum(ir->t)) ? 4 : 0));
438 ofs += 8;
439 }
440 } else {
441 if (gpr <= REGARG_LASTGPR) {
442 lj_assertA(rset_test(as->freeset, gpr),
443 "reg %d not free", gpr); /* Must have been evicted. */
444 ra_leftov(as, gpr, ref);
445 gpr++;
446 } else {
447 Reg r = ra_alloc1(as, ref, RSET_GPR);
448 emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_is64(ir->t)) ? 4 : 0));
449 ofs += 8;
450 }
451 }
452 }
453 }
454}
455
456/* Setup result reg/sp for call. Evict scratch regs. */
457static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
458{
459 RegSet drop = RSET_SCRATCH;
460 if (ra_hasreg(ir->r))
461 rset_clear(drop, ir->r); /* Dest reg handled below. */
462 ra_evictset(as, drop); /* Evictions must be performed first. */
463 if (ra_used(ir)) {
464 lj_assertA(!irt_ispri(ir->t), "PRI dest");
465 if (irt_isfp(ir->t)) {
466 if (ci->flags & CCI_CASTU64) {
467 Reg dest = ra_dest(as, ir, RSET_FPR) & 31;
468 emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D_R : A64I_FMOV_S_R,
469 dest, RID_RET);
470 } else {
471 ra_destreg(as, ir, RID_FPRET);
472 }
473 } else {
474 ra_destreg(as, ir, RID_RET);
475 }
476 }
477 UNUSED(ci);
478}
479
480static void asm_callx(ASMState *as, IRIns *ir)
481{
482 IRRef args[CCI_NARGS_MAX*2];
483 CCallInfo ci;
484 IRRef func;
485 IRIns *irf;
486 ci.flags = asm_callx_flags(as, ir);
487 asm_collectargs(as, ir, &ci, args);
488 asm_setupresult(as, ir, &ci);
489 func = ir->op2; irf = IR(func);
490 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
491 if (irref_isk(func)) { /* Call to constant address. */
492 ci.func = (ASMFunction)(ir_k64(irf)->u64);
493 } else { /* Need a non-argument register for indirect calls. */
494 Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
495 emit_n(as, A64I_BLR, freg);
496 ci.func = (ASMFunction)(void *)0;
497 }
498 asm_gencall(as, &ci, args);
499}
500
501/* -- Returns ------------------------------------------------------------- */
502
503/* Return to lower frame. Guard that it goes to the right spot. */
504static void asm_retf(ASMState *as, IRIns *ir)
505{
506 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
507 void *pc = ir_kptr(IR(ir->op2));
508 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
509 as->topslot -= (BCReg)delta;
510 if ((int32_t)as->topslot < 0) as->topslot = 0;
511 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
512 /* Need to force a spill on REF_BASE now to update the stack slot. */
513 emit_lso(as, A64I_STRx, base, RID_SP, ra_spill(as, IR(REF_BASE)));
514 emit_setgl(as, base, jit_base);
515 emit_addptr(as, base, -8*delta);
516 asm_guardcc(as, CC_NE);
517 emit_nm(as, A64I_CMPx, RID_TMP,
518 ra_allock(as, i64ptr(pc), rset_exclude(RSET_GPR, base)));
519 emit_lso(as, A64I_LDRx, RID_TMP, base, -8);
520}
521
522/* -- Type conversions ---------------------------------------------------- */
523
524static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
525{
526 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
527 Reg dest = ra_dest(as, ir, RSET_GPR);
528 asm_guardcc(as, CC_NE);
529 emit_nm(as, A64I_FCMPd, (tmp & 31), (left & 31));
530 emit_dn(as, A64I_FCVT_F64_S32, (tmp & 31), dest);
531 emit_dn(as, A64I_FCVT_S32_F64, dest, (left & 31));
532}
533
534static void asm_tobit(ASMState *as, IRIns *ir)
535{
536 RegSet allow = RSET_FPR;
537 Reg left = ra_alloc1(as, ir->op1, allow);
538 Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
539 Reg tmp = ra_scratch(as, rset_clear(allow, right));
540 Reg dest = ra_dest(as, ir, RSET_GPR);
541 emit_dn(as, A64I_FMOV_R_S, dest, (tmp & 31));
542 emit_dnm(as, A64I_FADDd, (tmp & 31), (left & 31), (right & 31));
543}
544
/* Generic type conversion (IR_CONV).
** Dispatches on destination type (ir->t) and source type (low bits of
** ir->op2): FP<->FP, integer->FP, FP->integer (optionally guarded via
** asm_tointg), sub-32-bit sign/zero extension, and 32<->64 bit
** extensions/truncations.
*/
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dn(as, st == IRT_NUM ? A64I_FCVT_F32_F64 : A64I_FCVT_F64_F32,
	      (dest & 31), (ra_alloc1(as, lref, RSET_FPR) & 31));
    } else {  /* Integer to FP conversion. */
      /* Pick SCVTF/UCVTF variant by dest width, source width, signedness. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      A64Ins ai = irt_isfloat(ir->t) ?
	(((IRT_IS64 >> st) & 1) ?
	 (st == IRT_I64 ? A64I_FCVT_F32_S64 : A64I_FCVT_F32_U64) :
	 (st == IRT_INT ? A64I_FCVT_F32_S32 : A64I_FCVT_F32_U32)) :
	(((IRT_IS64 >> st) & 1) ?
	 (st == IRT_I64 ? A64I_FCVT_F64_S64 : A64I_FCVT_F64_U64) :
	 (st == IRT_INT ? A64I_FCVT_F64_S32 : A64I_FCVT_F64_U32));
      emit_dn(as, ai, (dest & 31), left);
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
		 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      /* Pick FCVTZS/FCVTZU variant by dest width/signedness and source. */
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg dest = ra_dest(as, ir, RSET_GPR);
      A64Ins ai = irt_is64(ir->t) ?
	(st == IRT_NUM ?
	 (irt_isi64(ir->t) ? A64I_FCVT_S64_F64 : A64I_FCVT_U64_F64) :
	 (irt_isi64(ir->t) ? A64I_FCVT_S64_F32 : A64I_FCVT_U64_F32)) :
	(st == IRT_NUM ?
	 (irt_isint(ir->t) ? A64I_FCVT_S32_F64 : A64I_FCVT_U32_F64) :
	 (irt_isint(ir->t) ? A64I_FCVT_S32_F32 : A64I_FCVT_U32_F32));
      emit_dn(as, ai, dest, (left & 31));
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, lref, RSET_GPR);
    A64Ins ai = st == IRT_I8 ? A64I_SXTBw :
		st == IRT_U8 ? A64I_UXTBw :
		st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw;
    lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
    emit_dn(as, ai, dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irt_is64(ir->t)) {
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
	/* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
	ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
	Reg left = ra_alloc1(as, lref, RSET_GPR);
	emit_dn(as, A64I_SXTW, dest, left);
      }
    } else {
      if (st64) {
	/* This is either a 32 bit reg/reg mov which zeroes the hiword
	** or a load of the loword from a 64 bit address.
	*/
	Reg left = ra_alloc1(as, lref, RSET_GPR);
	emit_dm(as, A64I_MOVw, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
	ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}
617
/* String to number conversion (IR_STRTO) via a call to lj_strscan_num().
** The callee writes the resulting TValue to a stack slot (argument TMP1 =
** SP+ofs); the guard exits the trace if the scan returns 0 (failure).
** If the result is already spilled, the callee stores directly into the
** spill slot, avoiding an extra store.
*/
static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg dest = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);  /* The call clobbers all scratch regs. */
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);  /* Let the callee fill the spill slot. */
      destused = 0;
      if (ra_hasreg(ir->r)) {
	ra_free(as, ir->r);
	ra_modified(as, ir->r);
	emit_spload(as, ir, ir->r, ofs);  /* Reload into the live reg. */
      }
    } else {
      dest = ra_dest(as, ir, RSET_FPR);
    }
  }
  if (destused)
    emit_lso(as, A64I_LDRd, (dest & 31), RID_SP, 0);
  asm_guardcnb(as, A64I_CBZ, RID_RET);  /* Exit on scan failure (ret == 0). */
  args[0] = ir->op1; /* GCstr *str */
  args[1] = ASMREF_TMP1; /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  emit_opk(as, A64I_ADDx, tmp, RID_SP, ofs, RSET_GPR);  /* TMP1 = SP+ofs. */
}
648
649/* -- Memory references --------------------------------------------------- */
650
/* Store tagged value for ref at base+ofs.
** Builds the 64 bit tagged representation in RID_TMP (or a constant reg)
** and stores it: integers are zero-extended and combined with the type
** tag shifted to bits 47+; GC values get tag<<47 added to the pointer.
*/
static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
  RegSet allow = rset_exclude(RSET_GPR, base);
  IRIns *ir = IR(ref);
  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
	     "store of IR type %d", irt_type(ir->t));
  if (irref_isk(ref)) {
    /* Constant: materialize the full tagged 64 bit value directly. */
    TValue k;
    lj_ir_kvalue(as->J->L, &k, ir);
    emit_lso(as, A64I_STRx, ra_allock(as, k.u64, allow), base, ofs);
  } else {
    Reg src = ra_alloc1(as, ref, allow);
    rset_clear(allow, src);
    if (irt_isinteger(ir->t)) {
      /* RID_TMP = (tag<<47) + zero-extended 32 bit integer. */
      Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), RID_TMP, type, src);
    } else {
      /* RID_TMP = pointer + (tag<<47). */
      Reg type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), RID_TMP, src, type);
    }
  }
}
676
/* Get pointer to TValue for ref into dest. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref)) {
      /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i64ptr(ir_knum(ir)), dest);
    } else {
      /* Otherwise force a spill and use the spill slot. */
      emit_opk(as, A64I_ADDx, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
    }
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    asm_tvstore64(as, dest, 0, ref);  /* Store the tagged value first. */
    ra_allockreg(as, i64ptr(&J2G(as->J)->tmptv), dest);
  }
}
695
/* Array reference (IR_AREF): dest = &t->array[idx].
** For a constant index, try to fold the (possibly colocated) array base
** offset plus 8*idx into a single ADD immediate. Otherwise compute
** dest = base + (idx << 3) with a zero-extending (UXTW) extend-shift.
*/
static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ofs + 8*IR(ir->op2)->i);
    if (k) {  /* Offset fits into a 12 bit immediate. */
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, A64I_ADDx^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, A64I_ADDx | A64F_EXSH(A64EX_UXTW, 3), dest, base, idx);
}
715
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
** Machine code is emitted backwards, so read the emit_* calls bottom-up
** for the runtime instruction order.
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, tmp = RID_TMP;
  Reg ftmp = RID_NONE, type = RID_NONE, scr = RID_NONE, tisnum = RID_NONE;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t k = 0;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;
  rset_clear(allow, tab);

  /* Allocate the key in a register, unless it fits a 12 bit immediate. */
  if (!isk) {
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    rset_clear(allow, key);
    if (!irt_isstr(kt)) {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
  } else if (irt_isnum(kt)) {
    int64_t val = (int64_t)ir_knum(irkey)->u64;
    if (!(k = emit_isk12(val))) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
  } else if (!irt_ispri(kt)) {
    if (!(k = emit_isk12(irkey->i))) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }

  /* Allocate constants early. */
  if (irt_isnum(kt)) {
    if (!isk) {
      tisnum = ra_allock(as, LJ_TISNUM << 15, allow);
      ftmp = ra_scratch(as, rset_exclude(RSET_FPR, key));
      rset_clear(allow, tisnum);
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      /* Full tagged 64 bit constant key: tag<<47 | GC pointer. */
      int64_t kk = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
      scr = ra_allock(as, kk, allow);
    } else {
      scr = ra_scratch(as, allow);
    }
    rset_clear(allow, scr);
  } else {
    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
    type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
    scr = ra_scratch(as, rset_clear(allow, type));
    rset_clear(allow, scr);
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);  /* Unconditional exit: chain exhausted. */
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;  /* Reserve a slot; branch is patched in below. */
  emit_n(as, A64I_CMPx^A64I_K12^0, dest);  /* Loop while next != NULL. */
  emit_lso(as, A64I_LDRx, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_cond_branch(as, CC_EQ, l_end);

  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      if (k)
	emit_n(as, A64I_CMPx^k, tmp);
      else
	emit_nm(as, A64I_CMPx, key, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      /* Check the node key's tag is a number, then compare FP values. */
      emit_nm(as, A64I_FCMPd, key, ftmp);
      emit_dn(as, A64I_FMOV_D_R, (ftmp & 31), (tmp & 31));
      emit_cond_branch(as, CC_LO, l_next);
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), tisnum, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.n));
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      emit_nm(as, A64I_CMPx, scr, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      emit_nm(as, A64I_CMPx, tmp, scr);
      emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key.u64));
    }
  } else {
    emit_nm(as, A64I_CMPw, scr, type);
    emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key));
  }

  /* Patch the reserved slot with the loop-back branch. */
  *l_loop = A64I_BCC | A64F_S19(as->mcp - l_loop) | CC_NE;
  if (!isk && irt_isaddr(kt)) {
    /* Build the tagged key (pointer + tag<<47) for the comparisons above. */
    type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, key, type);
    rset_clear(allow, type);
  }
  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    /* Hash is zero: the main position is node[0]. */
    emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node));
  } else {
    /* dest = node + hash*3*8, i.e. hash scaled by sizeof(Node). */
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 3), dest, tmp, dest);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 1), dest, dest, dest);
    emit_lso(as, A64I_LDRx, tmp, tab, offsetof(GCtab, node));
    if (isk) {
      Reg tmphash = ra_allock(as, khash, allow);
      emit_dnm(as, A64I_ANDw, dest, dest, tmphash);
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      /* Fetch of str->sid is cheaper than ra_allock. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, key, offsetof(GCstr, sid));
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, tab, offsetof(GCtab, hmask));
      emit_dnm(as, A64I_SUBw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT3)), tmp, tmp, tmp);
      emit_dnm(as, A64I_EORw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT2)), dest, dest, dest);
      emit_dnm(as, A64I_SUBw, tmp, tmp, dest);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT1)), dest, dest, dest);
      emit_dnm(as, A64I_EORw, tmp, tmp, dest);
      if (irt_isnum(kt)) {
	/* Split the 64 bit FP key into lo/hi 32 bit halves for hashing. */
	emit_dnm(as, A64I_ADDw, dest, dest, dest);
	emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
	emit_dm(as, A64I_MOVw, tmp, dest);
	emit_dn(as, A64I_FMOV_R_D, dest, (key & 31));
      } else {
	checkmclim(as);
	emit_dm(as, A64I_MOVw, tmp, key);
	emit_dnm(as, A64I_EORw, dest, dest,
		 ra_allock(as, irt_toitype(kt) << 15, allow));
	emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
	emit_dm(as, A64I_MOVx, dest, key);
      }
    }
  }
}
880
/* Constant hash-slot reference (IR_HREFK).
** Checks that the key stored in the known node slot still equals the
** constant key (full tagged 64 bit compare) and exits the trace if not.
** If the slot offset doesn't fit the load encoding, node+ofs is
** materialized in dest first and used as the base.
*/
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  int bigofs = !emit_checkofs(A64I_LDRx, ofs);
  Reg dest = (ra_used(ir) || bigofs) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  uint64_t k;
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (bigofs) {
    idx = dest;  /* Use dest (= node+ofs) as the load base instead. */
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, A64I_ADDx, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);  /* Exit if the key doesn't match. */
  /* Build the expected tagged 64 bit key constant. */
  if (irt_ispri(irkey->t)) {
    k = ~((int64_t)~irt_toitype(irkey->t) << 47);
  } else if (irt_isnum(irkey->t)) {
    k = ir_knum(irkey)->u64;
  } else {
    k = ((uint64_t)irt_toitype(irkey->t) << 47) | (uint64_t)ir_kgc(irkey);
  }
  key = ra_scratch(as, allow);
  emit_nm(as, A64I_CMPx, key, ra_allock(as, k, rset_exclude(allow, key)));
  emit_lso(as, A64I_LDRx, key, idx, kofs);
  if (bigofs)
    emit_opk(as, A64I_ADDx, dest, node, ofs, RSET_GPR);
}
915
/* Upvalue reference (IR_UREF*).
** Constant function: load the upvalue's value pointer directly.
** Otherwise load uv = func->uvptr[op2>>8]; for IR_UREFC additionally
** guard that the upvalue is closed (uv->closed == 1) and return &uv->tv,
** else load the indirect value pointer uv->v.
*/
static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, A64I_LDRx, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);  /* Exit if the upvalue isn't closed. */
      emit_n(as, (A64I_CMPx^A64I_K12) | A64F_U12(1), RID_TMP);
      emit_opk(as, A64I_ADDx, dest, uv,
	       (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, A64I_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, A64I_LDRx, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, A64I_LDRx, uv, func,
	     (int32_t)offsetof(GCfuncL, uvptr) + 8*(int32_t)(ir->op2 >> 8));
  }
}
939
/* Field reference (IR_FREF): always fused into the following load/store,
** so there is nothing to emit here. */
static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}
945
/* String data reference (IR_STRREF): dest = str + sizeof(GCstr) + index.
** Folds a constant index plus the header size into an ADD immediate if
** it fits; otherwise adds the index register first, then the header size.
*/
static void asm_strref(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg base = ra_alloc1(as, ir->op1, allow);
  IRIns *irr = IR(ir->op2);
  int32_t ofs = sizeof(GCstr);
  uint32_t m;
  rset_clear(allow, base);
  if (irref_isk(ir->op2) && (m = emit_isk12(ofs + irr->i))) {
    emit_dn(as, A64I_ADDx^m, dest, base);
  } else {
    emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, dest);
    emit_dnm(as, A64I_ADDx, dest, base, ra_alloc1(as, ir->op2, allow));
  }
}
962
963/* -- Loads and stores ---------------------------------------------------- */
964
965static A64Ins asm_fxloadins(IRIns *ir)
966{
967 switch (irt_type(ir->t)) {
968 case IRT_I8: return A64I_LDRB ^ A64I_LS_S;
969 case IRT_U8: return A64I_LDRB;
970 case IRT_I16: return A64I_LDRH ^ A64I_LS_S;
971 case IRT_U16: return A64I_LDRH;
972 case IRT_NUM: return A64I_LDRd;
973 case IRT_FLOAT: return A64I_LDRs;
974 default: return irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw;
975 }
976}
977
978static A64Ins asm_fxstoreins(IRIns *ir)
979{
980 switch (irt_type(ir->t)) {
981 case IRT_I8: case IRT_U8: return A64I_STRB;
982 case IRT_I16: case IRT_U16: return A64I_STRH;
983 case IRT_NUM: return A64I_STRd;
984 case IRT_FLOAT: return A64I_STRs;
985 default: return irt_is64(ir->t) ? A64I_STRx : A64I_STRw;
986 }
987}
988
/* Field load (IR_FLOAD).
** op1 == REF_NIL loads from the GG_State, addressed relative to RID_GL
** with an offset derived from the op2 slot index. A load of t->array
** from a colocated table is turned into a plain address computation.
*/
static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx;
  A64Ins ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
    idx = RID_GL;
    ofs = (ir->op2 << 2) - GG_OFS(g);
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
	emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, idx);
	return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  emit_lso(as, ai, (dest & 31), idx, ofs);
}
1011
/* Field store (IR_FSTORE). Skipped entirely for sunk stores (RID_SINK). */
static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    emit_lso(as, asm_fxstoreins(ir), (src & 31), idx, ofs);
  }
}
1022
/* Untyped memory load (IR_XLOAD) with fused address computation. */
static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
}
1029
/* Untyped memory store (IR_XSTORE) with fused address computation.
** Skipped entirely for sunk stores (RID_SINK). */
static void asm_xstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
		 rset_exclude(RSET_GPR, src));
  }
}
1038
/* Array/hash/upvalue load (IR_ALOAD/IR_HLOAD/IR_ULOAD).
** Loads the 64 bit tagged value into tmp and always performs a type
** check, even if the result is unused. Numbers: compare tag (bits 47+,
** via LSR #32) against LJ_TISNUM<<15 and guard CC_LS. Addresses: check
** the tag via ASR #47 + CMN, then untag by masking with LJ_GCVMASK.
** Integers: zero-extend the low word into the destination.
*/
static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  Reg idx, tmp, type;
  int32_t ofs = 0;
  RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
  lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
	     irt_isint(ir->t),
	     "bad load type %d", irt_type(ir->t));
  if (ra_used(ir)) {
    Reg dest = ra_dest(as, ir, allow);
    tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest;
    if (irt_isaddr(ir->t)) {
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if (irt_isnum(ir->t)) {
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    } else if (irt_isint(ir->t)) {
      emit_dm(as, A64I_MOVw, dest, dest);
    }
  } else {
    tmp = ra_scratch(as, gpr);  /* Still need a reg for the type check. */
  }
  type = ra_scratch(as, rset_clear(gpr, tmp));
  idx = asm_fuseahuref(as, ir->op1, &ofs, rset_clear(gpr, type), A64I_LDRx);
  /* Always do the type check, even if the load result is unused. */
  asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE);
  if (irt_type(ir->t) >= IRT_NUM) {
    lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
	       "bad load type %d", irt_type(ir->t));
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
	    ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp);
  } else if (irt_isaddr(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(ir->t)), type);
    emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
  } else if (irt_isnil(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
  } else {
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
	    ra_allock(as, (irt_toitype(ir->t) << 15) | 0x7fff, gpr), tmp);
  }
  /* The address may have been fused to base+index form (FUSE_REG). */
  if (ofs & FUSE_REG)
    emit_dnm(as, (A64I_LDRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
  else
    emit_lso(as, A64I_LDRx, tmp, idx, ofs);
}
1083
/* Array/hash/upvalue store (IR_ASTORE/IR_HSTORE/IR_USTORE).
** Skipped for sunk stores (RID_SINK). Numbers are stored directly from
** an FP register; other values have their 64 bit tagged representation
** built in RID_TMP (or a constant reg for primitives) and stored.
*/
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, tmp = RID_TMP, type = RID_NONE;
    int32_t ofs = 0;
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, A64I_STRd);
      if (ofs & FUSE_REG)
	emit_dnm(as, (A64I_STRd^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, (src & 31), idx, (ofs &31));
      else
	emit_lso(as, A64I_STRd, (src & 31), idx, ofs);
    } else {
      if (!irt_ispri(ir->t)) {
	src = ra_alloc1(as, ir->op2, allow);
	rset_clear(allow, src);
	if (irt_isinteger(ir->t))
	  type = ra_allock(as, (uint64_t)(int32_t)LJ_TISNUM << 47, allow);
	else
	  type = ra_allock(as, irt_toitype(ir->t), allow);
      } else {
	/* Primitive: the whole tagged value is a constant. */
	tmp = type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t)<<47), allow);
      }
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type),
			   A64I_STRx);
      if (ofs & FUSE_REG)
	emit_dnm(as, (A64I_STRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
      else
	emit_lso(as, A64I_STRx, tmp, idx, ofs);
      if (ra_hasreg(src)) {
	if (irt_isinteger(ir->t)) {
	  /* RID_TMP = (tag<<47) + zero-extended integer. */
	  emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), tmp, type, src);
	} else {
	  /* RID_TMP = pointer + (tag<<47). */
	  emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, src, type);
	}
      }
    }
  }
}
1124
/* Stack slot load (IR_SLOAD).
** Loads slot op1 at offset 8*(op1-2) from the base register (REF_BASE).
** Handles the optional int<->num conversion (IRSLOAD_CONVERT) and the
** optional type check (IRSLOAD_TYPECHECK); the type check is performed
** even if the load result is unused.
*/
static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-2);
  IRType1 t = ir->t;
  Reg dest = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
	     "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
	     "inconsistent SLOAD variant");
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    /* Guarded num->int conversion: delegate to the checked converter. */
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR);
    lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t),
	       "bad SLOAD type %d", irt_type(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow);
    base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest));
    if (irt_isaddr(t)) {
      /* Untag the GC reference. */
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
	emit_dn(as, A64I_FCVT_S32_F64, dest, (tmp & 31));
	/* If value is already loaded for type check, move it to FPR. */
	if ((ir->op2 & IRSLOAD_TYPECHECK))
	  emit_dn(as, A64I_FMOV_D_R, (tmp & 31), dest);
	else
	  dest = tmp;
	t.irt = IRT_NUM;  /* Check for original type. */
      } else {
	emit_dn(as, A64I_FCVT_F64_S32, (dest & 31), tmp);
	dest = tmp;
	t.irt = IRT_INT;  /* Check for original type. */
      }
    } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
      emit_dm(as, A64I_MOVw, dest, dest);  /* Zero-extend integer result. */
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    Reg tmp;
    if (ra_hasreg(dest) && rset_test(RSET_GPR, dest)) {
      tmp = dest;
    } else {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
    if (irt_isnum(t) && !(ir->op2 & IRSLOAD_CONVERT))
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE);
    if (irt_type(t) >= IRT_NUM) {
      lj_assertA(irt_isinteger(t) || irt_isnum(t),
		 "bad SLOAD type %d", irt_type(t));
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
	      ra_allock(as, LJ_TISNUM << 15, allow), tmp);
    } else if (irt_isnil(t)) {
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
    } else if (irt_ispri(t)) {
      emit_nm(as, A64I_CMPx,
	      ra_allock(as, ~((int64_t)~irt_toitype(t) << 47) , allow), tmp);
    } else {
      Reg type = ra_scratch(as, allow);
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(t)), type);
      emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
    }
    emit_lso(as, A64I_LDRx, tmp, base, ofs);
    return;
  }
  if (ra_hasreg(dest)) {
    emit_lso(as, irt_isnum(t) ? A64I_LDRd :
	     (irt_isint(t) ? A64I_LDRw : A64I_LDRx), (dest & 31), base,
	     ofs ^ ((LJ_BE && irt_isint(t) ? 4 : 0)));
  }
}
1207
1208/* -- Allocations --------------------------------------------------------- */
1209
#if LJ_HASFFI
/* Allocate (and for IR_CNEWI initialize) a cdata object.
** Plain CNEW/CNEWI allocate via lj_mem_newgco(); variable-size or
** aligned cdata go through lj_cdata_newv() instead. The gct and ctypeid
** header fields are initialized here after the allocation call.
*/
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
	     "bad CNEW/CNEWI operands");

  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCcdata * */
  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    Reg r = ra_alloc1(as, ir->op2, allow);
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
    emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    /* Small ctype ids fit a MOVZ immediate; larger ones need a constant. */
    Reg r = (id < 65536) ? RID_X1 : ra_allock(as, id, allow);
    emit_lso(as, A64I_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lso(as, A64I_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, A64I_MOVZw | A64F_U16(~LJ_TCDATA), RID_TMP);
    if (id < 65536) emit_d(as, A64I_MOVZw | A64F_U16(id), RID_X1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
	       ra_releasetmp(as, ASMREF_TMP1));
}
#endif
1257
1258/* -- Write barriers ------------------------------------------------------ */
1259
/* Table write barrier (IR_TBAR).
** Runtime order (code is emitted backwards): load tab->marked, test the
** black bit and skip to l_end if clear; otherwise clear the black bit,
** link the table into g->gc.grayagain (old head goes to tab->gclist)
** and store the updated marked byte.
*/
static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i64ptr(J2G(as->J)),
		     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, A64I_STRx, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, A64I_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, A64I_STRx, tab, gr,
	   (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, A64I_ANDw^emit_isk13(~LJ_GC_BLACK, 0), mark, mark);
  emit_lso(as, A64I_LDRx, link, gr,
	   (int32_t)offsetof(global_State, gc.grayagain));
  emit_cond_branch(as, CC_EQ, l_end);  /* Not black: nothing to do. */
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), mark);
  emit_lso(as, A64I_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}
1279
/* Object write barrier for upvalue stores (IR_OBAR).
** Runtime order (code is emitted backwards): load the marked bytes of
** the stored value and of the upvalue object; skip to l_end unless the
** value is white AND the object is black; otherwise call
** lj_gc_barrieruv(g, tv).
*/
static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  RegSet allow = RSET_GPR;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);  /* The call clobbers all scratch regs. */
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  ra_allockreg(as, i64ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1) );
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(allow, obj));
  emit_cond_branch(as, CC_EQ, l_end);  /* Object not black: skip. */
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), tmp);
  emit_cond_branch(as, CC_EQ, l_end);  /* Value not white: skip. */
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_WHITES, 0), RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  /* obj points at uv->tv (UREFC); adjust back to the marked byte. */
  emit_lso(as, A64I_LDRB, tmp, obj,
	   (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, A64I_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}
1306
1307/* -- Arithmetic and logic operations ------------------------------------- */
1308
1309static void asm_fparith(ASMState *as, IRIns *ir, A64Ins ai)
1310{
1311 Reg dest = ra_dest(as, ir, RSET_FPR);
1312 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1313 right = (left >> 8); left &= 255;
1314 emit_dnm(as, ai, (dest & 31), (left & 31), (right & 31));
1315}
1316
1317static void asm_fpunary(ASMState *as, IRIns *ir, A64Ins ai)
1318{
1319 Reg dest = ra_dest(as, ir, RSET_FPR);
1320 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
1321 emit_dn(as, ai, (dest & 31), (left & 31));
1322}
1323
1324static void asm_fpmath(ASMState *as, IRIns *ir)
1325{
1326 IRFPMathOp fpm = (IRFPMathOp)ir->op2;
1327 if (fpm == IRFPM_SQRT) {
1328 asm_fpunary(as, ir, A64I_FSQRTd);
1329 } else if (fpm <= IRFPM_TRUNC) {
1330 asm_fpunary(as, ir, fpm == IRFPM_FLOOR ? A64I_FRINTMd :
1331 fpm == IRFPM_CEIL ? A64I_FRINTPd : A64I_FRINTZd);
1332 } else {
1333 asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
1334 }
1335}
1336
1337static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
1338{
1339 IRIns *ir;
1340 if (irref_isk(rref))
1341 return 0; /* Don't swap constants to the left. */
1342 if (irref_isk(lref))
1343 return 1; /* But swap constants to the right. */
1344 ir = IR(rref);
1345 if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
1346 (ir->o == IR_ADD && ir->op1 == ir->op2) ||
1347 (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
1348 return 0; /* Don't swap fusable operands to the left. */
1349 ir = IR(lref);
1350 if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
1351 (ir->o == IR_ADD && ir->op1 == ir->op2) ||
1352 (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
1353 return 1; /* But swap fusable operands to the right. */
1354 return 0; /* Otherwise don't swap. */
1355}
1356
/* Integer ALU operation with right-operand fusion.
** Commutative ops may swap operands so a constant/fusable operand ends
** up on the right (asm_swapops); SUB is excluded since it's not
** commutative. Guarded variants (IR_ADDOV etc.) use the flag-setting
** S-form and exit on overflow (CC_VS).
*/
static void asm_intop(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if ((ai & ~A64I_S) != A64I_SUBw && asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= A64I_S;
  }
  emit_dn(as, ai^m, dest, left);
}
1374
/* Integer op that can absorb a directly preceding compare with zero:
** if the last emitted instruction is that cmp, drop it and use the
** flag-setting S-form of the op instead.
*/
static void asm_intop_s(ASMState *as, IRIns *ir, A64Ins ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;  /* Code is emitted backwards: ++ removes the last insn. */
    ai |= A64I_S;
  }
  asm_intop(as, ir, ai);
}
1384
1385static void asm_intneg(ASMState *as, IRIns *ir)
1386{
1387 Reg dest = ra_dest(as, ir, RSET_GPR);
1388 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1389 emit_dm(as, irt_is64(ir->t) ? A64I_NEGx : A64I_NEGw, dest, left);
1390}
1391
/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
/* Integer multiply. For IR_MULOV: do a 32x32->64 SMULL, compare the
** high word against the sign-extension of the low word (an overflow
** makes them differ, guard CC_NE), then zero-extend the 32 bit result.
*/
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    asm_guardcc(as, CC_NE);
    emit_dm(as, A64I_MOVw, dest, dest);  /* Zero-extend. */
    emit_nm(as, A64I_CMPw | A64F_SH(A64SH_ASR, 31), RID_TMP, dest);
    emit_dn(as, A64I_ASRx | A64F_IMMR(32), RID_TMP, dest);
    emit_dnm(as, A64I_SMULL, dest, right, left);
  } else {
    emit_dnm(as, irt_is64(ir->t) ? A64I_MULx : A64I_MULw, dest, left, right);
  }
}
1408
1409static void asm_add(ASMState *as, IRIns *ir)
1410{
1411 if (irt_isnum(ir->t)) {
1412 if (!asm_fusemadd(as, ir, A64I_FMADDd, A64I_FMADDd))
1413 asm_fparith(as, ir, A64I_FADDd);
1414 return;
1415 }
1416 asm_intop_s(as, ir, A64I_ADDw);
1417}
1418
1419static void asm_sub(ASMState *as, IRIns *ir)
1420{
1421 if (irt_isnum(ir->t)) {
1422 if (!asm_fusemadd(as, ir, A64I_FNMSUBd, A64I_FMSUBd))
1423 asm_fparith(as, ir, A64I_FSUBd);
1424 return;
1425 }
1426 asm_intop_s(as, ir, A64I_SUBw);
1427}
1428
1429static void asm_mul(ASMState *as, IRIns *ir)
1430{
1431 if (irt_isnum(ir->t)) {
1432 asm_fparith(as, ir, A64I_FMULd);
1433 return;
1434 }
1435 asm_intmul(as, ir);
1436}
1437
/* Overflow-checked variants share the plain entry points; the guard type
** of the IR instruction selects the overflow check inside asm_intop /
** asm_intmul. */
#define asm_addov(as, ir) asm_add(as, ir)
#define asm_subov(as, ir) asm_sub(as, ir)
#define asm_mulov(as, ir) asm_mul(as, ir)

/* FP division and absolute value map directly to single instructions. */
#define asm_fpdiv(as, ir) asm_fparith(as, ir, A64I_FDIVd)
#define asm_abs(as, ir) asm_fpunary(as, ir, A64I_FABS)
1444
1445static void asm_neg(ASMState *as, IRIns *ir)
1446{
1447 if (irt_isnum(ir->t)) {
1448 asm_fpunary(as, ir, A64I_FNEGd);
1449 return;
1450 }
1451 asm_intneg(as, ir);
1452}
1453
/* Bitwise AND (IR_BAND).
** First tries to fuse AND-of-shift patterns; otherwise may absorb a
** directly preceding cmp r, #0 by switching to the flag-setting ANDS.
*/
static void asm_band(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_ANDw;
  if (asm_fuseandshift(as, ir))
    return;
  if (as->flagmcp == as->mcp) {
    /* Try to drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;  /* Code is emitted backwards: ++ removes the last insn. */
    ai = A64I_ANDSw;
  }
  asm_intop(as, ir, ai);
}
1467
/* OR/XOR with optional fusion of a negated (BNOT) operand into ORN/EON. */
static void asm_borbxor(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irl = IR(lref), *irr = IR(rref);
  /* The negated operand must be a fuseable BNOT and the other operand must
  ** not be a constant (ORN/EON take a register, not an immediate).
  */
  if ((canfuse(as, irl) && irl->o == IR_BNOT && !irref_isk(rref)) ||
      (canfuse(as, irr) && irr->o == IR_BNOT && !irref_isk(lref))) {
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m;
    if (irl->o == IR_BNOT) {  /* Canonicalize: negated operand on the right. */
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    left = ra_alloc1(as, lref, RSET_GPR);
    ai |= A64I_ON;  /* ORR -> ORN, EOR -> EON. */
    if (irt_is64(ir->t)) ai |= A64I_X;
    /* Fuse the BNOT's own operand; the negation is folded into the op. */
    m = asm_fuseopm(as, ai, IR(rref)->op1, rset_exclude(RSET_GPR, left));
    emit_dn(as, ai^m, dest, left);
  } else {
    asm_intop(as, ir, ai);  /* Plain ORR/EOR. */
  }
}
1488
1489static void asm_bor(ASMState *as, IRIns *ir)
1490{
1491 if (asm_fuseorshift(as, ir))
1492 return;
1493 asm_borbxor(as, ir, A64I_ORRw);
1494}
1495
1496#define asm_bxor(as, ir) asm_borbxor(as, ir, A64I_EORw)
1497
/* Bitwise NOT via MVN with an optionally fused shifted register operand. */
static void asm_bnot(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_MVNw;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;  /* Widen to the 64 bit variant. */
  emit_d(as, ai^m, dest);
}
1506
1507static void asm_bswap(ASMState *as, IRIns *ir)
1508{
1509 Reg dest = ra_dest(as, ir, RSET_GPR);
1510 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1511 emit_dn(as, irt_is64(ir->t) ? A64I_REVx : A64I_REVw, dest, left);
1512}
1513
1514static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh)
1515{
1516 int32_t shmask = irt_is64(ir->t) ? 63 : 31;
1517 if (irref_isk(ir->op2)) { /* Constant shifts. */
1518 Reg left, dest = ra_dest(as, ir, RSET_GPR);
1519 int32_t shift = (IR(ir->op2)->i & shmask);
1520 IRIns *irl = IR(ir->op1);
1521 if (shmask == 63) ai += A64I_UBFMx - A64I_UBFMw;
1522
1523 /* Fuse BSHL + BSHR/BSAR into UBFM/SBFM aka UBFX/SBFX/UBFIZ/SBFIZ. */
1524 if ((sh == A64SH_LSR || sh == A64SH_ASR) && canfuse(as, irl)) {
1525 if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
1526 int32_t shift2 = (IR(irl->op2)->i & shmask);
1527 shift = ((shift - shift2) & shmask);
1528 shmask -= shift2;
1529 ir = irl;
1530 }
1531 }
1532
1533 left = ra_alloc1(as, ir->op1, RSET_GPR);
1534 switch (sh) {
1535 case A64SH_LSL:
1536 emit_dn(as, ai | A64F_IMMS(shmask-shift) |
1537 A64F_IMMR((shmask-shift+1)&shmask), dest, left);
1538 break;
1539 case A64SH_LSR: case A64SH_ASR:
1540 emit_dn(as, ai | A64F_IMMS(shmask) | A64F_IMMR(shift), dest, left);
1541 break;
1542 case A64SH_ROR:
1543 emit_dnm(as, ai | A64F_IMMS(shift), dest, left, left);
1544 break;
1545 }
1546 } else { /* Variable-length shifts. */
1547 Reg dest = ra_dest(as, ir, RSET_GPR);
1548 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1549 Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1550 emit_dnm(as, (shmask == 63 ? A64I_SHRx : A64I_SHRw) | A64F_BSH(sh), dest, left, right);
1551 }
1552}
1553
/* Shifts: UBFM/SBFM encode LSL/LSR/ASR immediates, EXTR encodes ROR. */
#define asm_bshl(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSL)
#define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR)
#define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR)
#define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR)
/* BROL must have been canonicalized away before reaching the backend. */
#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
1559
1560static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc)
1561{
1562 Reg dest = ra_dest(as, ir, RSET_GPR);
1563 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1564 Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1565 emit_dnm(as, A64I_CSELw|A64F_CC(cc), dest, left, right);
1566 emit_nm(as, A64I_CMPw, left, right);
1567}
1568
/* FP min/max: fcmp, then select via fcsel. */
static void asm_fpmin_max(ASMState *as, IRIns *ir, A64CC fcc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 31);
  /* ra_alloc2 packs the second register into bits 8..15 of the result. */
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 31); left &= 31;
  /* Note the swapped fcsel operands (right, left) relative to the fcmp;
  ** together with fcc this selects the intended min/max result.
  */
  emit_dnm(as, A64I_FCSELd | A64F_CC(fcc), dest, right, left);
  emit_nm(as, A64I_FCMPd, left, right);
}
1577
1578static void asm_min_max(ASMState *as, IRIns *ir, A64CC cc, A64CC fcc)
1579{
1580 if (irt_isnum(ir->t))
1581 asm_fpmin_max(as, ir, fcc);
1582 else
1583 asm_intmin_max(as, ir, cc);
1584}
1585
/* min/max: integer condition code, then FP condition code (asm_min_max). */
#define asm_min(as, ir) asm_min_max(as, ir, CC_LT, CC_PL)
#define asm_max(as, ir) asm_min_max(as, ir, CC_GT, CC_LE)
1588
1589/* -- Comparisons --------------------------------------------------------- */
1590
/* Map of comparisons to flags. ORDER IR. */
/* Low nibble = inverted integer condition, high nibble = inverted FP
** condition; the guard branches to the exit when the comparison fails.
** The "swp" column marks ops whose FP operands asm_fpcomp swaps.
*/
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};
1606
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  A64Ins ai;
  /* Derive the operand-swap flag from the opcode bits; this matches the
  ** "swp" column of asm_compmap (GE, GT, ULT, ULE).
  */
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    /* Compare against +0.0 with the single-operand fcmp form. */
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 31);
    right = 0;
    ai = A64I_FCMPZd;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);  /* Second reg packed in bits 8..15. */
    if (swp) {
      right = (left & 31); left = ((left >> 8) & 31);
    } else {
      right = ((left >> 8) & 31); left &= 31;
    }
    ai = A64I_FCMPd;
  }
  /* Guard on the inverted FP condition (high nibble of the map entry). */
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_nm(as, ai, left, right);
}
1629
/* Integer comparisons.
** Tries several fusions before falling back to a plain cmp + guard:
** single-bit BAND tests become tbz/tbnz, BAND against zero becomes tst,
** and EQ/NE against zero becomes cbz/cbnz.
*/
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  A64CC oldcc, cc = (asm_compmap[ir->o] & 15);  /* Inverted int condition. */
  A64Ins ai = irt_is64(ir->t) ? A64I_CMPx : A64I_CMPw;
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;  /* Comparing the immediately preceding ins against 0? */
  lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
	     irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
	     "bad comparison data type %d", irt_type(ir->t));
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  oldcc = cc;
  if (irref_isk(rref) && get_k64val(as, rref) == 0) {  /* Comparison with 0. */
    IRIns *irl = IR(lref);
    if (cc == CC_GE) cc = CC_PL;
    else if (cc == CC_LT) cc = CC_MI;
    else if (cc > CC_NE) goto nocombine;  /* Other conds don't work with tst. */
    cmpprev0 = (irl+1 == ir);
    /* Combine and-cmp-bcc into tbz/tbnz or and-cmp into tst. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
	Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
	uint64_t k = get_k64val(as, brref);
	/* Power-of-two mask with EQ/NE: use a test-bit branch. */
	if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) {
	  asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ,
		       ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k));
	  return;
	}
	m2 = emit_isk13(k, irt_is64(irl->t));  /* 0 if not a K13 immediate. */
      }
      bleft = ra_alloc1(as, blref, RSET_GPR);
      ai = (irt_is64(irl->t) ? A64I_TSTx : A64I_TSTw);
      if (!m2)  /* Mask not encodable as immediate: fuse register operand. */
	m2 = asm_fuseopm(as, ai, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ai^m2, bleft);
      return;
    }
    if (cc == CC_EQ || cc == CC_NE) {
      /* Combine cmp-bcc into cbz/cbnz. */
      ai = cc == CC_EQ ? A64I_CBZ : A64I_CBNZ;
      if (irt_is64(ir->t)) ai |= A64I_X;
      asm_guardcnb(as, ai, ra_alloc1(as, lref, RSET_GPR));
      return;
    }
  }
nocombine:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ai^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (oldcc <= CC_NE || oldcc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}
1696
1697static void asm_comp(ASMState *as, IRIns *ir)
1698{
1699 if (irt_isnum(ir->t))
1700 asm_fpcomp(as, ir);
1701 else
1702 asm_intcomp(as, ir);
1703}
1704
1705#define asm_equal(as, ir) asm_comp(as, ir)
1706
1707/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
1708
/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP only occurs when 64 bit ops are split on 32 bit targets. */
  UNUSED(as); UNUSED(ir);
  lj_assertA(0, "unexpected HIOP"); /* Unused on 64 bit. */
}
1715
1716/* -- Profiling ----------------------------------------------------------- */
1717
/* Profiling check: exit the trace when the HOOK_PROFILE bit is set.
** Emitted in reverse: load hookmask byte, tst #HOOK_PROFILE, b.ne exit.
*/
static void asm_prof(ASMState *as, IRIns *ir)
{
  uint32_t k = emit_isk13(HOOK_PROFILE, 0);  /* Encode mask as K13 imm. */
  lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13");
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, A64I_TSTw^k, RID_TMP);
  emit_lsptr(as, A64I_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}
1727
1728/* -- Stack handling ------------------------------------------------------ */
1729
/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;  /* Register holding BASE during the check. */
  uint32_t k;
  if (irp) {  /* Side trace: BASE comes from the parent trace. */
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lj_assertA(ra_hasreg(pbase), "base reg lost");
    } else if (allow) {
      pbase = rset_pickbot(allow);  /* Use any free register. */
    } else {
      /* No free register: borrow RID_RET, saved/restored in sp slot 0. */
      pbase = RID_RET;
      emit_lso(as, A64I_LDRx, RID_RET, RID_SP, 0);  /* Restore temp register. */
    }
  } else {
    pbase = RID_BASE;
  }
  /* Emitted in reverse: load L->maxstack, sub base, cmp 8*topslot, exit
  ** via the stub when maxstack - base <= 8*topslot (unsigned, CC_LS).
  */
  emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno));
  k = emit_isk12((8*topslot));
  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
  emit_n(as, A64I_CMPx^k, RID_TMP);
  emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase);
  emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP,
	   (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    if (ra_hasspill(irp->s))
      emit_lso(as, A64I_LDRx, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, A64I_LDRx, RID_TMP, RID_GL, glofs(as, &J2G(as->J)->cur_L));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, A64I_STRx, RID_RET, RID_SP, 0);  /* Save temp register. */
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}
1766
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#ifdef LUA_USE_ASSERT
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);  /* Slot offset from BASE. */
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;  /* Slot marked as not needing a restore. */
    if (irt_isnum(ir->t)) {
      /* Numbers are stored directly from an FP register. */
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_lso(as, A64I_STRd, (src & 31), RID_BASE, ofs);
    } else {
      /* Everything else is stored as a 64 bit tagged value. */
      asm_tvstore64(as, RID_BASE, ofs, ref);
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}
1794
1795/* -- GC handling --------------------------------------------------------- */
1796
/* Check GC threshold and do one or more GC steps.
** Emitted in reverse; execution order is: load g, load gc.total and
** gc.threshold, skip the call if total < threshold, else call
** lj_gc_step_jit(g, steps) and exit the trace if it returns non-zero.
*/
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);  /* The C call clobbers scratch regs. */
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcnb(as, A64I_CBNZ, RID_RET); /* Assumes asm_snap_prep() is done. */
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_cond_branch(as, CC_LS, l_end);
  emit_nm(as, A64I_CMPx, RID_TMP, tmp2);
  emit_lso(as, A64I_LDRx, tmp2, tmp1,
	   (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, A64I_LDRx, RID_TMP, tmp1,
	   (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i64ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}
1825
1826/* -- Loop handling ------------------------------------------------------- */
1827
/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* tbz/tbnz (0x36......) has a 14 bit offset field, others 19 bit. */
    uint32_t mask = (p[-2] & 0x7e000000) == 0x36000000 ? 0x3fffu : 0x7ffffu;
    ptrdiff_t delta = target - (p - 2);
    /* asm_guard* already inverted the bcc/tnb/cnb and patched the final b. */
    p[-2] |= ((uint32_t)delta & mask) << 5;
  } else {
    /* Patch the final unconditional branch to the loop start. */
    ptrdiff_t delta = target - (p - 1);
    p[-1] = A64I_B | A64F_S26(delta);
  }
}
1843
1844/* -- Head of trace ------------------------------------------------------- */
1845
/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {  /* Only if the trace actually references L. */
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}
1856
/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  /* Spill BASE if its register was modified on-trace or is marked dirty. */
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);  /* Force BASE into its dedicated register. */
}
1867
/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  /* Spill BASE if its register was modified on-trace or is marked dirty. */
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    /* Parent's BASE is spilled: any register will do. */
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    /* Reuse the parent's BASE register. */
    Reg r = irp->r;
    lj_assertA(ra_hasreg(r), "base reg lost");
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));  /* Free parent's reg first. */
    ra_destreg(as, ir, r);
  }
  return allow;  /* Registers still usable by the caller. */
}
1888
1889/* -- Tail of trace ------------------------------------------------------- */
1890
/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  /* Undo the sp adjustment in BC_JLOOP when exiting to the interpreter. */
  int32_t spadj = as->T->spadjust + (lnk ? 0 : sps_scale(SPS_FIXED));
  if (spadj == 0) {
    /* No sp adjustment needed: shrink the tail by one instruction. */
    *--p = A64I_LE(A64I_NOP);
    as->mctop = p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(spadj);
    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
    p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP);
  }
  /* Patch exit branch: to the linked trace, or back to the interpreter. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = A64I_B | A64F_S26((target-p)+1);
}
1911
/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    /* Looping trace: the branch may later be inverted (see asm_loop_fixup). */
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}
1924
1925/* -- Trace setup --------------------------------------------------------- */
1926
1927/* Ensure there are enough stack slots for call arguments. */
1928static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
1929{
1930 IRRef args[CCI_NARGS_MAX*2];
1931 uint32_t i, nargs = CCI_XNARGS(ci);
1932 int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
1933 asm_collectargs(as, ir, ci, args);
1934 for (i = 0; i < nargs; i++) {
1935 if (args[i] && irt_isfp(IR(args[i])->t)) {
1936 if (nfpr > 0) nfpr--; else nslots += 2;
1937 } else {
1938 if (ngpr > 0) ngpr--; else nslots += 2;
1939 }
1940 }
1941 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
1942 as->evenspill = nslots;
1943 return REGSP_HINT(RID_RET);
1944}
1945
/* Target-specific setup before assembling a trace. */
static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}
1951
1952#if LJ_BE
1953/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
1954static void asm_mcode_fixup(MCode *mcode, MSize size)
1955{
1956 MCode *pe = (MCode *)((char *)mcode + size);
1957 while (mcode < pe) {
1958 MCode ins = *mcode;
1959 *mcode++ = lj_bswap(ins);
1960 }
1961}
1962#define LJ_TARGET_MCODE_FIXUP 1
1963#endif
1964
1965/* -- Trace patching ------------------------------------------------------ */
1966
/* Patch exit jumps of existing machine code to a new target.
** Scans the trace for branches that point at the exit stub and redirects
** each one directly to the new target when it is in range; the long-range
** branch inside the exit stub itself is always patched as a catch-all.
*/
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL;  /* First patched instruction (for the final sync). */
  MCode *mcarea = lj_mcode_patch(J, p, 0);  /* Open mcode area for writing. */
  MCode *px = exitstub_trace_addr(T, exitno);
  /* Note: this assumes a trace exit is only ever patched once. */
  for (; p < pe; p++) {
    /* Look for exitstub branch, replace with branch to target. */
    ptrdiff_t delta = target - p;
    MCode ins = A64I_LE(*p);
    if ((ins & 0xff000000u) == 0x54000000u &&
	((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch bcc, if within range. */
      if (A64F_S_OK(delta, 19)) {
	*p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
	if (!cstart) cstart = p;
      }
    } else if ((ins & 0xfc000000u) == 0x14000000u &&
	       ((ins ^ (px-p)) & 0x03ffffffu) == 0) {
      /* Patch b. */
      lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
      *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta));
      if (!cstart) cstart = p;
    } else if ((ins & 0x7e000000u) == 0x34000000u &&
	       ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch cbz/cbnz, if within range. */
      if (A64F_S_OK(delta, 19)) {
	*p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
	if (!cstart) cstart = p;
      }
    } else if ((ins & 0x7e000000u) == 0x36000000u &&
	       ((ins ^ ((px-p)<<5)) & 0x0007ffe0u) == 0) {
      /* Patch tbz/tbnz, if within range (14 bit offset only). */
      if (A64F_S_OK(delta, 14)) {
	*p = A64I_LE((ins & 0xfff8001fu) | A64F_S14(delta));
	if (!cstart) cstart = p;
      }
    }
  }
  { /* Always patch long-range branch in exit stub itself. */
    ptrdiff_t delta = target - px;
    lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
    *px = A64I_B | A64F_S26(delta);
    if (!cstart) cstart = px;
  }
  lj_mcode_sync(cstart, px+1);  /* Sync the patched code range. */
  lj_mcode_patch(J, mcarea, 1);  /* Close mcode area again. */
}
2018
diff --git a/src/lj_asm_mips.h b/src/lj_asm_mips.h
index 190a55eb..513bd5ca 100644
--- a/src/lj_asm_mips.h
+++ b/src/lj_asm_mips.h
@@ -23,7 +23,7 @@ static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
23{ 23{
24 Reg r = IR(ref)->r; 24 Reg r = IR(ref)->r;
25 if (ra_noreg(r)) { 25 if (ra_noreg(r)) {
26 if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0) 26 if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0)
27 return RID_ZERO; 27 return RID_ZERO;
28 r = ra_allocref(as, ref, allow); 28 r = ra_allocref(as, ref, allow);
29 } else { 29 } else {
@@ -66,10 +66,10 @@ static void asm_sparejump_setup(ASMState *as)
66{ 66{
67 MCode *mxp = as->mcbot; 67 MCode *mxp = as->mcbot;
68 if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) { 68 if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) {
69 lua_assert(MIPSI_NOP == 0); 69 lj_assertA(MIPSI_NOP == 0, "bad NOP");
70 memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode)); 70 memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
71 mxp += MIPS_SPAREJUMP*2; 71 mxp += MIPS_SPAREJUMP*2;
72 lua_assert(mxp < as->mctop); 72 lj_assertA(mxp < as->mctop, "MIPS_SPAREJUMP too big");
73 lj_mcode_sync(as->mcbot, mxp); 73 lj_mcode_sync(as->mcbot, mxp);
74 lj_mcode_commitbot(as->J, mxp); 74 lj_mcode_commitbot(as->J, mxp);
75 as->mcbot = mxp; 75 as->mcbot = mxp;
@@ -84,7 +84,8 @@ static void asm_exitstub_setup(ASMState *as)
84 /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ 84 /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
85 *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; 85 *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
86 *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); 86 *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
87 lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0); 87 lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0,
88 "branch target out of range");
88 *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; 89 *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
89 as->mctop = mxp; 90 as->mctop = mxp;
90} 91}
@@ -101,7 +102,12 @@ static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
101 as->invmcp = NULL; 102 as->invmcp = NULL;
102 as->loopinv = 1; 103 as->loopinv = 1;
103 as->mcp = p+1; 104 as->mcp = p+1;
105#if !LJ_TARGET_MIPSR6
104 mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */ 106 mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
107#else
108 mi = mi ^ ((mi>>28) == 1 ? 0x04000000u :
109 (mi>>28) == 4 ? 0x00800000u : 0x00010000u); /* Invert cond. */
110#endif
105 target = p; /* Patch target later in asm_loop_fixup. */ 111 target = p; /* Patch target later in asm_loop_fixup. */
106 } 112 }
107 emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); 113 emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
@@ -165,9 +171,9 @@ static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
165 } else if (ir->o == IR_UREFC) { 171 } else if (ir->o == IR_UREFC) {
166 if (irref_isk(ir->op1)) { 172 if (irref_isk(ir->op1)) {
167 GCfunc *fn = ir_kfunc(IR(ir->op1)); 173 GCfunc *fn = ir_kfunc(IR(ir->op1));
168 int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv); 174 intptr_t ofs = (intptr_t)&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv;
169 int32_t jgl = (intptr_t)J2G(as->J); 175 intptr_t jgl = (intptr_t)J2G(as->J);
170 if ((uint32_t)(ofs-jgl) < 65536) { 176 if ((uintptr_t)(ofs-jgl) < 65536) {
171 *ofsp = ofs-jgl-32768; 177 *ofsp = ofs-jgl-32768;
172 return RID_JGL; 178 return RID_JGL;
173 } else { 179 } else {
@@ -189,20 +195,21 @@ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
189 Reg base; 195 Reg base;
190 if (ra_noreg(ir->r) && canfuse(as, ir)) { 196 if (ra_noreg(ir->r) && canfuse(as, ir)) {
191 if (ir->o == IR_ADD) { 197 if (ir->o == IR_ADD) {
192 int32_t ofs2; 198 intptr_t ofs2;
193 if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) { 199 if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2),
200 checki16(ofs2))) {
194 ref = ir->op1; 201 ref = ir->op1;
195 ofs = ofs2; 202 ofs = (int32_t)ofs2;
196 } 203 }
197 } else if (ir->o == IR_STRREF) { 204 } else if (ir->o == IR_STRREF) {
198 int32_t ofs2 = 65536; 205 intptr_t ofs2 = 65536;
199 lua_assert(ofs == 0); 206 lj_assertA(ofs == 0, "bad usage");
200 ofs = (int32_t)sizeof(GCstr); 207 ofs = (int32_t)sizeof(GCstr);
201 if (irref_isk(ir->op2)) { 208 if (irref_isk(ir->op2)) {
202 ofs2 = ofs + IR(ir->op2)->i; 209 ofs2 = ofs + get_kval(as, ir->op2);
203 ref = ir->op1; 210 ref = ir->op1;
204 } else if (irref_isk(ir->op1)) { 211 } else if (irref_isk(ir->op1)) {
205 ofs2 = ofs + IR(ir->op1)->i; 212 ofs2 = ofs + get_kval(as, ir->op1);
206 ref = ir->op2; 213 ref = ir->op2;
207 } 214 }
208 if (!checki16(ofs2)) { 215 if (!checki16(ofs2)) {
@@ -210,7 +217,7 @@ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
210 Reg right, left = ra_alloc2(as, ir, allow); 217 Reg right, left = ra_alloc2(as, ir, allow);
211 right = (left >> 8); left &= 255; 218 right = (left >> 8); left &= 255;
212 emit_hsi(as, mi, rt, RID_TMP, ofs); 219 emit_hsi(as, mi, rt, RID_TMP, ofs);
213 emit_dst(as, MIPSI_ADDU, RID_TMP, left, right); 220 emit_dst(as, MIPSI_AADDU, RID_TMP, left, right);
214 return; 221 return;
215 } 222 }
216 ofs = ofs2; 223 ofs = ofs2;
@@ -225,29 +232,43 @@ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
225/* Generate a call to a C function. */ 232/* Generate a call to a C function. */
226static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) 233static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
227{ 234{
228 uint32_t n, nargs = CCI_NARGS(ci); 235 uint32_t n, nargs = CCI_XNARGS(ci);
229 int32_t ofs = 16; 236 int32_t ofs = LJ_32 ? 16 : 0;
237#if LJ_SOFTFP
238 Reg gpr = REGARG_FIRSTGPR;
239#else
230 Reg gpr, fpr = REGARG_FIRSTFPR; 240 Reg gpr, fpr = REGARG_FIRSTFPR;
241#endif
231 if ((void *)ci->func) 242 if ((void *)ci->func)
232 emit_call(as, (void *)ci->func); 243 emit_call(as, (void *)ci->func, 1);
244#if !LJ_SOFTFP
233 for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) 245 for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
234 as->cost[gpr] = REGCOST(~0u, ASMREF_L); 246 as->cost[gpr] = REGCOST(~0u, ASMREF_L);
235 gpr = REGARG_FIRSTGPR; 247 gpr = REGARG_FIRSTGPR;
248#endif
236 for (n = 0; n < nargs; n++) { /* Setup args. */ 249 for (n = 0; n < nargs; n++) { /* Setup args. */
237 IRRef ref = args[n]; 250 IRRef ref = args[n];
238 if (ref) { 251 if (ref) {
239 IRIns *ir = IR(ref); 252 IRIns *ir = IR(ref);
253#if !LJ_SOFTFP
240 if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && 254 if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
241 !(ci->flags & CCI_VARARG)) { 255 !(ci->flags & CCI_VARARG)) {
242 lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ 256 lj_assertA(rset_test(as->freeset, fpr),
257 "reg %d not free", fpr); /* Already evicted. */
243 ra_leftov(as, fpr, ref); 258 ra_leftov(as, fpr, ref);
244 fpr += 2; 259 fpr += LJ_32 ? 2 : 1;
245 gpr += irt_isnum(ir->t) ? 2 : 1; 260 gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1;
246 } else { 261 } else
262#endif
263 {
264#if LJ_32 && !LJ_SOFTFP
247 fpr = REGARG_LASTFPR+1; 265 fpr = REGARG_LASTFPR+1;
248 if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1; 266#endif
267 if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
249 if (gpr <= REGARG_LASTGPR) { 268 if (gpr <= REGARG_LASTGPR) {
250 lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ 269 lj_assertA(rset_test(as->freeset, gpr),
270 "reg %d not free", gpr); /* Already evicted. */
271#if !LJ_SOFTFP
251 if (irt_isfp(ir->t)) { 272 if (irt_isfp(ir->t)) {
252 RegSet of = as->freeset; 273 RegSet of = as->freeset;
253 Reg r; 274 Reg r;
@@ -256,31 +277,56 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
256 r = ra_alloc1(as, ref, RSET_FPR); 277 r = ra_alloc1(as, ref, RSET_FPR);
257 as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); 278 as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
258 if (irt_isnum(ir->t)) { 279 if (irt_isnum(ir->t)) {
280#if LJ_32
259 emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); 281 emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
260 emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); 282 emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
261 lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */ 283 lj_assertA(rset_test(as->freeset, gpr+1),
284 "reg %d not free", gpr+1); /* Already evicted. */
262 gpr += 2; 285 gpr += 2;
286#else
287 emit_tg(as, MIPSI_DMFC1, gpr, r);
288 gpr++; fpr++;
289#endif
263 } else if (irt_isfloat(ir->t)) { 290 } else if (irt_isfloat(ir->t)) {
264 emit_tg(as, MIPSI_MFC1, gpr, r); 291 emit_tg(as, MIPSI_MFC1, gpr, r);
265 gpr++; 292 gpr++;
293#if LJ_64
294 fpr++;
295#endif
266 } 296 }
267 } else { 297 } else
298#endif
299 {
268 ra_leftov(as, gpr, ref); 300 ra_leftov(as, gpr, ref);
269 gpr++; 301 gpr++;
302#if LJ_64 && !LJ_SOFTFP
303 fpr++;
304#endif
270 } 305 }
271 } else { 306 } else {
272 Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); 307 Reg r = ra_alloc1z(as, ref, !LJ_SOFTFP && irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
308#if LJ_32
273 if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; 309 if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
274 emit_spstore(as, ir, r, ofs); 310 emit_spstore(as, ir, r, ofs);
275 ofs += irt_isnum(ir->t) ? 8 : 4; 311 ofs += irt_isnum(ir->t) ? 8 : 4;
312#else
313 emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isfp(ir->t) && !irt_is64(ir->t)) ? 4 : 0));
314 ofs += 8;
315#endif
276 } 316 }
277 } 317 }
278 } else { 318 } else {
319#if !LJ_SOFTFP
279 fpr = REGARG_LASTFPR+1; 320 fpr = REGARG_LASTFPR+1;
280 if (gpr <= REGARG_LASTGPR) 321#endif
322 if (gpr <= REGARG_LASTGPR) {
281 gpr++; 323 gpr++;
282 else 324#if LJ_64 && !LJ_SOFTFP
283 ofs += 4; 325 fpr++;
326#endif
327 } else {
328 ofs += LJ_32 ? 4 : 8;
329 }
284 } 330 }
285 checkmclim(as); 331 checkmclim(as);
286 } 332 }
@@ -290,50 +336,57 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
290static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) 336static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
291{ 337{
292 RegSet drop = RSET_SCRATCH; 338 RegSet drop = RSET_SCRATCH;
339#if LJ_32
293 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t)); 340 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
341#endif
342#if !LJ_SOFTFP
294 if ((ci->flags & CCI_NOFPRCLOBBER)) 343 if ((ci->flags & CCI_NOFPRCLOBBER))
295 drop &= ~RSET_FPR; 344 drop &= ~RSET_FPR;
345#endif
296 if (ra_hasreg(ir->r)) 346 if (ra_hasreg(ir->r))
297 rset_clear(drop, ir->r); /* Dest reg handled below. */ 347 rset_clear(drop, ir->r); /* Dest reg handled below. */
348#if LJ_32
298 if (hiop && ra_hasreg((ir+1)->r)) 349 if (hiop && ra_hasreg((ir+1)->r))
299 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ 350 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
351#endif
300 ra_evictset(as, drop); /* Evictions must be performed first. */ 352 ra_evictset(as, drop); /* Evictions must be performed first. */
301 if (ra_used(ir)) { 353 if (ra_used(ir)) {
302 lua_assert(!irt_ispri(ir->t)); 354 lj_assertA(!irt_ispri(ir->t), "PRI dest");
303 if (irt_isfp(ir->t)) { 355 if (!LJ_SOFTFP && irt_isfp(ir->t)) {
304 if ((ci->flags & CCI_CASTU64)) { 356 if ((ci->flags & CCI_CASTU64)) {
305 int32_t ofs = sps_scale(ir->s); 357 int32_t ofs = sps_scale(ir->s);
306 Reg dest = ir->r; 358 Reg dest = ir->r;
307 if (ra_hasreg(dest)) { 359 if (ra_hasreg(dest)) {
308 ra_free(as, dest); 360 ra_free(as, dest);
309 ra_modified(as, dest); 361 ra_modified(as, dest);
362#if LJ_32
310 emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1); 363 emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
311 emit_tg(as, MIPSI_MTC1, RID_RETLO, dest); 364 emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
365#else
366 emit_tg(as, MIPSI_DMTC1, RID_RET, dest);
367#endif
312 } 368 }
313 if (ofs) { 369 if (ofs) {
370#if LJ_32
314 emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0)); 371 emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
315 emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4)); 372 emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
373#else
374 emit_tsi(as, MIPSI_SD, RID_RET, RID_SP, ofs);
375#endif
316 } 376 }
317 } else { 377 } else {
318 ra_destreg(as, ir, RID_FPRET); 378 ra_destreg(as, ir, RID_FPRET);
319 } 379 }
380#if LJ_32
320 } else if (hiop) { 381 } else if (hiop) {
321 ra_destpair(as, ir); 382 ra_destpair(as, ir);
383#endif
322 } else { 384 } else {
323 ra_destreg(as, ir, RID_RET); 385 ra_destreg(as, ir, RID_RET);
324 } 386 }
325 } 387 }
326} 388}
327 389
328static void asm_call(ASMState *as, IRIns *ir)
329{
330 IRRef args[CCI_NARGS_MAX];
331 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
332 asm_collectargs(as, ir, ci, args);
333 asm_setupresult(as, ir, ci);
334 asm_gencall(as, ci, args);
335}
336
337static void asm_callx(ASMState *as, IRIns *ir) 390static void asm_callx(ASMState *as, IRIns *ir)
338{ 391{
339 IRRef args[CCI_NARGS_MAX*2]; 392 IRRef args[CCI_NARGS_MAX*2];
@@ -346,7 +399,7 @@ static void asm_callx(ASMState *as, IRIns *ir)
346 func = ir->op2; irf = IR(func); 399 func = ir->op2; irf = IR(func);
347 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } 400 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
348 if (irref_isk(func)) { /* Call to constant address. */ 401 if (irref_isk(func)) { /* Call to constant address. */
349 ci.func = (ASMFunction)(void *)(irf->i); 402 ci.func = (ASMFunction)(void *)get_kval(as, func);
350 } else { /* Need specific register for indirect calls. */ 403 } else { /* Need specific register for indirect calls. */
351 Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); 404 Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
352 MCode *p = as->mcp; 405 MCode *p = as->mcp;
@@ -361,27 +414,23 @@ static void asm_callx(ASMState *as, IRIns *ir)
361 asm_gencall(as, &ci, args); 414 asm_gencall(as, &ci, args);
362} 415}
363 416
364static void asm_callid(ASMState *as, IRIns *ir, IRCallID id) 417#if !LJ_SOFTFP
365{
366 const CCallInfo *ci = &lj_ir_callinfo[id];
367 IRRef args[2];
368 args[0] = ir->op1;
369 args[1] = ir->op2;
370 asm_setupresult(as, ir, ci);
371 asm_gencall(as, ci, args);
372}
373
374static void asm_callround(ASMState *as, IRIns *ir, IRCallID id) 418static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
375{ 419{
376 /* The modified regs must match with the *.dasc implementation. */ 420 /* The modified regs must match with the *.dasc implementation. */
377 RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)| 421 RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
378 RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR); 422 RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR)
423#if LJ_TARGET_MIPSR6
424 |RID2RSET(RID_F21)
425#endif
426 ;
379 if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); 427 if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
380 ra_evictset(as, drop); 428 ra_evictset(as, drop);
381 ra_destreg(as, ir, RID_FPRET); 429 ra_destreg(as, ir, RID_FPRET);
382 emit_call(as, (void *)lj_ir_callinfo[id].func); 430 emit_call(as, (void *)lj_ir_callinfo[id].func, 0);
383 ra_leftov(as, REGARG_FIRSTFPR, ir->op1); 431 ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
384} 432}
433#endif
385 434
386/* -- Returns ------------------------------------------------------------- */ 435/* -- Returns ------------------------------------------------------------- */
387 436
@@ -390,25 +439,31 @@ static void asm_retf(ASMState *as, IRIns *ir)
390{ 439{
391 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); 440 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
392 void *pc = ir_kptr(IR(ir->op2)); 441 void *pc = ir_kptr(IR(ir->op2));
393 int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); 442 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
394 as->topslot -= (BCReg)delta; 443 as->topslot -= (BCReg)delta;
395 if ((int32_t)as->topslot < 0) as->topslot = 0; 444 if ((int32_t)as->topslot < 0) as->topslot = 0;
396 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ 445 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
397 emit_setgl(as, base, jit_base); 446 emit_setgl(as, base, jit_base);
398 emit_addptr(as, base, -8*delta); 447 emit_addptr(as, base, -8*delta);
399 asm_guard(as, MIPSI_BNE, RID_TMP, 448 asm_guard(as, MIPSI_BNE, RID_TMP,
400 ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base))); 449 ra_allock(as, igcptr(pc), rset_exclude(RSET_GPR, base)));
401 emit_tsi(as, MIPSI_LW, RID_TMP, base, -8); 450 emit_tsi(as, MIPSI_AL, RID_TMP, base, -8);
402} 451}
403 452
404/* -- Type conversions ---------------------------------------------------- */ 453/* -- Type conversions ---------------------------------------------------- */
405 454
455#if !LJ_SOFTFP
406static void asm_tointg(ASMState *as, IRIns *ir, Reg left) 456static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
407{ 457{
408 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); 458 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
409 Reg dest = ra_dest(as, ir, RSET_GPR); 459 Reg dest = ra_dest(as, ir, RSET_GPR);
460#if !LJ_TARGET_MIPSR6
410 asm_guard(as, MIPSI_BC1F, 0, 0); 461 asm_guard(as, MIPSI_BC1F, 0, 0);
411 emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left); 462 emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
463#else
464 asm_guard(as, MIPSI_BC1EQZ, 0, (tmp&31));
465 emit_fgh(as, MIPSI_CMP_EQ_D, tmp, tmp, left);
466#endif
412 emit_fg(as, MIPSI_CVT_D_W, tmp, tmp); 467 emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
413 emit_tg(as, MIPSI_MFC1, dest, tmp); 468 emit_tg(as, MIPSI_MFC1, dest, tmp);
414 emit_fg(as, MIPSI_CVT_W_D, tmp, left); 469 emit_fg(as, MIPSI_CVT_W_D, tmp, left);
@@ -424,15 +479,57 @@ static void asm_tobit(ASMState *as, IRIns *ir)
424 emit_tg(as, MIPSI_MFC1, dest, tmp); 479 emit_tg(as, MIPSI_MFC1, dest, tmp);
425 emit_fgh(as, MIPSI_ADD_D, tmp, left, right); 480 emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
426} 481}
482#elif LJ_64 /* && LJ_SOFTFP */
483static void asm_tointg(ASMState *as, IRIns *ir, Reg r)
484{
485 /* The modified regs must match with the *.dasc implementation. */
486 RegSet drop = RID2RSET(REGARG_FIRSTGPR)|RID2RSET(RID_RET)|RID2RSET(RID_RET+1)|
487 RID2RSET(RID_R1)|RID2RSET(RID_R12);
488 if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
489 ra_evictset(as, drop);
490 /* Return values are in RID_RET (converted value) and RID_RET+1 (status). */
491 ra_destreg(as, ir, RID_RET);
492 asm_guard(as, MIPSI_BNE, RID_RET+1, RID_ZERO);
493 emit_call(as, (void *)lj_ir_callinfo[IRCALL_lj_vm_tointg].func, 0);
494 if (r == RID_NONE)
495 ra_leftov(as, REGARG_FIRSTGPR, ir->op1);
496 else if (r != REGARG_FIRSTGPR)
497 emit_move(as, REGARG_FIRSTGPR, r);
498}
499
500static void asm_tobit(ASMState *as, IRIns *ir)
501{
502 Reg dest = ra_dest(as, ir, RSET_GPR);
503 emit_dta(as, MIPSI_SLL, dest, dest, 0);
504 asm_callid(as, ir, IRCALL_lj_vm_tobit);
505}
506#endif
427 507
428static void asm_conv(ASMState *as, IRIns *ir) 508static void asm_conv(ASMState *as, IRIns *ir)
429{ 509{
430 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); 510 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
511#if !LJ_SOFTFP32
431 int stfp = (st == IRT_NUM || st == IRT_FLOAT); 512 int stfp = (st == IRT_NUM || st == IRT_FLOAT);
513#endif
514#if LJ_64
515 int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
516#endif
432 IRRef lref = ir->op1; 517 IRRef lref = ir->op1;
433 lua_assert(irt_type(ir->t) != st); 518#if LJ_32
434 lua_assert(!(irt_isint64(ir->t) || 519 /* 64 bit integer conversions are handled by SPLIT. */
435 (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ 520 lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
521 "IR %04d has unsplit 64 bit type",
522 (int)(ir - as->ir) - REF_BIAS);
523#endif
524#if LJ_SOFTFP32
525 /* FP conversions are handled by SPLIT. */
526 lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
527 "IR %04d has FP type",
528 (int)(ir - as->ir) - REF_BIAS);
529 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
530#else
531 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
532#if !LJ_SOFTFP
436 if (irt_isfp(ir->t)) { 533 if (irt_isfp(ir->t)) {
437 Reg dest = ra_dest(as, ir, RSET_FPR); 534 Reg dest = ra_dest(as, ir, RSET_FPR);
438 if (stfp) { /* FP to FP conversion. */ 535 if (stfp) { /* FP to FP conversion. */
@@ -448,27 +545,56 @@ static void asm_conv(ASMState *as, IRIns *ir)
448 emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp); 545 emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
449 emit_fg(as, MIPSI_CVT_D_W, dest, dest); 546 emit_fg(as, MIPSI_CVT_D_W, dest, dest);
450 emit_lsptr(as, MIPSI_LDC1, (tmp & 31), 547 emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
451 (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), 548 (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
452 RSET_GPR);
453 emit_tg(as, MIPSI_MTC1, RID_TMP, dest); 549 emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
454 emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left); 550 emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
455 emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); 551 emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
552#if LJ_64
553 } else if(st == IRT_U64) { /* U64 to FP conversion. */
554 /* if (x >= 1u<<63) y = (double)(int64_t)(x&(1u<<63)-1) + pow(2.0, 63) */
555 Reg left = ra_alloc1(as, lref, RSET_GPR);
556 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
557 MCLabel l_end = emit_label(as);
558 if (irt_isfloat(ir->t)) {
559 emit_fgh(as, MIPSI_ADD_S, dest, dest, tmp);
560 emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63],
561 rset_exclude(RSET_GPR, left));
562 emit_fg(as, MIPSI_CVT_S_L, dest, dest);
563 } else {
564 emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
565 emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63],
566 rset_exclude(RSET_GPR, left));
567 emit_fg(as, MIPSI_CVT_D_L, dest, dest);
568 }
569 emit_branch(as, MIPSI_BGEZ, left, RID_ZERO, l_end);
570 emit_tg(as, MIPSI_DMTC1, RID_TMP, dest);
571 emit_tsml(as, MIPSI_DEXTM, RID_TMP, left, 30, 0);
572#endif
456 } else { /* Integer to FP conversion. */ 573 } else { /* Integer to FP conversion. */
457 Reg left = ra_alloc1(as, lref, RSET_GPR); 574 Reg left = ra_alloc1(as, lref, RSET_GPR);
575#if LJ_32
458 emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, 576 emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
459 dest, dest); 577 dest, dest);
460 emit_tg(as, MIPSI_MTC1, left, dest); 578 emit_tg(as, MIPSI_MTC1, left, dest);
579#else
580 MIPSIns mi = irt_isfloat(ir->t) ?
581 (st64 ? MIPSI_CVT_S_L : MIPSI_CVT_S_W) :
582 (st64 ? MIPSI_CVT_D_L : MIPSI_CVT_D_W);
583 emit_fg(as, mi, dest, dest);
584 emit_tg(as, st64 ? MIPSI_DMTC1 : MIPSI_MTC1, left, dest);
585#endif
461 } 586 }
462 } else if (stfp) { /* FP to integer conversion. */ 587 } else if (stfp) { /* FP to integer conversion. */
463 if (irt_isguard(ir->t)) { 588 if (irt_isguard(ir->t)) {
464 /* Checked conversions are only supported from number to int. */ 589 /* Checked conversions are only supported from number to int. */
465 lua_assert(irt_isint(ir->t) && st == IRT_NUM); 590 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
591 "bad type for checked CONV");
466 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); 592 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
467 } else { 593 } else {
468 Reg dest = ra_dest(as, ir, RSET_GPR); 594 Reg dest = ra_dest(as, ir, RSET_GPR);
469 Reg left = ra_alloc1(as, lref, RSET_FPR); 595 Reg left = ra_alloc1(as, lref, RSET_FPR);
470 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); 596 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
471 if (irt_isu32(ir->t)) { 597 if (irt_isu32(ir->t)) { /* FP to U32 conversion. */
472 /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */ 598 /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
473 emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP); 599 emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
474 emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); 600 emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
@@ -479,25 +605,112 @@ static void asm_conv(ASMState *as, IRIns *ir)
479 tmp, left, tmp); 605 tmp, left, tmp);
480 if (st == IRT_FLOAT) 606 if (st == IRT_FLOAT)
481 emit_lsptr(as, MIPSI_LWC1, (tmp & 31), 607 emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
482 (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)), 608 (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
483 RSET_GPR);
484 else 609 else
485 emit_lsptr(as, MIPSI_LDC1, (tmp & 31), 610 emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
486 (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)), 611 (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
487 RSET_GPR); 612#if LJ_64
613 } else if (irt_isu64(ir->t)) { /* FP to U64 conversion. */
614 MCLabel l_end;
615 emit_tg(as, MIPSI_DMFC1, dest, tmp);
616 l_end = emit_label(as);
617 /* For inputs >= 2^63 add -2^64 and convert again. */
618 if (st == IRT_NUM) {
619 emit_fg(as, MIPSI_TRUNC_L_D, tmp, tmp);
620 emit_fgh(as, MIPSI_ADD_D, tmp, left, tmp);
621 emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
622 (void *)&as->J->k64[LJ_K64_M2P64],
623 rset_exclude(RSET_GPR, dest));
624 emit_fg(as, MIPSI_TRUNC_L_D, tmp, left); /* Delay slot. */
625#if !LJ_TARGET_MIPSR6
626 emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
627 emit_fgh(as, MIPSI_C_OLT_D, 0, left, tmp);
628#else
629 emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
630 emit_fgh(as, MIPSI_CMP_LT_D, left, left, tmp);
631#endif
632 emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
633 (void *)&as->J->k64[LJ_K64_2P63],
634 rset_exclude(RSET_GPR, dest));
635 } else {
636 emit_fg(as, MIPSI_TRUNC_L_S, tmp, tmp);
637 emit_fgh(as, MIPSI_ADD_S, tmp, left, tmp);
638 emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
639 (void *)&as->J->k32[LJ_K32_M2P64],
640 rset_exclude(RSET_GPR, dest));
641 emit_fg(as, MIPSI_TRUNC_L_S, tmp, left); /* Delay slot. */
642#if !LJ_TARGET_MIPSR6
643 emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
644 emit_fgh(as, MIPSI_C_OLT_S, 0, left, tmp);
645#else
646 emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
647 emit_fgh(as, MIPSI_CMP_LT_S, left, left, tmp);
648#endif
649 emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
650 (void *)&as->J->k32[LJ_K32_2P63],
651 rset_exclude(RSET_GPR, dest));
652 }
653#endif
488 } else { 654 } else {
655#if LJ_32
489 emit_tg(as, MIPSI_MFC1, dest, tmp); 656 emit_tg(as, MIPSI_MFC1, dest, tmp);
490 emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D, 657 emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
491 tmp, left); 658 tmp, left);
659#else
660 MIPSIns mi = irt_is64(ir->t) ?
661 (st == IRT_NUM ? MIPSI_TRUNC_L_D : MIPSI_TRUNC_L_S) :
662 (st == IRT_NUM ? MIPSI_TRUNC_W_D : MIPSI_TRUNC_W_S);
663 emit_tg(as, irt_is64(ir->t) ? MIPSI_DMFC1 : MIPSI_MFC1, dest, left);
664 emit_fg(as, mi, left, left);
665#endif
492 } 666 }
493 } 667 }
494 } else { 668 } else
669#else
670 if (irt_isfp(ir->t)) {
671#if LJ_64 && LJ_HASFFI
672 if (stfp) { /* FP to FP conversion. */
673 asm_callid(as, ir, irt_isnum(ir->t) ? IRCALL_softfp_f2d :
674 IRCALL_softfp_d2f);
675 } else { /* Integer to FP conversion. */
676 IRCallID cid = ((IRT_IS64 >> st) & 1) ?
677 (irt_isnum(ir->t) ?
678 (st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d) :
679 (st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f)) :
680 (irt_isnum(ir->t) ?
681 (st == IRT_INT ? IRCALL_softfp_i2d : IRCALL_softfp_ui2d) :
682 (st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f));
683 asm_callid(as, ir, cid);
684 }
685#else
686 asm_callid(as, ir, IRCALL_softfp_i2d);
687#endif
688 } else if (stfp) { /* FP to integer conversion. */
689 if (irt_isguard(ir->t)) {
690 /* Checked conversions are only supported from number to int. */
691 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
692 "bad type for checked CONV");
693 asm_tointg(as, ir, RID_NONE);
694 } else {
695 IRCallID cid = irt_is64(ir->t) ?
696 ((st == IRT_NUM) ?
697 (irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul) :
698 (irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul)) :
699 ((st == IRT_NUM) ?
700 (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
701 (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui));
702 asm_callid(as, ir, cid);
703 }
704 } else
705#endif
706#endif
707 {
495 Reg dest = ra_dest(as, ir, RSET_GPR); 708 Reg dest = ra_dest(as, ir, RSET_GPR);
496 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ 709 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
497 Reg left = ra_alloc1(as, ir->op1, RSET_GPR); 710 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
498 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); 711 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
499 if ((ir->op2 & IRCONV_SEXT)) { 712 if ((ir->op2 & IRCONV_SEXT)) {
500 if ((as->flags & JIT_F_MIPS32R2)) { 713 if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
501 emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); 714 emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
502 } else { 715 } else {
503 uint32_t shift = st == IRT_I8 ? 24 : 16; 716 uint32_t shift = st == IRT_I8 ? 24 : 16;
@@ -509,49 +722,109 @@ static void asm_conv(ASMState *as, IRIns *ir)
509 (int32_t)(st == IRT_U8 ? 0xff : 0xffff)); 722 (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
510 } 723 }
511 } else { /* 32/64 bit integer conversions. */ 724 } else { /* 32/64 bit integer conversions. */
725#if LJ_32
512 /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ 726 /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
513 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ 727 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
728#else
729 if (irt_is64(ir->t)) {
730 if (st64) {
731 /* 64/64 bit no-op (cast)*/
732 ra_leftov(as, dest, lref);
733 } else {
734 Reg left = ra_alloc1(as, lref, RSET_GPR);
735 if ((ir->op2 & IRCONV_SEXT)) { /* 32 to 64 bit sign extension. */
736 emit_dta(as, MIPSI_SLL, dest, left, 0);
737 } else { /* 32 to 64 bit zero extension. */
738 emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
739 }
740 }
741 } else {
742 if (st64) {
743 /* This is either a 32 bit reg/reg mov which zeroes the hiword
744 ** or a load of the loword from a 64 bit address.
745 */
746 Reg left = ra_alloc1(as, lref, RSET_GPR);
747 emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
748 } else { /* 32/32 bit no-op (cast). */
749 /* Do nothing, but may need to move regs. */
750 ra_leftov(as, dest, lref);
751 }
752 }
753#endif
514 } 754 }
515 } 755 }
516} 756}
517 757
518#if LJ_HASFFI
519static void asm_conv64(ASMState *as, IRIns *ir)
520{
521 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
522 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
523 IRCallID id;
524 const CCallInfo *ci;
525 IRRef args[2];
526 args[LJ_BE?0:1] = ir->op1;
527 args[LJ_BE?1:0] = (ir-1)->op1;
528 if (st == IRT_NUM || st == IRT_FLOAT) {
529 id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
530 ir--;
531 } else {
532 id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
533 }
534 ci = &lj_ir_callinfo[id];
535 asm_setupresult(as, ir, ci);
536 asm_gencall(as, ci, args);
537}
538#endif
539
540static void asm_strto(ASMState *as, IRIns *ir) 758static void asm_strto(ASMState *as, IRIns *ir)
541{ 759{
542 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; 760 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
543 IRRef args[2]; 761 IRRef args[2];
762 int32_t ofs = 0;
763#if LJ_SOFTFP32
764 ra_evictset(as, RSET_SCRATCH);
765 if (ra_used(ir)) {
766 if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
767 (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
768 int i;
769 for (i = 0; i < 2; i++) {
770 Reg r = (ir+i)->r;
771 if (ra_hasreg(r)) {
772 ra_free(as, r);
773 ra_modified(as, r);
774 emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
775 }
776 }
777 ofs = sps_scale(ir->s & ~1);
778 } else {
779 Reg rhi = ra_dest(as, ir+1, RSET_GPR);
780 Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
781 emit_tsi(as, MIPSI_LW, rhi, RID_SP, ofs+(LJ_BE?0:4));
782 emit_tsi(as, MIPSI_LW, rlo, RID_SP, ofs+(LJ_BE?4:0));
783 }
784 }
785#else
544 RegSet drop = RSET_SCRATCH; 786 RegSet drop = RSET_SCRATCH;
545 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ 787 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
546 ra_evictset(as, drop); 788 ra_evictset(as, drop);
789 ofs = sps_scale(ir->s);
790#endif
547 asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */ 791 asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
548 args[0] = ir->op1; /* GCstr *str */ 792 args[0] = ir->op1; /* GCstr *str */
549 args[1] = ASMREF_TMP1; /* TValue *n */ 793 args[1] = ASMREF_TMP1; /* TValue *n */
550 asm_gencall(as, ci, args); 794 asm_gencall(as, ci, args);
551 /* Store the result to the spill slot or temp slots. */ 795 /* Store the result to the spill slot or temp slots. */
552 emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), 796 emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1),
553 RID_SP, sps_scale(ir->s)); 797 RID_SP, ofs);
798}
799
800/* -- Memory references --------------------------------------------------- */
801
802#if LJ_64
803/* Store tagged value for ref at base+ofs. */
804static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
805{
806 RegSet allow = rset_exclude(RSET_GPR, base);
807 IRIns *ir = IR(ref);
808 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
809 "store of IR type %d", irt_type(ir->t));
810 if (irref_isk(ref)) {
811 TValue k;
812 lj_ir_kvalue(as->J->L, &k, ir);
813 emit_tsi(as, MIPSI_SD, ra_allock(as, (int64_t)k.u64, allow), base, ofs);
814 } else {
815 Reg src = ra_alloc1(as, ref, allow);
816 Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47,
817 rset_exclude(allow, src));
818 emit_tsi(as, MIPSI_SD, RID_TMP, base, ofs);
819 if (irt_isinteger(ir->t)) {
820 emit_dst(as, MIPSI_DADDU, RID_TMP, RID_TMP, type);
821 emit_tsml(as, MIPSI_DEXT, RID_TMP, src, 31, 0);
822 } else {
823 emit_dst(as, MIPSI_DADDU, RID_TMP, src, type);
824 }
825 }
554} 826}
827#endif
555 828
556/* Get pointer to TValue. */ 829/* Get pointer to TValue. */
557static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) 830static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
@@ -559,44 +832,32 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
559 IRIns *ir = IR(ref); 832 IRIns *ir = IR(ref);
560 if (irt_isnum(ir->t)) { 833 if (irt_isnum(ir->t)) {
561 if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ 834 if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
562 ra_allockreg(as, i32ptr(ir_knum(ir)), dest); 835 ra_allockreg(as, igcptr(ir_knum(ir)), dest);
563 else /* Otherwise force a spill and use the spill slot. */ 836 else /* Otherwise force a spill and use the spill slot. */
564 emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir)); 837 emit_tsi(as, MIPSI_AADDIU, dest, RID_SP, ra_spill(as, ir));
565 } else { 838 } else {
566 /* Otherwise use g->tmptv to hold the TValue. */ 839 /* Otherwise use g->tmptv to hold the TValue. */
840#if LJ_32
567 RegSet allow = rset_exclude(RSET_GPR, dest); 841 RegSet allow = rset_exclude(RSET_GPR, dest);
568 Reg type; 842 Reg type;
569 emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768); 843 emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, (int32_t)(offsetof(global_State, tmptv)-32768));
570 if (!irt_ispri(ir->t)) { 844 if (!irt_ispri(ir->t)) {
571 Reg src = ra_alloc1(as, ref, allow); 845 Reg src = ra_alloc1(as, ref, allow);
572 emit_setgl(as, src, tmptv.gcr); 846 emit_setgl(as, src, tmptv.gcr);
573 } 847 }
574 type = ra_allock(as, irt_toitype(ir->t), allow); 848 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
849 type = ra_alloc1(as, ref+1, allow);
850 else
851 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
575 emit_setgl(as, type, tmptv.it); 852 emit_setgl(as, type, tmptv.it);
853#else
854 asm_tvstore64(as, dest, 0, ref);
855 emit_tsi(as, MIPSI_DADDIU, dest, RID_JGL,
856 (int32_t)(offsetof(global_State, tmptv)-32768));
857#endif
576 } 858 }
577} 859}
578 860
579static void asm_tostr(ASMState *as, IRIns *ir)
580{
581 IRRef args[2];
582 args[0] = ASMREF_L;
583 as->gcsteps++;
584 if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
585 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
586 args[1] = ASMREF_TMP1; /* const lua_Number * */
587 asm_setupresult(as, ir, ci); /* GCstr * */
588 asm_gencall(as, ci, args);
589 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
590 } else {
591 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
592 args[1] = ir->op1; /* int32_t k */
593 asm_setupresult(as, ir, ci); /* GCstr * */
594 asm_gencall(as, ci, args);
595 }
596}
597
598/* -- Memory references --------------------------------------------------- */
599
600static void asm_aref(ASMState *as, IRIns *ir) 861static void asm_aref(ASMState *as, IRIns *ir)
601{ 862{
602 Reg dest = ra_dest(as, ir, RSET_GPR); 863 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -608,14 +869,18 @@ static void asm_aref(ASMState *as, IRIns *ir)
608 ofs += 8*IR(ir->op2)->i; 869 ofs += 8*IR(ir->op2)->i;
609 if (checki16(ofs)) { 870 if (checki16(ofs)) {
610 base = ra_alloc1(as, refa, RSET_GPR); 871 base = ra_alloc1(as, refa, RSET_GPR);
611 emit_tsi(as, MIPSI_ADDIU, dest, base, ofs); 872 emit_tsi(as, MIPSI_AADDIU, dest, base, ofs);
612 return; 873 return;
613 } 874 }
614 } 875 }
615 base = ra_alloc1(as, ir->op1, RSET_GPR); 876 base = ra_alloc1(as, ir->op1, RSET_GPR);
616 idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); 877 idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
617 emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base); 878#if !LJ_TARGET_MIPSR6
879 emit_dst(as, MIPSI_AADDU, dest, RID_TMP, base);
618 emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3); 880 emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
881#else
882 emit_dst(as, MIPSI_ALSA | MIPSF_A(3-1), dest, idx, base);
883#endif
619} 884}
620 885
621/* Inlined hash lookup. Specialized for key type and for const keys. 886/* Inlined hash lookup. Specialized for key type and for const keys.
@@ -626,51 +891,109 @@ static void asm_aref(ASMState *as, IRIns *ir)
626** } while ((n = nextnode(n))); 891** } while ((n = nextnode(n)));
627** return niltv(L); 892** return niltv(L);
628*/ 893*/
629static void asm_href(ASMState *as, IRIns *ir) 894static void asm_href(ASMState *as, IRIns *ir, IROp merge)
630{ 895{
631 RegSet allow = RSET_GPR; 896 RegSet allow = RSET_GPR;
632 int destused = ra_used(ir); 897 int destused = ra_used(ir);
633 Reg dest = ra_dest(as, ir, allow); 898 Reg dest = ra_dest(as, ir, allow);
634 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); 899 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
635 Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2; 900 Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
901#if LJ_64
902 Reg cmp64 = RID_NONE;
903#endif
636 IRRef refkey = ir->op2; 904 IRRef refkey = ir->op2;
637 IRIns *irkey = IR(refkey); 905 IRIns *irkey = IR(refkey);
906 int isk = irref_isk(refkey);
638 IRType1 kt = irkey->t; 907 IRType1 kt = irkey->t;
639 uint32_t khash; 908 uint32_t khash;
640 MCLabel l_end, l_loop, l_next; 909 MCLabel l_end, l_loop, l_next;
641 910
642 rset_clear(allow, tab); 911 rset_clear(allow, tab);
643 if (irt_isnum(kt)) { 912#if LJ_SOFTFP32
913 if (!isk) {
914 key = ra_alloc1(as, refkey, allow);
915 rset_clear(allow, key);
916 if (irkey[1].o == IR_HIOP) {
917 if (ra_hasreg((irkey+1)->r)) {
918 type = tmpnum = (irkey+1)->r;
919 tmp1 = ra_scratch(as, allow);
920 rset_clear(allow, tmp1);
921 ra_noweak(as, tmpnum);
922 } else {
923 type = tmpnum = ra_allocref(as, refkey+1, allow);
924 }
925 rset_clear(allow, tmpnum);
926 } else {
927 type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow);
928 rset_clear(allow, type);
929 }
930 }
931#else
932 if (!LJ_SOFTFP && irt_isnum(kt)) {
644 key = ra_alloc1(as, refkey, RSET_FPR); 933 key = ra_alloc1(as, refkey, RSET_FPR);
645 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); 934 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
646 } else if (!irt_ispri(kt)) { 935 } else if (!irt_ispri(kt)) {
647 key = ra_alloc1(as, refkey, allow); 936 key = ra_alloc1(as, refkey, allow);
648 rset_clear(allow, key); 937 rset_clear(allow, key);
649 type = ra_allock(as, irt_toitype(irkey->t), allow); 938#if LJ_32
939 type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow);
650 rset_clear(allow, type); 940 rset_clear(allow, type);
941#endif
651 } 942 }
943#endif
652 tmp2 = ra_scratch(as, allow); 944 tmp2 = ra_scratch(as, allow);
653 rset_clear(allow, tmp2); 945 rset_clear(allow, tmp2);
946#if LJ_64
947 if (LJ_SOFTFP || !irt_isnum(kt)) {
948 /* Allocate cmp64 register used for 64-bit comparisons */
949 if (LJ_SOFTFP && irt_isnum(kt)) {
950 cmp64 = key;
951 } else if (!isk && irt_isaddr(kt)) {
952 cmp64 = tmp2;
953 } else {
954 int64_t k;
955 if (isk && irt_isaddr(kt)) {
956 k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
957 } else {
958 lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
959 k = ~((int64_t)~irt_toitype(ir->t) << 47);
960 }
961 cmp64 = ra_allock(as, k, allow);
962 rset_clear(allow, cmp64);
963 }
964 }
965#endif
654 966
655 /* Key not found in chain: load niltv. */ 967 /* Key not found in chain: jump to exit (if merged) or load niltv. */
656 l_end = emit_label(as); 968 l_end = emit_label(as);
657 if (destused) 969 as->invmcp = NULL;
970 if (merge == IR_NE)
971 asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO);
972 else if (destused)
658 emit_loada(as, dest, niltvg(J2G(as->J))); 973 emit_loada(as, dest, niltvg(J2G(as->J)));
659 else
660 *--as->mcp = MIPSI_NOP;
661 /* Follow hash chain until the end. */ 974 /* Follow hash chain until the end. */
662 emit_move(as, dest, tmp1); 975 emit_move(as, dest, tmp1);
663 l_loop = --as->mcp; 976 l_loop = --as->mcp;
664 emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next)); 977 emit_tsi(as, MIPSI_AL, tmp1, dest, (int32_t)offsetof(Node, next));
665 l_next = emit_label(as); 978 l_next = emit_label(as);
666 979
667 /* Type and value comparison. */ 980 /* Type and value comparison. */
668 if (irt_isnum(kt)) { 981 if (merge == IR_EQ) { /* Must match asm_guard(). */
982 emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
983 l_end = asm_exitstub_addr(as);
984 }
985 if (!LJ_SOFTFP && irt_isnum(kt)) {
986#if !LJ_TARGET_MIPSR6
669 emit_branch(as, MIPSI_BC1T, 0, 0, l_end); 987 emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
670 emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key); 988 emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
671 emit_tg(as, MIPSI_MFC1, tmp1, key+1); 989#else
990 emit_branch(as, MIPSI_BC1NEZ, 0, (tmpnum&31), l_end);
991 emit_fgh(as, MIPSI_CMP_EQ_D, tmpnum, tmpnum, key);
992#endif
993 *--as->mcp = MIPSI_NOP; /* Avoid NaN comparison overhead. */
672 emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next); 994 emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
673 emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM); 995 emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
996#if LJ_32
674 emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n)); 997 emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
675 } else { 998 } else {
676 if (irt_ispri(kt)) { 999 if (irt_ispri(kt)) {
@@ -683,36 +1006,52 @@ static void asm_href(ASMState *as, IRIns *ir)
683 } 1006 }
684 emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it)); 1007 emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
685 *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu); 1008 *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
1009#else
1010 emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 15);
1011 emit_tg(as, MIPSI_DMTC1, tmp1, tmpnum);
1012 emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
1013 } else {
1014 emit_branch(as, MIPSI_BEQ, tmp1, cmp64, l_end);
1015 emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
1016 }
1017 *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
1018 if (!isk && irt_isaddr(kt)) {
1019 type = ra_allock(as, (int64_t)irt_toitype(kt) << 47, allow);
1020 emit_dst(as, MIPSI_DADDU, tmp2, key, type);
1021 rset_clear(allow, type);
1022 }
1023#endif
686 1024
687 /* Load main position relative to tab->node into dest. */ 1025 /* Load main position relative to tab->node into dest. */
688 khash = irref_isk(refkey) ? ir_khash(irkey) : 1; 1026 khash = isk ? ir_khash(as, irkey) : 1;
689 if (khash == 0) { 1027 if (khash == 0) {
690 emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); 1028 emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
691 } else { 1029 } else {
692 Reg tmphash = tmp1; 1030 Reg tmphash = tmp1;
693 if (irref_isk(refkey)) 1031 if (isk)
694 tmphash = ra_allock(as, khash, allow); 1032 tmphash = ra_allock(as, khash, allow);
695 emit_dst(as, MIPSI_ADDU, dest, dest, tmp1); 1033 emit_dst(as, MIPSI_AADDU, dest, dest, tmp1);
696 lua_assert(sizeof(Node) == 24); 1034 lj_assertA(sizeof(Node) == 24, "bad Node size");
697 emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); 1035 emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
698 emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); 1036 emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
699 emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); 1037 emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
700 emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash); 1038 emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
701 emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node)); 1039 emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
702 emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); 1040 emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
703 if (irref_isk(refkey)) { 1041 if (isk) {
704 /* Nothing to do. */ 1042 /* Nothing to do. */
705 } else if (irt_isstr(kt)) { 1043 } else if (irt_isstr(kt)) {
706 emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash)); 1044 emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, sid));
707 } else { /* Must match with hash*() in lj_tab.c. */ 1045 } else { /* Must match with hash*() in lj_tab.c. */
708 emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2); 1046 emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
709 emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31); 1047 emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
710 emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2); 1048 emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
711 emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31); 1049 emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
712 emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest); 1050 emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
713 if (irt_isnum(kt)) { 1051#if LJ_32
1052 if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
714 emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1); 1053 emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
715 if ((as->flags & JIT_F_MIPS32R2)) { 1054 if ((as->flags & JIT_F_MIPSXXR2)) {
716 emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31); 1055 emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
717 } else { 1056 } else {
718 emit_dst(as, MIPSI_OR, dest, dest, tmp1); 1057 emit_dst(as, MIPSI_OR, dest, dest, tmp1);
@@ -720,13 +1059,35 @@ static void asm_href(ASMState *as, IRIns *ir)
720 emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31); 1059 emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
721 } 1060 }
722 emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1); 1061 emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
1062#if LJ_SOFTFP
1063 emit_ds(as, MIPSI_MOVE, tmp1, type);
1064 emit_ds(as, MIPSI_MOVE, tmp2, key);
1065#else
723 emit_tg(as, MIPSI_MFC1, tmp2, key); 1066 emit_tg(as, MIPSI_MFC1, tmp2, key);
724 emit_tg(as, MIPSI_MFC1, tmp1, key+1); 1067 emit_tg(as, MIPSI_MFC1, tmp1, key+1);
1068#endif
725 } else { 1069 } else {
726 emit_dst(as, MIPSI_XOR, tmp2, key, tmp1); 1070 emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
727 emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31); 1071 emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
728 emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow)); 1072 emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
729 } 1073 }
1074#else
1075 emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
1076 emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
1077 if (irt_isnum(kt)) {
1078 emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
1079 emit_dta(as, MIPSI_DSRA32, tmp1, LJ_SOFTFP ? key : tmp1, 0);
1080 emit_dta(as, MIPSI_SLL, tmp2, LJ_SOFTFP ? key : tmp1, 0);
1081#if !LJ_SOFTFP
1082 emit_tg(as, MIPSI_DMFC1, tmp1, key);
1083#endif
1084 } else {
1085 checkmclim(as);
1086 emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0);
1087 emit_dta(as, MIPSI_SLL, tmp2, key, 0);
1088 emit_dst(as, MIPSI_DADDU, tmp1, key, type);
1089 }
1090#endif
730 } 1091 }
731 } 1092 }
732} 1093}
@@ -739,17 +1100,24 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
739 int32_t kofs = ofs + (int32_t)offsetof(Node, key); 1100 int32_t kofs = ofs + (int32_t)offsetof(Node, key);
740 Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE; 1101 Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
741 Reg node = ra_alloc1(as, ir->op1, RSET_GPR); 1102 Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
742 Reg key = RID_NONE, type = RID_TMP, idx = node;
743 RegSet allow = rset_exclude(RSET_GPR, node); 1103 RegSet allow = rset_exclude(RSET_GPR, node);
1104 Reg idx = node;
1105#if LJ_32
1106 Reg key = RID_NONE, type = RID_TMP;
744 int32_t lo, hi; 1107 int32_t lo, hi;
745 lua_assert(ofs % sizeof(Node) == 0); 1108#else
1109 Reg key = ra_scratch(as, allow);
1110 int64_t k;
1111#endif
1112 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
746 if (ofs > 32736) { 1113 if (ofs > 32736) {
747 idx = dest; 1114 idx = dest;
748 rset_clear(allow, dest); 1115 rset_clear(allow, dest);
749 kofs = (int32_t)offsetof(Node, key); 1116 kofs = (int32_t)offsetof(Node, key);
750 } else if (ra_hasreg(dest)) { 1117 } else if (ra_hasreg(dest)) {
751 emit_tsi(as, MIPSI_ADDIU, dest, node, ofs); 1118 emit_tsi(as, MIPSI_AADDIU, dest, node, ofs);
752 } 1119 }
1120#if LJ_32
753 if (!irt_ispri(irkey->t)) { 1121 if (!irt_ispri(irkey->t)) {
754 key = ra_scratch(as, allow); 1122 key = ra_scratch(as, allow);
755 rset_clear(allow, key); 1123 rset_clear(allow, key);
@@ -768,22 +1136,20 @@ nolo:
768 asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO); 1136 asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
769 if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0)); 1137 if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
770 emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); 1138 emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
771 if (ofs > 32736) 1139#else
772 emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow)); 1140 if (irt_ispri(irkey->t)) {
773} 1141 lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
774 1142 k = ~((int64_t)~irt_toitype(irkey->t) << 47);
775static void asm_newref(ASMState *as, IRIns *ir) 1143 } else if (irt_isnum(irkey->t)) {
776{ 1144 k = (int64_t)ir_knum(irkey)->u64;
777 if (ir->r != RID_SINK) { 1145 } else {
778 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey]; 1146 k = ((int64_t)irt_toitype(irkey->t) << 47) | (int64_t)ir_kgc(irkey);
779 IRRef args[3];
780 args[0] = ASMREF_L; /* lua_State *L */
781 args[1] = ir->op1; /* GCtab *t */
782 args[2] = ASMREF_TMP1; /* cTValue *key */
783 asm_setupresult(as, ir, ci); /* TValue * */
784 asm_gencall(as, ci, args);
785 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
786 } 1147 }
1148 asm_guard(as, MIPSI_BNE, key, ra_allock(as, k, allow));
1149 emit_tsi(as, MIPSI_LD, key, idx, kofs);
1150#endif
1151 if (ofs > 32736)
1152 emit_tsi(as, MIPSI_AADDU, dest, node, ra_allock(as, ofs, allow));
787} 1153}
788 1154
789static void asm_uref(ASMState *as, IRIns *ir) 1155static void asm_uref(ASMState *as, IRIns *ir)
@@ -792,30 +1158,31 @@ static void asm_uref(ASMState *as, IRIns *ir)
792 if (irref_isk(ir->op1)) { 1158 if (irref_isk(ir->op1)) {
793 GCfunc *fn = ir_kfunc(IR(ir->op1)); 1159 GCfunc *fn = ir_kfunc(IR(ir->op1));
794 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; 1160 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
795 emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR); 1161 emit_lsptr(as, MIPSI_AL, dest, v, RSET_GPR);
796 } else { 1162 } else {
797 Reg uv = ra_scratch(as, RSET_GPR); 1163 Reg uv = ra_scratch(as, RSET_GPR);
798 Reg func = ra_alloc1(as, ir->op1, RSET_GPR); 1164 Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
799 if (ir->o == IR_UREFC) { 1165 if (ir->o == IR_UREFC) {
800 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); 1166 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
801 emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv)); 1167 emit_tsi(as, MIPSI_AADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
802 emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed)); 1168 emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
803 } else { 1169 } else {
804 emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v)); 1170 emit_tsi(as, MIPSI_AL, dest, uv, (int32_t)offsetof(GCupval, v));
805 } 1171 }
806 emit_tsi(as, MIPSI_LW, uv, func, 1172 emit_tsi(as, MIPSI_AL, uv, func, (int32_t)offsetof(GCfuncL, uvptr) +
807 (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); 1173 (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
808 } 1174 }
809} 1175}
810 1176
811static void asm_fref(ASMState *as, IRIns *ir) 1177static void asm_fref(ASMState *as, IRIns *ir)
812{ 1178{
813 UNUSED(as); UNUSED(ir); 1179 UNUSED(as); UNUSED(ir);
814 lua_assert(!ra_used(ir)); 1180 lj_assertA(!ra_used(ir), "unfused FREF");
815} 1181}
816 1182
817static void asm_strref(ASMState *as, IRIns *ir) 1183static void asm_strref(ASMState *as, IRIns *ir)
818{ 1184{
1185#if LJ_32
819 Reg dest = ra_dest(as, ir, RSET_GPR); 1186 Reg dest = ra_dest(as, ir, RSET_GPR);
820 IRRef ref = ir->op2, refk = ir->op1; 1187 IRRef ref = ir->op2, refk = ir->op1;
821 int32_t ofs = (int32_t)sizeof(GCstr); 1188 int32_t ofs = (int32_t)sizeof(GCstr);
@@ -847,49 +1214,79 @@ static void asm_strref(ASMState *as, IRIns *ir)
847 else 1214 else
848 emit_dst(as, MIPSI_ADDU, dest, r, 1215 emit_dst(as, MIPSI_ADDU, dest, r,
849 ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); 1216 ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
1217#else
1218 RegSet allow = RSET_GPR;
1219 Reg dest = ra_dest(as, ir, allow);
1220 Reg base = ra_alloc1(as, ir->op1, allow);
1221 IRIns *irr = IR(ir->op2);
1222 int32_t ofs = sizeof(GCstr);
1223 rset_clear(allow, base);
1224 if (irref_isk(ir->op2) && checki16(ofs + irr->i)) {
1225 emit_tsi(as, MIPSI_DADDIU, dest, base, ofs + irr->i);
1226 } else {
1227 emit_tsi(as, MIPSI_DADDIU, dest, dest, ofs);
1228 emit_dst(as, MIPSI_DADDU, dest, base, ra_alloc1(as, ir->op2, allow));
1229 }
1230#endif
850} 1231}
851 1232
852/* -- Loads and stores ---------------------------------------------------- */ 1233/* -- Loads and stores ---------------------------------------------------- */
853 1234
854static MIPSIns asm_fxloadins(IRIns *ir) 1235static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir)
855{ 1236{
1237 UNUSED(as);
856 switch (irt_type(ir->t)) { 1238 switch (irt_type(ir->t)) {
857 case IRT_I8: return MIPSI_LB; 1239 case IRT_I8: return MIPSI_LB;
858 case IRT_U8: return MIPSI_LBU; 1240 case IRT_U8: return MIPSI_LBU;
859 case IRT_I16: return MIPSI_LH; 1241 case IRT_I16: return MIPSI_LH;
860 case IRT_U16: return MIPSI_LHU; 1242 case IRT_U16: return MIPSI_LHU;
861 case IRT_NUM: return MIPSI_LDC1; 1243 case IRT_NUM:
862 case IRT_FLOAT: return MIPSI_LWC1; 1244 lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
863 default: return MIPSI_LW; 1245 if (!LJ_SOFTFP) return MIPSI_LDC1;
1246 /* fallthrough */
1247 case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1;
1248 /* fallthrough */
1249 default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_LD : MIPSI_LW;
864 } 1250 }
865} 1251}
866 1252
867static MIPSIns asm_fxstoreins(IRIns *ir) 1253static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir)
868{ 1254{
1255 UNUSED(as);
869 switch (irt_type(ir->t)) { 1256 switch (irt_type(ir->t)) {
870 case IRT_I8: case IRT_U8: return MIPSI_SB; 1257 case IRT_I8: case IRT_U8: return MIPSI_SB;
871 case IRT_I16: case IRT_U16: return MIPSI_SH; 1258 case IRT_I16: case IRT_U16: return MIPSI_SH;
872 case IRT_NUM: return MIPSI_SDC1; 1259 case IRT_NUM:
873 case IRT_FLOAT: return MIPSI_SWC1; 1260 lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
874 default: return MIPSI_SW; 1261 if (!LJ_SOFTFP) return MIPSI_SDC1;
1262 /* fallthrough */
1263 case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1;
1264 /* fallthrough */
1265 default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_SD : MIPSI_SW;
875 } 1266 }
876} 1267}
877 1268
878static void asm_fload(ASMState *as, IRIns *ir) 1269static void asm_fload(ASMState *as, IRIns *ir)
879{ 1270{
880 Reg dest = ra_dest(as, ir, RSET_GPR); 1271 Reg dest = ra_dest(as, ir, RSET_GPR);
881 Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); 1272 MIPSIns mi = asm_fxloadins(as, ir);
882 MIPSIns mi = asm_fxloadins(ir); 1273 Reg idx;
883 int32_t ofs; 1274 int32_t ofs;
884 if (ir->op2 == IRFL_TAB_ARRAY) { 1275 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
885 ofs = asm_fuseabase(as, ir->op1); 1276 idx = RID_JGL;
886 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ 1277 ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
887 emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs); 1278 } else {
888 return; 1279 idx = ra_alloc1(as, ir->op1, RSET_GPR);
1280 if (ir->op2 == IRFL_TAB_ARRAY) {
1281 ofs = asm_fuseabase(as, ir->op1);
1282 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
1283 emit_tsi(as, MIPSI_AADDIU, dest, idx, ofs);
1284 return;
1285 }
889 } 1286 }
1287 ofs = field_ofs[ir->op2];
890 } 1288 }
891 ofs = field_ofs[ir->op2]; 1289 lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD");
892 lua_assert(!irt_isfp(ir->t));
893 emit_tsi(as, mi, dest, idx, ofs); 1290 emit_tsi(as, mi, dest, idx, ofs);
894} 1291}
895 1292
@@ -900,51 +1297,89 @@ static void asm_fstore(ASMState *as, IRIns *ir)
900 IRIns *irf = IR(ir->op1); 1297 IRIns *irf = IR(ir->op1);
901 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); 1298 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
902 int32_t ofs = field_ofs[irf->op2]; 1299 int32_t ofs = field_ofs[irf->op2];
903 MIPSIns mi = asm_fxstoreins(ir); 1300 MIPSIns mi = asm_fxstoreins(as, ir);
904 lua_assert(!irt_isfp(ir->t)); 1301 lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE");
905 emit_tsi(as, mi, src, idx, ofs); 1302 emit_tsi(as, mi, src, idx, ofs);
906 } 1303 }
907} 1304}
908 1305
909static void asm_xload(ASMState *as, IRIns *ir) 1306static void asm_xload(ASMState *as, IRIns *ir)
910{ 1307{
911 Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); 1308 Reg dest = ra_dest(as, ir,
912 lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); 1309 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
913 asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); 1310 lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED),
1311 "unaligned XLOAD");
1312 asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
914} 1313}
915 1314
916static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs) 1315static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
917{ 1316{
918 if (ir->r != RID_SINK) { 1317 if (ir->r != RID_SINK) {
919 Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); 1318 Reg src = ra_alloc1z(as, ir->op2,
920 asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, 1319 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
1320 asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
921 rset_exclude(RSET_GPR, src), ofs); 1321 rset_exclude(RSET_GPR, src), ofs);
922 } 1322 }
923} 1323}
924 1324
1325#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
1326
925static void asm_ahuvload(ASMState *as, IRIns *ir) 1327static void asm_ahuvload(ASMState *as, IRIns *ir)
926{ 1328{
927 IRType1 t = ir->t; 1329 int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
928 Reg dest = RID_NONE, type = RID_TMP, idx; 1330 Reg dest = RID_NONE, type = RID_TMP, idx;
929 RegSet allow = RSET_GPR; 1331 RegSet allow = RSET_GPR;
930 int32_t ofs = 0; 1332 int32_t ofs = 0;
1333 IRType1 t = ir->t;
1334 if (hiop) {
1335 t.irt = IRT_NUM;
1336 if (ra_used(ir+1)) {
1337 type = ra_dest(as, ir+1, allow);
1338 rset_clear(allow, type);
1339 }
1340 }
931 if (ra_used(ir)) { 1341 if (ra_used(ir)) {
932 lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); 1342 lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
933 dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); 1343 irt_isint(ir->t) || irt_isaddr(ir->t),
1344 "bad load type %d", irt_type(ir->t));
1345 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
934 rset_clear(allow, dest); 1346 rset_clear(allow, dest);
1347#if LJ_64
1348 if (irt_isaddr(t))
1349 emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
1350 else if (irt_isint(t))
1351 emit_dta(as, MIPSI_SLL, dest, dest, 0);
1352#endif
935 } 1353 }
936 idx = asm_fuseahuref(as, ir->op1, &ofs, allow); 1354 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
937 rset_clear(allow, idx); 1355 rset_clear(allow, idx);
938 if (irt_isnum(t)) { 1356 if (irt_isnum(t)) {
939 asm_guard(as, MIPSI_BEQ, type, RID_ZERO); 1357 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
940 emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM); 1358 emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
941 if (ra_hasreg(dest))
942 emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
943 } else { 1359 } else {
944 asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow)); 1360 asm_guard(as, MIPSI_BNE, type,
945 if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0)); 1361 ra_allock(as, (int32_t)irt_toitype(t), allow));
1362 }
1363#if LJ_32
1364 if (ra_hasreg(dest)) {
1365 if (!LJ_SOFTFP && irt_isnum(t))
1366 emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
1367 else
1368 emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
946 } 1369 }
947 emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4)); 1370 emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
1371#else
1372 if (ra_hasreg(dest)) {
1373 if (!LJ_SOFTFP && irt_isnum(t)) {
1374 emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
1375 dest = type;
1376 }
1377 } else {
1378 dest = type;
1379 }
1380 emit_dta(as, MIPSI_DSRA32, type, dest, 15);
1381 emit_tsi(as, MIPSI_LD, dest, idx, ofs);
1382#endif
948} 1383}
949 1384
950static void asm_ahustore(ASMState *as, IRIns *ir) 1385static void asm_ahustore(ASMState *as, IRIns *ir)
@@ -954,81 +1389,180 @@ static void asm_ahustore(ASMState *as, IRIns *ir)
954 int32_t ofs = 0; 1389 int32_t ofs = 0;
955 if (ir->r == RID_SINK) 1390 if (ir->r == RID_SINK)
956 return; 1391 return;
957 if (irt_isnum(ir->t)) { 1392 if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
958 src = ra_alloc1(as, ir->op2, RSET_FPR); 1393 src = ra_alloc1(as, ir->op2, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
1394 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1395 emit_hsi(as, LJ_SOFTFP ? MIPSI_SD : MIPSI_SDC1, src, idx, ofs);
959 } else { 1396 } else {
1397#if LJ_32
960 if (!irt_ispri(ir->t)) { 1398 if (!irt_ispri(ir->t)) {
961 src = ra_alloc1(as, ir->op2, allow); 1399 src = ra_alloc1(as, ir->op2, allow);
962 rset_clear(allow, src); 1400 rset_clear(allow, src);
963 } 1401 }
964 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); 1402 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
1403 type = ra_alloc1(as, (ir+1)->op2, allow);
1404 else
1405 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
965 rset_clear(allow, type); 1406 rset_clear(allow, type);
966 } 1407 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
967 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
968 if (irt_isnum(ir->t)) {
969 emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
970 } else {
971 if (ra_hasreg(src)) 1408 if (ra_hasreg(src))
972 emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0)); 1409 emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
973 emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4)); 1410 emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
1411#else
1412 Reg tmp = RID_TMP;
1413 if (irt_ispri(ir->t)) {
1414 tmp = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
1415 rset_clear(allow, tmp);
1416 } else {
1417 src = ra_alloc1(as, ir->op2, allow);
1418 rset_clear(allow, src);
1419 type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
1420 rset_clear(allow, type);
1421 }
1422 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1423 emit_tsi(as, MIPSI_SD, tmp, idx, ofs);
1424 if (ra_hasreg(src)) {
1425 if (irt_isinteger(ir->t)) {
1426 emit_dst(as, MIPSI_DADDU, tmp, tmp, type);
1427 emit_tsml(as, MIPSI_DEXT, tmp, src, 31, 0);
1428 } else {
1429 emit_dst(as, MIPSI_DADDU, tmp, src, type);
1430 }
1431 }
1432#endif
974 } 1433 }
975} 1434}
976 1435
977static void asm_sload(ASMState *as, IRIns *ir) 1436static void asm_sload(ASMState *as, IRIns *ir)
978{ 1437{
979 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
980 IRType1 t = ir->t;
981 Reg dest = RID_NONE, type = RID_NONE, base; 1438 Reg dest = RID_NONE, type = RID_NONE, base;
982 RegSet allow = RSET_GPR; 1439 RegSet allow = RSET_GPR;
983 lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ 1440 IRType1 t = ir->t;
984 lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); 1441#if LJ_32
985 lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); 1442 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
1443 int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
1444 if (hiop)
1445 t.irt = IRT_NUM;
1446#else
1447 int32_t ofs = 8*((int32_t)ir->op1-2);
1448#endif
1449 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1450 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1451 lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1452 "inconsistent SLOAD variant");
1453#if LJ_SOFTFP32
1454 lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
1455 "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
1456 if (hiop && ra_used(ir+1)) {
1457 type = ra_dest(as, ir+1, allow);
1458 rset_clear(allow, type);
1459 }
1460#else
986 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { 1461 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
987 dest = ra_scratch(as, RSET_FPR); 1462 dest = ra_scratch(as, LJ_SOFTFP ? allow : RSET_FPR);
988 asm_tointg(as, ir, dest); 1463 asm_tointg(as, ir, dest);
989 t.irt = IRT_NUM; /* Continue with a regular number type check. */ 1464 t.irt = IRT_NUM; /* Continue with a regular number type check. */
990 } else if (ra_used(ir)) { 1465 } else
991 lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); 1466#endif
992 dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); 1467 if (ra_used(ir)) {
1468 lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
1469 irt_isint(ir->t) || irt_isaddr(ir->t),
1470 "bad SLOAD type %d", irt_type(ir->t));
1471 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
993 rset_clear(allow, dest); 1472 rset_clear(allow, dest);
994 base = ra_alloc1(as, REF_BASE, allow); 1473 base = ra_alloc1(as, REF_BASE, allow);
995 rset_clear(allow, base); 1474 rset_clear(allow, base);
996 if ((ir->op2 & IRSLOAD_CONVERT)) { 1475 if (!LJ_SOFTFP32 && (ir->op2 & IRSLOAD_CONVERT)) {
997 if (irt_isint(t)) { 1476 if (irt_isint(t)) {
998 Reg tmp = ra_scratch(as, RSET_FPR); 1477 Reg tmp = ra_scratch(as, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
1478#if LJ_SOFTFP
1479 ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
1480 ra_destreg(as, ir, RID_RET);
1481 emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_d2i].func, 0);
1482 if (tmp != REGARG_FIRSTGPR)
1483 emit_move(as, REGARG_FIRSTGPR, tmp);
1484#else
999 emit_tg(as, MIPSI_MFC1, dest, tmp); 1485 emit_tg(as, MIPSI_MFC1, dest, tmp);
1000 emit_fg(as, MIPSI_CVT_W_D, tmp, tmp); 1486 emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp);
1487#endif
1001 dest = tmp; 1488 dest = tmp;
1002 t.irt = IRT_NUM; /* Check for original type. */ 1489 t.irt = IRT_NUM; /* Check for original type. */
1003 } else { 1490 } else {
1004 Reg tmp = ra_scratch(as, RSET_GPR); 1491 Reg tmp = ra_scratch(as, RSET_GPR);
1492#if LJ_SOFTFP
1493 ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
1494 ra_destreg(as, ir, RID_RET);
1495 emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_i2d].func, 0);
1496 emit_dta(as, MIPSI_SLL, REGARG_FIRSTGPR, tmp, 0);
1497#else
1005 emit_fg(as, MIPSI_CVT_D_W, dest, dest); 1498 emit_fg(as, MIPSI_CVT_D_W, dest, dest);
1006 emit_tg(as, MIPSI_MTC1, tmp, dest); 1499 emit_tg(as, MIPSI_MTC1, tmp, dest);
1500#endif
1007 dest = tmp; 1501 dest = tmp;
1008 t.irt = IRT_INT; /* Check for original type. */ 1502 t.irt = IRT_INT; /* Check for original type. */
1009 } 1503 }
1010 } 1504 }
1505#if LJ_64
1506 else if (irt_isaddr(t)) {
1507 /* Clear type from pointers. */
1508 emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
1509 } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
1510 /* Sign-extend integers. */
1511 emit_dta(as, MIPSI_SLL, dest, dest, 0);
1512 }
1513#endif
1011 goto dotypecheck; 1514 goto dotypecheck;
1012 } 1515 }
1013 base = ra_alloc1(as, REF_BASE, allow); 1516 base = ra_alloc1(as, REF_BASE, allow);
1014 rset_clear(allow, base); 1517 rset_clear(allow, base);
1015dotypecheck: 1518dotypecheck:
1016 if (irt_isnum(t)) { 1519#if LJ_32
1017 if ((ir->op2 & IRSLOAD_TYPECHECK)) { 1520 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1018 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); 1521 if (ra_noreg(type))
1019 emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
1020 type = RID_TMP; 1522 type = RID_TMP;
1021 } 1523 if (irt_isnum(t)) {
1022 if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); 1524 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
1023 } else { 1525 emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
1024 if ((ir->op2 & IRSLOAD_TYPECHECK)) { 1526 } else {
1025 Reg ktype = ra_allock(as, irt_toitype(t), allow); 1527 Reg ktype = ra_allock(as, irt_toitype(t), allow);
1026 asm_guard(as, MIPSI_BNE, RID_TMP, ktype); 1528 asm_guard(as, MIPSI_BNE, type, ktype);
1027 type = RID_TMP;
1028 } 1529 }
1029 if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
1030 } 1530 }
1031 if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4)); 1531 if (ra_hasreg(dest)) {
1532 if (!LJ_SOFTFP && irt_isnum(t))
1533 emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
1534 else
1535 emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
1536 }
1537 if (ra_hasreg(type))
1538 emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
1539#else
1540 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1541 type = dest < RID_MAX_GPR ? dest : RID_TMP;
1542 if (irt_ispri(t)) {
1543 asm_guard(as, MIPSI_BNE, type,
1544 ra_allock(as, ~((int64_t)~irt_toitype(t) << 47) , allow));
1545 } else {
1546 if (irt_isnum(t)) {
1547 asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
1548 emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
1549 if (!LJ_SOFTFP && ra_hasreg(dest))
1550 emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
1551 } else {
1552 asm_guard(as, MIPSI_BNE, RID_TMP,
1553 ra_allock(as, (int32_t)irt_toitype(t), allow));
1554 }
1555 emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 15);
1556 }
1557 emit_tsi(as, MIPSI_LD, type, base, ofs);
1558 } else if (ra_hasreg(dest)) {
1559 if (!LJ_SOFTFP && irt_isnum(t))
1560 emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
1561 else
1562 emit_tsi(as, irt_isint(t) ? MIPSI_LW : MIPSI_LD, dest, base,
1563 ofs ^ ((LJ_BE && irt_isint(t)) ? 4 : 0));
1564 }
1565#endif
1032} 1566}
1033 1567
1034/* -- Allocations --------------------------------------------------------- */ 1568/* -- Allocations --------------------------------------------------------- */
@@ -1037,19 +1571,16 @@ dotypecheck:
1037static void asm_cnew(ASMState *as, IRIns *ir) 1571static void asm_cnew(ASMState *as, IRIns *ir)
1038{ 1572{
1039 CTState *cts = ctype_ctsG(J2G(as->J)); 1573 CTState *cts = ctype_ctsG(J2G(as->J));
1040 CTypeID ctypeid = (CTypeID)IR(ir->op1)->i; 1574 CTypeID id = (CTypeID)IR(ir->op1)->i;
1041 CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? 1575 CTSize sz;
1042 lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i; 1576 CTInfo info = lj_ctype_info(cts, id, &sz);
1043 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; 1577 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1044 IRRef args[2]; 1578 IRRef args[4];
1045 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1046 RegSet drop = RSET_SCRATCH; 1579 RegSet drop = RSET_SCRATCH;
1047 lua_assert(sz != CTSIZE_INVALID); 1580 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1581 "bad CNEW/CNEWI operands");
1048 1582
1049 args[0] = ASMREF_L; /* lua_State *L */
1050 args[1] = ASMREF_TMP1; /* MSize size */
1051 as->gcsteps++; 1583 as->gcsteps++;
1052
1053 if (ra_hasreg(ir->r)) 1584 if (ra_hasreg(ir->r))
1054 rset_clear(drop, ir->r); /* Dest reg handled below. */ 1585 rset_clear(drop, ir->r); /* Dest reg handled below. */
1055 ra_evictset(as, drop); 1586 ra_evictset(as, drop);
@@ -1058,11 +1589,12 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1058 1589
1059 /* Initialize immutable cdata object. */ 1590 /* Initialize immutable cdata object. */
1060 if (ir->o == IR_CNEWI) { 1591 if (ir->o == IR_CNEWI) {
1592 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1593#if LJ_32
1061 int32_t ofs = sizeof(GCcdata); 1594 int32_t ofs = sizeof(GCcdata);
1062 lua_assert(sz == 4 || sz == 8);
1063 if (sz == 8) { 1595 if (sz == 8) {
1064 ofs += 4; 1596 ofs += 4;
1065 lua_assert((ir+1)->o == IR_HIOP); 1597 lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
1066 if (LJ_LE) ir++; 1598 if (LJ_LE) ir++;
1067 } 1599 }
1068 for (;;) { 1600 for (;;) {
@@ -1072,18 +1604,33 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1072 if (ofs == sizeof(GCcdata)) break; 1604 if (ofs == sizeof(GCcdata)) break;
1073 ofs -= 4; if (LJ_BE) ir++; else ir--; 1605 ofs -= 4; if (LJ_BE) ir++; else ir--;
1074 } 1606 }
1607#else
1608 emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow),
1609 RID_RET, sizeof(GCcdata));
1610#endif
1611 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1612 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1613 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1614 args[0] = ASMREF_L; /* lua_State *L */
1615 args[1] = ir->op1; /* CTypeID id */
1616 args[2] = ir->op2; /* CTSize sz */
1617 args[3] = ASMREF_TMP1; /* CTSize align */
1618 asm_gencall(as, ci, args);
1619 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1620 return;
1075 } 1621 }
1622
1076 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ 1623 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
1077 emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); 1624 emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
1078 emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); 1625 emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
1079 emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA); 1626 emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
1080 emit_ti(as, MIPSI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */ 1627 emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
1628 args[0] = ASMREF_L; /* lua_State *L */
1629 args[1] = ASMREF_TMP1; /* MSize size */
1081 asm_gencall(as, ci, args); 1630 asm_gencall(as, ci, args);
1082 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), 1631 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
1083 ra_releasetmp(as, ASMREF_TMP1)); 1632 ra_releasetmp(as, ASMREF_TMP1));
1084} 1633}
1085#else
1086#define asm_cnew(as, ir) ((void)0)
1087#endif 1634#endif
1088 1635
1089/* -- Write barriers ------------------------------------------------------ */ 1636/* -- Write barriers ------------------------------------------------------ */
@@ -1094,7 +1641,7 @@ static void asm_tbar(ASMState *as, IRIns *ir)
1094 Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); 1641 Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
1095 Reg link = RID_TMP; 1642 Reg link = RID_TMP;
1096 MCLabel l_end = emit_label(as); 1643 MCLabel l_end = emit_label(as);
1097 emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist)); 1644 emit_tsi(as, MIPSI_AS, link, tab, (int32_t)offsetof(GCtab, gclist));
1098 emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked)); 1645 emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
1099 emit_setgl(as, tab, gc.grayagain); 1646 emit_setgl(as, tab, gc.grayagain);
1100 emit_getgl(as, link, gc.grayagain); 1647 emit_getgl(as, link, gc.grayagain);
@@ -1111,13 +1658,13 @@ static void asm_obar(ASMState *as, IRIns *ir)
1111 MCLabel l_end; 1658 MCLabel l_end;
1112 Reg obj, val, tmp; 1659 Reg obj, val, tmp;
1113 /* No need for other object barriers (yet). */ 1660 /* No need for other object barriers (yet). */
1114 lua_assert(IR(ir->op1)->o == IR_UREFC); 1661 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1115 ra_evictset(as, RSET_SCRATCH); 1662 ra_evictset(as, RSET_SCRATCH);
1116 l_end = emit_label(as); 1663 l_end = emit_label(as);
1117 args[0] = ASMREF_TMP1; /* global_State *g */ 1664 args[0] = ASMREF_TMP1; /* global_State *g */
1118 args[1] = ir->op1; /* TValue *tv */ 1665 args[1] = ir->op1; /* TValue *tv */
1119 asm_gencall(as, ci, args); 1666 asm_gencall(as, ci, args);
1120 emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); 1667 emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
1121 obj = IR(ir->op1)->r; 1668 obj = IR(ir->op1)->r;
1122 tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); 1669 tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
1123 emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); 1670 emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
@@ -1132,6 +1679,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
1132 1679
1133/* -- Arithmetic and logic operations ------------------------------------- */ 1680/* -- Arithmetic and logic operations ------------------------------------- */
1134 1681
1682#if !LJ_SOFTFP
1135static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi) 1683static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
1136{ 1684{
1137 Reg dest = ra_dest(as, ir, RSET_FPR); 1685 Reg dest = ra_dest(as, ir, RSET_FPR);
@@ -1146,83 +1694,147 @@ static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
1146 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); 1694 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
1147 emit_fg(as, mi, dest, left); 1695 emit_fg(as, mi, dest, left);
1148} 1696}
1697#endif
1149 1698
1150static int asm_fpjoin_pow(ASMState *as, IRIns *ir) 1699#if !LJ_SOFTFP32
1151{ 1700static void asm_fpmath(ASMState *as, IRIns *ir)
1152 IRIns *irp = IR(ir->op1); 1701{
1153 if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { 1702#if !LJ_SOFTFP
1154 IRIns *irpp = IR(irp->op1); 1703 if (ir->op2 <= IRFPM_TRUNC)
1155 if (irpp == ir-2 && irpp->o == IR_FPMATH && 1704 asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
1156 irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { 1705 else if (ir->op2 == IRFPM_SQRT)
1157 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow]; 1706 asm_fpunary(as, ir, MIPSI_SQRT_D);
1158 IRRef args[2]; 1707 else
1159 args[0] = irpp->op1; 1708#endif
1160 args[1] = irp->op2; 1709 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1161 asm_setupresult(as, ir, ci);
1162 asm_gencall(as, ci, args);
1163 return 1;
1164 }
1165 }
1166 return 0;
1167} 1710}
1711#endif
1712
1713#if !LJ_SOFTFP
1714#define asm_fpadd(as, ir) asm_fparith(as, ir, MIPSI_ADD_D)
1715#define asm_fpsub(as, ir) asm_fparith(as, ir, MIPSI_SUB_D)
1716#define asm_fpmul(as, ir) asm_fparith(as, ir, MIPSI_MUL_D)
1717#elif LJ_64 /* && LJ_SOFTFP */
1718#define asm_fpadd(as, ir) asm_callid(as, ir, IRCALL_softfp_add)
1719#define asm_fpsub(as, ir) asm_callid(as, ir, IRCALL_softfp_sub)
1720#define asm_fpmul(as, ir) asm_callid(as, ir, IRCALL_softfp_mul)
1721#endif
1168 1722
1169static void asm_add(ASMState *as, IRIns *ir) 1723static void asm_add(ASMState *as, IRIns *ir)
1170{ 1724{
1171 if (irt_isnum(ir->t)) { 1725 IRType1 t = ir->t;
1172 asm_fparith(as, ir, MIPSI_ADD_D); 1726#if !LJ_SOFTFP32
1173 } else { 1727 if (irt_isnum(t)) {
1728 asm_fpadd(as, ir);
1729 } else
1730#endif
1731 {
1732 /* TODO MIPSR6: Fuse ADD(BSHL(a,1-4),b) or ADD(ADD(a,a),b) to MIPSI_ALSA. */
1174 Reg dest = ra_dest(as, ir, RSET_GPR); 1733 Reg dest = ra_dest(as, ir, RSET_GPR);
1175 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 1734 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1176 if (irref_isk(ir->op2)) { 1735 if (irref_isk(ir->op2)) {
1177 int32_t k = IR(ir->op2)->i; 1736 intptr_t k = get_kval(as, ir->op2);
1178 if (checki16(k)) { 1737 if (checki16(k)) {
1179 emit_tsi(as, MIPSI_ADDIU, dest, left, k); 1738 emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest,
1739 left, k);
1180 return; 1740 return;
1181 } 1741 }
1182 } 1742 }
1183 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); 1743 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1184 emit_dst(as, MIPSI_ADDU, dest, left, right); 1744 emit_dst(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDU : MIPSI_ADDU, dest,
1745 left, right);
1185 } 1746 }
1186} 1747}
1187 1748
1188static void asm_sub(ASMState *as, IRIns *ir) 1749static void asm_sub(ASMState *as, IRIns *ir)
1189{ 1750{
1751#if !LJ_SOFTFP32
1190 if (irt_isnum(ir->t)) { 1752 if (irt_isnum(ir->t)) {
1191 asm_fparith(as, ir, MIPSI_SUB_D); 1753 asm_fpsub(as, ir);
1192 } else { 1754 } else
1755#endif
1756 {
1193 Reg dest = ra_dest(as, ir, RSET_GPR); 1757 Reg dest = ra_dest(as, ir, RSET_GPR);
1194 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 1758 Reg right, left = ra_alloc2(as, ir, RSET_GPR);
1195 right = (left >> 8); left &= 255; 1759 right = (left >> 8); left &= 255;
1196 emit_dst(as, MIPSI_SUBU, dest, left, right); 1760 emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
1761 left, right);
1197 } 1762 }
1198} 1763}
1199 1764
1200static void asm_mul(ASMState *as, IRIns *ir) 1765static void asm_mul(ASMState *as, IRIns *ir)
1201{ 1766{
1767#if !LJ_SOFTFP32
1202 if (irt_isnum(ir->t)) { 1768 if (irt_isnum(ir->t)) {
1203 asm_fparith(as, ir, MIPSI_MUL_D); 1769 asm_fpmul(as, ir);
1204 } else { 1770 } else
1771#endif
1772 {
1205 Reg dest = ra_dest(as, ir, RSET_GPR); 1773 Reg dest = ra_dest(as, ir, RSET_GPR);
1206 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 1774 Reg right, left = ra_alloc2(as, ir, RSET_GPR);
1207 right = (left >> 8); left &= 255; 1775 right = (left >> 8); left &= 255;
1208 emit_dst(as, MIPSI_MUL, dest, left, right); 1776 if (LJ_64 && irt_is64(ir->t)) {
1777#if !LJ_TARGET_MIPSR6
1778 emit_dst(as, MIPSI_MFLO, dest, 0, 0);
1779 emit_dst(as, MIPSI_DMULT, 0, left, right);
1780#else
1781 emit_dst(as, MIPSI_DMUL, dest, left, right);
1782#endif
1783 } else {
1784 emit_dst(as, MIPSI_MUL, dest, left, right);
1785 }
1209 } 1786 }
1210} 1787}
1211 1788
1789#if !LJ_SOFTFP32
1790static void asm_fpdiv(ASMState *as, IRIns *ir)
1791{
1792#if !LJ_SOFTFP
1793 asm_fparith(as, ir, MIPSI_DIV_D);
1794#else
1795 asm_callid(as, ir, IRCALL_softfp_div);
1796#endif
1797}
1798#endif
1799
1212static void asm_neg(ASMState *as, IRIns *ir) 1800static void asm_neg(ASMState *as, IRIns *ir)
1213{ 1801{
1802#if !LJ_SOFTFP
1214 if (irt_isnum(ir->t)) { 1803 if (irt_isnum(ir->t)) {
1215 asm_fpunary(as, ir, MIPSI_NEG_D); 1804 asm_fpunary(as, ir, MIPSI_NEG_D);
1216 } else { 1805 } else
1806#elif LJ_64 /* && LJ_SOFTFP */
1807 if (irt_isnum(ir->t)) {
1217 Reg dest = ra_dest(as, ir, RSET_GPR); 1808 Reg dest = ra_dest(as, ir, RSET_GPR);
1218 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 1809 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1219 emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); 1810 emit_dst(as, MIPSI_XOR, dest, left,
1811 ra_allock(as, 0x8000000000000000ll, rset_exclude(RSET_GPR, dest)));
1812 } else
1813#endif
1814 {
1815 Reg dest = ra_dest(as, ir, RSET_GPR);
1816 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1817 emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
1818 RID_ZERO, left);
1220 } 1819 }
1221} 1820}
1222 1821
1822#if !LJ_SOFTFP
1823#define asm_abs(as, ir) asm_fpunary(as, ir, MIPSI_ABS_D)
1824#elif LJ_64 /* && LJ_SOFTFP */
1825static void asm_abs(ASMState *as, IRIns *ir)
1826{
1827 Reg dest = ra_dest(as, ir, RSET_GPR);
1828 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1829 emit_tsml(as, MIPSI_DEXTM, dest, left, 30, 0);
1830}
1831#endif
1832
1223static void asm_arithov(ASMState *as, IRIns *ir) 1833static void asm_arithov(ASMState *as, IRIns *ir)
1224{ 1834{
1835 /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
1225 Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); 1836 Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
1837 lj_assertA(!irt_is64(ir->t), "bad usage");
1226 if (irref_isk(ir->op2)) { 1838 if (irref_isk(ir->op2)) {
1227 int k = IR(ir->op2)->i; 1839 int k = IR(ir->op2)->i;
1228 if (ir->o == IR_SUBOV) k = -k; 1840 if (ir->o == IR_SUBOV) k = -k;
@@ -1253,16 +1865,29 @@ static void asm_arithov(ASMState *as, IRIns *ir)
1253 emit_move(as, RID_TMP, dest == left ? left : right); 1865 emit_move(as, RID_TMP, dest == left ? left : right);
1254} 1866}
1255 1867
1868#define asm_addov(as, ir) asm_arithov(as, ir)
1869#define asm_subov(as, ir) asm_arithov(as, ir)
1870
1256static void asm_mulov(ASMState *as, IRIns *ir) 1871static void asm_mulov(ASMState *as, IRIns *ir)
1257{ 1872{
1258#if LJ_DUALNUM 1873 Reg dest = ra_dest(as, ir, RSET_GPR);
1259#error "NYI: MULOV" 1874 Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
1875 right = (left >> 8); left &= 255;
1876 tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
1877 right), dest));
1878 asm_guard(as, MIPSI_BNE, RID_TMP, tmp);
1879 emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31);
1880#if !LJ_TARGET_MIPSR6
1881 emit_dst(as, MIPSI_MFHI, tmp, 0, 0);
1882 emit_dst(as, MIPSI_MFLO, dest, 0, 0);
1883 emit_dst(as, MIPSI_MULT, 0, left, right);
1260#else 1884#else
1261 UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused in single-number mode. */ 1885 emit_dst(as, MIPSI_MUL, dest, left, right);
1886 emit_dst(as, MIPSI_MUH, tmp, left, right);
1262#endif 1887#endif
1263} 1888}
1264 1889
1265#if LJ_HASFFI 1890#if LJ_32 && LJ_HASFFI
1266static void asm_add64(ASMState *as, IRIns *ir) 1891static void asm_add64(ASMState *as, IRIns *ir)
1267{ 1892{
1268 Reg dest = ra_dest(as, ir, RSET_GPR); 1893 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1346,7 +1971,7 @@ static void asm_neg64(ASMState *as, IRIns *ir)
1346} 1971}
1347#endif 1972#endif
1348 1973
1349static void asm_bitnot(ASMState *as, IRIns *ir) 1974static void asm_bnot(ASMState *as, IRIns *ir)
1350{ 1975{
1351 Reg left, right, dest = ra_dest(as, ir, RSET_GPR); 1976 Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
1352 IRIns *irl = IR(ir->op1); 1977 IRIns *irl = IR(ir->op1);
@@ -1360,11 +1985,12 @@ static void asm_bitnot(ASMState *as, IRIns *ir)
1360 emit_dst(as, MIPSI_NOR, dest, left, right); 1985 emit_dst(as, MIPSI_NOR, dest, left, right);
1361} 1986}
1362 1987
1363static void asm_bitswap(ASMState *as, IRIns *ir) 1988static void asm_bswap(ASMState *as, IRIns *ir)
1364{ 1989{
1365 Reg dest = ra_dest(as, ir, RSET_GPR); 1990 Reg dest = ra_dest(as, ir, RSET_GPR);
1366 Reg left = ra_alloc1(as, ir->op1, RSET_GPR); 1991 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1367 if ((as->flags & JIT_F_MIPS32R2)) { 1992#if LJ_32
1993 if ((as->flags & JIT_F_MIPSXXR2)) {
1368 emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16); 1994 emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
1369 emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left); 1995 emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
1370 } else { 1996 } else {
@@ -1379,6 +2005,15 @@ static void asm_bitswap(ASMState *as, IRIns *ir)
1379 emit_dta(as, MIPSI_SRL, tmp, left, 24); 2005 emit_dta(as, MIPSI_SRL, tmp, left, 24);
1380 emit_dta(as, MIPSI_SLL, RID_TMP, left, 24); 2006 emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
1381 } 2007 }
2008#else
2009 if (irt_is64(ir->t)) {
2010 emit_dst(as, MIPSI_DSHD, dest, 0, RID_TMP);
2011 emit_dst(as, MIPSI_DSBH, RID_TMP, 0, left);
2012 } else {
2013 emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
2014 emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
2015 }
2016#endif
1382} 2017}
1383 2018
1384static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) 2019static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
@@ -1386,7 +2021,7 @@ static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
1386 Reg dest = ra_dest(as, ir, RSET_GPR); 2021 Reg dest = ra_dest(as, ir, RSET_GPR);
1387 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 2022 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1388 if (irref_isk(ir->op2)) { 2023 if (irref_isk(ir->op2)) {
1389 int32_t k = IR(ir->op2)->i; 2024 intptr_t k = get_kval(as, ir->op2);
1390 if (checku16(k)) { 2025 if (checku16(k)) {
1391 emit_tsi(as, mik, dest, left, k); 2026 emit_tsi(as, mik, dest, left, k);
1392 return; 2027 return;
@@ -1396,22 +2031,34 @@ static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
1396 emit_dst(as, mi, dest, left, right); 2031 emit_dst(as, mi, dest, left, right);
1397} 2032}
1398 2033
2034#define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI)
2035#define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI)
2036#define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI)
2037
1399static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) 2038static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
1400{ 2039{
1401 Reg dest = ra_dest(as, ir, RSET_GPR); 2040 Reg dest = ra_dest(as, ir, RSET_GPR);
1402 if (irref_isk(ir->op2)) { /* Constant shifts. */ 2041 if (irref_isk(ir->op2)) { /* Constant shifts. */
1403 uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); 2042 uint32_t shift = (uint32_t)IR(ir->op2)->i;
1404 emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift); 2043 if (LJ_64 && irt_is64(ir->t)) mik |= (shift & 32) ? MIPSI_D32 : MIPSI_D;
2044 emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR),
2045 (shift & 31));
1405 } else { 2046 } else {
1406 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 2047 Reg right, left = ra_alloc2(as, ir, RSET_GPR);
1407 right = (left >> 8); left &= 255; 2048 right = (left >> 8); left &= 255;
2049 if (LJ_64 && irt_is64(ir->t)) mi |= MIPSI_DV;
1408 emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */ 2050 emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
1409 } 2051 }
1410} 2052}
1411 2053
1412static void asm_bitror(ASMState *as, IRIns *ir) 2054#define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL)
2055#define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL)
2056#define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA)
2057#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
2058
2059static void asm_bror(ASMState *as, IRIns *ir)
1413{ 2060{
1414 if ((as->flags & JIT_F_MIPS32R2)) { 2061 if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
1415 asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR); 2062 asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
1416 } else { 2063 } else {
1417 Reg dest = ra_dest(as, ir, RSET_GPR); 2064 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1430,55 +2077,182 @@ static void asm_bitror(ASMState *as, IRIns *ir)
1430 } 2077 }
1431} 2078}
1432 2079
2080#if LJ_SOFTFP
2081static void asm_sfpmin_max(ASMState *as, IRIns *ir)
2082{
2083 CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ? IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax];
2084#if LJ_64
2085 IRRef args[2];
2086 args[0] = ir->op1;
2087 args[1] = ir->op2;
2088#else
2089 IRRef args[4];
2090 args[0^LJ_BE] = ir->op1;
2091 args[1^LJ_BE] = (ir+1)->op1;
2092 args[2^LJ_BE] = ir->op2;
2093 args[3^LJ_BE] = (ir+1)->op2;
2094#endif
2095 asm_setupresult(as, ir, &ci);
2096 emit_call(as, (void *)ci.func, 0);
2097 ci.func = NULL;
2098 asm_gencall(as, &ci, args);
2099}
2100#endif
2101
1433static void asm_min_max(ASMState *as, IRIns *ir, int ismax) 2102static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
1434{ 2103{
1435 if (irt_isnum(ir->t)) { 2104 if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
2105#if LJ_SOFTFP
2106 asm_sfpmin_max(as, ir);
2107#else
1436 Reg dest = ra_dest(as, ir, RSET_FPR); 2108 Reg dest = ra_dest(as, ir, RSET_FPR);
1437 Reg right, left = ra_alloc2(as, ir, RSET_FPR); 2109 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1438 right = (left >> 8); left &= 255; 2110 right = (left >> 8); left &= 255;
2111#if !LJ_TARGET_MIPSR6
1439 if (dest == left) { 2112 if (dest == left) {
1440 emit_fg(as, MIPSI_MOVT_D, dest, right); 2113 emit_fg(as, MIPSI_MOVF_D, dest, right);
1441 } else { 2114 } else {
1442 emit_fg(as, MIPSI_MOVF_D, dest, left); 2115 emit_fg(as, MIPSI_MOVT_D, dest, left);
1443 if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right); 2116 if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
1444 } 2117 }
1445 emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left); 2118 emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? right : left, ismax ? left : right);
2119#else
2120 emit_fgh(as, ismax ? MIPSI_MAX_D : MIPSI_MIN_D, dest, left, right);
2121#endif
2122#endif
1446 } else { 2123 } else {
1447 Reg dest = ra_dest(as, ir, RSET_GPR); 2124 Reg dest = ra_dest(as, ir, RSET_GPR);
1448 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 2125 Reg right, left = ra_alloc2(as, ir, RSET_GPR);
1449 right = (left >> 8); left &= 255; 2126 right = (left >> 8); left &= 255;
1450 if (dest == left) { 2127 if (left == right) {
1451 emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP); 2128 if (dest != left) emit_move(as, dest, left);
1452 } else { 2129 } else {
1453 emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP); 2130#if !LJ_TARGET_MIPSR6
1454 if (dest != right) emit_move(as, dest, right); 2131 if (dest == left) {
2132 emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
2133 } else {
2134 emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
2135 if (dest != right) emit_move(as, dest, right);
2136 }
2137#else
2138 emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
2139 if (dest != right) {
2140 emit_dst(as, MIPSI_SELNEZ, RID_TMP, right, RID_TMP);
2141 emit_dst(as, MIPSI_SELEQZ, dest, left, RID_TMP);
2142 } else {
2143 emit_dst(as, MIPSI_SELEQZ, RID_TMP, left, RID_TMP);
2144 emit_dst(as, MIPSI_SELNEZ, dest, right, RID_TMP);
2145 }
2146#endif
2147 emit_dst(as, MIPSI_SLT, RID_TMP,
2148 ismax ? left : right, ismax ? right : left);
1455 } 2149 }
1456 emit_dst(as, MIPSI_SLT, RID_TMP,
1457 ismax ? left : right, ismax ? right : left);
1458 } 2150 }
1459} 2151}
1460 2152
2153#define asm_min(as, ir) asm_min_max(as, ir, 0)
2154#define asm_max(as, ir) asm_min_max(as, ir, 1)
2155
1461/* -- Comparisons --------------------------------------------------------- */ 2156/* -- Comparisons --------------------------------------------------------- */
1462 2157
2158#if LJ_SOFTFP
2159/* SFP comparisons. */
2160static void asm_sfpcomp(ASMState *as, IRIns *ir)
2161{
2162 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
2163 RegSet drop = RSET_SCRATCH;
2164 Reg r;
2165#if LJ_64
2166 IRRef args[2];
2167 args[0] = ir->op1;
2168 args[1] = ir->op2;
2169#else
2170 IRRef args[4];
2171 args[LJ_LE ? 0 : 1] = ir->op1; args[LJ_LE ? 1 : 0] = (ir+1)->op1;
2172 args[LJ_LE ? 2 : 3] = ir->op2; args[LJ_LE ? 3 : 2] = (ir+1)->op2;
2173#endif
2174
2175 for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+(LJ_64?1:3); r++) {
2176 if (!rset_test(as->freeset, r) &&
2177 regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
2178 rset_clear(drop, r);
2179 }
2180 ra_evictset(as, drop);
2181
2182 asm_setupresult(as, ir, ci);
2183
2184 switch ((IROp)ir->o) {
2185 case IR_LT:
2186 asm_guard(as, MIPSI_BGEZ, RID_RET, 0);
2187 break;
2188 case IR_ULT:
2189 asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
2190 emit_loadi(as, RID_TMP, 1);
2191 asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);
2192 break;
2193 case IR_GE:
2194 asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
2195 emit_loadi(as, RID_TMP, 2);
2196 asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
2197 break;
2198 case IR_LE:
2199 asm_guard(as, MIPSI_BGTZ, RID_RET, 0);
2200 break;
2201 case IR_GT:
2202 asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
2203 emit_loadi(as, RID_TMP, 2);
2204 asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
2205 break;
2206 case IR_UGE:
2207 asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
2208 break;
2209 case IR_ULE:
2210 asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
2211 emit_loadi(as, RID_TMP, 1);
2212 break;
2213 case IR_UGT: case IR_ABC:
2214 asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
2215 break;
2216 case IR_EQ: case IR_NE:
2217 asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_RET, RID_ZERO);
2218 default:
2219 break;
2220 }
2221 asm_gencall(as, ci, args);
2222}
2223#endif
2224
1463static void asm_comp(ASMState *as, IRIns *ir) 2225static void asm_comp(ASMState *as, IRIns *ir)
1464{ 2226{
1465 /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ 2227 /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
1466 IROp op = ir->o; 2228 IROp op = ir->o;
1467 if (irt_isnum(ir->t)) { 2229 if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
2230#if LJ_SOFTFP
2231 asm_sfpcomp(as, ir);
2232#else
2233#if !LJ_TARGET_MIPSR6
1468 Reg right, left = ra_alloc2(as, ir, RSET_FPR); 2234 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1469 right = (left >> 8); left &= 255; 2235 right = (left >> 8); left &= 255;
1470 asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); 2236 asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
1471 emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right); 2237 emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
2238#else
2239 Reg tmp, right, left = ra_alloc2(as, ir, RSET_FPR);
2240 right = (left >> 8); left &= 255;
2241 tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
2242 asm_guard(as, (op&1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
2243 emit_fgh(as, MIPSI_CMP_LT_D + ((op&3) ^ ((op>>2)&1)), tmp, left, right);
2244#endif
2245#endif
1472 } else { 2246 } else {
1473 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); 2247 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
1474 if (op == IR_ABC) op = IR_UGT; 2248 if (op == IR_ABC) op = IR_UGT;
1475 if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) { 2249 if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) {
1476 MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : 2250 MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
1477 ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); 2251 ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
1478 asm_guard(as, mi, left, 0); 2252 asm_guard(as, mi, left, 0);
1479 } else { 2253 } else {
1480 if (irref_isk(ir->op2)) { 2254 if (irref_isk(ir->op2)) {
1481 int32_t k = IR(ir->op2)->i; 2255 intptr_t k = get_kval(as, ir->op2);
1482 if ((op&2)) k++; 2256 if ((op&2)) k++;
1483 if (checki16(k)) { 2257 if (checki16(k)) {
1484 asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); 2258 asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
@@ -1495,19 +2269,28 @@ static void asm_comp(ASMState *as, IRIns *ir)
1495 } 2269 }
1496} 2270}
1497 2271
1498static void asm_compeq(ASMState *as, IRIns *ir) 2272static void asm_equal(ASMState *as, IRIns *ir)
1499{ 2273{
1500 Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR); 2274 Reg right, left = ra_alloc2(as, ir, (!LJ_SOFTFP && irt_isnum(ir->t)) ?
2275 RSET_FPR : RSET_GPR);
1501 right = (left >> 8); left &= 255; 2276 right = (left >> 8); left &= 255;
1502 if (irt_isnum(ir->t)) { 2277 if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
2278#if LJ_SOFTFP
2279 asm_sfpcomp(as, ir);
2280#elif !LJ_TARGET_MIPSR6
1503 asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); 2281 asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
1504 emit_fgh(as, MIPSI_C_EQ_D, 0, left, right); 2282 emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
2283#else
2284 Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
2285 asm_guard(as, (ir->o & 1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
2286 emit_fgh(as, MIPSI_CMP_EQ_D, tmp, left, right);
2287#endif
1505 } else { 2288 } else {
1506 asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right); 2289 asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
1507 } 2290 }
1508} 2291}
1509 2292
1510#if LJ_HASFFI 2293#if LJ_32 && LJ_HASFFI
1511/* 64 bit integer comparisons. */ 2294/* 64 bit integer comparisons. */
1512static void asm_comp64(ASMState *as, IRIns *ir) 2295static void asm_comp64(ASMState *as, IRIns *ir)
1513{ 2296{
@@ -1549,51 +2332,101 @@ static void asm_comp64eq(ASMState *as, IRIns *ir)
1549/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ 2332/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
1550static void asm_hiop(ASMState *as, IRIns *ir) 2333static void asm_hiop(ASMState *as, IRIns *ir)
1551{ 2334{
1552#if LJ_HASFFI 2335#if LJ_32 && (LJ_HASFFI || LJ_SOFTFP)
1553 /* HIOP is marked as a store because it needs its own DCE logic. */ 2336 /* HIOP is marked as a store because it needs its own DCE logic. */
1554 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ 2337 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
1555 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; 2338 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
1556 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ 2339 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
1557 as->curins--; /* Always skip the CONV. */ 2340 as->curins--; /* Always skip the CONV. */
2341#if LJ_HASFFI && !LJ_SOFTFP
1558 if (usehi || uselo) 2342 if (usehi || uselo)
1559 asm_conv64(as, ir); 2343 asm_conv64(as, ir);
1560 return; 2344 return;
2345#endif
1561 } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */ 2346 } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
1562 as->curins--; /* Always skip the loword comparison. */ 2347 as->curins--; /* Always skip the loword comparison. */
2348#if LJ_SOFTFP
2349 if (!irt_isint(ir->t)) {
2350 asm_sfpcomp(as, ir-1);
2351 return;
2352 }
2353#endif
2354#if LJ_HASFFI
1563 asm_comp64(as, ir); 2355 asm_comp64(as, ir);
2356#endif
1564 return; 2357 return;
1565 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ 2358 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
1566 as->curins--; /* Always skip the loword comparison. */ 2359 as->curins--; /* Always skip the loword comparison. */
2360#if LJ_SOFTFP
2361 if (!irt_isint(ir->t)) {
2362 asm_sfpcomp(as, ir-1);
2363 return;
2364 }
2365#endif
2366#if LJ_HASFFI
1567 asm_comp64eq(as, ir); 2367 asm_comp64eq(as, ir);
2368#endif
2369 return;
2370#if LJ_SOFTFP
2371 } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
2372 as->curins--; /* Always skip the loword min/max. */
2373 if (uselo || usehi)
2374 asm_sfpmin_max(as, ir-1);
1568 return; 2375 return;
2376#endif
1569 } else if ((ir-1)->o == IR_XSTORE) { 2377 } else if ((ir-1)->o == IR_XSTORE) {
1570 as->curins--; /* Handle both stores here. */ 2378 as->curins--; /* Handle both stores here. */
1571 if ((ir-1)->r != RID_SINK) { 2379 if ((ir-1)->r != RID_SINK) {
1572 asm_xstore(as, ir, LJ_LE ? 4 : 0); 2380 asm_xstore_(as, ir, LJ_LE ? 4 : 0);
1573 asm_xstore(as, ir-1, LJ_LE ? 0 : 4); 2381 asm_xstore_(as, ir-1, LJ_LE ? 0 : 4);
1574 } 2382 }
1575 return; 2383 return;
1576 } 2384 }
1577 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ 2385 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
1578 switch ((ir-1)->o) { 2386 switch ((ir-1)->o) {
2387#if LJ_HASFFI
1579 case IR_ADD: as->curins--; asm_add64(as, ir); break; 2388 case IR_ADD: as->curins--; asm_add64(as, ir); break;
1580 case IR_SUB: as->curins--; asm_sub64(as, ir); break; 2389 case IR_SUB: as->curins--; asm_sub64(as, ir); break;
1581 case IR_NEG: as->curins--; asm_neg64(as, ir); break; 2390 case IR_NEG: as->curins--; asm_neg64(as, ir); break;
2391#endif
2392#if LJ_SOFTFP
2393 case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2394 case IR_STRTO:
2395 if (!uselo)
2396 ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
2397 break;
2398#endif
1582 case IR_CALLN: 2399 case IR_CALLN:
2400 case IR_CALLS:
1583 case IR_CALLXS: 2401 case IR_CALLXS:
1584 if (!uselo) 2402 if (!uselo)
1585 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ 2403 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
1586 break; 2404 break;
2405#if LJ_SOFTFP
2406 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
2407#endif
1587 case IR_CNEWI: 2408 case IR_CNEWI:
1588 /* Nothing to do here. Handled by lo op itself. */ 2409 /* Nothing to do here. Handled by lo op itself. */
1589 break; 2410 break;
1590 default: lua_assert(0); break; 2411 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
1591 } 2412 }
1592#else 2413#else
1593 UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ 2414 /* Unused on MIPS64 or without SOFTFP or FFI. */
2415 UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
1594#endif 2416#endif
1595} 2417}
1596 2418
2419/* -- Profiling ----------------------------------------------------------- */
2420
2421static void asm_prof(ASMState *as, IRIns *ir)
2422{
2423 UNUSED(ir);
2424 asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
2425 emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE);
2426 emit_lsglptr(as, MIPSI_LBU, RID_TMP,
2427 (int32_t)offsetof(global_State, hookmask));
2428}
2429
1597/* -- Stack handling ------------------------------------------------------ */ 2430/* -- Stack handling ------------------------------------------------------ */
1598 2431
1599/* Check Lua stack size for overflow. Use exit handler as fallback. */ 2432/* Check Lua stack size for overflow. Use exit handler as fallback. */
@@ -1604,47 +2437,70 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
1604 Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; 2437 Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
1605 ExitNo oldsnap = as->snapno; 2438 ExitNo oldsnap = as->snapno;
1606 rset_clear(allow, pbase); 2439 rset_clear(allow, pbase);
2440#if LJ_32
1607 tmp = allow ? rset_pickbot(allow) : 2441 tmp = allow ? rset_pickbot(allow) :
1608 (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); 2442 (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
2443#else
2444 tmp = allow ? rset_pickbot(allow) : RID_RET;
2445#endif
1609 as->snapno = exitno; 2446 as->snapno = exitno;
1610 asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); 2447 asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
1611 as->snapno = oldsnap; 2448 as->snapno = oldsnap;
1612 if (allow == RSET_EMPTY) /* Restore temp. register. */ 2449 if (allow == RSET_EMPTY) /* Restore temp. register. */
1613 emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0); 2450 emit_tsi(as, MIPSI_AL, tmp, RID_SP, 0);
1614 else 2451 else
1615 ra_modified(as, tmp); 2452 ra_modified(as, tmp);
1616 emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot)); 2453 emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
1617 emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase); 2454 emit_dst(as, MIPSI_ASUBU, RID_TMP, tmp, pbase);
1618 emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack)); 2455 emit_tsi(as, MIPSI_AL, tmp, tmp, offsetof(lua_State, maxstack));
1619 if (pbase == RID_TMP) 2456 if (pbase == RID_TMP)
1620 emit_getgl(as, RID_TMP, jit_base); 2457 emit_getgl(as, RID_TMP, jit_base);
1621 emit_getgl(as, tmp, jit_L); 2458 emit_getgl(as, tmp, cur_L);
1622 if (allow == RSET_EMPTY) /* Spill temp. register. */ 2459 if (allow == RSET_EMPTY) /* Spill temp. register. */
1623 emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0); 2460 emit_tsi(as, MIPSI_AS, tmp, RID_SP, 0);
1624} 2461}
1625 2462
1626/* Restore Lua stack from on-trace state. */ 2463/* Restore Lua stack from on-trace state. */
1627static void asm_stack_restore(ASMState *as, SnapShot *snap) 2464static void asm_stack_restore(ASMState *as, SnapShot *snap)
1628{ 2465{
1629 SnapEntry *map = &as->T->snapmap[snap->mapofs]; 2466 SnapEntry *map = &as->T->snapmap[snap->mapofs];
1630 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; 2467#if LJ_32 || defined(LUA_USE_ASSERT)
2468 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
2469#endif
1631 MSize n, nent = snap->nent; 2470 MSize n, nent = snap->nent;
1632 /* Store the value of all modified slots to the Lua stack. */ 2471 /* Store the value of all modified slots to the Lua stack. */
1633 for (n = 0; n < nent; n++) { 2472 for (n = 0; n < nent; n++) {
1634 SnapEntry sn = map[n]; 2473 SnapEntry sn = map[n];
1635 BCReg s = snap_slot(sn); 2474 BCReg s = snap_slot(sn);
1636 int32_t ofs = 8*((int32_t)s-1); 2475 int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
1637 IRRef ref = snap_ref(sn); 2476 IRRef ref = snap_ref(sn);
1638 IRIns *ir = IR(ref); 2477 IRIns *ir = IR(ref);
1639 if ((sn & SNAP_NORESTORE)) 2478 if ((sn & SNAP_NORESTORE))
1640 continue; 2479 continue;
1641 if (irt_isnum(ir->t)) { 2480 if (irt_isnum(ir->t)) {
2481#if LJ_SOFTFP32
2482 Reg tmp;
2483 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
2484 /* LJ_SOFTFP: must be a number constant. */
2485 lj_assertA(irref_isk(ref), "unsplit FP op");
2486 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
2487 emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
2488 if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
2489 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
2490 emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
2491#elif LJ_SOFTFP /* && LJ_64 */
2492 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
2493 emit_tsi(as, MIPSI_SD, src, RID_BASE, ofs);
2494#else
1642 Reg src = ra_alloc1(as, ref, RSET_FPR); 2495 Reg src = ra_alloc1(as, ref, RSET_FPR);
1643 emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs); 2496 emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
2497#endif
1644 } else { 2498 } else {
1645 Reg type; 2499#if LJ_32
1646 RegSet allow = rset_exclude(RSET_GPR, RID_BASE); 2500 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
1647 lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); 2501 Reg type;
2502 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
2503 "restore of IR type %d", irt_type(ir->t));
1648 if (!irt_ispri(ir->t)) { 2504 if (!irt_ispri(ir->t)) {
1649 Reg src = ra_alloc1(as, ref, allow); 2505 Reg src = ra_alloc1(as, ref, allow);
1650 rset_clear(allow, src); 2506 rset_clear(allow, src);
@@ -1653,14 +2509,21 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
1653 if ((sn & (SNAP_CONT|SNAP_FRAME))) { 2509 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
1654 if (s == 0) continue; /* Do not overwrite link to previous frame. */ 2510 if (s == 0) continue; /* Do not overwrite link to previous frame. */
1655 type = ra_allock(as, (int32_t)(*flinks--), allow); 2511 type = ra_allock(as, (int32_t)(*flinks--), allow);
2512#if LJ_SOFTFP
2513 } else if ((sn & SNAP_SOFTFPNUM)) {
2514 type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
2515#endif
1656 } else { 2516 } else {
1657 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); 2517 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
1658 } 2518 }
1659 emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4)); 2519 emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
2520#else
2521 asm_tvstore64(as, RID_BASE, ofs, ref);
2522#endif
1660 } 2523 }
1661 checkmclim(as); 2524 checkmclim(as);
1662 } 2525 }
1663 lua_assert(map + nent == flinks); 2526 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
1664} 2527}
1665 2528
1666/* -- GC handling --------------------------------------------------------- */ 2529/* -- GC handling --------------------------------------------------------- */
@@ -1680,7 +2543,7 @@ static void asm_gc_check(ASMState *as)
1680 args[0] = ASMREF_TMP1; /* global_State *g */ 2543 args[0] = ASMREF_TMP1; /* global_State *g */
1681 args[1] = ASMREF_TMP2; /* MSize steps */ 2544 args[1] = ASMREF_TMP2; /* MSize steps */
1682 asm_gencall(as, ci, args); 2545 asm_gencall(as, ci, args);
1683 emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); 2546 emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
1684 tmp = ra_releasetmp(as, ASMREF_TMP2); 2547 tmp = ra_releasetmp(as, ASMREF_TMP2);
1685 emit_loadi(as, tmp, as->gcsteps); 2548 emit_loadi(as, tmp, as->gcsteps);
1686 /* Jump around GC step if GC total < GC threshold. */ 2549 /* Jump around GC step if GC total < GC threshold. */
@@ -1755,7 +2618,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
1755 MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp; 2618 MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
1756 int32_t spadj = as->T->spadjust; 2619 int32_t spadj = as->T->spadjust;
1757 MCode *p = as->mctop-1; 2620 MCode *p = as->mctop-1;
1758 *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP; 2621 *p = spadj ? (MIPSI_AADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
1759 p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); 2622 p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
1760} 2623}
1761 2624
@@ -1766,139 +2629,26 @@ static void asm_tail_prep(ASMState *as)
1766 as->invmcp = as->loopref ? as->mcp : NULL; 2629 as->invmcp = as->loopref ? as->mcp : NULL;
1767} 2630}
1768 2631
1769/* -- Instruction dispatch ------------------------------------------------ */
1770
1771/* Assemble a single instruction. */
1772static void asm_ir(ASMState *as, IRIns *ir)
1773{
1774 switch ((IROp)ir->o) {
1775 /* Miscellaneous ops. */
1776 case IR_LOOP: asm_loop(as); break;
1777 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
1778 case IR_USE:
1779 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1780 case IR_PHI: asm_phi(as, ir); break;
1781 case IR_HIOP: asm_hiop(as, ir); break;
1782 case IR_GCSTEP: asm_gcstep(as, ir); break;
1783
1784 /* Guarded assertions. */
1785 case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
1786 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1787 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1788 case IR_ABC:
1789 asm_comp(as, ir);
1790 break;
1791
1792 case IR_RETF: asm_retf(as, ir); break;
1793
1794 /* Bit ops. */
1795 case IR_BNOT: asm_bitnot(as, ir); break;
1796 case IR_BSWAP: asm_bitswap(as, ir); break;
1797
1798 case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
1799 case IR_BOR: asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
1800 case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;
1801
1802 case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
1803 case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
1804 case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
1805 case IR_BROL: lua_assert(0); break;
1806 case IR_BROR: asm_bitror(as, ir); break;
1807
1808 /* Arithmetic ops. */
1809 case IR_ADD: asm_add(as, ir); break;
1810 case IR_SUB: asm_sub(as, ir); break;
1811 case IR_MUL: asm_mul(as, ir); break;
1812 case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
1813 case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
1814 case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
1815 case IR_NEG: asm_neg(as, ir); break;
1816
1817 case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
1818 case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
1819 case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
1820 case IR_MIN: asm_min_max(as, ir, 0); break;
1821 case IR_MAX: asm_min_max(as, ir, 1); break;
1822 case IR_FPMATH:
1823 if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
1824 break;
1825 if (ir->op2 <= IRFPM_TRUNC)
1826 asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
1827 else if (ir->op2 == IRFPM_SQRT)
1828 asm_fpunary(as, ir, MIPSI_SQRT_D);
1829 else
1830 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1831 break;
1832
1833 /* Overflow-checking arithmetic ops. */
1834 case IR_ADDOV: asm_arithov(as, ir); break;
1835 case IR_SUBOV: asm_arithov(as, ir); break;
1836 case IR_MULOV: asm_mulov(as, ir); break;
1837
1838 /* Memory references. */
1839 case IR_AREF: asm_aref(as, ir); break;
1840 case IR_HREF: asm_href(as, ir); break;
1841 case IR_HREFK: asm_hrefk(as, ir); break;
1842 case IR_NEWREF: asm_newref(as, ir); break;
1843 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
1844 case IR_FREF: asm_fref(as, ir); break;
1845 case IR_STRREF: asm_strref(as, ir); break;
1846
1847 /* Loads and stores. */
1848 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1849 asm_ahuvload(as, ir);
1850 break;
1851 case IR_FLOAD: asm_fload(as, ir); break;
1852 case IR_XLOAD: asm_xload(as, ir); break;
1853 case IR_SLOAD: asm_sload(as, ir); break;
1854
1855 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
1856 case IR_FSTORE: asm_fstore(as, ir); break;
1857 case IR_XSTORE: asm_xstore(as, ir, 0); break;
1858
1859 /* Allocations. */
1860 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
1861 case IR_TNEW: asm_tnew(as, ir); break;
1862 case IR_TDUP: asm_tdup(as, ir); break;
1863 case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
1864
1865 /* Write barriers. */
1866 case IR_TBAR: asm_tbar(as, ir); break;
1867 case IR_OBAR: asm_obar(as, ir); break;
1868
1869 /* Type conversions. */
1870 case IR_CONV: asm_conv(as, ir); break;
1871 case IR_TOBIT: asm_tobit(as, ir); break;
1872 case IR_TOSTR: asm_tostr(as, ir); break;
1873 case IR_STRTO: asm_strto(as, ir); break;
1874
1875 /* Calls. */
1876 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
1877 case IR_CALLXS: asm_callx(as, ir); break;
1878 case IR_CARG: break;
1879
1880 default:
1881 setintV(&as->J->errinfo, ir->o);
1882 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
1883 break;
1884 }
1885}
1886
1887/* -- Trace setup --------------------------------------------------------- */ 2632/* -- Trace setup --------------------------------------------------------- */
1888 2633
1889/* Ensure there are enough stack slots for call arguments. */ 2634/* Ensure there are enough stack slots for call arguments. */
1890static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) 2635static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
1891{ 2636{
1892 IRRef args[CCI_NARGS_MAX*2]; 2637 IRRef args[CCI_NARGS_MAX*2];
1893 uint32_t i, nargs = (int)CCI_NARGS(ci); 2638 uint32_t i, nargs = CCI_XNARGS(ci);
2639#if LJ_32
1894 int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; 2640 int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
2641#else
2642 int nslots = 0, ngpr = REGARG_NUMGPR;
2643#endif
1895 asm_collectargs(as, ir, ci, args); 2644 asm_collectargs(as, ir, ci, args);
1896 for (i = 0; i < nargs; i++) { 2645 for (i = 0; i < nargs; i++) {
1897 if (args[i] && irt_isfp(IR(args[i])->t) && 2646#if LJ_32
2647 if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t) &&
1898 nfpr > 0 && !(ci->flags & CCI_VARARG)) { 2648 nfpr > 0 && !(ci->flags & CCI_VARARG)) {
1899 nfpr--; 2649 nfpr--;
1900 ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1; 2650 ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
1901 } else if (args[i] && irt_isnum(IR(args[i])->t)) { 2651 } else if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) {
1902 nfpr = 0; 2652 nfpr = 0;
1903 ngpr = ngpr & ~1; 2653 ngpr = ngpr & ~1;
1904 if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1; 2654 if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
@@ -1906,6 +2656,9 @@ static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
1906 nfpr = 0; 2656 nfpr = 0;
1907 if (ngpr > 0) ngpr--; else nslots++; 2657 if (ngpr > 0) ngpr--; else nslots++;
1908 } 2658 }
2659#else
2660 if (ngpr > 0) ngpr--; else nslots += 2;
2661#endif
1909 } 2662 }
1910 if (nslots > as->evenspill) /* Leave room for args in stack slots. */ 2663 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
1911 as->evenspill = nslots; 2664 as->evenspill = nslots;
@@ -1936,7 +2689,12 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
1936 if (((p[-1] ^ (px-p)) & 0xffffu) == 0 && 2689 if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
1937 ((p[-1] & 0xf0000000u) == MIPSI_BEQ || 2690 ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
1938 (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ || 2691 (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
1939 (p[-1] & 0xffe00000u) == MIPSI_BC1F)) { 2692#if !LJ_TARGET_MIPSR6
2693 (p[-1] & 0xffe00000u) == MIPSI_BC1F
2694#else
2695 (p[-1] & 0xff600000u) == MIPSI_BC1EQZ
2696#endif
2697 )) {
1940 ptrdiff_t delta = target - p; 2698 ptrdiff_t delta = target - p;
1941 if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */ 2699 if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
1942 patchbranch: 2700 patchbranch:
@@ -1963,7 +2721,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
1963 } 2721 }
1964 } else if (p+1 == pe) { 2722 } else if (p+1 == pe) {
1965 /* Patch NOP after code for inverted loop branch. Use of J is ok. */ 2723 /* Patch NOP after code for inverted loop branch. Use of J is ok. */
1966 lua_assert(p[1] == MIPSI_NOP); 2724 lj_assertJ(p[1] == MIPSI_NOP, "expected NOP");
1967 p[1] = tjump; 2725 p[1] = tjump;
1968 *p = MIPSI_NOP; /* Replace the load of the exit number. */ 2726 *p = MIPSI_NOP; /* Replace the load of the exit number. */
1969 cstop = p+2; 2727 cstop = p+2;
diff --git a/src/lj_asm_ppc.h b/src/lj_asm_ppc.h
index dc092db2..77ab09d6 100644
--- a/src/lj_asm_ppc.h
+++ b/src/lj_asm_ppc.h
@@ -181,7 +181,7 @@ static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
181 return; 181 return;
182 } 182 }
183 } else if (ir->o == IR_STRREF) { 183 } else if (ir->o == IR_STRREF) {
184 lua_assert(ofs == 0); 184 lj_assertA(ofs == 0, "bad usage");
185 ofs = (int32_t)sizeof(GCstr); 185 ofs = (int32_t)sizeof(GCstr);
186 if (irref_isk(ir->op2)) { 186 if (irref_isk(ir->op2)) {
187 ofs += IR(ir->op2)->i; 187 ofs += IR(ir->op2)->i;
@@ -226,6 +226,7 @@ static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
226 emit_tab(as, pi, rt, left, right); 226 emit_tab(as, pi, rt, left, right);
227} 227}
228 228
229#if !LJ_SOFTFP
229/* Fuse to multiply-add/sub instruction. */ 230/* Fuse to multiply-add/sub instruction. */
230static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir) 231static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
231{ 232{
@@ -245,24 +246,30 @@ static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
245 } 246 }
246 return 0; 247 return 0;
247} 248}
249#endif
248 250
249/* -- Calls --------------------------------------------------------------- */ 251/* -- Calls --------------------------------------------------------------- */
250 252
251/* Generate a call to a C function. */ 253/* Generate a call to a C function. */
252static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) 254static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
253{ 255{
254 uint32_t n, nargs = CCI_NARGS(ci); 256 uint32_t n, nargs = CCI_XNARGS(ci);
255 int32_t ofs = 8; 257 int32_t ofs = 8;
256 Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR; 258 Reg gpr = REGARG_FIRSTGPR;
259#if !LJ_SOFTFP
260 Reg fpr = REGARG_FIRSTFPR;
261#endif
257 if ((void *)ci->func) 262 if ((void *)ci->func)
258 emit_call(as, (void *)ci->func); 263 emit_call(as, (void *)ci->func);
259 for (n = 0; n < nargs; n++) { /* Setup args. */ 264 for (n = 0; n < nargs; n++) { /* Setup args. */
260 IRRef ref = args[n]; 265 IRRef ref = args[n];
261 if (ref) { 266 if (ref) {
262 IRIns *ir = IR(ref); 267 IRIns *ir = IR(ref);
268#if !LJ_SOFTFP
263 if (irt_isfp(ir->t)) { 269 if (irt_isfp(ir->t)) {
264 if (fpr <= REGARG_LASTFPR) { 270 if (fpr <= REGARG_LASTFPR) {
265 lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ 271 lj_assertA(rset_test(as->freeset, fpr),
272 "reg %d not free", fpr); /* Already evicted. */
266 ra_leftov(as, fpr, ref); 273 ra_leftov(as, fpr, ref);
267 fpr++; 274 fpr++;
268 } else { 275 } else {
@@ -271,9 +278,12 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
271 emit_spstore(as, ir, r, ofs); 278 emit_spstore(as, ir, r, ofs);
272 ofs += irt_isnum(ir->t) ? 8 : 4; 279 ofs += irt_isnum(ir->t) ? 8 : 4;
273 } 280 }
274 } else { 281 } else
282#endif
283 {
275 if (gpr <= REGARG_LASTGPR) { 284 if (gpr <= REGARG_LASTGPR) {
276 lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ 285 lj_assertA(rset_test(as->freeset, gpr),
286 "reg %d not free", gpr); /* Already evicted. */
277 ra_leftov(as, gpr, ref); 287 ra_leftov(as, gpr, ref);
278 gpr++; 288 gpr++;
279 } else { 289 } else {
@@ -290,8 +300,10 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
290 } 300 }
291 checkmclim(as); 301 checkmclim(as);
292 } 302 }
303#if !LJ_SOFTFP
293 if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */ 304 if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
294 emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6); 305 emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
306#endif
295} 307}
296 308
297/* Setup result reg/sp for call. Evict scratch regs. */ 309/* Setup result reg/sp for call. Evict scratch regs. */
@@ -299,16 +311,18 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
299{ 311{
300 RegSet drop = RSET_SCRATCH; 312 RegSet drop = RSET_SCRATCH;
301 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t)); 313 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
314#if !LJ_SOFTFP
302 if ((ci->flags & CCI_NOFPRCLOBBER)) 315 if ((ci->flags & CCI_NOFPRCLOBBER))
303 drop &= ~RSET_FPR; 316 drop &= ~RSET_FPR;
317#endif
304 if (ra_hasreg(ir->r)) 318 if (ra_hasreg(ir->r))
305 rset_clear(drop, ir->r); /* Dest reg handled below. */ 319 rset_clear(drop, ir->r); /* Dest reg handled below. */
306 if (hiop && ra_hasreg((ir+1)->r)) 320 if (hiop && ra_hasreg((ir+1)->r))
307 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ 321 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
308 ra_evictset(as, drop); /* Evictions must be performed first. */ 322 ra_evictset(as, drop); /* Evictions must be performed first. */
309 if (ra_used(ir)) { 323 if (ra_used(ir)) {
310 lua_assert(!irt_ispri(ir->t)); 324 lj_assertA(!irt_ispri(ir->t), "PRI dest");
311 if (irt_isfp(ir->t)) { 325 if (!LJ_SOFTFP && irt_isfp(ir->t)) {
312 if ((ci->flags & CCI_CASTU64)) { 326 if ((ci->flags & CCI_CASTU64)) {
313 /* Use spill slot or temp slots. */ 327 /* Use spill slot or temp slots. */
314 int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP; 328 int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
@@ -323,23 +337,16 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
323 } else { 337 } else {
324 ra_destreg(as, ir, RID_FPRET); 338 ra_destreg(as, ir, RID_FPRET);
325 } 339 }
340#if LJ_32
326 } else if (hiop) { 341 } else if (hiop) {
327 ra_destpair(as, ir); 342 ra_destpair(as, ir);
343#endif
328 } else { 344 } else {
329 ra_destreg(as, ir, RID_RET); 345 ra_destreg(as, ir, RID_RET);
330 } 346 }
331 } 347 }
332} 348}
333 349
334static void asm_call(ASMState *as, IRIns *ir)
335{
336 IRRef args[CCI_NARGS_MAX];
337 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
338 asm_collectargs(as, ir, ci, args);
339 asm_setupresult(as, ir, ci);
340 asm_gencall(as, ci, args);
341}
342
343static void asm_callx(ASMState *as, IRIns *ir) 350static void asm_callx(ASMState *as, IRIns *ir)
344{ 351{
345 IRRef args[CCI_NARGS_MAX*2]; 352 IRRef args[CCI_NARGS_MAX*2];
@@ -352,7 +359,7 @@ static void asm_callx(ASMState *as, IRIns *ir)
352 func = ir->op2; irf = IR(func); 359 func = ir->op2; irf = IR(func);
353 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } 360 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
354 if (irref_isk(func)) { /* Call to constant address. */ 361 if (irref_isk(func)) { /* Call to constant address. */
355 ci.func = (ASMFunction)(void *)(irf->i); 362 ci.func = (ASMFunction)(void *)(intptr_t)(irf->i);
356 } else { /* Need a non-argument register for indirect calls. */ 363 } else { /* Need a non-argument register for indirect calls. */
357 RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1); 364 RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
358 Reg freg = ra_alloc1(as, func, allow); 365 Reg freg = ra_alloc1(as, func, allow);
@@ -363,16 +370,6 @@ static void asm_callx(ASMState *as, IRIns *ir)
363 asm_gencall(as, &ci, args); 370 asm_gencall(as, &ci, args);
364} 371}
365 372
366static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
367{
368 const CCallInfo *ci = &lj_ir_callinfo[id];
369 IRRef args[2];
370 args[0] = ir->op1;
371 args[1] = ir->op2;
372 asm_setupresult(as, ir, ci);
373 asm_gencall(as, ci, args);
374}
375
376/* -- Returns ------------------------------------------------------------- */ 373/* -- Returns ------------------------------------------------------------- */
377 374
378/* Return to lower frame. Guard that it goes to the right spot. */ 375/* Return to lower frame. Guard that it goes to the right spot. */
@@ -380,7 +377,7 @@ static void asm_retf(ASMState *as, IRIns *ir)
380{ 377{
381 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); 378 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
382 void *pc = ir_kptr(IR(ir->op2)); 379 void *pc = ir_kptr(IR(ir->op2));
383 int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); 380 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
384 as->topslot -= (BCReg)delta; 381 as->topslot -= (BCReg)delta;
385 if ((int32_t)as->topslot < 0) as->topslot = 0; 382 if ((int32_t)as->topslot < 0) as->topslot = 0;
386 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ 383 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
@@ -394,6 +391,7 @@ static void asm_retf(ASMState *as, IRIns *ir)
394 391
395/* -- Type conversions ---------------------------------------------------- */ 392/* -- Type conversions ---------------------------------------------------- */
396 393
394#if !LJ_SOFTFP
397static void asm_tointg(ASMState *as, IRIns *ir, Reg left) 395static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
398{ 396{
399 RegSet allow = RSET_FPR; 397 RegSet allow = RSET_FPR;
@@ -410,8 +408,7 @@ static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
410 emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000); 408 emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
411 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); 409 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
412 emit_lsptr(as, PPCI_LFS, (fbias & 31), 410 emit_lsptr(as, PPCI_LFS, (fbias & 31),
413 (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), 411 (void *)&as->J->k32[LJ_K32_2P52_2P31], RSET_GPR);
414 RSET_GPR);
415 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); 412 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
416 emit_fb(as, PPCI_FCTIWZ, tmp, left); 413 emit_fb(as, PPCI_FCTIWZ, tmp, left);
417} 414}
@@ -427,15 +424,27 @@ static void asm_tobit(ASMState *as, IRIns *ir)
427 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); 424 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
428 emit_fab(as, PPCI_FADD, tmp, left, right); 425 emit_fab(as, PPCI_FADD, tmp, left, right);
429} 426}
427#endif
430 428
431static void asm_conv(ASMState *as, IRIns *ir) 429static void asm_conv(ASMState *as, IRIns *ir)
432{ 430{
433 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); 431 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
432#if !LJ_SOFTFP
434 int stfp = (st == IRT_NUM || st == IRT_FLOAT); 433 int stfp = (st == IRT_NUM || st == IRT_FLOAT);
434#endif
435 IRRef lref = ir->op1; 435 IRRef lref = ir->op1;
436 lua_assert(irt_type(ir->t) != st); 436 /* 64 bit integer conversions are handled by SPLIT. */
437 lua_assert(!(irt_isint64(ir->t) || 437 lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
438 (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ 438 "IR %04d has unsplit 64 bit type",
439 (int)(ir - as->ir) - REF_BIAS);
440#if LJ_SOFTFP
441 /* FP conversions are handled by SPLIT. */
442 lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
443 "IR %04d has FP type",
444 (int)(ir - as->ir) - REF_BIAS);
445 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
446#else
447 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
439 if (irt_isfp(ir->t)) { 448 if (irt_isfp(ir->t)) {
440 Reg dest = ra_dest(as, ir, RSET_FPR); 449 Reg dest = ra_dest(as, ir, RSET_FPR);
441 if (stfp) { /* FP to FP conversion. */ 450 if (stfp) { /* FP to FP conversion. */
@@ -450,13 +459,11 @@ static void asm_conv(ASMState *as, IRIns *ir)
450 Reg left = ra_alloc1(as, lref, allow); 459 Reg left = ra_alloc1(as, lref, allow);
451 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left)); 460 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
452 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); 461 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
453 const float *kbias;
454 if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest); 462 if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
455 emit_fab(as, PPCI_FSUB, dest, dest, fbias); 463 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
456 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); 464 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
457 kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000)); 465 emit_lsptr(as, PPCI_LFS, (fbias & 31),
458 if (st == IRT_U32) kbias++; 466 &as->J->k32[st == IRT_U32 ? LJ_K32_2P52 : LJ_K32_2P52_2P31],
459 emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias,
460 rset_clear(allow, hibias)); 467 rset_clear(allow, hibias));
461 emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP, 468 emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
462 RID_SP, SPOFS_TMPLO); 469 RID_SP, SPOFS_TMPLO);
@@ -466,7 +473,8 @@ static void asm_conv(ASMState *as, IRIns *ir)
466 } else if (stfp) { /* FP to integer conversion. */ 473 } else if (stfp) { /* FP to integer conversion. */
467 if (irt_isguard(ir->t)) { 474 if (irt_isguard(ir->t)) {
468 /* Checked conversions are only supported from number to int. */ 475 /* Checked conversions are only supported from number to int. */
469 lua_assert(irt_isint(ir->t) && st == IRT_NUM); 476 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
477 "bad type for checked CONV");
470 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); 478 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
471 } else { 479 } else {
472 Reg dest = ra_dest(as, ir, RSET_GPR); 480 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -489,19 +497,20 @@ static void asm_conv(ASMState *as, IRIns *ir)
489 emit_fb(as, PPCI_FCTIWZ, tmp, tmp); 497 emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
490 emit_fab(as, PPCI_FSUB, tmp, left, tmp); 498 emit_fab(as, PPCI_FSUB, tmp, left, tmp);
491 emit_lsptr(as, PPCI_LFS, (tmp & 31), 499 emit_lsptr(as, PPCI_LFS, (tmp & 31),
492 (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)), 500 (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
493 RSET_GPR);
494 } else { 501 } else {
495 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); 502 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
496 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP); 503 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
497 emit_fb(as, PPCI_FCTIWZ, tmp, left); 504 emit_fb(as, PPCI_FCTIWZ, tmp, left);
498 } 505 }
499 } 506 }
500 } else { 507 } else
508#endif
509 {
501 Reg dest = ra_dest(as, ir, RSET_GPR); 510 Reg dest = ra_dest(as, ir, RSET_GPR);
502 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ 511 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
503 Reg left = ra_alloc1(as, ir->op1, RSET_GPR); 512 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
504 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); 513 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
505 if ((ir->op2 & IRCONV_SEXT)) 514 if ((ir->op2 & IRCONV_SEXT))
506 emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left); 515 emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
507 else 516 else
@@ -513,46 +522,50 @@ static void asm_conv(ASMState *as, IRIns *ir)
513 } 522 }
514} 523}
515 524
516#if LJ_HASFFI
517static void asm_conv64(ASMState *as, IRIns *ir)
518{
519 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
520 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
521 IRCallID id;
522 const CCallInfo *ci;
523 IRRef args[2];
524 args[0] = ir->op1;
525 args[1] = (ir-1)->op1;
526 if (st == IRT_NUM || st == IRT_FLOAT) {
527 id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
528 ir--;
529 } else {
530 id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
531 }
532 ci = &lj_ir_callinfo[id];
533 asm_setupresult(as, ir, ci);
534 asm_gencall(as, ci, args);
535}
536#endif
537
538static void asm_strto(ASMState *as, IRIns *ir) 525static void asm_strto(ASMState *as, IRIns *ir)
539{ 526{
540 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; 527 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
541 IRRef args[2]; 528 IRRef args[2];
542 int32_t ofs; 529 int32_t ofs = SPOFS_TMP;
530#if LJ_SOFTFP
531 ra_evictset(as, RSET_SCRATCH);
532 if (ra_used(ir)) {
533 if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
534 (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
535 int i;
536 for (i = 0; i < 2; i++) {
537 Reg r = (ir+i)->r;
538 if (ra_hasreg(r)) {
539 ra_free(as, r);
540 ra_modified(as, r);
541 emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
542 }
543 }
544 ofs = sps_scale(ir->s & ~1);
545 } else {
546 Reg rhi = ra_dest(as, ir+1, RSET_GPR);
547 Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
548 emit_tai(as, PPCI_LWZ, rhi, RID_SP, ofs);
549 emit_tai(as, PPCI_LWZ, rlo, RID_SP, ofs+4);
550 }
551 }
552#else
543 RegSet drop = RSET_SCRATCH; 553 RegSet drop = RSET_SCRATCH;
544 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ 554 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
545 ra_evictset(as, drop); 555 ra_evictset(as, drop);
556 if (ir->s) ofs = sps_scale(ir->s);
557#endif
546 asm_guardcc(as, CC_EQ); 558 asm_guardcc(as, CC_EQ);
547 emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */ 559 emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
548 args[0] = ir->op1; /* GCstr *str */ 560 args[0] = ir->op1; /* GCstr *str */
549 args[1] = ASMREF_TMP1; /* TValue *n */ 561 args[1] = ASMREF_TMP1; /* TValue *n */
550 asm_gencall(as, ci, args); 562 asm_gencall(as, ci, args);
551 /* Store the result to the spill slot or temp slots. */ 563 /* Store the result to the spill slot or temp slots. */
552 ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
553 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs); 564 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
554} 565}
555 566
567/* -- Memory references --------------------------------------------------- */
568
556/* Get pointer to TValue. */ 569/* Get pointer to TValue. */
557static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) 570static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
558{ 571{
@@ -566,37 +579,19 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
566 /* Otherwise use g->tmptv to hold the TValue. */ 579 /* Otherwise use g->tmptv to hold the TValue. */
567 RegSet allow = rset_exclude(RSET_GPR, dest); 580 RegSet allow = rset_exclude(RSET_GPR, dest);
568 Reg type; 581 Reg type;
569 emit_tai(as, PPCI_ADDI, dest, RID_JGL, offsetof(global_State, tmptv)-32768); 582 emit_tai(as, PPCI_ADDI, dest, RID_JGL, (int32_t)offsetof(global_State, tmptv)-32768);
570 if (!irt_ispri(ir->t)) { 583 if (!irt_ispri(ir->t)) {
571 Reg src = ra_alloc1(as, ref, allow); 584 Reg src = ra_alloc1(as, ref, allow);
572 emit_setgl(as, src, tmptv.gcr); 585 emit_setgl(as, src, tmptv.gcr);
573 } 586 }
574 type = ra_allock(as, irt_toitype(ir->t), allow); 587 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
588 type = ra_alloc1(as, ref+1, allow);
589 else
590 type = ra_allock(as, irt_toitype(ir->t), allow);
575 emit_setgl(as, type, tmptv.it); 591 emit_setgl(as, type, tmptv.it);
576 } 592 }
577} 593}
578 594
579static void asm_tostr(ASMState *as, IRIns *ir)
580{
581 IRRef args[2];
582 args[0] = ASMREF_L;
583 as->gcsteps++;
584 if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
585 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
586 args[1] = ASMREF_TMP1; /* const lua_Number * */
587 asm_setupresult(as, ir, ci); /* GCstr * */
588 asm_gencall(as, ci, args);
589 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
590 } else {
591 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
592 args[1] = ir->op1; /* int32_t k */
593 asm_setupresult(as, ir, ci); /* GCstr * */
594 asm_gencall(as, ci, args);
595 }
596}
597
598/* -- Memory references --------------------------------------------------- */
599
600static void asm_aref(ASMState *as, IRIns *ir) 595static void asm_aref(ASMState *as, IRIns *ir)
601{ 596{
602 Reg dest = ra_dest(as, ir, RSET_GPR); 597 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -636,11 +631,27 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
636 Reg tisnum = RID_NONE, tmpnum = RID_NONE; 631 Reg tisnum = RID_NONE, tmpnum = RID_NONE;
637 IRRef refkey = ir->op2; 632 IRRef refkey = ir->op2;
638 IRIns *irkey = IR(refkey); 633 IRIns *irkey = IR(refkey);
634 int isk = irref_isk(refkey);
639 IRType1 kt = irkey->t; 635 IRType1 kt = irkey->t;
640 uint32_t khash; 636 uint32_t khash;
641 MCLabel l_end, l_loop, l_next; 637 MCLabel l_end, l_loop, l_next;
642 638
643 rset_clear(allow, tab); 639 rset_clear(allow, tab);
640#if LJ_SOFTFP
641 if (!isk) {
642 key = ra_alloc1(as, refkey, allow);
643 rset_clear(allow, key);
644 if (irkey[1].o == IR_HIOP) {
645 if (ra_hasreg((irkey+1)->r)) {
646 tmpnum = (irkey+1)->r;
647 ra_noweak(as, tmpnum);
648 } else {
649 tmpnum = ra_allocref(as, refkey+1, allow);
650 }
651 rset_clear(allow, tmpnum);
652 }
653 }
654#else
644 if (irt_isnum(kt)) { 655 if (irt_isnum(kt)) {
645 key = ra_alloc1(as, refkey, RSET_FPR); 656 key = ra_alloc1(as, refkey, RSET_FPR);
646 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); 657 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
@@ -650,6 +661,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
650 key = ra_alloc1(as, refkey, allow); 661 key = ra_alloc1(as, refkey, allow);
651 rset_clear(allow, key); 662 rset_clear(allow, key);
652 } 663 }
664#endif
653 tmp2 = ra_scratch(as, allow); 665 tmp2 = ra_scratch(as, allow);
654 rset_clear(allow, tmp2); 666 rset_clear(allow, tmp2);
655 667
@@ -672,7 +684,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
672 asm_guardcc(as, CC_EQ); 684 asm_guardcc(as, CC_EQ);
673 else 685 else
674 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); 686 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
675 if (irt_isnum(kt)) { 687 if (!LJ_SOFTFP && irt_isnum(kt)) {
676 emit_fab(as, PPCI_FCMPU, 0, tmpnum, key); 688 emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
677 emit_condbranch(as, PPCI_BC, CC_GE, l_next); 689 emit_condbranch(as, PPCI_BC, CC_GE, l_next);
678 emit_ab(as, PPCI_CMPLW, tmp1, tisnum); 690 emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
@@ -682,7 +694,10 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
682 emit_ab(as, PPCI_CMPW, tmp2, key); 694 emit_ab(as, PPCI_CMPW, tmp2, key);
683 emit_condbranch(as, PPCI_BC, CC_NE, l_next); 695 emit_condbranch(as, PPCI_BC, CC_NE, l_next);
684 } 696 }
685 emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t)); 697 if (LJ_SOFTFP && ra_hasreg(tmpnum))
698 emit_ab(as, PPCI_CMPW, tmp1, tmpnum);
699 else
700 emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
686 if (!irt_ispri(kt)) 701 if (!irt_ispri(kt))
687 emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr)); 702 emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
688 } 703 }
@@ -691,35 +706,41 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
691 (((char *)as->mcp-(char *)l_loop) & 0xffffu); 706 (((char *)as->mcp-(char *)l_loop) & 0xffffu);
692 707
693 /* Load main position relative to tab->node into dest. */ 708 /* Load main position relative to tab->node into dest. */
694 khash = irref_isk(refkey) ? ir_khash(irkey) : 1; 709 khash = isk ? ir_khash(as, irkey) : 1;
695 if (khash == 0) { 710 if (khash == 0) {
696 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); 711 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
697 } else { 712 } else {
698 Reg tmphash = tmp1; 713 Reg tmphash = tmp1;
699 if (irref_isk(refkey)) 714 if (isk)
700 tmphash = ra_allock(as, khash, allow); 715 tmphash = ra_allock(as, khash, allow);
701 emit_tab(as, PPCI_ADD, dest, dest, tmp1); 716 emit_tab(as, PPCI_ADD, dest, dest, tmp1);
702 emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node)); 717 emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
703 emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash); 718 emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
704 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); 719 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
705 emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); 720 emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
706 if (irref_isk(refkey)) { 721 if (isk) {
707 /* Nothing to do. */ 722 /* Nothing to do. */
708 } else if (irt_isstr(kt)) { 723 } else if (irt_isstr(kt)) {
709 emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash)); 724 emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, sid));
710 } else { /* Must match with hash*() in lj_tab.c. */ 725 } else { /* Must match with hash*() in lj_tab.c. */
711 emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1); 726 emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
712 emit_rotlwi(as, tmp2, tmp2, HASH_ROT3); 727 emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
713 emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2); 728 emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
714 emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31); 729 emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
715 emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2); 730 emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
716 if (irt_isnum(kt)) { 731 if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
732#if LJ_SOFTFP
733 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
734 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
735 emit_tab(as, PPCI_ADD, tmp1, tmpnum, tmpnum);
736#else
717 int32_t ofs = ra_spill(as, irkey); 737 int32_t ofs = ra_spill(as, irkey);
718 emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1); 738 emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
719 emit_rotlwi(as, dest, tmp1, HASH_ROT1); 739 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
720 emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1); 740 emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
721 emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4); 741 emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
722 emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs); 742 emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
743#endif
723 } else { 744 } else {
724 emit_asb(as, PPCI_XOR, tmp2, key, tmp1); 745 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
725 emit_rotlwi(as, dest, tmp1, HASH_ROT1); 746 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
@@ -740,7 +761,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
740 Reg node = ra_alloc1(as, ir->op1, RSET_GPR); 761 Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
741 Reg key = RID_NONE, type = RID_TMP, idx = node; 762 Reg key = RID_NONE, type = RID_TMP, idx = node;
742 RegSet allow = rset_exclude(RSET_GPR, node); 763 RegSet allow = rset_exclude(RSET_GPR, node);
743 lua_assert(ofs % sizeof(Node) == 0); 764 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
744 if (ofs > 32736) { 765 if (ofs > 32736) {
745 idx = dest; 766 idx = dest;
746 rset_clear(allow, dest); 767 rset_clear(allow, dest);
@@ -773,20 +794,6 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
773 } 794 }
774} 795}
775 796
776static void asm_newref(ASMState *as, IRIns *ir)
777{
778 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
779 IRRef args[3];
780 if (ir->r == RID_SINK)
781 return;
782 args[0] = ASMREF_L; /* lua_State *L */
783 args[1] = ir->op1; /* GCtab *t */
784 args[2] = ASMREF_TMP1; /* cTValue *key */
785 asm_setupresult(as, ir, ci); /* TValue * */
786 asm_gencall(as, ci, args);
787 asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
788}
789
790static void asm_uref(ASMState *as, IRIns *ir) 797static void asm_uref(ASMState *as, IRIns *ir)
791{ 798{
792 Reg dest = ra_dest(as, ir, RSET_GPR); 799 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -813,7 +820,7 @@ static void asm_uref(ASMState *as, IRIns *ir)
813static void asm_fref(ASMState *as, IRIns *ir) 820static void asm_fref(ASMState *as, IRIns *ir)
814{ 821{
815 UNUSED(as); UNUSED(ir); 822 UNUSED(as); UNUSED(ir);
816 lua_assert(!ra_used(ir)); 823 lj_assertA(!ra_used(ir), "unfused FREF");
817} 824}
818 825
819static void asm_strref(ASMState *as, IRIns *ir) 826static void asm_strref(ASMState *as, IRIns *ir)
@@ -853,26 +860,28 @@ static void asm_strref(ASMState *as, IRIns *ir)
853 860
854/* -- Loads and stores ---------------------------------------------------- */ 861/* -- Loads and stores ---------------------------------------------------- */
855 862
856static PPCIns asm_fxloadins(IRIns *ir) 863static PPCIns asm_fxloadins(ASMState *as, IRIns *ir)
857{ 864{
865 UNUSED(as);
858 switch (irt_type(ir->t)) { 866 switch (irt_type(ir->t)) {
859 case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */ 867 case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
860 case IRT_U8: return PPCI_LBZ; 868 case IRT_U8: return PPCI_LBZ;
861 case IRT_I16: return PPCI_LHA; 869 case IRT_I16: return PPCI_LHA;
862 case IRT_U16: return PPCI_LHZ; 870 case IRT_U16: return PPCI_LHZ;
863 case IRT_NUM: return PPCI_LFD; 871 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_LFD;
864 case IRT_FLOAT: return PPCI_LFS; 872 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS;
865 default: return PPCI_LWZ; 873 default: return PPCI_LWZ;
866 } 874 }
867} 875}
868 876
869static PPCIns asm_fxstoreins(IRIns *ir) 877static PPCIns asm_fxstoreins(ASMState *as, IRIns *ir)
870{ 878{
879 UNUSED(as);
871 switch (irt_type(ir->t)) { 880 switch (irt_type(ir->t)) {
872 case IRT_I8: case IRT_U8: return PPCI_STB; 881 case IRT_I8: case IRT_U8: return PPCI_STB;
873 case IRT_I16: case IRT_U16: return PPCI_STH; 882 case IRT_I16: case IRT_U16: return PPCI_STH;
874 case IRT_NUM: return PPCI_STFD; 883 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_STFD;
875 case IRT_FLOAT: return PPCI_STFS; 884 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS;
876 default: return PPCI_STW; 885 default: return PPCI_STW;
877 } 886 }
878} 887}
@@ -880,18 +889,24 @@ static PPCIns asm_fxstoreins(IRIns *ir)
880static void asm_fload(ASMState *as, IRIns *ir) 889static void asm_fload(ASMState *as, IRIns *ir)
881{ 890{
882 Reg dest = ra_dest(as, ir, RSET_GPR); 891 Reg dest = ra_dest(as, ir, RSET_GPR);
883 Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); 892 PPCIns pi = asm_fxloadins(as, ir);
884 PPCIns pi = asm_fxloadins(ir); 893 Reg idx;
885 int32_t ofs; 894 int32_t ofs;
886 if (ir->op2 == IRFL_TAB_ARRAY) { 895 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
887 ofs = asm_fuseabase(as, ir->op1); 896 idx = RID_JGL;
888 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ 897 ofs = (ir->op2 << 2) - 32768;
889 emit_tai(as, PPCI_ADDI, dest, idx, ofs); 898 } else {
890 return; 899 idx = ra_alloc1(as, ir->op1, RSET_GPR);
900 if (ir->op2 == IRFL_TAB_ARRAY) {
901 ofs = asm_fuseabase(as, ir->op1);
902 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
903 emit_tai(as, PPCI_ADDI, dest, idx, ofs);
904 return;
905 }
891 } 906 }
907 ofs = field_ofs[ir->op2];
892 } 908 }
893 ofs = field_ofs[ir->op2]; 909 lj_assertA(!irt_isi8(ir->t), "unsupported FLOAD I8");
894 lua_assert(!irt_isi8(ir->t));
895 emit_tai(as, pi, dest, idx, ofs); 910 emit_tai(as, pi, dest, idx, ofs);
896} 911}
897 912
@@ -902,21 +917,22 @@ static void asm_fstore(ASMState *as, IRIns *ir)
902 IRIns *irf = IR(ir->op1); 917 IRIns *irf = IR(ir->op1);
903 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); 918 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
904 int32_t ofs = field_ofs[irf->op2]; 919 int32_t ofs = field_ofs[irf->op2];
905 PPCIns pi = asm_fxstoreins(ir); 920 PPCIns pi = asm_fxstoreins(as, ir);
906 emit_tai(as, pi, src, idx, ofs); 921 emit_tai(as, pi, src, idx, ofs);
907 } 922 }
908} 923}
909 924
910static void asm_xload(ASMState *as, IRIns *ir) 925static void asm_xload(ASMState *as, IRIns *ir)
911{ 926{
912 Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); 927 Reg dest = ra_dest(as, ir,
913 lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); 928 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
929 lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
914 if (irt_isi8(ir->t)) 930 if (irt_isi8(ir->t))
915 emit_as(as, PPCI_EXTSB, dest, dest); 931 emit_as(as, PPCI_EXTSB, dest, dest);
916 asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); 932 asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
917} 933}
918 934
919static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs) 935static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
920{ 936{
921 IRIns *irb; 937 IRIns *irb;
922 if (ir->r == RID_SINK) 938 if (ir->r == RID_SINK)
@@ -927,22 +943,35 @@ static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
927 Reg src = ra_alloc1(as, irb->op1, RSET_GPR); 943 Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
928 asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src)); 944 asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
929 } else { 945 } else {
930 Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); 946 Reg src = ra_alloc1(as, ir->op2,
931 asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, 947 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
948 asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
932 rset_exclude(RSET_GPR, src), ofs); 949 rset_exclude(RSET_GPR, src), ofs);
933 } 950 }
934} 951}
935 952
953#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
954
936static void asm_ahuvload(ASMState *as, IRIns *ir) 955static void asm_ahuvload(ASMState *as, IRIns *ir)
937{ 956{
938 IRType1 t = ir->t; 957 IRType1 t = ir->t;
939 Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx; 958 Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
940 RegSet allow = RSET_GPR; 959 RegSet allow = RSET_GPR;
941 int32_t ofs = AHUREF_LSX; 960 int32_t ofs = AHUREF_LSX;
961 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) {
962 t.irt = IRT_NUM;
963 if (ra_used(ir+1)) {
964 type = ra_dest(as, ir+1, allow);
965 rset_clear(allow, type);
966 }
967 ofs = 0;
968 }
942 if (ra_used(ir)) { 969 if (ra_used(ir)) {
943 lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); 970 lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
944 if (!irt_isnum(t)) ofs = 0; 971 irt_isint(ir->t) || irt_isaddr(ir->t),
945 dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); 972 "bad load type %d", irt_type(ir->t));
973 if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0;
974 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
946 rset_clear(allow, dest); 975 rset_clear(allow, dest);
947 } 976 }
948 idx = asm_fuseahuref(as, ir->op1, &ofs, allow); 977 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
@@ -951,12 +980,13 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
951 asm_guardcc(as, CC_GE); 980 asm_guardcc(as, CC_GE);
952 emit_ab(as, PPCI_CMPLW, type, tisnum); 981 emit_ab(as, PPCI_CMPLW, type, tisnum);
953 if (ra_hasreg(dest)) { 982 if (ra_hasreg(dest)) {
954 if (ofs == AHUREF_LSX) { 983 if (!LJ_SOFTFP && ofs == AHUREF_LSX) {
955 tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, 984 tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
956 (idx&255)), (idx>>8))); 985 (idx&255)), (idx>>8)));
957 emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp); 986 emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
958 } else { 987 } else {
959 emit_fai(as, PPCI_LFD, dest, idx, ofs); 988 emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest, idx,
989 ofs+4*LJ_SOFTFP);
960 } 990 }
961 } 991 }
962 } else { 992 } else {
@@ -979,7 +1009,7 @@ static void asm_ahustore(ASMState *as, IRIns *ir)
979 int32_t ofs = AHUREF_LSX; 1009 int32_t ofs = AHUREF_LSX;
980 if (ir->r == RID_SINK) 1010 if (ir->r == RID_SINK)
981 return; 1011 return;
982 if (irt_isnum(ir->t)) { 1012 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
983 src = ra_alloc1(as, ir->op2, RSET_FPR); 1013 src = ra_alloc1(as, ir->op2, RSET_FPR);
984 } else { 1014 } else {
985 if (!irt_ispri(ir->t)) { 1015 if (!irt_ispri(ir->t)) {
@@ -987,11 +1017,14 @@ static void asm_ahustore(ASMState *as, IRIns *ir)
987 rset_clear(allow, src); 1017 rset_clear(allow, src);
988 ofs = 0; 1018 ofs = 0;
989 } 1019 }
990 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); 1020 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
1021 type = ra_alloc1(as, (ir+1)->op2, allow);
1022 else
1023 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
991 rset_clear(allow, type); 1024 rset_clear(allow, type);
992 } 1025 }
993 idx = asm_fuseahuref(as, ir->op1, &ofs, allow); 1026 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
994 if (irt_isnum(ir->t)) { 1027 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
995 if (ofs == AHUREF_LSX) { 1028 if (ofs == AHUREF_LSX) {
996 emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP); 1029 emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
997 emit_slwi(as, RID_TMP, (idx>>8), 3); 1030 emit_slwi(as, RID_TMP, (idx>>8), 3);
@@ -1016,21 +1049,38 @@ static void asm_sload(ASMState *as, IRIns *ir)
1016 IRType1 t = ir->t; 1049 IRType1 t = ir->t;
1017 Reg dest = RID_NONE, type = RID_NONE, base; 1050 Reg dest = RID_NONE, type = RID_NONE, base;
1018 RegSet allow = RSET_GPR; 1051 RegSet allow = RSET_GPR;
1019 lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ 1052 int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
1020 lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); 1053 if (hiop)
1021 lua_assert(LJ_DUALNUM || 1054 t.irt = IRT_NUM;
1022 !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); 1055 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1056 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1057 lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1058 "inconsistent SLOAD variant");
1059 lj_assertA(LJ_DUALNUM ||
1060 !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)),
1061 "bad SLOAD type");
1062#if LJ_SOFTFP
1063 lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
1064 "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
1065 if (hiop && ra_used(ir+1)) {
1066 type = ra_dest(as, ir+1, allow);
1067 rset_clear(allow, type);
1068 }
1069#else
1023 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { 1070 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
1024 dest = ra_scratch(as, RSET_FPR); 1071 dest = ra_scratch(as, RSET_FPR);
1025 asm_tointg(as, ir, dest); 1072 asm_tointg(as, ir, dest);
1026 t.irt = IRT_NUM; /* Continue with a regular number type check. */ 1073 t.irt = IRT_NUM; /* Continue with a regular number type check. */
1027 } else if (ra_used(ir)) { 1074 } else
1028 lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); 1075#endif
1029 dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR); 1076 if (ra_used(ir)) {
1077 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
1078 "bad SLOAD type %d", irt_type(ir->t));
1079 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
1030 rset_clear(allow, dest); 1080 rset_clear(allow, dest);
1031 base = ra_alloc1(as, REF_BASE, allow); 1081 base = ra_alloc1(as, REF_BASE, allow);
1032 rset_clear(allow, base); 1082 rset_clear(allow, base);
1033 if ((ir->op2 & IRSLOAD_CONVERT)) { 1083 if (!LJ_SOFTFP && (ir->op2 & IRSLOAD_CONVERT)) {
1034 if (irt_isint(t)) { 1084 if (irt_isint(t)) {
1035 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO); 1085 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
1036 dest = ra_scratch(as, RSET_FPR); 1086 dest = ra_scratch(as, RSET_FPR);
@@ -1044,7 +1094,7 @@ static void asm_sload(ASMState *as, IRIns *ir)
1044 emit_fab(as, PPCI_FSUB, dest, dest, fbias); 1094 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
1045 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP); 1095 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
1046 emit_lsptr(as, PPCI_LFS, (fbias & 31), 1096 emit_lsptr(as, PPCI_LFS, (fbias & 31),
1047 (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)), 1097 (void *)&as->J->k32[LJ_K32_2P52_2P31],
1048 rset_clear(allow, hibias)); 1098 rset_clear(allow, hibias));
1049 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO); 1099 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
1050 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI); 1100 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
@@ -1062,10 +1112,13 @@ dotypecheck:
1062 if ((ir->op2 & IRSLOAD_TYPECHECK)) { 1112 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1063 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow); 1113 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
1064 asm_guardcc(as, CC_GE); 1114 asm_guardcc(as, CC_GE);
1065 emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum); 1115#if !LJ_SOFTFP
1066 type = RID_TMP; 1116 type = RID_TMP;
1117#endif
1118 emit_ab(as, PPCI_CMPLW, type, tisnum);
1067 } 1119 }
1068 if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4); 1120 if (ra_hasreg(dest)) emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest,
1121 base, ofs-(LJ_SOFTFP?0:4));
1069 } else { 1122 } else {
1070 if ((ir->op2 & IRSLOAD_TYPECHECK)) { 1123 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1071 asm_guardcc(as, CC_NE); 1124 asm_guardcc(as, CC_NE);
@@ -1083,19 +1136,16 @@ dotypecheck:
1083static void asm_cnew(ASMState *as, IRIns *ir) 1136static void asm_cnew(ASMState *as, IRIns *ir)
1084{ 1137{
1085 CTState *cts = ctype_ctsG(J2G(as->J)); 1138 CTState *cts = ctype_ctsG(J2G(as->J));
1086 CTypeID ctypeid = (CTypeID)IR(ir->op1)->i; 1139 CTypeID id = (CTypeID)IR(ir->op1)->i;
1087 CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? 1140 CTSize sz;
1088 lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i; 1141 CTInfo info = lj_ctype_info(cts, id, &sz);
1089 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; 1142 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1090 IRRef args[2]; 1143 IRRef args[4];
1091 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1092 RegSet drop = RSET_SCRATCH; 1144 RegSet drop = RSET_SCRATCH;
1093 lua_assert(sz != CTSIZE_INVALID); 1145 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1146 "bad CNEW/CNEWI operands");
1094 1147
1095 args[0] = ASMREF_L; /* lua_State *L */
1096 args[1] = ASMREF_TMP1; /* MSize size */
1097 as->gcsteps++; 1148 as->gcsteps++;
1098
1099 if (ra_hasreg(ir->r)) 1149 if (ra_hasreg(ir->r))
1100 rset_clear(drop, ir->r); /* Dest reg handled below. */ 1150 rset_clear(drop, ir->r); /* Dest reg handled below. */
1101 ra_evictset(as, drop); 1151 ra_evictset(as, drop);
@@ -1104,11 +1154,12 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1104 1154
1105 /* Initialize immutable cdata object. */ 1155 /* Initialize immutable cdata object. */
1106 if (ir->o == IR_CNEWI) { 1156 if (ir->o == IR_CNEWI) {
1157 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1107 int32_t ofs = sizeof(GCcdata); 1158 int32_t ofs = sizeof(GCcdata);
1108 lua_assert(sz == 4 || sz == 8); 1159 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1109 if (sz == 8) { 1160 if (sz == 8) {
1110 ofs += 4; 1161 ofs += 4;
1111 lua_assert((ir+1)->o == IR_HIOP); 1162 lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
1112 } 1163 }
1113 for (;;) { 1164 for (;;) {
1114 Reg r = ra_alloc1(as, ir->op2, allow); 1165 Reg r = ra_alloc1(as, ir->op2, allow);
@@ -1117,18 +1168,28 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1117 if (ofs == sizeof(GCcdata)) break; 1168 if (ofs == sizeof(GCcdata)) break;
1118 ofs -= 4; ir++; 1169 ofs -= 4; ir++;
1119 } 1170 }
1171 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1172 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1173 args[0] = ASMREF_L; /* lua_State *L */
1174 args[1] = ir->op1; /* CTypeID id */
1175 args[2] = ir->op2; /* CTSize sz */
1176 args[3] = ASMREF_TMP1; /* CTSize align */
1177 asm_gencall(as, ci, args);
1178 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1179 return;
1120 } 1180 }
1181
1121 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ 1182 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
1122 emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); 1183 emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
1123 emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); 1184 emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
1124 emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA); 1185 emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
1125 emit_ti(as, PPCI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */ 1186 emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
1187 args[0] = ASMREF_L; /* lua_State *L */
1188 args[1] = ASMREF_TMP1; /* MSize size */
1126 asm_gencall(as, ci, args); 1189 asm_gencall(as, ci, args);
1127 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), 1190 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
1128 ra_releasetmp(as, ASMREF_TMP1)); 1191 ra_releasetmp(as, ASMREF_TMP1));
1129} 1192}
1130#else
1131#define asm_cnew(as, ir) ((void)0)
1132#endif 1193#endif
1133 1194
1134/* -- Write barriers ------------------------------------------------------ */ 1195/* -- Write barriers ------------------------------------------------------ */
@@ -1142,7 +1203,7 @@ static void asm_tbar(ASMState *as, IRIns *ir)
1142 emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist)); 1203 emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
1143 emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked)); 1204 emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
1144 emit_setgl(as, tab, gc.grayagain); 1205 emit_setgl(as, tab, gc.grayagain);
1145 lua_assert(LJ_GC_BLACK == 0x04); 1206 lj_assertA(LJ_GC_BLACK == 0x04, "bad LJ_GC_BLACK");
1146 emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */ 1207 emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
1147 emit_getgl(as, link, gc.grayagain); 1208 emit_getgl(as, link, gc.grayagain);
1148 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); 1209 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
@@ -1157,7 +1218,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
1157 MCLabel l_end; 1218 MCLabel l_end;
1158 Reg obj, val, tmp; 1219 Reg obj, val, tmp;
1159 /* No need for other object barriers (yet). */ 1220 /* No need for other object barriers (yet). */
1160 lua_assert(IR(ir->op1)->o == IR_UREFC); 1221 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1161 ra_evictset(as, RSET_SCRATCH); 1222 ra_evictset(as, RSET_SCRATCH);
1162 l_end = emit_label(as); 1223 l_end = emit_label(as);
1163 args[0] = ASMREF_TMP1; /* global_State *g */ 1224 args[0] = ASMREF_TMP1; /* global_State *g */
@@ -1178,6 +1239,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
1178 1239
1179/* -- Arithmetic and logic operations ------------------------------------- */ 1240/* -- Arithmetic and logic operations ------------------------------------- */
1180 1241
1242#if !LJ_SOFTFP
1181static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi) 1243static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
1182{ 1244{
1183 Reg dest = ra_dest(as, ir, RSET_FPR); 1245 Reg dest = ra_dest(as, ir, RSET_FPR);
@@ -1196,31 +1258,24 @@ static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
1196 emit_fb(as, pi, dest, left); 1258 emit_fb(as, pi, dest, left);
1197} 1259}
1198 1260
1199static int asm_fpjoin_pow(ASMState *as, IRIns *ir) 1261static void asm_fpmath(ASMState *as, IRIns *ir)
1200{ 1262{
1201 IRIns *irp = IR(ir->op1); 1263 if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
1202 if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { 1264 asm_fpunary(as, ir, PPCI_FSQRT);
1203 IRIns *irpp = IR(irp->op1); 1265 else
1204 if (irpp == ir-2 && irpp->o == IR_FPMATH && 1266 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1205 irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
1206 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
1207 IRRef args[2];
1208 args[0] = irpp->op1;
1209 args[1] = irp->op2;
1210 asm_setupresult(as, ir, ci);
1211 asm_gencall(as, ci, args);
1212 return 1;
1213 }
1214 }
1215 return 0;
1216} 1267}
1268#endif
1217 1269
1218static void asm_add(ASMState *as, IRIns *ir) 1270static void asm_add(ASMState *as, IRIns *ir)
1219{ 1271{
1272#if !LJ_SOFTFP
1220 if (irt_isnum(ir->t)) { 1273 if (irt_isnum(ir->t)) {
1221 if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD)) 1274 if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
1222 asm_fparith(as, ir, PPCI_FADD); 1275 asm_fparith(as, ir, PPCI_FADD);
1223 } else { 1276 } else
1277#endif
1278 {
1224 Reg dest = ra_dest(as, ir, RSET_GPR); 1279 Reg dest = ra_dest(as, ir, RSET_GPR);
1225 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 1280 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1226 PPCIns pi; 1281 PPCIns pi;
@@ -1259,10 +1314,13 @@ static void asm_add(ASMState *as, IRIns *ir)
1259 1314
1260static void asm_sub(ASMState *as, IRIns *ir) 1315static void asm_sub(ASMState *as, IRIns *ir)
1261{ 1316{
1317#if !LJ_SOFTFP
1262 if (irt_isnum(ir->t)) { 1318 if (irt_isnum(ir->t)) {
1263 if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB)) 1319 if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
1264 asm_fparith(as, ir, PPCI_FSUB); 1320 asm_fparith(as, ir, PPCI_FSUB);
1265 } else { 1321 } else
1322#endif
1323 {
1266 PPCIns pi = PPCI_SUBF; 1324 PPCIns pi = PPCI_SUBF;
1267 Reg dest = ra_dest(as, ir, RSET_GPR); 1325 Reg dest = ra_dest(as, ir, RSET_GPR);
1268 Reg left, right; 1326 Reg left, right;
@@ -1288,9 +1346,12 @@ static void asm_sub(ASMState *as, IRIns *ir)
1288 1346
1289static void asm_mul(ASMState *as, IRIns *ir) 1347static void asm_mul(ASMState *as, IRIns *ir)
1290{ 1348{
1349#if !LJ_SOFTFP
1291 if (irt_isnum(ir->t)) { 1350 if (irt_isnum(ir->t)) {
1292 asm_fparith(as, ir, PPCI_FMUL); 1351 asm_fparith(as, ir, PPCI_FMUL);
1293 } else { 1352 } else
1353#endif
1354 {
1294 PPCIns pi = PPCI_MULLW; 1355 PPCIns pi = PPCI_MULLW;
1295 Reg dest = ra_dest(as, ir, RSET_GPR); 1356 Reg dest = ra_dest(as, ir, RSET_GPR);
1296 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 1357 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
@@ -1312,11 +1373,16 @@ static void asm_mul(ASMState *as, IRIns *ir)
1312 } 1373 }
1313} 1374}
1314 1375
1376#define asm_fpdiv(as, ir) asm_fparith(as, ir, PPCI_FDIV)
1377
1315static void asm_neg(ASMState *as, IRIns *ir) 1378static void asm_neg(ASMState *as, IRIns *ir)
1316{ 1379{
1380#if !LJ_SOFTFP
1317 if (irt_isnum(ir->t)) { 1381 if (irt_isnum(ir->t)) {
1318 asm_fpunary(as, ir, PPCI_FNEG); 1382 asm_fpunary(as, ir, PPCI_FNEG);
1319 } else { 1383 } else
1384#endif
1385 {
1320 Reg dest, left; 1386 Reg dest, left;
1321 PPCIns pi = PPCI_NEG; 1387 PPCIns pi = PPCI_NEG;
1322 if (as->flagmcp == as->mcp) { 1388 if (as->flagmcp == as->mcp) {
@@ -1330,6 +1396,8 @@ static void asm_neg(ASMState *as, IRIns *ir)
1330 } 1396 }
1331} 1397}
1332 1398
1399#define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS)
1400
1333static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi) 1401static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
1334{ 1402{
1335 Reg dest, left, right; 1403 Reg dest, left, right;
@@ -1345,6 +1413,10 @@ static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
1345 emit_tab(as, pi|PPCF_DOT, dest, left, right); 1413 emit_tab(as, pi|PPCF_DOT, dest, left, right);
1346} 1414}
1347 1415
1416#define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO)
1417#define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO)
1418#define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO)
1419
1348#if LJ_HASFFI 1420#if LJ_HASFFI
1349static void asm_add64(ASMState *as, IRIns *ir) 1421static void asm_add64(ASMState *as, IRIns *ir)
1350{ 1422{
@@ -1424,7 +1496,7 @@ static void asm_neg64(ASMState *as, IRIns *ir)
1424} 1496}
1425#endif 1497#endif
1426 1498
1427static void asm_bitnot(ASMState *as, IRIns *ir) 1499static void asm_bnot(ASMState *as, IRIns *ir)
1428{ 1500{
1429 Reg dest, left, right; 1501 Reg dest, left, right;
1430 PPCIns pi = PPCI_NOR; 1502 PPCIns pi = PPCI_NOR;
@@ -1451,7 +1523,7 @@ nofuse:
1451 emit_asb(as, pi, dest, left, right); 1523 emit_asb(as, pi, dest, left, right);
1452} 1524}
1453 1525
1454static void asm_bitswap(ASMState *as, IRIns *ir) 1526static void asm_bswap(ASMState *as, IRIns *ir)
1455{ 1527{
1456 Reg dest = ra_dest(as, ir, RSET_GPR); 1528 Reg dest = ra_dest(as, ir, RSET_GPR);
1457 IRIns *irx; 1529 IRIns *irx;
@@ -1472,32 +1544,6 @@ static void asm_bitswap(ASMState *as, IRIns *ir)
1472 } 1544 }
1473} 1545}
1474 1546
1475static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1476{
1477 Reg dest = ra_dest(as, ir, RSET_GPR);
1478 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1479 if (irref_isk(ir->op2)) {
1480 int32_t k = IR(ir->op2)->i;
1481 Reg tmp = left;
1482 if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
1483 if (!checku16(k)) {
1484 emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
1485 if ((k & 0xffff) == 0) return;
1486 }
1487 emit_asi(as, pik, dest, left, k);
1488 return;
1489 }
1490 }
1491 /* May fail due to spills/restores above, but simplifies the logic. */
1492 if (as->flagmcp == as->mcp) {
1493 as->flagmcp = NULL;
1494 as->mcp++;
1495 pi |= PPCF_DOT;
1496 }
1497 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1498 emit_asb(as, pi, dest, left, right);
1499}
1500
1501/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */ 1547/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
1502static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref) 1548static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
1503{ 1549{
@@ -1528,7 +1574,7 @@ nofuse:
1528 *--as->mcp = pi | PPCF_T(left); 1574 *--as->mcp = pi | PPCF_T(left);
1529} 1575}
1530 1576
1531static void asm_bitand(ASMState *as, IRIns *ir) 1577static void asm_band(ASMState *as, IRIns *ir)
1532{ 1578{
1533 Reg dest, left, right; 1579 Reg dest, left, right;
1534 IRRef lref = ir->op1; 1580 IRRef lref = ir->op1;
@@ -1583,6 +1629,35 @@ static void asm_bitand(ASMState *as, IRIns *ir)
1583 emit_asb(as, PPCI_AND ^ dot, dest, left, right); 1629 emit_asb(as, PPCI_AND ^ dot, dest, left, right);
1584} 1630}
1585 1631
1632static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1633{
1634 Reg dest = ra_dest(as, ir, RSET_GPR);
1635 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1636 if (irref_isk(ir->op2)) {
1637 int32_t k = IR(ir->op2)->i;
1638 Reg tmp = left;
1639 if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
1640 if (!checku16(k)) {
1641 emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
1642 if ((k & 0xffff) == 0) return;
1643 }
1644 emit_asi(as, pik, dest, left, k);
1645 return;
1646 }
1647 }
1648 /* May fail due to spills/restores above, but simplifies the logic. */
1649 if (as->flagmcp == as->mcp) {
1650 as->flagmcp = NULL;
1651 as->mcp++;
1652 pi |= PPCF_DOT;
1653 }
1654 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1655 emit_asb(as, pi, dest, left, right);
1656}
1657
1658#define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI)
1659#define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI)
1660
1586static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) 1661static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1587{ 1662{
1588 Reg dest, left; 1663 Reg dest, left;
@@ -1608,9 +1683,48 @@ static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1608 } 1683 }
1609} 1684}
1610 1685
1686#define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0)
1687#define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1)
1688#define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI)
1689#define asm_brol(as, ir) \
1690 asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \
1691 PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31))
1692#define asm_bror(as, ir) lj_assertA(0, "unexpected BROR")
1693
1694#if LJ_SOFTFP
1695static void asm_sfpmin_max(ASMState *as, IRIns *ir)
1696{
1697 CCallInfo ci = lj_ir_callinfo[IRCALL_softfp_cmp];
1698 IRRef args[4];
1699 MCLabel l_right, l_end;
1700 Reg desthi = ra_dest(as, ir, RSET_GPR), destlo = ra_dest(as, ir+1, RSET_GPR);
1701 Reg righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
1702 Reg rightlo, leftlo = ra_alloc2(as, ir+1, RSET_GPR);
1703 PPCCC cond = (IROp)ir->o == IR_MIN ? CC_EQ : CC_NE;
1704 righthi = (lefthi >> 8); lefthi &= 255;
1705 rightlo = (leftlo >> 8); leftlo &= 255;
1706 args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
1707 args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
1708 l_end = emit_label(as);
1709 if (desthi != righthi) emit_mr(as, desthi, righthi);
1710 if (destlo != rightlo) emit_mr(as, destlo, rightlo);
1711 l_right = emit_label(as);
1712 if (l_end != l_right) emit_jmp(as, l_end);
1713 if (desthi != lefthi) emit_mr(as, desthi, lefthi);
1714 if (destlo != leftlo) emit_mr(as, destlo, leftlo);
1715 if (l_right == as->mcp+1) {
1716 cond ^= 4; l_right = l_end; ++as->mcp;
1717 }
1718 emit_condbranch(as, PPCI_BC, cond, l_right);
1719 ra_evictset(as, RSET_SCRATCH);
1720 emit_cmpi(as, RID_RET, 1);
1721 asm_gencall(as, &ci, args);
1722}
1723#endif
1724
1611static void asm_min_max(ASMState *as, IRIns *ir, int ismax) 1725static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
1612{ 1726{
1613 if (irt_isnum(ir->t)) { 1727 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1614 Reg dest = ra_dest(as, ir, RSET_FPR); 1728 Reg dest = ra_dest(as, ir, RSET_FPR);
1615 Reg tmp = dest; 1729 Reg tmp = dest;
1616 Reg right, left = ra_alloc2(as, ir, RSET_FPR); 1730 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
@@ -1618,9 +1732,8 @@ static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
1618 if (tmp == left || tmp == right) 1732 if (tmp == left || tmp == right)
1619 tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR, 1733 tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
1620 dest), left), right)); 1734 dest), left), right));
1621 emit_facb(as, PPCI_FSEL, dest, tmp, 1735 emit_facb(as, PPCI_FSEL, dest, tmp, left, right);
1622 ismax ? left : right, ismax ? right : left); 1736 emit_fab(as, PPCI_FSUB, tmp, ismax ? left : right, ismax ? right : left);
1623 emit_fab(as, PPCI_FSUB, tmp, left, right);
1624 } else { 1737 } else {
1625 Reg dest = ra_dest(as, ir, RSET_GPR); 1738 Reg dest = ra_dest(as, ir, RSET_GPR);
1626 Reg tmp1 = RID_TMP, tmp2 = dest; 1739 Reg tmp1 = RID_TMP, tmp2 = dest;
@@ -1638,6 +1751,9 @@ static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
1638 } 1751 }
1639} 1752}
1640 1753
1754#define asm_min(as, ir) asm_min_max(as, ir, 0)
1755#define asm_max(as, ir) asm_min_max(as, ir, 1)
1756
1641/* -- Comparisons --------------------------------------------------------- */ 1757/* -- Comparisons --------------------------------------------------------- */
1642 1758
1643#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */ 1759#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
@@ -1695,7 +1811,7 @@ static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
1695static void asm_comp(ASMState *as, IRIns *ir) 1811static void asm_comp(ASMState *as, IRIns *ir)
1696{ 1812{
1697 PPCCC cc = asm_compmap[ir->o]; 1813 PPCCC cc = asm_compmap[ir->o];
1698 if (irt_isnum(ir->t)) { 1814 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1699 Reg right, left = ra_alloc2(as, ir, RSET_FPR); 1815 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1700 right = (left >> 8); left &= 255; 1816 right = (left >> 8); left &= 255;
1701 asm_guardcc(as, (cc >> 4)); 1817 asm_guardcc(as, (cc >> 4));
@@ -1714,6 +1830,46 @@ static void asm_comp(ASMState *as, IRIns *ir)
1714 } 1830 }
1715} 1831}
1716 1832
1833#define asm_equal(as, ir) asm_comp(as, ir)
1834
1835#if LJ_SOFTFP
1836/* SFP comparisons. */
1837static void asm_sfpcomp(ASMState *as, IRIns *ir)
1838{
1839 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
1840 RegSet drop = RSET_SCRATCH;
1841 Reg r;
1842 IRRef args[4];
1843 args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
1844 args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
1845
1846 for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+3; r++) {
1847 if (!rset_test(as->freeset, r) &&
1848 regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
1849 rset_clear(drop, r);
1850 }
1851 ra_evictset(as, drop);
1852 asm_setupresult(as, ir, ci);
1853 switch ((IROp)ir->o) {
1854 case IR_ULT:
1855 asm_guardcc(as, CC_EQ);
1856 emit_ai(as, PPCI_CMPWI, RID_RET, 0);
1857 case IR_ULE:
1858 asm_guardcc(as, CC_EQ);
1859 emit_ai(as, PPCI_CMPWI, RID_RET, 1);
1860 break;
1861 case IR_GE: case IR_GT:
1862 asm_guardcc(as, CC_EQ);
1863 emit_ai(as, PPCI_CMPWI, RID_RET, 2);
1864 default:
1865 asm_guardcc(as, (asm_compmap[ir->o] & 0xf));
1866 emit_ai(as, PPCI_CMPWI, RID_RET, 0);
1867 break;
1868 }
1869 asm_gencall(as, ci, args);
1870}
1871#endif
1872
1717#if LJ_HASFFI 1873#if LJ_HASFFI
1718/* 64 bit integer comparisons. */ 1874/* 64 bit integer comparisons. */
1719static void asm_comp64(ASMState *as, IRIns *ir) 1875static void asm_comp64(ASMState *as, IRIns *ir)
@@ -1743,47 +1899,89 @@ static void asm_comp64(ASMState *as, IRIns *ir)
1743/* Hiword op of a split 64 bit op. Previous op must be the loword op. */ 1899/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
1744static void asm_hiop(ASMState *as, IRIns *ir) 1900static void asm_hiop(ASMState *as, IRIns *ir)
1745{ 1901{
1746#if LJ_HASFFI 1902#if LJ_HASFFI || LJ_SOFTFP
1747 /* HIOP is marked as a store because it needs its own DCE logic. */ 1903 /* HIOP is marked as a store because it needs its own DCE logic. */
1748 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ 1904 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
1749 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; 1905 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
1750 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ 1906 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
1751 as->curins--; /* Always skip the CONV. */ 1907 as->curins--; /* Always skip the CONV. */
1908#if LJ_HASFFI && !LJ_SOFTFP
1752 if (usehi || uselo) 1909 if (usehi || uselo)
1753 asm_conv64(as, ir); 1910 asm_conv64(as, ir);
1754 return; 1911 return;
1912#endif
1755 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ 1913 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
1756 as->curins--; /* Always skip the loword comparison. */ 1914 as->curins--; /* Always skip the loword comparison. */
1915#if LJ_SOFTFP
1916 if (!irt_isint(ir->t)) {
1917 asm_sfpcomp(as, ir-1);
1918 return;
1919 }
1920#endif
1921#if LJ_HASFFI
1757 asm_comp64(as, ir); 1922 asm_comp64(as, ir);
1923#endif
1758 return; 1924 return;
1925#if LJ_SOFTFP
1926 } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
1927 as->curins--; /* Always skip the loword min/max. */
1928 if (uselo || usehi)
1929 asm_sfpmin_max(as, ir-1);
1930 return;
1931#endif
1759 } else if ((ir-1)->o == IR_XSTORE) { 1932 } else if ((ir-1)->o == IR_XSTORE) {
1760 as->curins--; /* Handle both stores here. */ 1933 as->curins--; /* Handle both stores here. */
1761 if ((ir-1)->r != RID_SINK) { 1934 if ((ir-1)->r != RID_SINK) {
1762 asm_xstore(as, ir, 0); 1935 asm_xstore_(as, ir, 0);
1763 asm_xstore(as, ir-1, 4); 1936 asm_xstore_(as, ir-1, 4);
1764 } 1937 }
1765 return; 1938 return;
1766 } 1939 }
1767 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ 1940 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
1768 switch ((ir-1)->o) { 1941 switch ((ir-1)->o) {
1942#if LJ_HASFFI
1769 case IR_ADD: as->curins--; asm_add64(as, ir); break; 1943 case IR_ADD: as->curins--; asm_add64(as, ir); break;
1770 case IR_SUB: as->curins--; asm_sub64(as, ir); break; 1944 case IR_SUB: as->curins--; asm_sub64(as, ir); break;
1771 case IR_NEG: as->curins--; asm_neg64(as, ir); break; 1945 case IR_NEG: as->curins--; asm_neg64(as, ir); break;
1946#endif
1947#if LJ_SOFTFP
1948 case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1949 case IR_STRTO:
1950 if (!uselo)
1951 ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
1952 break;
1953#endif
1772 case IR_CALLN: 1954 case IR_CALLN:
1955 case IR_CALLS:
1773 case IR_CALLXS: 1956 case IR_CALLXS:
1774 if (!uselo) 1957 if (!uselo)
1775 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ 1958 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
1776 break; 1959 break;
1960#if LJ_SOFTFP
1961 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
1962#endif
1777 case IR_CNEWI: 1963 case IR_CNEWI:
1778 /* Nothing to do here. Handled by lo op itself. */ 1964 /* Nothing to do here. Handled by lo op itself. */
1779 break; 1965 break;
1780 default: lua_assert(0); break; 1966 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
1781 } 1967 }
1782#else 1968#else
1783 UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ 1969 /* Unused without SOFTFP or FFI. */
1970 UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
1784#endif 1971#endif
1785} 1972}
1786 1973
1974/* -- Profiling ----------------------------------------------------------- */
1975
1976static void asm_prof(ASMState *as, IRIns *ir)
1977{
1978 UNUSED(ir);
1979 asm_guardcc(as, CC_NE);
1980 emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE);
1981 emit_lsglptr(as, PPCI_LBZ, RID_TMP,
1982 (int32_t)offsetof(global_State, hookmask));
1983}
1984
1787/* -- Stack handling ------------------------------------------------------ */ 1985/* -- Stack handling ------------------------------------------------------ */
1788 1986
1789/* Check Lua stack size for overflow. Use exit handler as fallback. */ 1987/* Check Lua stack size for overflow. Use exit handler as fallback. */
@@ -1805,7 +2003,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
1805 emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack)); 2003 emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
1806 if (pbase == RID_TMP) 2004 if (pbase == RID_TMP)
1807 emit_getgl(as, RID_TMP, jit_base); 2005 emit_getgl(as, RID_TMP, jit_base);
1808 emit_getgl(as, tmp, jit_L); 2006 emit_getgl(as, tmp, cur_L);
1809 if (allow == RSET_EMPTY) /* Spill temp. register. */ 2007 if (allow == RSET_EMPTY) /* Spill temp. register. */
1810 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW); 2008 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
1811} 2009}
@@ -1826,12 +2024,25 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
1826 if ((sn & SNAP_NORESTORE)) 2024 if ((sn & SNAP_NORESTORE))
1827 continue; 2025 continue;
1828 if (irt_isnum(ir->t)) { 2026 if (irt_isnum(ir->t)) {
2027#if LJ_SOFTFP
2028 Reg tmp;
2029 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
2030 /* LJ_SOFTFP: must be a number constant. */
2031 lj_assertA(irref_isk(ref), "unsplit FP op");
2032 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
2033 emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
2034 if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
2035 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
2036 emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
2037#else
1829 Reg src = ra_alloc1(as, ref, RSET_FPR); 2038 Reg src = ra_alloc1(as, ref, RSET_FPR);
1830 emit_fai(as, PPCI_STFD, src, RID_BASE, ofs); 2039 emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
2040#endif
1831 } else { 2041 } else {
1832 Reg type; 2042 Reg type;
1833 RegSet allow = rset_exclude(RSET_GPR, RID_BASE); 2043 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
1834 lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); 2044 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
2045 "restore of IR type %d", irt_type(ir->t));
1835 if (!irt_ispri(ir->t)) { 2046 if (!irt_ispri(ir->t)) {
1836 Reg src = ra_alloc1(as, ref, allow); 2047 Reg src = ra_alloc1(as, ref, allow);
1837 rset_clear(allow, src); 2048 rset_clear(allow, src);
@@ -1840,6 +2051,10 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
1840 if ((sn & (SNAP_CONT|SNAP_FRAME))) { 2051 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
1841 if (s == 0) continue; /* Do not overwrite link to previous frame. */ 2052 if (s == 0) continue; /* Do not overwrite link to previous frame. */
1842 type = ra_allock(as, (int32_t)(*flinks--), allow); 2053 type = ra_allock(as, (int32_t)(*flinks--), allow);
2054#if LJ_SOFTFP
2055 } else if ((sn & SNAP_SOFTFPNUM)) {
2056 type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
2057#endif
1843 } else { 2058 } else {
1844 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); 2059 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
1845 } 2060 }
@@ -1847,7 +2062,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
1847 } 2062 }
1848 checkmclim(as); 2063 checkmclim(as);
1849 } 2064 }
1850 lua_assert(map + nent == flinks); 2065 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
1851} 2066}
1852 2067
1853/* -- GC handling --------------------------------------------------------- */ 2068/* -- GC handling --------------------------------------------------------- */
@@ -1945,7 +2160,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
1945 as->mctop = p; 2160 as->mctop = p;
1946 } else { 2161 } else {
1947 /* Patch stack adjustment. */ 2162 /* Patch stack adjustment. */
1948 lua_assert(checki16(CFRAME_SIZE+spadj)); 2163 lj_assertA(checki16(CFRAME_SIZE+spadj), "stack adjustment out of range");
1949 p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj); 2164 p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
1950 p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj; 2165 p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
1951 } 2166 }
@@ -1966,147 +2181,25 @@ static void asm_tail_prep(ASMState *as)
1966 } 2181 }
1967} 2182}
1968 2183
1969/* -- Instruction dispatch ------------------------------------------------ */
1970
1971/* Assemble a single instruction. */
1972static void asm_ir(ASMState *as, IRIns *ir)
1973{
1974 switch ((IROp)ir->o) {
1975 /* Miscellaneous ops. */
1976 case IR_LOOP: asm_loop(as); break;
1977 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
1978 case IR_USE:
1979 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1980 case IR_PHI: asm_phi(as, ir); break;
1981 case IR_HIOP: asm_hiop(as, ir); break;
1982 case IR_GCSTEP: asm_gcstep(as, ir); break;
1983
1984 /* Guarded assertions. */
1985 case IR_EQ: case IR_NE:
1986 if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
1987 as->curins--;
1988 asm_href(as, ir-1, (IROp)ir->o);
1989 break;
1990 }
1991 /* fallthrough */
1992 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1993 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1994 case IR_ABC:
1995 asm_comp(as, ir);
1996 break;
1997
1998 case IR_RETF: asm_retf(as, ir); break;
1999
2000 /* Bit ops. */
2001 case IR_BNOT: asm_bitnot(as, ir); break;
2002 case IR_BSWAP: asm_bitswap(as, ir); break;
2003
2004 case IR_BAND: asm_bitand(as, ir); break;
2005 case IR_BOR: asm_bitop(as, ir, PPCI_OR, PPCI_ORI); break;
2006 case IR_BXOR: asm_bitop(as, ir, PPCI_XOR, PPCI_XORI); break;
2007
2008 case IR_BSHL: asm_bitshift(as, ir, PPCI_SLW, 0); break;
2009 case IR_BSHR: asm_bitshift(as, ir, PPCI_SRW, 1); break;
2010 case IR_BSAR: asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI); break;
2011 case IR_BROL: asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31),
2012 PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)); break;
2013 case IR_BROR: lua_assert(0); break;
2014
2015 /* Arithmetic ops. */
2016 case IR_ADD: asm_add(as, ir); break;
2017 case IR_SUB: asm_sub(as, ir); break;
2018 case IR_MUL: asm_mul(as, ir); break;
2019 case IR_DIV: asm_fparith(as, ir, PPCI_FDIV); break;
2020 case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
2021 case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
2022 case IR_NEG: asm_neg(as, ir); break;
2023
2024 case IR_ABS: asm_fpunary(as, ir, PPCI_FABS); break;
2025 case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
2026 case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
2027 case IR_MIN: asm_min_max(as, ir, 0); break;
2028 case IR_MAX: asm_min_max(as, ir, 1); break;
2029 case IR_FPMATH:
2030 if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
2031 break;
2032 if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
2033 asm_fpunary(as, ir, PPCI_FSQRT);
2034 else
2035 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
2036 break;
2037
2038 /* Overflow-checking arithmetic ops. */
2039 case IR_ADDOV: asm_arithov(as, ir, PPCI_ADDO); break;
2040 case IR_SUBOV: asm_arithov(as, ir, PPCI_SUBFO); break;
2041 case IR_MULOV: asm_arithov(as, ir, PPCI_MULLWO); break;
2042
2043 /* Memory references. */
2044 case IR_AREF: asm_aref(as, ir); break;
2045 case IR_HREF: asm_href(as, ir, 0); break;
2046 case IR_HREFK: asm_hrefk(as, ir); break;
2047 case IR_NEWREF: asm_newref(as, ir); break;
2048 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
2049 case IR_FREF: asm_fref(as, ir); break;
2050 case IR_STRREF: asm_strref(as, ir); break;
2051
2052 /* Loads and stores. */
2053 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2054 asm_ahuvload(as, ir);
2055 break;
2056 case IR_FLOAD: asm_fload(as, ir); break;
2057 case IR_XLOAD: asm_xload(as, ir); break;
2058 case IR_SLOAD: asm_sload(as, ir); break;
2059
2060 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
2061 case IR_FSTORE: asm_fstore(as, ir); break;
2062 case IR_XSTORE: asm_xstore(as, ir, 0); break;
2063
2064 /* Allocations. */
2065 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
2066 case IR_TNEW: asm_tnew(as, ir); break;
2067 case IR_TDUP: asm_tdup(as, ir); break;
2068 case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
2069
2070 /* Write barriers. */
2071 case IR_TBAR: asm_tbar(as, ir); break;
2072 case IR_OBAR: asm_obar(as, ir); break;
2073
2074 /* Type conversions. */
2075 case IR_CONV: asm_conv(as, ir); break;
2076 case IR_TOBIT: asm_tobit(as, ir); break;
2077 case IR_TOSTR: asm_tostr(as, ir); break;
2078 case IR_STRTO: asm_strto(as, ir); break;
2079
2080 /* Calls. */
2081 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
2082 case IR_CALLXS: asm_callx(as, ir); break;
2083 case IR_CARG: break;
2084
2085 default:
2086 setintV(&as->J->errinfo, ir->o);
2087 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
2088 break;
2089 }
2090}
2091
2092/* -- Trace setup --------------------------------------------------------- */ 2184/* -- Trace setup --------------------------------------------------------- */
2093 2185
2094/* Ensure there are enough stack slots for call arguments. */ 2186/* Ensure there are enough stack slots for call arguments. */
2095static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) 2187static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2096{ 2188{
2097 IRRef args[CCI_NARGS_MAX*2]; 2189 IRRef args[CCI_NARGS_MAX*2];
2098 uint32_t i, nargs = (int)CCI_NARGS(ci); 2190 uint32_t i, nargs = CCI_XNARGS(ci);
2099 int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; 2191 int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
2100 asm_collectargs(as, ir, ci, args); 2192 asm_collectargs(as, ir, ci, args);
2101 for (i = 0; i < nargs; i++) 2193 for (i = 0; i < nargs; i++)
2102 if (args[i] && irt_isfp(IR(args[i])->t)) { 2194 if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
2103 if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1; 2195 if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
2104 } else { 2196 } else {
2105 if (ngpr > 0) ngpr--; else nslots++; 2197 if (ngpr > 0) ngpr--; else nslots++;
2106 } 2198 }
2107 if (nslots > as->evenspill) /* Leave room for args in stack slots. */ 2199 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
2108 as->evenspill = nslots; 2200 as->evenspill = nslots;
2109 return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); 2201 return (!LJ_SOFTFP && irt_isfp(ir->t)) ? REGSP_HINT(RID_FPRET) :
2202 REGSP_HINT(RID_RET);
2110} 2203}
2111 2204
2112static void asm_setup_target(ASMState *as) 2205static void asm_setup_target(ASMState *as)
@@ -2144,14 +2237,16 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
2144 } else if ((ins & 0xfc000000u) == PPCI_B && 2237 } else if ((ins & 0xfc000000u) == PPCI_B &&
2145 ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) { 2238 ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
2146 ptrdiff_t delta = (char *)target - (char *)p; 2239 ptrdiff_t delta = (char *)target - (char *)p;
2147 lua_assert(((delta + 0x02000000) >> 26) == 0); 2240 lj_assertJ(((delta + 0x02000000) >> 26) == 0,
2241 "branch target out of range");
2148 *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu); 2242 *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
2149 if (!cstart) cstart = p; 2243 if (!cstart) cstart = p;
2150 } 2244 }
2151 } 2245 }
2152 { /* Always patch long-range branch in exit stub itself. */ 2246 { /* Always patch long-range branch in exit stub itself. */
2153 ptrdiff_t delta = (char *)target - (char *)px - clearso; 2247 ptrdiff_t delta = (char *)target - (char *)px - clearso;
2154 lua_assert(((delta + 0x02000000) >> 26) == 0); 2248 lj_assertJ(((delta + 0x02000000) >> 26) == 0,
2249 "branch target out of range");
2155 *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu); 2250 *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
2156 } 2251 }
2157 if (!cstart) cstart = px; 2252 if (!cstart) cstart = px;
diff --git a/src/lj_asm_x86.h b/src/lj_asm_x86.h
index 68b40b31..e40b5e54 100644
--- a/src/lj_asm_x86.h
+++ b/src/lj_asm_x86.h
@@ -21,15 +21,17 @@ static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
21 } 21 }
22 /* Push the high byte of the exitno for each exit stub group. */ 22 /* Push the high byte of the exitno for each exit stub group. */
23 *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8); 23 *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
24#if !LJ_GC64
24 /* Store DISPATCH at original stack slot 0. Account for the two push ops. */ 25 /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
25 *mxp++ = XI_MOVmi; 26 *mxp++ = XI_MOVmi;
26 *mxp++ = MODRM(XM_OFS8, 0, RID_ESP); 27 *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
27 *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP); 28 *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
28 *mxp++ = 2*sizeof(void *); 29 *mxp++ = 2*sizeof(void *);
29 *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4; 30 *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
31#endif
30 /* Jump to exit handler which fills in the ExitState. */ 32 /* Jump to exit handler which fills in the ExitState. */
31 *mxp++ = XI_JMP; mxp += 4; 33 *mxp++ = XI_JMP; mxp += 4;
32 *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); 34 *((int32_t *)(mxp-4)) = jmprel(as->J, mxp, (MCode *)(void *)lj_vm_exit_handler);
33 /* Commit the code for this group (even if assembly fails later on). */ 35 /* Commit the code for this group (even if assembly fails later on). */
34 lj_mcode_commitbot(as->J, mxp); 36 lj_mcode_commitbot(as->J, mxp);
35 as->mcbot = mxp; 37 as->mcbot = mxp;
@@ -58,14 +60,18 @@ static void asm_guardcc(ASMState *as, int cc)
58 MCode *p = as->mcp; 60 MCode *p = as->mcp;
59 if (LJ_UNLIKELY(p == as->invmcp)) { 61 if (LJ_UNLIKELY(p == as->invmcp)) {
60 as->loopinv = 1; 62 as->loopinv = 1;
61 *(int32_t *)(p+1) = jmprel(p+5, target); 63 *(int32_t *)(p+1) = jmprel(as->J, p+5, target);
62 target = p; 64 target = p;
63 cc ^= 1; 65 cc ^= 1;
64 if (as->realign) { 66 if (as->realign) {
67 if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
68 as->mrm.ofs += 2; /* Fixup RIP offset for pending fused load. */
65 emit_sjcc(as, cc, target); 69 emit_sjcc(as, cc, target);
66 return; 70 return;
67 } 71 }
68 } 72 }
73 if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
74 as->mrm.ofs += 6; /* Fixup RIP offset for pending fused load. */
69 emit_jcc(as, cc, target); 75 emit_jcc(as, cc, target);
70} 76}
71 77
@@ -79,6 +85,15 @@ static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
79{ 85{
80 if (irref_isk(ref)) { 86 if (irref_isk(ref)) {
81 IRIns *ir = IR(ref); 87 IRIns *ir = IR(ref);
88#if LJ_GC64
89 if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
90 *k = ir->i;
91 return 1;
92 } else if (checki32((int64_t)ir_k64(ir)->u64)) {
93 *k = (int32_t)ir_k64(ir)->u64;
94 return 1;
95 }
96#else
82 if (ir->o != IR_KINT64) { 97 if (ir->o != IR_KINT64) {
83 *k = ir->i; 98 *k = ir->i;
84 return 1; 99 return 1;
@@ -86,6 +101,7 @@ static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
86 *k = (int32_t)ir_kint64(ir)->u64; 101 *k = (int32_t)ir_kint64(ir)->u64;
87 return 1; 102 return 1;
88 } 103 }
104#endif
89 } 105 }
90 return 0; 106 return 0;
91} 107}
@@ -115,7 +131,7 @@ static IRRef asm_fuseabase(ASMState *as, IRRef ref)
115 as->mrm.ofs = 0; 131 as->mrm.ofs = 0;
116 if (irb->o == IR_FLOAD) { 132 if (irb->o == IR_FLOAD) {
117 IRIns *ira = IR(irb->op1); 133 IRIns *ira = IR(irb->op1);
118 lua_assert(irb->op2 == IRFL_TAB_ARRAY); 134 lj_assertA(irb->op2 == IRFL_TAB_ARRAY, "expected FLOAD TAB_ARRAY");
119 /* We can avoid the FLOAD of t->array for colocated arrays. */ 135 /* We can avoid the FLOAD of t->array for colocated arrays. */
120 if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && 136 if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
121 !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) { 137 !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
@@ -134,7 +150,7 @@ static IRRef asm_fuseabase(ASMState *as, IRRef ref)
134static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) 150static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
135{ 151{
136 IRIns *irx; 152 IRIns *irx;
137 lua_assert(ir->o == IR_AREF); 153 lj_assertA(ir->o == IR_AREF, "expected AREF");
138 as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); 154 as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
139 irx = IR(ir->op2); 155 irx = IR(ir->op2);
140 if (irref_isk(ir->op2)) { 156 if (irref_isk(ir->op2)) {
@@ -185,14 +201,25 @@ static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
185 if (irref_isk(ir->op1)) { 201 if (irref_isk(ir->op1)) {
186 GCfunc *fn = ir_kfunc(IR(ir->op1)); 202 GCfunc *fn = ir_kfunc(IR(ir->op1));
187 GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv; 203 GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
204#if LJ_GC64
205 int64_t ofs = dispofs(as, &uv->tv);
206 if (checki32(ofs) && checki32(ofs+4)) {
207 as->mrm.ofs = (int32_t)ofs;
208 as->mrm.base = RID_DISPATCH;
209 as->mrm.idx = RID_NONE;
210 return;
211 }
212#else
188 as->mrm.ofs = ptr2addr(&uv->tv); 213 as->mrm.ofs = ptr2addr(&uv->tv);
189 as->mrm.base = as->mrm.idx = RID_NONE; 214 as->mrm.base = as->mrm.idx = RID_NONE;
190 return; 215 return;
216#endif
191 } 217 }
192 break; 218 break;
193 default: 219 default:
194 lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || 220 lj_assertA(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
195 ir->o == IR_KKPTR); 221 ir->o == IR_KKPTR,
222 "bad IR op %d", ir->o);
196 break; 223 break;
197 } 224 }
198 } 225 }
@@ -204,26 +231,53 @@ static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
204/* Fuse FLOAD/FREF reference into memory operand. */ 231/* Fuse FLOAD/FREF reference into memory operand. */
205static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) 232static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
206{ 233{
207 lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); 234 lj_assertA(ir->o == IR_FLOAD || ir->o == IR_FREF,
208 as->mrm.ofs = field_ofs[ir->op2]; 235 "bad IR op %d", ir->o);
209 as->mrm.idx = RID_NONE; 236 as->mrm.idx = RID_NONE;
237 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
238#if LJ_GC64
239 as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch);
240 as->mrm.base = RID_DISPATCH;
241#else
242 as->mrm.ofs = (int32_t)(ir->op2 << 2) + ptr2addr(J2GG(as->J));
243 as->mrm.base = RID_NONE;
244#endif
245 return;
246 }
247 as->mrm.ofs = field_ofs[ir->op2];
210 if (irref_isk(ir->op1)) { 248 if (irref_isk(ir->op1)) {
211 as->mrm.ofs += IR(ir->op1)->i; 249 IRIns *op1 = IR(ir->op1);
250#if LJ_GC64
251 if (ir->op1 == REF_NIL) {
252 as->mrm.ofs -= GG_OFS(dispatch);
253 as->mrm.base = RID_DISPATCH;
254 return;
255 } else if (op1->o == IR_KPTR || op1->o == IR_KKPTR) {
256 intptr_t ofs = dispofs(as, ir_kptr(op1));
257 if (checki32(as->mrm.ofs + ofs)) {
258 as->mrm.ofs += (int32_t)ofs;
259 as->mrm.base = RID_DISPATCH;
260 return;
261 }
262 }
263#else
264 as->mrm.ofs += op1->i;
212 as->mrm.base = RID_NONE; 265 as->mrm.base = RID_NONE;
213 } else { 266 return;
214 as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow); 267#endif
215 } 268 }
269 as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
216} 270}
217 271
218/* Fuse string reference into memory operand. */ 272/* Fuse string reference into memory operand. */
219static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) 273static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
220{ 274{
221 IRIns *irr; 275 IRIns *irr;
222 lua_assert(ir->o == IR_STRREF); 276 lj_assertA(ir->o == IR_STRREF, "bad IR op %d", ir->o);
223 as->mrm.base = as->mrm.idx = RID_NONE; 277 as->mrm.base = as->mrm.idx = RID_NONE;
224 as->mrm.scale = XM_SCALE1; 278 as->mrm.scale = XM_SCALE1;
225 as->mrm.ofs = sizeof(GCstr); 279 as->mrm.ofs = sizeof(GCstr);
226 if (irref_isk(ir->op1)) { 280 if (!LJ_GC64 && irref_isk(ir->op1)) {
227 as->mrm.ofs += IR(ir->op1)->i; 281 as->mrm.ofs += IR(ir->op1)->i;
228 } else { 282 } else {
229 Reg r = ra_alloc1(as, ir->op1, allow); 283 Reg r = ra_alloc1(as, ir->op1, allow);
@@ -255,10 +309,20 @@ static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
255 IRIns *ir = IR(ref); 309 IRIns *ir = IR(ref);
256 as->mrm.idx = RID_NONE; 310 as->mrm.idx = RID_NONE;
257 if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { 311 if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
312#if LJ_GC64
313 intptr_t ofs = dispofs(as, ir_kptr(ir));
314 if (checki32(ofs)) {
315 as->mrm.ofs = (int32_t)ofs;
316 as->mrm.base = RID_DISPATCH;
317 return;
318 }
319 } if (0) {
320#else
258 as->mrm.ofs = ir->i; 321 as->mrm.ofs = ir->i;
259 as->mrm.base = RID_NONE; 322 as->mrm.base = RID_NONE;
260 } else if (ir->o == IR_STRREF) { 323 } else if (ir->o == IR_STRREF) {
261 asm_fusestrref(as, ir, allow); 324 asm_fusestrref(as, ir, allow);
325#endif
262 } else { 326 } else {
263 as->mrm.ofs = 0; 327 as->mrm.ofs = 0;
264 if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) { 328 if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
@@ -301,7 +365,47 @@ static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
301 } 365 }
302} 366}
303 367
304/* Fuse load into memory operand. */ 368/* Fuse load of 64 bit IR constant into memory operand. */
369static Reg asm_fuseloadk64(ASMState *as, IRIns *ir)
370{
371 const uint64_t *k = &ir_k64(ir)->u64;
372 if (!LJ_GC64 || checki32((intptr_t)k)) {
373 as->mrm.ofs = ptr2addr(k);
374 as->mrm.base = RID_NONE;
375#if LJ_GC64
376 } else if (checki32(dispofs(as, k))) {
377 as->mrm.ofs = (int32_t)dispofs(as, k);
378 as->mrm.base = RID_DISPATCH;
379 } else if (checki32(mcpofs(as, k)) && checki32(mcpofs(as, k+1)) &&
380 checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) {
381 as->mrm.ofs = (int32_t)mcpofs(as, k);
382 as->mrm.base = RID_RIP;
383 } else { /* Intern 64 bit constant at bottom of mcode. */
384 if (ir->i) {
385 lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
386 "bad interned 64 bit constant");
387 } else {
388 while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
389 *(uint64_t*)as->mcbot = *k;
390 ir->i = (int32_t)(as->mctop - as->mcbot);
391 as->mcbot += 8;
392 as->mclim = as->mcbot + MCLIM_REDZONE;
393 lj_mcode_commitbot(as->J, as->mcbot);
394 }
395 as->mrm.ofs = (int32_t)mcpofs(as, as->mctop - ir->i);
396 as->mrm.base = RID_RIP;
397#endif
398 }
399 as->mrm.idx = RID_NONE;
400 return RID_MRM;
401}
402
403/* Fuse load into memory operand.
404**
405** Important caveat: this may emit RIP-relative loads! So don't place any
406** code emitters between this function and the use of its result.
407** The only permitted exception is asm_guardcc().
408*/
305static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow) 409static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
306{ 410{
307 IRIns *ir = IR(ref); 411 IRIns *ir = IR(ref);
@@ -319,27 +423,36 @@ static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
319 } 423 }
320 if (ir->o == IR_KNUM) { 424 if (ir->o == IR_KNUM) {
321 RegSet avail = as->freeset & ~as->modset & RSET_FPR; 425 RegSet avail = as->freeset & ~as->modset & RSET_FPR;
322 lua_assert(allow != RSET_EMPTY); 426 lj_assertA(allow != RSET_EMPTY, "no register allowed");
323 if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ 427 if (!(avail & (avail-1))) /* Fuse if less than two regs available. */
324 as->mrm.ofs = ptr2addr(ir_knum(ir)); 428 return asm_fuseloadk64(as, ir);
325 as->mrm.base = as->mrm.idx = RID_NONE;
326 return RID_MRM;
327 }
328 } else if (ref == REF_BASE || ir->o == IR_KINT64) { 429 } else if (ref == REF_BASE || ir->o == IR_KINT64) {
329 RegSet avail = as->freeset & ~as->modset & RSET_GPR; 430 RegSet avail = as->freeset & ~as->modset & RSET_GPR;
330 lua_assert(allow != RSET_EMPTY); 431 lj_assertA(allow != RSET_EMPTY, "no register allowed");
331 if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ 432 if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
332 as->mrm.ofs = ptr2addr(ref == REF_BASE ? (void *)&J2G(as->J)->jit_base : (void *)ir_kint64(ir)); 433 if (ref == REF_BASE) {
333 as->mrm.base = as->mrm.idx = RID_NONE; 434#if LJ_GC64
334 return RID_MRM; 435 as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->jit_base);
436 as->mrm.base = RID_DISPATCH;
437#else
438 as->mrm.ofs = ptr2addr(&J2G(as->J)->jit_base);
439 as->mrm.base = RID_NONE;
440#endif
441 as->mrm.idx = RID_NONE;
442 return RID_MRM;
443 } else {
444 return asm_fuseloadk64(as, ir);
445 }
335 } 446 }
336 } else if (mayfuse(as, ref)) { 447 } else if (mayfuse(as, ref)) {
337 RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR; 448 RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
338 if (ir->o == IR_SLOAD) { 449 if (ir->o == IR_SLOAD) {
339 if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) && 450 if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
340 noconflict(as, ref, IR_RETF, 0)) { 451 noconflict(as, ref, IR_RETF, 0) &&
452 !(LJ_GC64 && irt_isaddr(ir->t))) {
341 as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow); 453 as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
342 as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0); 454 as->mrm.ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
455 (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
343 as->mrm.idx = RID_NONE; 456 as->mrm.idx = RID_NONE;
344 return RID_MRM; 457 return RID_MRM;
345 } 458 }
@@ -351,7 +464,8 @@ static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
351 return RID_MRM; 464 return RID_MRM;
352 } 465 }
353 } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) { 466 } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
354 if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) { 467 if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0) &&
468 !(LJ_GC64 && irt_isaddr(ir->t))) {
355 asm_fuseahuref(as, ir->op1, xallow); 469 asm_fuseahuref(as, ir->op1, xallow);
356 return RID_MRM; 470 return RID_MRM;
357 } 471 }
@@ -364,11 +478,15 @@ static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
364 asm_fusexref(as, ir->op1, xallow); 478 asm_fusexref(as, ir->op1, xallow);
365 return RID_MRM; 479 return RID_MRM;
366 } 480 }
367 } else if (ir->o == IR_VLOAD) { 481 } else if (ir->o == IR_VLOAD && !(LJ_GC64 && irt_isaddr(ir->t))) {
368 asm_fuseahuref(as, ir->op1, xallow); 482 asm_fuseahuref(as, ir->op1, xallow);
369 return RID_MRM; 483 return RID_MRM;
370 } 484 }
371 } 485 }
486 if (ir->o == IR_FLOAD && ir->op1 == REF_NIL) {
487 asm_fusefref(as, ir, RSET_EMPTY);
488 return RID_MRM;
489 }
372 if (!(as->freeset & allow) && !emit_canremat(ref) && 490 if (!(as->freeset & allow) && !emit_canremat(ref) &&
373 (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref))) 491 (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
374 goto fusespill; 492 goto fusespill;
@@ -392,7 +510,7 @@ static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
392/* Count the required number of stack slots for a call. */ 510/* Count the required number of stack slots for a call. */
393static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args) 511static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
394{ 512{
395 uint32_t i, nargs = CCI_NARGS(ci); 513 uint32_t i, nargs = CCI_XNARGS(ci);
396 int nslots = 0; 514 int nslots = 0;
397#if LJ_64 515#if LJ_64
398 if (LJ_ABI_WIN) { 516 if (LJ_ABI_WIN) {
@@ -425,7 +543,7 @@ static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
425/* Generate a call to a C function. */ 543/* Generate a call to a C function. */
426static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) 544static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
427{ 545{
428 uint32_t n, nargs = CCI_NARGS(ci); 546 uint32_t n, nargs = CCI_XNARGS(ci);
429 int32_t ofs = STACKARG_OFS; 547 int32_t ofs = STACKARG_OFS;
430#if LJ_64 548#if LJ_64
431 uint32_t gprs = REGARG_GPRS; 549 uint32_t gprs = REGARG_GPRS;
@@ -485,13 +603,14 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
485 if (r) { /* Argument is in a register. */ 603 if (r) { /* Argument is in a register. */
486 if (r < RID_MAX_GPR && ref < ASMREF_TMP1) { 604 if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
487#if LJ_64 605#if LJ_64
488 if (ir->o == IR_KINT64) 606 if (LJ_GC64 ? !(ir->o == IR_KINT || ir->o == IR_KNULL) : ir->o == IR_KINT64)
489 emit_loadu64(as, r, ir_kint64(ir)->u64); 607 emit_loadu64(as, r, ir_k64(ir)->u64);
490 else 608 else
491#endif 609#endif
492 emit_loadi(as, r, ir->i); 610 emit_loadi(as, r, ir->i);
493 } else { 611 } else {
494 lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ 612 /* Must have been evicted. */
613 lj_assertA(rset_test(as->freeset, r), "reg %d not free", r);
495 if (ra_hasreg(ir->r)) { 614 if (ra_hasreg(ir->r)) {
496 ra_noweak(as, ir->r); 615 ra_noweak(as, ir->r);
497 emit_movrr(as, ir, r, ir->r); 616 emit_movrr(as, ir, r, ir->r);
@@ -500,7 +619,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
500 } 619 }
501 } 620 }
502 } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ 621 } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
503 lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */ 622 lj_assertA(!(irt_isfloat(ir->t) && irref_isk(ref)),
623 "unexpected float constant");
504 if (LJ_32 && (ofs & 4) && irref_isk(ref)) { 624 if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
505 /* Split stores for unaligned FP consts. */ 625 /* Split stores for unaligned FP consts. */
506 emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); 626 emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
@@ -560,7 +680,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
560 if (ra_hasreg(dest)) { 680 if (ra_hasreg(dest)) {
561 ra_free(as, dest); 681 ra_free(as, dest);
562 ra_modified(as, dest); 682 ra_modified(as, dest);
563 emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, 683 emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS,
564 dest, RID_ESP, ofs); 684 dest, RID_ESP, ofs);
565 } 685 }
566 if ((ci->flags & CCI_CASTU64)) { 686 if ((ci->flags & CCI_CASTU64)) {
@@ -576,7 +696,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
576 ra_destpair(as, ir); 696 ra_destpair(as, ir);
577#endif 697#endif
578 } else { 698 } else {
579 lua_assert(!irt_ispri(ir->t)); 699 lj_assertA(!irt_ispri(ir->t), "PRI dest");
580 ra_destreg(as, ir, RID_RET); 700 ra_destreg(as, ir, RID_RET);
581 } 701 }
582 } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) { 702 } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
@@ -584,15 +704,6 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
584 } 704 }
585} 705}
586 706
587static void asm_call(ASMState *as, IRIns *ir)
588{
589 IRRef args[CCI_NARGS_MAX];
590 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
591 asm_collectargs(as, ir, ci, args);
592 asm_setupresult(as, ir, ci);
593 asm_gencall(as, ci, args);
594}
595
596/* Return a constant function pointer or NULL for indirect calls. */ 707/* Return a constant function pointer or NULL for indirect calls. */
597static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func) 708static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
598{ 709{
@@ -651,15 +762,23 @@ static void asm_callx(ASMState *as, IRIns *ir)
651static void asm_retf(ASMState *as, IRIns *ir) 762static void asm_retf(ASMState *as, IRIns *ir)
652{ 763{
653 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); 764 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
765#if LJ_FR2
766 Reg rpc = ra_scratch(as, rset_exclude(RSET_GPR, base));
767#endif
654 void *pc = ir_kptr(IR(ir->op2)); 768 void *pc = ir_kptr(IR(ir->op2));
655 int32_t delta = 1+bc_a(*((const BCIns *)pc - 1)); 769 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
656 as->topslot -= (BCReg)delta; 770 as->topslot -= (BCReg)delta;
657 if ((int32_t)as->topslot < 0) as->topslot = 0; 771 if ((int32_t)as->topslot < 0) as->topslot = 0;
658 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ 772 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
659 emit_setgl(as, base, jit_base); 773 emit_setgl(as, base, jit_base);
660 emit_addptr(as, base, -8*delta); 774 emit_addptr(as, base, -8*delta);
661 asm_guardcc(as, CC_NE); 775 asm_guardcc(as, CC_NE);
776#if LJ_FR2
777 emit_rmro(as, XO_CMP, rpc|REX_GC64, base, -8);
778 emit_loadu64(as, rpc, u64ptr(pc));
779#else
662 emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc)); 780 emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
781#endif
663} 782}
664 783
665/* -- Type conversions ---------------------------------------------------- */ 784/* -- Type conversions ---------------------------------------------------- */
@@ -672,8 +791,7 @@ static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
672 asm_guardcc(as, CC_NE); 791 asm_guardcc(as, CC_NE);
673 emit_rr(as, XO_UCOMISD, left, tmp); 792 emit_rr(as, XO_UCOMISD, left, tmp);
674 emit_rr(as, XO_CVTSI2SD, tmp, dest); 793 emit_rr(as, XO_CVTSI2SD, tmp, dest);
675 if (!(as->flags & JIT_F_SPLIT_XMM)) 794 emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
676 emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
677 emit_rr(as, XO_CVTTSD2SI, dest, left); 795 emit_rr(as, XO_CVTTSD2SI, dest, left);
678 /* Can't fuse since left is needed twice. */ 796 /* Can't fuse since left is needed twice. */
679} 797}
@@ -684,8 +802,9 @@ static void asm_tobit(ASMState *as, IRIns *ir)
684 Reg tmp = ra_noreg(IR(ir->op1)->r) ? 802 Reg tmp = ra_noreg(IR(ir->op1)->r) ?
685 ra_alloc1(as, ir->op1, RSET_FPR) : 803 ra_alloc1(as, ir->op1, RSET_FPR) :
686 ra_scratch(as, RSET_FPR); 804 ra_scratch(as, RSET_FPR);
687 Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp)); 805 Reg right;
688 emit_rr(as, XO_MOVDto, tmp, dest); 806 emit_rr(as, XO_MOVDto, tmp, dest);
807 right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
689 emit_mrm(as, XO_ADDSD, tmp, right); 808 emit_mrm(as, XO_ADDSD, tmp, right);
690 ra_left(as, tmp, ir->op1); 809 ra_left(as, tmp, ir->op1);
691} 810}
@@ -696,8 +815,10 @@ static void asm_conv(ASMState *as, IRIns *ir)
696 int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); 815 int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
697 int stfp = (st == IRT_NUM || st == IRT_FLOAT); 816 int stfp = (st == IRT_NUM || st == IRT_FLOAT);
698 IRRef lref = ir->op1; 817 IRRef lref = ir->op1;
699 lua_assert(irt_type(ir->t) != st); 818 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
700 lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ 819 lj_assertA(!(LJ_32 && (irt_isint64(ir->t) || st64)),
820 "IR %04d has unsplit 64 bit type",
821 (int)(ir - as->ir) - REF_BIAS);
701 if (irt_isfp(ir->t)) { 822 if (irt_isfp(ir->t)) {
702 Reg dest = ra_dest(as, ir, RSET_FPR); 823 Reg dest = ra_dest(as, ir, RSET_FPR);
703 if (stfp) { /* FP to FP conversion. */ 824 if (stfp) { /* FP to FP conversion. */
@@ -706,13 +827,13 @@ static void asm_conv(ASMState *as, IRIns *ir)
706 if (left == dest) return; /* Avoid the XO_XORPS. */ 827 if (left == dest) return; /* Avoid the XO_XORPS. */
707 } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */ 828 } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
708 /* number = (2^52+2^51 .. u32) - (2^52+2^51) */ 829 /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
709 cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000)); 830 cTValue *k = &as->J->k64[LJ_K64_TOBIT];
710 Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest)); 831 Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
711 if (irt_isfloat(ir->t)) 832 if (irt_isfloat(ir->t))
712 emit_rr(as, XO_CVTSD2SS, dest, dest); 833 emit_rr(as, XO_CVTSD2SS, dest, dest);
713 emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */ 834 emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
714 emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */ 835 emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
715 emit_loadn(as, bias, k); 836 emit_rma(as, XO_MOVSD, bias, k);
716 emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR)); 837 emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
717 return; 838 return;
718 } else { /* Integer to FP conversion. */ 839 } else { /* Integer to FP conversion. */
@@ -721,7 +842,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
721 asm_fuseloadm(as, lref, RSET_GPR, st64); 842 asm_fuseloadm(as, lref, RSET_GPR, st64);
722 if (LJ_64 && st == IRT_U64) { 843 if (LJ_64 && st == IRT_U64) {
723 MCLabel l_end = emit_label(as); 844 MCLabel l_end = emit_label(as);
724 const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000)); 845 cTValue *k = &as->J->k64[LJ_K64_2P64];
725 emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */ 846 emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
726 emit_sjcc(as, CC_NS, l_end); 847 emit_sjcc(as, CC_NS, l_end);
727 emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */ 848 emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
@@ -729,18 +850,16 @@ static void asm_conv(ASMState *as, IRIns *ir)
729 emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS, 850 emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
730 dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left); 851 dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
731 } 852 }
732 if (!(as->flags & JIT_F_SPLIT_XMM)) 853 emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
733 emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
734 } else if (stfp) { /* FP to integer conversion. */ 854 } else if (stfp) { /* FP to integer conversion. */
735 if (irt_isguard(ir->t)) { 855 if (irt_isguard(ir->t)) {
736 /* Checked conversions are only supported from number to int. */ 856 /* Checked conversions are only supported from number to int. */
737 lua_assert(irt_isint(ir->t) && st == IRT_NUM); 857 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
858 "bad type for checked CONV");
738 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); 859 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
739 } else { 860 } else {
740 Reg dest = ra_dest(as, ir, RSET_GPR); 861 Reg dest = ra_dest(as, ir, RSET_GPR);
741 x86Op op = st == IRT_NUM ? 862 x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI;
742 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
743 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
744 if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) { 863 if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
745 /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */ 864 /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
746 /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */ 865 /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
@@ -751,30 +870,27 @@ static void asm_conv(ASMState *as, IRIns *ir)
751 emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000); 870 emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
752 emit_rr(as, op, dest|REX_64, tmp); 871 emit_rr(as, op, dest|REX_64, tmp);
753 if (st == IRT_NUM) 872 if (st == IRT_NUM)
754 emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J, 873 emit_rma(as, XO_ADDSD, tmp, &as->J->k64[LJ_K64_M2P64_31]);
755 LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
756 else 874 else
757 emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J, 875 emit_rma(as, XO_ADDSS, tmp, &as->J->k32[LJ_K32_M2P64_31]);
758 LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
759 emit_sjcc(as, CC_NS, l_end); 876 emit_sjcc(as, CC_NS, l_end);
760 emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */ 877 emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
761 emit_rr(as, op, dest|REX_64, tmp); 878 emit_rr(as, op, dest|REX_64, tmp);
762 ra_left(as, tmp, lref); 879 ra_left(as, tmp, lref);
763 } else { 880 } else {
764 Reg left = asm_fuseload(as, lref, RSET_FPR);
765 if (LJ_64 && irt_isu32(ir->t)) 881 if (LJ_64 && irt_isu32(ir->t))
766 emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */ 882 emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
767 emit_mrm(as, op, 883 emit_mrm(as, op,
768 dest|((LJ_64 && 884 dest|((LJ_64 &&
769 (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0), 885 (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
770 left); 886 asm_fuseload(as, lref, RSET_FPR));
771 } 887 }
772 } 888 }
773 } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ 889 } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
774 Reg left, dest = ra_dest(as, ir, RSET_GPR); 890 Reg left, dest = ra_dest(as, ir, RSET_GPR);
775 RegSet allow = RSET_GPR; 891 RegSet allow = RSET_GPR;
776 x86Op op; 892 x86Op op;
777 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); 893 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
778 if (st == IRT_I8) { 894 if (st == IRT_I8) {
779 op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; 895 op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
780 } else if (st == IRT_U8) { 896 } else if (st == IRT_U8) {
@@ -834,20 +950,18 @@ static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
834 if (ra_hasreg(dest)) { 950 if (ra_hasreg(dest)) {
835 ra_free(as, dest); 951 ra_free(as, dest);
836 ra_modified(as, dest); 952 ra_modified(as, dest);
837 emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, 953 emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs);
838 dest, RID_ESP, ofs);
839 } 954 }
840 emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd, 955 emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
841 irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs); 956 irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
842 if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) { 957 if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
843 /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */ 958 /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
844 MCLabel l_end = emit_label(as); 959 MCLabel l_end = emit_label(as);
845 emit_rma(as, XO_FADDq, XOg_FADDq, 960 emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_2P64]);
846 lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
847 emit_sjcc(as, CC_NS, l_end); 961 emit_sjcc(as, CC_NS, l_end);
848 emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ 962 emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
849 } else { 963 } else {
850 lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); 964 lj_assertA(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64, "bad type for CONV");
851 } 965 }
852 emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); 966 emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
853 /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ 967 /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
@@ -861,9 +975,8 @@ static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
861 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); 975 IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
862 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); 976 IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
863 Reg lo, hi; 977 Reg lo, hi;
864 lua_assert(st == IRT_NUM || st == IRT_FLOAT); 978 lj_assertA(st == IRT_NUM || st == IRT_FLOAT, "bad type for CONV");
865 lua_assert(dt == IRT_I64 || dt == IRT_U64); 979 lj_assertA(dt == IRT_I64 || dt == IRT_U64, "bad type for CONV");
866 lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
867 hi = ra_dest(as, ir, RSET_GPR); 980 hi = ra_dest(as, ir, RSET_GPR);
868 lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); 981 lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
869 if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); 982 if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
@@ -884,8 +997,7 @@ static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
884 emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0); 997 emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
885 else 998 else
886 emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0); 999 emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
887 emit_rma(as, XO_FADDq, XOg_FADDq, 1000 emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_M2P64]);
888 lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
889 emit_sjcc(as, CC_NS, l_pop); 1001 emit_sjcc(as, CC_NS, l_pop);
890 emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */ 1002 emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
891 } 1003 }
@@ -906,6 +1018,14 @@ static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
906 st == IRT_NUM ? XOg_FLDq: XOg_FLDd, 1018 st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
907 asm_fuseload(as, ir->op1, RSET_EMPTY)); 1019 asm_fuseload(as, ir->op1, RSET_EMPTY));
908} 1020}
1021
1022static void asm_conv64(ASMState *as, IRIns *ir)
1023{
1024 if (irt_isfp(ir->t))
1025 asm_conv_fp_int64(as, ir);
1026 else
1027 asm_conv_int64_fp(as, ir);
1028}
909#endif 1029#endif
910 1030
911static void asm_strto(ASMState *as, IRIns *ir) 1031static void asm_strto(ASMState *as, IRIns *ir)
@@ -927,54 +1047,60 @@ static void asm_strto(ASMState *as, IRIns *ir)
927 RID_ESP, sps_scale(ir->s)); 1047 RID_ESP, sps_scale(ir->s));
928} 1048}
929 1049
930static void asm_tostr(ASMState *as, IRIns *ir) 1050/* -- Memory references --------------------------------------------------- */
1051
1052/* Get pointer to TValue. */
1053static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
931{ 1054{
932 IRIns *irl = IR(ir->op1); 1055 IRIns *ir = IR(ref);
933 IRRef args[2]; 1056 if (irt_isnum(ir->t)) {
934 args[0] = ASMREF_L; 1057 /* For numbers use the constant itself or a spill slot as a TValue. */
935 as->gcsteps++; 1058 if (irref_isk(ref))
936 if (irt_isnum(irl->t)) { 1059 emit_loada(as, dest, ir_knum(ir));
937 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum]; 1060 else
938 args[1] = ASMREF_TMP1; /* const lua_Number * */ 1061 emit_rmro(as, XO_LEA, dest|REX_64, RID_ESP, ra_spill(as, ir));
939 asm_setupresult(as, ir, ci); /* GCstr * */
940 asm_gencall(as, ci, args);
941 emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
942 RID_ESP, ra_spill(as, irl));
943 } else { 1062 } else {
944 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint]; 1063 /* Otherwise use g->tmptv to hold the TValue. */
945 args[1] = ir->op1; /* int32_t k */ 1064#if LJ_GC64
946 asm_setupresult(as, ir, ci); /* GCstr * */ 1065 if (irref_isk(ref)) {
947 asm_gencall(as, ci, args); 1066 TValue k;
1067 lj_ir_kvalue(as->J->L, &k, ir);
1068 emit_movmroi(as, dest, 4, k.u32.hi);
1069 emit_movmroi(as, dest, 0, k.u32.lo);
1070 } else {
1071 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
1072 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
1073 if (irt_is64(ir->t)) {
1074 emit_u32(as, irt_toitype(ir->t) << 15);
1075 emit_rmro(as, XO_ARITHi, XOg_OR, dest, 4);
1076 } else {
1077 /* Currently, no caller passes integers that might end up here. */
1078 emit_movmroi(as, dest, 4, (irt_toitype(ir->t) << 15));
1079 }
1080 emit_movtomro(as, REX_64IR(ir, src), dest, 0);
1081 }
1082#else
1083 if (!irref_isk(ref)) {
1084 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
1085 emit_movtomro(as, REX_64IR(ir, src), dest, 0);
1086 } else if (!irt_ispri(ir->t)) {
1087 emit_movmroi(as, dest, 0, ir->i);
1088 }
1089 if (!(LJ_64 && irt_islightud(ir->t)))
1090 emit_movmroi(as, dest, 4, irt_toitype(ir->t));
1091#endif
1092 emit_loada(as, dest, &J2G(as->J)->tmptv);
948 } 1093 }
949} 1094}
950 1095
951/* -- Memory references --------------------------------------------------- */
952
953static void asm_aref(ASMState *as, IRIns *ir) 1096static void asm_aref(ASMState *as, IRIns *ir)
954{ 1097{
955 Reg dest = ra_dest(as, ir, RSET_GPR); 1098 Reg dest = ra_dest(as, ir, RSET_GPR);
956 asm_fusearef(as, ir, RSET_GPR); 1099 asm_fusearef(as, ir, RSET_GPR);
957 if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0)) 1100 if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
958 emit_mrm(as, XO_LEA, dest, RID_MRM); 1101 emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
959 else if (as->mrm.base != dest) 1102 else if (as->mrm.base != dest)
960 emit_rr(as, XO_MOV, dest, as->mrm.base); 1103 emit_rr(as, XO_MOV, dest|REX_GC64, as->mrm.base);
961}
962
963/* Merge NE(HREF, niltv) check. */
964static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
965{
966 /* Assumes nothing else generates NE of HREF. */
967 if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
968 ra_hasreg(ir->r)) {
969 MCode *p = as->mcp;
970 p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
971 /* Ensure no loop branch inversion happened. */
972 if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
973 as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */
974 return p + *(int32_t *)(p-4); /* Return exit address. */
975 }
976 }
977 return NULL;
978} 1104}
979 1105
980/* Inlined hash lookup. Specialized for key type and for const keys. 1106/* Inlined hash lookup. Specialized for key type and for const keys.
@@ -985,10 +1111,10 @@ static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
985** } while ((n = nextnode(n))); 1111** } while ((n = nextnode(n)));
986** return niltv(L); 1112** return niltv(L);
987*/ 1113*/
988static void asm_href(ASMState *as, IRIns *ir) 1114static void asm_href(ASMState *as, IRIns *ir, IROp merge)
989{ 1115{
990 MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */
991 RegSet allow = RSET_GPR; 1116 RegSet allow = RSET_GPR;
1117 int destused = ra_used(ir);
992 Reg dest = ra_dest(as, ir, allow); 1118 Reg dest = ra_dest(as, ir, allow);
993 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); 1119 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
994 Reg key = RID_NONE, tmp = RID_NONE; 1120 Reg key = RID_NONE, tmp = RID_NONE;
@@ -1001,28 +1127,26 @@ static void asm_href(ASMState *as, IRIns *ir)
1001 if (!isk) { 1127 if (!isk) {
1002 rset_clear(allow, tab); 1128 rset_clear(allow, tab);
1003 key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow); 1129 key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
1004 if (!irt_isstr(kt)) 1130 if (LJ_GC64 || !irt_isstr(kt))
1005 tmp = ra_scratch(as, rset_exclude(allow, key)); 1131 tmp = ra_scratch(as, rset_exclude(allow, key));
1006 } 1132 }
1007 1133
1008 /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */ 1134 /* Key not found in chain: jump to exit (if merged) or load niltv. */
1009 l_end = emit_label(as); 1135 l_end = emit_label(as);
1010 if (nilexit && ir[1].o == IR_NE) { 1136 if (merge == IR_NE)
1011 emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */ 1137 asm_guardcc(as, CC_E); /* XI_JMP is not found by lj_asm_patchexit. */
1012 nilexit = NULL; 1138 else if (destused)
1013 } else {
1014 emit_loada(as, dest, niltvg(J2G(as->J))); 1139 emit_loada(as, dest, niltvg(J2G(as->J)));
1015 }
1016 1140
1017 /* Follow hash chain until the end. */ 1141 /* Follow hash chain until the end. */
1018 l_loop = emit_sjcc_label(as, CC_NZ); 1142 l_loop = emit_sjcc_label(as, CC_NZ);
1019 emit_rr(as, XO_TEST, dest, dest); 1143 emit_rr(as, XO_TEST, dest|REX_GC64, dest);
1020 emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next)); 1144 emit_rmro(as, XO_MOV, dest|REX_GC64, dest, offsetof(Node, next));
1021 l_next = emit_label(as); 1145 l_next = emit_label(as);
1022 1146
1023 /* Type and value comparison. */ 1147 /* Type and value comparison. */
1024 if (nilexit) 1148 if (merge == IR_EQ)
1025 emit_jcc(as, CC_E, nilexit); 1149 asm_guardcc(as, CC_E);
1026 else 1150 else
1027 emit_sjcc(as, CC_E, l_end); 1151 emit_sjcc(as, CC_E, l_end);
1028 if (irt_isnum(kt)) { 1152 if (irt_isnum(kt)) {
@@ -1038,7 +1162,7 @@ static void asm_href(ASMState *as, IRIns *ir)
1038 emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n)); 1162 emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
1039 emit_sjcc(as, CC_AE, l_next); 1163 emit_sjcc(as, CC_AE, l_next);
1040 /* The type check avoids NaN penalties and complaints from Valgrind. */ 1164 /* The type check avoids NaN penalties and complaints from Valgrind. */
1041#if LJ_64 1165#if LJ_64 && !LJ_GC64
1042 emit_u32(as, LJ_TISNUM); 1166 emit_u32(as, LJ_TISNUM);
1043 emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); 1167 emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
1044#else 1168#else
@@ -1046,13 +1170,31 @@ static void asm_href(ASMState *as, IRIns *ir)
1046 emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); 1170 emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
1047#endif 1171#endif
1048 } 1172 }
1049#if LJ_64 1173#if LJ_64 && !LJ_GC64
1050 } else if (irt_islightud(kt)) { 1174 } else if (irt_islightud(kt)) {
1051 emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64)); 1175 emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
1052#endif 1176#endif
1177#if LJ_GC64
1178 } else if (irt_isaddr(kt)) {
1179 if (isk) {
1180 TValue k;
1181 k.u64 = ((uint64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
1182 emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
1183 k.u32.lo);
1184 emit_sjcc(as, CC_NE, l_next);
1185 emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
1186 k.u32.hi);
1187 } else {
1188 emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64));
1189 }
1190 } else {
1191 lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
1192 emit_u32(as, (irt_toitype(kt)<<15)|0x7fff);
1193 emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
1194#else
1053 } else { 1195 } else {
1054 if (!irt_ispri(kt)) { 1196 if (!irt_ispri(kt)) {
1055 lua_assert(irt_isaddr(kt)); 1197 lj_assertA(irt_isaddr(kt), "bad HREF key type");
1056 if (isk) 1198 if (isk)
1057 emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), 1199 emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
1058 ptr2addr(ir_kgc(irkey))); 1200 ptr2addr(ir_kgc(irkey)));
@@ -1060,31 +1202,33 @@ static void asm_href(ASMState *as, IRIns *ir)
1060 emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); 1202 emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
1061 emit_sjcc(as, CC_NE, l_next); 1203 emit_sjcc(as, CC_NE, l_next);
1062 } 1204 }
1063 lua_assert(!irt_isnil(kt)); 1205 lj_assertA(!irt_isnil(kt), "bad HREF key type");
1064 emit_i8(as, irt_toitype(kt)); 1206 emit_i8(as, irt_toitype(kt));
1065 emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); 1207 emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
1208#endif
1066 } 1209 }
1067 emit_sfixup(as, l_loop); 1210 emit_sfixup(as, l_loop);
1068 checkmclim(as); 1211 checkmclim(as);
1212#if LJ_GC64
1213 if (!isk && irt_isaddr(kt)) {
1214 emit_rr(as, XO_OR, tmp|REX_64, key);
1215 emit_loadu64(as, tmp, (uint64_t)irt_toitype(kt) << 47);
1216 }
1217#endif
1069 1218
1070 /* Load main position relative to tab->node into dest. */ 1219 /* Load main position relative to tab->node into dest. */
1071 khash = isk ? ir_khash(irkey) : 1; 1220 khash = isk ? ir_khash(as, irkey) : 1;
1072 if (khash == 0) { 1221 if (khash == 0) {
1073 emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node)); 1222 emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node));
1074 } else { 1223 } else {
1075 emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node)); 1224 emit_rmro(as, XO_ARITH(XOg_ADD), dest|REX_GC64, tab, offsetof(GCtab,node));
1076 if ((as->flags & JIT_F_PREFER_IMUL)) { 1225 emit_shifti(as, XOg_SHL, dest, 3);
1077 emit_i8(as, sizeof(Node)); 1226 emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
1078 emit_rr(as, XO_IMULi8, dest, dest);
1079 } else {
1080 emit_shifti(as, XOg_SHL, dest, 3);
1081 emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
1082 }
1083 if (isk) { 1227 if (isk) {
1084 emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash); 1228 emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
1085 emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); 1229 emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
1086 } else if (irt_isstr(kt)) { 1230 } else if (irt_isstr(kt)) {
1087 emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash)); 1231 emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, sid));
1088 emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask)); 1232 emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
1089 } else { /* Must match with hashrot() in lj_tab.c. */ 1233 } else { /* Must match with hashrot() in lj_tab.c. */
1090 emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask)); 1234 emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
@@ -1107,7 +1251,19 @@ static void asm_href(ASMState *as, IRIns *ir)
1107#endif 1251#endif
1108 } else { 1252 } else {
1109 emit_rr(as, XO_MOV, tmp, key); 1253 emit_rr(as, XO_MOV, tmp, key);
1254#if LJ_GC64
1255 checkmclim(as);
1256 emit_gri(as, XG_ARITHi(XOg_XOR), dest, irt_toitype(kt) << 15);
1257 if ((as->flags & JIT_F_BMI2)) {
1258 emit_i8(as, 32);
1259 emit_mrm(as, XV_RORX|VEX_64, dest, key);
1260 } else {
1261 emit_shifti(as, XOg_SHR|REX_64, dest, 32);
1262 emit_rr(as, XO_MOV, dest|REX_64, key|REX_64);
1263 }
1264#else
1110 emit_rmro(as, XO_LEA, dest, key, HASH_BIAS); 1265 emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
1266#endif
1111 } 1267 }
1112 } 1268 }
1113 } 1269 }
@@ -1123,15 +1279,15 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
1123#if !LJ_64 1279#if !LJ_64
1124 MCLabel l_exit; 1280 MCLabel l_exit;
1125#endif 1281#endif
1126 lua_assert(ofs % sizeof(Node) == 0); 1282 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
1127 if (ra_hasreg(dest)) { 1283 if (ra_hasreg(dest)) {
1128 if (ofs != 0) { 1284 if (ofs != 0) {
1129 if (dest == node && !(as->flags & JIT_F_LEA_AGU)) 1285 if (dest == node)
1130 emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs); 1286 emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, ofs);
1131 else 1287 else
1132 emit_rmro(as, XO_LEA, dest, node, ofs); 1288 emit_rmro(as, XO_LEA, dest|REX_GC64, node, ofs);
1133 } else if (dest != node) { 1289 } else if (dest != node) {
1134 emit_rr(as, XO_MOV, dest, node); 1290 emit_rr(as, XO_MOV, dest|REX_GC64, node);
1135 } 1291 }
1136 } 1292 }
1137 asm_guardcc(as, CC_NE); 1293 asm_guardcc(as, CC_NE);
@@ -1140,16 +1296,28 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
1140 Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); 1296 Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
1141 emit_rmro(as, XO_CMP, key|REX_64, node, 1297 emit_rmro(as, XO_CMP, key|REX_64, node,
1142 ofs + (int32_t)offsetof(Node, key.u64)); 1298 ofs + (int32_t)offsetof(Node, key.u64));
1143 lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); 1299 lj_assertA(irt_isnum(irkey->t) || irt_isgcv(irkey->t),
1300 "bad HREFK key type");
1144 /* Assumes -0.0 is already canonicalized to +0.0. */ 1301 /* Assumes -0.0 is already canonicalized to +0.0. */
1145 emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : 1302 emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
1303#if LJ_GC64
1304 ((uint64_t)irt_toitype(irkey->t) << 47) |
1305 (uint64_t)ir_kgc(irkey));
1306#else
1146 ((uint64_t)irt_toitype(irkey->t) << 32) | 1307 ((uint64_t)irt_toitype(irkey->t) << 32) |
1147 (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); 1308 (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
1309#endif
1148 } else { 1310 } else {
1149 lua_assert(!irt_isnil(irkey->t)); 1311 lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
1312#if LJ_GC64
1313 emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff);
1314 emit_rmro(as, XO_ARITHi, XOg_CMP, node,
1315 ofs + (int32_t)offsetof(Node, key.it));
1316#else
1150 emit_i8(as, irt_toitype(irkey->t)); 1317 emit_i8(as, irt_toitype(irkey->t));
1151 emit_rmro(as, XO_ARITHi8, XOg_CMP, node, 1318 emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
1152 ofs + (int32_t)offsetof(Node, key.it)); 1319 ofs + (int32_t)offsetof(Node, key.it));
1320#endif
1153 } 1321 }
1154#else 1322#else
1155 l_exit = emit_label(as); 1323 l_exit = emit_label(as);
@@ -1164,13 +1332,13 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
1164 (int32_t)ir_knum(irkey)->u32.hi); 1332 (int32_t)ir_knum(irkey)->u32.hi);
1165 } else { 1333 } else {
1166 if (!irt_ispri(irkey->t)) { 1334 if (!irt_ispri(irkey->t)) {
1167 lua_assert(irt_isgcv(irkey->t)); 1335 lj_assertA(irt_isgcv(irkey->t), "bad HREFK key type");
1168 emit_gmroi(as, XG_ARITHi(XOg_CMP), node, 1336 emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
1169 ofs + (int32_t)offsetof(Node, key.gcr), 1337 ofs + (int32_t)offsetof(Node, key.gcr),
1170 ptr2addr(ir_kgc(irkey))); 1338 ptr2addr(ir_kgc(irkey)));
1171 emit_sjcc(as, CC_NE, l_exit); 1339 emit_sjcc(as, CC_NE, l_exit);
1172 } 1340 }
1173 lua_assert(!irt_isnil(irkey->t)); 1341 lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
1174 emit_i8(as, irt_toitype(irkey->t)); 1342 emit_i8(as, irt_toitype(irkey->t));
1175 emit_rmro(as, XO_ARITHi8, XOg_CMP, node, 1343 emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
1176 ofs + (int32_t)offsetof(Node, key.it)); 1344 ofs + (int32_t)offsetof(Node, key.it));
@@ -1178,61 +1346,27 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
1178#endif 1346#endif
1179} 1347}
1180 1348
1181static void asm_newref(ASMState *as, IRIns *ir)
1182{
1183 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
1184 IRRef args[3];
1185 IRIns *irkey;
1186 Reg tmp;
1187 if (ir->r == RID_SINK)
1188 return;
1189 args[0] = ASMREF_L; /* lua_State *L */
1190 args[1] = ir->op1; /* GCtab *t */
1191 args[2] = ASMREF_TMP1; /* cTValue *key */
1192 asm_setupresult(as, ir, ci); /* TValue * */
1193 asm_gencall(as, ci, args);
1194 tmp = ra_releasetmp(as, ASMREF_TMP1);
1195 irkey = IR(ir->op2);
1196 if (irt_isnum(irkey->t)) {
1197 /* For numbers use the constant itself or a spill slot as a TValue. */
1198 if (irref_isk(ir->op2))
1199 emit_loada(as, tmp, ir_knum(irkey));
1200 else
1201 emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
1202 } else {
1203 /* Otherwise use g->tmptv to hold the TValue. */
1204 if (!irref_isk(ir->op2)) {
1205 Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
1206 emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
1207 } else if (!irt_ispri(irkey->t)) {
1208 emit_movmroi(as, tmp, 0, irkey->i);
1209 }
1210 if (!(LJ_64 && irt_islightud(irkey->t)))
1211 emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
1212 emit_loada(as, tmp, &J2G(as->J)->tmptv);
1213 }
1214}
1215
1216static void asm_uref(ASMState *as, IRIns *ir) 1349static void asm_uref(ASMState *as, IRIns *ir)
1217{ 1350{
1218 Reg dest = ra_dest(as, ir, RSET_GPR); 1351 Reg dest = ra_dest(as, ir, RSET_GPR);
1219 if (irref_isk(ir->op1)) { 1352 if (irref_isk(ir->op1)) {
1220 GCfunc *fn = ir_kfunc(IR(ir->op1)); 1353 GCfunc *fn = ir_kfunc(IR(ir->op1));
1221 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; 1354 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
1222 emit_rma(as, XO_MOV, dest, v); 1355 emit_rma(as, XO_MOV, dest|REX_GC64, v);
1223 } else { 1356 } else {
1224 Reg uv = ra_scratch(as, RSET_GPR); 1357 Reg uv = ra_scratch(as, RSET_GPR);
1225 Reg func = ra_alloc1(as, ir->op1, RSET_GPR); 1358 Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
1226 if (ir->o == IR_UREFC) { 1359 if (ir->o == IR_UREFC) {
1227 emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv)); 1360 emit_rmro(as, XO_LEA, dest|REX_GC64, uv, offsetof(GCupval, tv));
1228 asm_guardcc(as, CC_NE); 1361 asm_guardcc(as, CC_NE);
1229 emit_i8(as, 1); 1362 emit_i8(as, 1);
1230 emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed)); 1363 emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
1231 } else { 1364 } else {
1232 emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v)); 1365 emit_rmro(as, XO_MOV, dest|REX_GC64, uv, offsetof(GCupval, v));
1233 } 1366 }
1234 emit_rmro(as, XO_MOV, uv, func, 1367 emit_rmro(as, XO_MOV, uv|REX_GC64, func,
1235 (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8)); 1368 (int32_t)offsetof(GCfuncL, uvptr) +
1369 (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
1236 } 1370 }
1237} 1371}
1238 1372
@@ -1250,9 +1384,9 @@ static void asm_strref(ASMState *as, IRIns *ir)
1250 if (as->mrm.base == RID_NONE) 1384 if (as->mrm.base == RID_NONE)
1251 emit_loadi(as, dest, as->mrm.ofs); 1385 emit_loadi(as, dest, as->mrm.ofs);
1252 else if (as->mrm.base == dest && as->mrm.idx == RID_NONE) 1386 else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
1253 emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs); 1387 emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, as->mrm.ofs);
1254 else 1388 else
1255 emit_mrm(as, XO_LEA, dest, RID_MRM); 1389 emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
1256} 1390}
1257 1391
1258/* -- Loads and stores ---------------------------------------------------- */ 1392/* -- Loads and stores ---------------------------------------------------- */
@@ -1271,19 +1405,23 @@ static void asm_fxload(ASMState *as, IRIns *ir)
1271 case IRT_U8: xo = XO_MOVZXb; break; 1405 case IRT_U8: xo = XO_MOVZXb; break;
1272 case IRT_I16: xo = XO_MOVSXw; break; 1406 case IRT_I16: xo = XO_MOVSXw; break;
1273 case IRT_U16: xo = XO_MOVZXw; break; 1407 case IRT_U16: xo = XO_MOVZXw; break;
1274 case IRT_NUM: xo = XMM_MOVRM(as); break; 1408 case IRT_NUM: xo = XO_MOVSD; break;
1275 case IRT_FLOAT: xo = XO_MOVSS; break; 1409 case IRT_FLOAT: xo = XO_MOVSS; break;
1276 default: 1410 default:
1277 if (LJ_64 && irt_is64(ir->t)) 1411 if (LJ_64 && irt_is64(ir->t))
1278 dest |= REX_64; 1412 dest |= REX_64;
1279 else 1413 else
1280 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); 1414 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
1415 "unsplit 64 bit load");
1281 xo = XO_MOV; 1416 xo = XO_MOV;
1282 break; 1417 break;
1283 } 1418 }
1284 emit_mrm(as, xo, dest, RID_MRM); 1419 emit_mrm(as, xo, dest, RID_MRM);
1285} 1420}
1286 1421
1422#define asm_fload(as, ir) asm_fxload(as, ir)
1423#define asm_xload(as, ir) asm_fxload(as, ir)
1424
1287static void asm_fxstore(ASMState *as, IRIns *ir) 1425static void asm_fxstore(ASMState *as, IRIns *ir)
1288{ 1426{
1289 RegSet allow = RSET_GPR; 1427 RegSet allow = RSET_GPR;
@@ -1318,14 +1456,17 @@ static void asm_fxstore(ASMState *as, IRIns *ir)
1318 case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; 1456 case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
1319 case IRT_NUM: xo = XO_MOVSDto; break; 1457 case IRT_NUM: xo = XO_MOVSDto; break;
1320 case IRT_FLOAT: xo = XO_MOVSSto; break; 1458 case IRT_FLOAT: xo = XO_MOVSSto; break;
1321#if LJ_64 1459#if LJ_64 && !LJ_GC64
1322 case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */ 1460 case IRT_LIGHTUD:
1461 /* NYI: mask 64 bit lightuserdata. */
1462 lj_assertA(0, "store of lightuserdata");
1323#endif 1463#endif
1324 default: 1464 default:
1325 if (LJ_64 && irt_is64(ir->t)) 1465 if (LJ_64 && irt_is64(ir->t))
1326 src |= REX_64; 1466 src |= REX_64;
1327 else 1467 else
1328 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); 1468 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
1469 "unsplit 64 bit store");
1329 xo = XO_MOVto; 1470 xo = XO_MOVto;
1330 break; 1471 break;
1331 } 1472 }
@@ -1339,15 +1480,18 @@ static void asm_fxstore(ASMState *as, IRIns *ir)
1339 emit_i8(as, k); 1480 emit_i8(as, k);
1340 emit_mrm(as, XO_MOVmib, 0, RID_MRM); 1481 emit_mrm(as, XO_MOVmib, 0, RID_MRM);
1341 } else { 1482 } else {
1342 lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || 1483 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
1343 irt_isaddr(ir->t)); 1484 irt_isaddr(ir->t), "bad store type");
1344 emit_i32(as, k); 1485 emit_i32(as, k);
1345 emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); 1486 emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
1346 } 1487 }
1347 } 1488 }
1348} 1489}
1349 1490
1350#if LJ_64 1491#define asm_fstore(as, ir) asm_fxstore(as, ir)
1492#define asm_xstore(as, ir) asm_fxstore(as, ir)
1493
1494#if LJ_64 && !LJ_GC64
1351static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) 1495static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
1352{ 1496{
1353 if (ra_used(ir) || typecheck) { 1497 if (ra_used(ir) || typecheck) {
@@ -1369,9 +1513,13 @@ static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
1369 1513
1370static void asm_ahuvload(ASMState *as, IRIns *ir) 1514static void asm_ahuvload(ASMState *as, IRIns *ir)
1371{ 1515{
1372 lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || 1516#if LJ_GC64
1373 (LJ_DUALNUM && irt_isint(ir->t))); 1517 Reg tmp = RID_NONE;
1374#if LJ_64 1518#endif
1519 lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
1520 (LJ_DUALNUM && irt_isint(ir->t)),
1521 "bad load type %d", irt_type(ir->t));
1522#if LJ_64 && !LJ_GC64
1375 if (irt_islightud(ir->t)) { 1523 if (irt_islightud(ir->t)) {
1376 Reg dest = asm_load_lightud64(as, ir, 1); 1524 Reg dest = asm_load_lightud64(as, ir, 1);
1377 if (ra_hasreg(dest)) { 1525 if (ra_hasreg(dest)) {
@@ -1385,20 +1533,65 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
1385 RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; 1533 RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
1386 Reg dest = ra_dest(as, ir, allow); 1534 Reg dest = ra_dest(as, ir, allow);
1387 asm_fuseahuref(as, ir->op1, RSET_GPR); 1535 asm_fuseahuref(as, ir->op1, RSET_GPR);
1388 emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM); 1536#if LJ_GC64
1537 if (irt_isaddr(ir->t)) {
1538 emit_shifti(as, XOg_SHR|REX_64, dest, 17);
1539 asm_guardcc(as, CC_NE);
1540 emit_i8(as, irt_toitype(ir->t));
1541 emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
1542 emit_i8(as, XI_O16);
1543 if ((as->flags & JIT_F_BMI2)) {
1544 emit_i8(as, 47);
1545 emit_mrm(as, XV_RORX|VEX_64, dest, RID_MRM);
1546 } else {
1547 emit_shifti(as, XOg_ROR|REX_64, dest, 47);
1548 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
1549 }
1550 return;
1551 } else
1552#endif
1553 emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM);
1389 } else { 1554 } else {
1390 asm_fuseahuref(as, ir->op1, RSET_GPR); 1555 RegSet gpr = RSET_GPR;
1556#if LJ_GC64
1557 if (irt_isaddr(ir->t)) {
1558 tmp = ra_scratch(as, RSET_GPR);
1559 gpr = rset_exclude(gpr, tmp);
1560 }
1561#endif
1562 asm_fuseahuref(as, ir->op1, gpr);
1391 } 1563 }
1392 /* Always do the type check, even if the load result is unused. */ 1564 /* Always do the type check, even if the load result is unused. */
1393 as->mrm.ofs += 4; 1565 as->mrm.ofs += 4;
1394 asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); 1566 asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
1395 if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { 1567 if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
1396 lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); 1568 lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
1569 "bad load type %d", irt_type(ir->t));
1570#if LJ_GC64
1571 emit_u32(as, LJ_TISNUM << 15);
1572#else
1397 emit_u32(as, LJ_TISNUM); 1573 emit_u32(as, LJ_TISNUM);
1574#endif
1398 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); 1575 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1576#if LJ_GC64
1577 } else if (irt_isaddr(ir->t)) {
1578 as->mrm.ofs -= 4;
1579 emit_i8(as, irt_toitype(ir->t));
1580 emit_mrm(as, XO_ARITHi8, XOg_CMP, tmp);
1581 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1582 emit_mrm(as, XO_MOV, tmp|REX_64, RID_MRM);
1583 } else if (irt_isnil(ir->t)) {
1584 as->mrm.ofs -= 4;
1585 emit_i8(as, -1);
1586 emit_mrm(as, XO_ARITHi8, XOg_CMP|REX_64, RID_MRM);
1587 } else {
1588 emit_u32(as, (irt_toitype(ir->t) << 15) | 0x7fff);
1589 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1590#else
1399 } else { 1591 } else {
1400 emit_i8(as, irt_toitype(ir->t)); 1592 emit_i8(as, irt_toitype(ir->t));
1401 emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); 1593 emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
1594#endif
1402 } 1595 }
1403} 1596}
1404 1597
@@ -1410,12 +1603,28 @@ static void asm_ahustore(ASMState *as, IRIns *ir)
1410 Reg src = ra_alloc1(as, ir->op2, RSET_FPR); 1603 Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
1411 asm_fuseahuref(as, ir->op1, RSET_GPR); 1604 asm_fuseahuref(as, ir->op1, RSET_GPR);
1412 emit_mrm(as, XO_MOVSDto, src, RID_MRM); 1605 emit_mrm(as, XO_MOVSDto, src, RID_MRM);
1413#if LJ_64 1606#if LJ_64 && !LJ_GC64
1414 } else if (irt_islightud(ir->t)) { 1607 } else if (irt_islightud(ir->t)) {
1415 Reg src = ra_alloc1(as, ir->op2, RSET_GPR); 1608 Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
1416 asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); 1609 asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
1417 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); 1610 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
1418#endif 1611#endif
1612#if LJ_GC64
1613 } else if (irref_isk(ir->op2)) {
1614 TValue k;
1615 lj_ir_kvalue(as->J->L, &k, IR(ir->op2));
1616 asm_fuseahuref(as, ir->op1, RSET_GPR);
1617 if (tvisnil(&k)) {
1618 emit_i32(as, -1);
1619 emit_mrm(as, XO_MOVmi, REX_64, RID_MRM);
1620 } else {
1621 emit_u32(as, k.u32.lo);
1622 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1623 as->mrm.ofs += 4;
1624 emit_u32(as, k.u32.hi);
1625 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1626 }
1627#endif
1419 } else { 1628 } else {
1420 IRIns *irr = IR(ir->op2); 1629 IRIns *irr = IR(ir->op2);
1421 RegSet allow = RSET_GPR; 1630 RegSet allow = RSET_GPR;
@@ -1426,34 +1635,55 @@ static void asm_ahustore(ASMState *as, IRIns *ir)
1426 } 1635 }
1427 asm_fuseahuref(as, ir->op1, allow); 1636 asm_fuseahuref(as, ir->op1, allow);
1428 if (ra_hasreg(src)) { 1637 if (ra_hasreg(src)) {
1638#if LJ_GC64
1639 if (!(LJ_DUALNUM && irt_isinteger(ir->t))) {
1640 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
1641 as->mrm.ofs += 4;
1642 emit_u32(as, irt_toitype(ir->t) << 15);
1643 emit_mrm(as, XO_ARITHi, XOg_OR, RID_MRM);
1644 as->mrm.ofs -= 4;
1645 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
1646 return;
1647 }
1648#endif
1429 emit_mrm(as, XO_MOVto, src, RID_MRM); 1649 emit_mrm(as, XO_MOVto, src, RID_MRM);
1430 } else if (!irt_ispri(irr->t)) { 1650 } else if (!irt_ispri(irr->t)) {
1431 lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); 1651 lj_assertA(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)),
1652 "bad store type");
1432 emit_i32(as, irr->i); 1653 emit_i32(as, irr->i);
1433 emit_mrm(as, XO_MOVmi, 0, RID_MRM); 1654 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1434 } 1655 }
1435 as->mrm.ofs += 4; 1656 as->mrm.ofs += 4;
1657#if LJ_GC64
1658 lj_assertA(LJ_DUALNUM && irt_isinteger(ir->t), "bad store type");
1659 emit_i32(as, LJ_TNUMX << 15);
1660#else
1436 emit_i32(as, (int32_t)irt_toitype(ir->t)); 1661 emit_i32(as, (int32_t)irt_toitype(ir->t));
1662#endif
1437 emit_mrm(as, XO_MOVmi, 0, RID_MRM); 1663 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1438 } 1664 }
1439} 1665}
1440 1666
1441static void asm_sload(ASMState *as, IRIns *ir) 1667static void asm_sload(ASMState *as, IRIns *ir)
1442{ 1668{
1443 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); 1669 int32_t ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
1670 (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
1444 IRType1 t = ir->t; 1671 IRType1 t = ir->t;
1445 Reg base; 1672 Reg base;
1446 lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ 1673 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1447 lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); 1674 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1448 lua_assert(LJ_DUALNUM || 1675 lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1449 !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); 1676 "inconsistent SLOAD variant");
1677 lj_assertA(LJ_DUALNUM ||
1678 !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)),
1679 "bad SLOAD type");
1450 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { 1680 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
1451 Reg left = ra_scratch(as, RSET_FPR); 1681 Reg left = ra_scratch(as, RSET_FPR);
1452 asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ 1682 asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
1453 base = ra_alloc1(as, REF_BASE, RSET_GPR); 1683 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1454 emit_rmro(as, XMM_MOVRM(as), left, base, ofs); 1684 emit_rmro(as, XO_MOVSD, left, base, ofs);
1455 t.irt = IRT_NUM; /* Continue with a regular number type check. */ 1685 t.irt = IRT_NUM; /* Continue with a regular number type check. */
1456#if LJ_64 1686#if LJ_64 && !LJ_GC64
1457 } else if (irt_islightud(t)) { 1687 } else if (irt_islightud(t)) {
1458 Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); 1688 Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
1459 if (ra_hasreg(dest)) { 1689 if (ra_hasreg(dest)) {
@@ -1466,14 +1696,43 @@ static void asm_sload(ASMState *as, IRIns *ir)
1466 RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; 1696 RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
1467 Reg dest = ra_dest(as, ir, allow); 1697 Reg dest = ra_dest(as, ir, allow);
1468 base = ra_alloc1(as, REF_BASE, RSET_GPR); 1698 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1469 lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); 1699 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
1700 "bad SLOAD type %d", irt_type(t));
1470 if ((ir->op2 & IRSLOAD_CONVERT)) { 1701 if ((ir->op2 & IRSLOAD_CONVERT)) {
1471 t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ 1702 t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
1472 emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs); 1703 emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs);
1473 } else if (irt_isnum(t)) {
1474 emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
1475 } else { 1704 } else {
1476 emit_rmro(as, XO_MOV, dest, base, ofs); 1705#if LJ_GC64
1706 if (irt_isaddr(t)) {
1707 /* LJ_GC64 type check + tag removal without BMI2 and with BMI2:
1708 **
1709 ** mov r64, [addr] rorx r64, [addr], 47
1710 ** ror r64, 47
1711 ** cmp r16, itype cmp r16, itype
1712 ** jne ->exit jne ->exit
1713 ** shr r64, 16 shr r64, 16
1714 */
1715 emit_shifti(as, XOg_SHR|REX_64, dest, 17);
1716 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1717 asm_guardcc(as, CC_NE);
1718 emit_i8(as, irt_toitype(t));
1719 emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
1720 emit_i8(as, XI_O16);
1721 }
1722 if ((as->flags & JIT_F_BMI2)) {
1723 emit_i8(as, 47);
1724 emit_rmro(as, XV_RORX|VEX_64, dest, base, ofs);
1725 } else {
1726 if ((ir->op2 & IRSLOAD_TYPECHECK))
1727 emit_shifti(as, XOg_ROR|REX_64, dest, 47);
1728 else
1729 emit_shifti(as, XOg_SHL|REX_64, dest, 17);
1730 emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
1731 }
1732 return;
1733 } else
1734#endif
1735 emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs);
1477 } 1736 }
1478 } else { 1737 } else {
1479 if (!(ir->op2 & IRSLOAD_TYPECHECK)) 1738 if (!(ir->op2 & IRSLOAD_TYPECHECK))
@@ -1484,12 +1743,44 @@ static void asm_sload(ASMState *as, IRIns *ir)
1484 /* Need type check, even if the load result is unused. */ 1743 /* Need type check, even if the load result is unused. */
1485 asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); 1744 asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
1486 if (LJ_64 && irt_type(t) >= IRT_NUM) { 1745 if (LJ_64 && irt_type(t) >= IRT_NUM) {
1487 lua_assert(irt_isinteger(t) || irt_isnum(t)); 1746 lj_assertA(irt_isinteger(t) || irt_isnum(t),
1747 "bad SLOAD type %d", irt_type(t));
1748#if LJ_GC64
1749 emit_u32(as, LJ_TISNUM << 15);
1750#else
1488 emit_u32(as, LJ_TISNUM); 1751 emit_u32(as, LJ_TISNUM);
1752#endif
1753 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
1754#if LJ_GC64
1755 } else if (irt_isnil(t)) {
1756 /* LJ_GC64 type check for nil:
1757 **
1758 ** cmp qword [addr], -1
1759 ** jne ->exit
1760 */
1761 emit_i8(as, -1);
1762 emit_rmro(as, XO_ARITHi8, XOg_CMP|REX_64, base, ofs);
1763 } else if (irt_ispri(t)) {
1764 emit_u32(as, (irt_toitype(t) << 15) | 0x7fff);
1489 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); 1765 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
1490 } else { 1766 } else {
1767 /* LJ_GC64 type check only:
1768 **
1769 ** mov r64, [addr]
1770 ** sar r64, 47
1771 ** cmp r32, itype
1772 ** jne ->exit
1773 */
1774 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, base));
1775 emit_i8(as, irt_toitype(t));
1776 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
1777 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1778 emit_rmro(as, XO_MOV, tmp|REX_64, base, ofs);
1779#else
1780 } else {
1491 emit_i8(as, irt_toitype(t)); 1781 emit_i8(as, irt_toitype(t));
1492 emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); 1782 emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
1783#endif
1493 } 1784 }
1494 } 1785 }
1495} 1786}
@@ -1500,15 +1791,14 @@ static void asm_sload(ASMState *as, IRIns *ir)
1500static void asm_cnew(ASMState *as, IRIns *ir) 1791static void asm_cnew(ASMState *as, IRIns *ir)
1501{ 1792{
1502 CTState *cts = ctype_ctsG(J2G(as->J)); 1793 CTState *cts = ctype_ctsG(J2G(as->J));
1503 CTypeID ctypeid = (CTypeID)IR(ir->op1)->i; 1794 CTypeID id = (CTypeID)IR(ir->op1)->i;
1504 CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? 1795 CTSize sz;
1505 lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i; 1796 CTInfo info = lj_ctype_info(cts, id, &sz);
1506 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; 1797 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1507 IRRef args[2]; 1798 IRRef args[4];
1508 lua_assert(sz != CTSIZE_INVALID); 1799 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1800 "bad CNEW/CNEWI operands");
1509 1801
1510 args[0] = ASMREF_L; /* lua_State *L */
1511 args[1] = ASMREF_TMP1; /* MSize size */
1512 as->gcsteps++; 1802 as->gcsteps++;
1513 asm_setupresult(as, ir, ci); /* GCcdata * */ 1803 asm_setupresult(as, ir, ci); /* GCcdata * */
1514 1804
@@ -1519,8 +1809,9 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1519 Reg r64 = sz == 8 ? REX_64 : 0; 1809 Reg r64 = sz == 8 ? REX_64 : 0;
1520 if (irref_isk(ir->op2)) { 1810 if (irref_isk(ir->op2)) {
1521 IRIns *irk = IR(ir->op2); 1811 IRIns *irk = IR(ir->op2);
1522 uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : 1812 uint64_t k = (irk->o == IR_KINT64 ||
1523 (uint64_t)(uint32_t)irk->i; 1813 (LJ_GC64 && (irk->o == IR_KPTR || irk->o == IR_KKPTR))) ?
1814 ir_k64(irk)->u64 : (uint64_t)(uint32_t)irk->i;
1524 if (sz == 4 || checki32((int64_t)k)) { 1815 if (sz == 4 || checki32((int64_t)k)) {
1525 emit_i32(as, (int32_t)k); 1816 emit_i32(as, (int32_t)k);
1526 emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); 1817 emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
@@ -1536,7 +1827,7 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1536 int32_t ofs = sizeof(GCcdata); 1827 int32_t ofs = sizeof(GCcdata);
1537 if (sz == 8) { 1828 if (sz == 8) {
1538 ofs += 4; ir++; 1829 ofs += 4; ir++;
1539 lua_assert(ir->o == IR_HIOP); 1830 lj_assertA(ir->o == IR_HIOP, "missing CNEWI HIOP");
1540 } 1831 }
1541 do { 1832 do {
1542 if (irref_isk(ir->op2)) { 1833 if (irref_isk(ir->op2)) {
@@ -1550,21 +1841,30 @@ static void asm_cnew(ASMState *as, IRIns *ir)
1550 ofs -= 4; ir--; 1841 ofs -= 4; ir--;
1551 } while (1); 1842 } while (1);
1552#endif 1843#endif
1553 lua_assert(sz == 4 || sz == 8); 1844 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1845 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1846 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1847 args[0] = ASMREF_L; /* lua_State *L */
1848 args[1] = ir->op1; /* CTypeID id */
1849 args[2] = ir->op2; /* CTSize sz */
1850 args[3] = ASMREF_TMP1; /* CTSize align */
1851 asm_gencall(as, ci, args);
1852 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1853 return;
1554 } 1854 }
1555 1855
1556 /* Combine initialization of marked, gct and ctypeid. */ 1856 /* Combine initialization of marked, gct and ctypeid. */
1557 emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); 1857 emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
1558 emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, 1858 emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
1559 (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16))); 1859 (int32_t)((~LJ_TCDATA<<8)+(id<<16)));
1560 emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); 1860 emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
1561 emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); 1861 emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
1562 1862
1863 args[0] = ASMREF_L; /* lua_State *L */
1864 args[1] = ASMREF_TMP1; /* MSize size */
1563 asm_gencall(as, ci, args); 1865 asm_gencall(as, ci, args);
1564 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); 1866 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
1565} 1867}
1566#else
1567#define asm_cnew(as, ir) ((void)0)
1568#endif 1868#endif
1569 1869
1570/* -- Write barriers ------------------------------------------------------ */ 1870/* -- Write barriers ------------------------------------------------------ */
@@ -1574,7 +1874,7 @@ static void asm_tbar(ASMState *as, IRIns *ir)
1574 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); 1874 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
1575 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); 1875 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
1576 MCLabel l_end = emit_label(as); 1876 MCLabel l_end = emit_label(as);
1577 emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); 1877 emit_movtomro(as, tmp|REX_GC64, tab, offsetof(GCtab, gclist));
1578 emit_setgl(as, tab, gc.grayagain); 1878 emit_setgl(as, tab, gc.grayagain);
1579 emit_getgl(as, tmp, gc.grayagain); 1879 emit_getgl(as, tmp, gc.grayagain);
1580 emit_i8(as, ~LJ_GC_BLACK); 1880 emit_i8(as, ~LJ_GC_BLACK);
@@ -1591,7 +1891,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
1591 MCLabel l_end; 1891 MCLabel l_end;
1592 Reg obj; 1892 Reg obj;
1593 /* No need for other object barriers (yet). */ 1893 /* No need for other object barriers (yet). */
1594 lua_assert(IR(ir->op1)->o == IR_UREFC); 1894 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1595 ra_evictset(as, RSET_SCRATCH); 1895 ra_evictset(as, RSET_SCRATCH);
1596 l_end = emit_label(as); 1896 l_end = emit_label(as);
1597 args[0] = ASMREF_TMP1; /* global_State *g */ 1897 args[0] = ASMREF_TMP1; /* global_State *g */
@@ -1637,36 +1937,9 @@ static void asm_x87load(ASMState *as, IRRef ref)
1637 } 1937 }
1638} 1938}
1639 1939
1640/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
1641static int fpmjoin_pow(ASMState *as, IRIns *ir)
1642{
1643 IRIns *irp = IR(ir->op1);
1644 if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
1645 IRIns *irpp = IR(irp->op1);
1646 if (irpp == ir-2 && irpp->o == IR_FPMATH &&
1647 irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
1648 /* The modified regs must match with the *.dasc implementation. */
1649 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
1650 IRIns *irx;
1651 if (ra_hasreg(ir->r))
1652 rset_clear(drop, ir->r); /* Dest reg handled below. */
1653 ra_evictset(as, drop);
1654 ra_destreg(as, ir, RID_XMM0);
1655 emit_call(as, lj_vm_pow_sse);
1656 irx = IR(irpp->op1);
1657 if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
1658 irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */
1659 ra_left(as, RID_XMM0, irpp->op1);
1660 ra_left(as, RID_XMM1, irp->op2);
1661 return 1;
1662 }
1663 }
1664 return 0;
1665}
1666
1667static void asm_fpmath(ASMState *as, IRIns *ir) 1940static void asm_fpmath(ASMState *as, IRIns *ir)
1668{ 1941{
1669 IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER; 1942 IRFPMathOp fpm = (IRFPMathOp)ir->op2;
1670 if (fpm == IRFPM_SQRT) { 1943 if (fpm == IRFPM_SQRT) {
1671 Reg dest = ra_dest(as, ir, RSET_FPR); 1944 Reg dest = ra_dest(as, ir, RSET_FPR);
1672 Reg left = asm_fuseload(as, ir->op1, RSET_FPR); 1945 Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
@@ -1697,53 +1970,27 @@ static void asm_fpmath(ASMState *as, IRIns *ir)
1697 fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); 1970 fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
1698 ra_left(as, RID_XMM0, ir->op1); 1971 ra_left(as, RID_XMM0, ir->op1);
1699 } 1972 }
1700 } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) { 1973 } else {
1701 /* Rejoined to pow(). */ 1974 asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
1702 } else { /* Handle x87 ops. */
1703 int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
1704 Reg dest = ir->r;
1705 if (ra_hasreg(dest)) {
1706 ra_free(as, dest);
1707 ra_modified(as, dest);
1708 emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
1709 }
1710 emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
1711 switch (fpm) { /* st0 = lj_vm_*(st0) */
1712 case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
1713 case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
1714 case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
1715 case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
1716 case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
1717 case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
1718 /* Note: the use of fyl2xp1 would be pointless here. When computing
1719 ** log(1.0+eps) the precision is already lost after 1.0 is added.
1720 ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
1721 */
1722 emit_x87op(as, XI_FYL2X); break;
1723 case IRFPM_OTHER:
1724 switch (ir->o) {
1725 case IR_ATAN2:
1726 emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
1727 case IR_LDEXP:
1728 emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
1729 default: lua_assert(0); break;
1730 }
1731 break;
1732 default: lua_assert(0); break;
1733 }
1734 asm_x87load(as, ir->op1);
1735 switch (fpm) {
1736 case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
1737 case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
1738 case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
1739 case IRFPM_OTHER:
1740 if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
1741 break;
1742 default: break;
1743 }
1744 } 1975 }
1745} 1976}
1746 1977
1978static void asm_ldexp(ASMState *as, IRIns *ir)
1979{
1980 int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
1981 Reg dest = ir->r;
1982 if (ra_hasreg(dest)) {
1983 ra_free(as, dest);
1984 ra_modified(as, dest);
1985 emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs);
1986 }
1987 emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
1988 emit_x87op(as, XI_FPOP1);
1989 emit_x87op(as, XI_FSCALE);
1990 asm_x87load(as, ir->op1);
1991 asm_x87load(as, ir->op2);
1992}
1993
1747static void asm_fppowi(ASMState *as, IRIns *ir) 1994static void asm_fppowi(ASMState *as, IRIns *ir)
1748{ 1995{
1749 /* The modified regs must match with the *.dasc implementation. */ 1996 /* The modified regs must match with the *.dasc implementation. */
@@ -1757,33 +2004,11 @@ static void asm_fppowi(ASMState *as, IRIns *ir)
1757 ra_left(as, RID_EAX, ir->op2); 2004 ra_left(as, RID_EAX, ir->op2);
1758} 2005}
1759 2006
1760#if LJ_64 && LJ_HASFFI
1761static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
1762{
1763 const CCallInfo *ci = &lj_ir_callinfo[id];
1764 IRRef args[2];
1765 args[0] = ir->op1;
1766 args[1] = ir->op2;
1767 asm_setupresult(as, ir, ci);
1768 asm_gencall(as, ci, args);
1769}
1770#endif
1771
1772static void asm_intmod(ASMState *as, IRIns *ir)
1773{
1774 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
1775 IRRef args[2];
1776 args[0] = ir->op1;
1777 args[1] = ir->op2;
1778 asm_setupresult(as, ir, ci);
1779 asm_gencall(as, ci, args);
1780}
1781
1782static int asm_swapops(ASMState *as, IRIns *ir) 2007static int asm_swapops(ASMState *as, IRIns *ir)
1783{ 2008{
1784 IRIns *irl = IR(ir->op1); 2009 IRIns *irl = IR(ir->op1);
1785 IRIns *irr = IR(ir->op2); 2010 IRIns *irr = IR(ir->op2);
1786 lua_assert(ra_noreg(irr->r)); 2011 lj_assertA(ra_noreg(irr->r), "bad usage");
1787 if (!irm_iscomm(lj_ir_mode[ir->o])) 2012 if (!irm_iscomm(lj_ir_mode[ir->o]))
1788 return 0; /* Can't swap non-commutative operations. */ 2013 return 0; /* Can't swap non-commutative operations. */
1789 if (irref_isk(ir->op2)) 2014 if (irref_isk(ir->op2))
@@ -1955,11 +2180,28 @@ static void asm_add(ASMState *as, IRIns *ir)
1955{ 2180{
1956 if (irt_isnum(ir->t)) 2181 if (irt_isnum(ir->t))
1957 asm_fparith(as, ir, XO_ADDSD); 2182 asm_fparith(as, ir, XO_ADDSD);
1958 else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || 2183 else if (as->flagmcp == as->mcp || irt_is64(ir->t) || !asm_lea(as, ir))
1959 irt_is64(ir->t) || !asm_lea(as, ir))
1960 asm_intarith(as, ir, XOg_ADD); 2184 asm_intarith(as, ir, XOg_ADD);
1961} 2185}
1962 2186
2187static void asm_sub(ASMState *as, IRIns *ir)
2188{
2189 if (irt_isnum(ir->t))
2190 asm_fparith(as, ir, XO_SUBSD);
2191 else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
2192 asm_intarith(as, ir, XOg_SUB);
2193}
2194
2195static void asm_mul(ASMState *as, IRIns *ir)
2196{
2197 if (irt_isnum(ir->t))
2198 asm_fparith(as, ir, XO_MULSD);
2199 else
2200 asm_intarith(as, ir, XOg_X_IMUL);
2201}
2202
2203#define asm_fpdiv(as, ir) asm_fparith(as, ir, XO_DIVSD)
2204
1963static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) 2205static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
1964{ 2206{
1965 Reg dest = ra_dest(as, ir, RSET_GPR); 2207 Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -1967,7 +2209,17 @@ static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
1967 ra_left(as, dest, ir->op1); 2209 ra_left(as, dest, ir->op1);
1968} 2210}
1969 2211
1970static void asm_min_max(ASMState *as, IRIns *ir, int cc) 2212static void asm_neg(ASMState *as, IRIns *ir)
2213{
2214 if (irt_isnum(ir->t))
2215 asm_fparith(as, ir, XO_XORPS);
2216 else
2217 asm_neg_not(as, ir, XOg_NEG);
2218}
2219
2220#define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS)
2221
2222static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
1971{ 2223{
1972 Reg right, dest = ra_dest(as, ir, RSET_GPR); 2224 Reg right, dest = ra_dest(as, ir, RSET_GPR);
1973 IRRef lref = ir->op1, rref = ir->op2; 2225 IRRef lref = ir->op1, rref = ir->op2;
@@ -1978,7 +2230,30 @@ static void asm_min_max(ASMState *as, IRIns *ir, int cc)
1978 ra_left(as, dest, lref); 2230 ra_left(as, dest, lref);
1979} 2231}
1980 2232
1981static void asm_bitswap(ASMState *as, IRIns *ir) 2233static void asm_min(ASMState *as, IRIns *ir)
2234{
2235 if (irt_isnum(ir->t))
2236 asm_fparith(as, ir, XO_MINSD);
2237 else
2238 asm_intmin_max(as, ir, CC_G);
2239}
2240
2241static void asm_max(ASMState *as, IRIns *ir)
2242{
2243 if (irt_isnum(ir->t))
2244 asm_fparith(as, ir, XO_MAXSD);
2245 else
2246 asm_intmin_max(as, ir, CC_L);
2247}
2248
2249/* Note: don't use LEA for overflow-checking arithmetic! */
2250#define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD)
2251#define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB)
2252#define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL)
2253
2254#define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT)
2255
2256static void asm_bswap(ASMState *as, IRIns *ir)
1982{ 2257{
1983 Reg dest = ra_dest(as, ir, RSET_GPR); 2258 Reg dest = ra_dest(as, ir, RSET_GPR);
1984 as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), 2259 as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
@@ -1986,7 +2261,11 @@ static void asm_bitswap(ASMState *as, IRIns *ir)
1986 ra_left(as, dest, ir->op1); 2261 ra_left(as, dest, ir->op1);
1987} 2262}
1988 2263
1989static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) 2264#define asm_band(as, ir) asm_intarith(as, ir, XOg_AND)
2265#define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR)
2266#define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR)
2267
2268static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs, x86Op xv)
1990{ 2269{
1991 IRRef rref = ir->op2; 2270 IRRef rref = ir->op2;
1992 IRIns *irr = IR(rref); 2271 IRIns *irr = IR(rref);
@@ -1995,11 +2274,27 @@ static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
1995 int shift; 2274 int shift;
1996 dest = ra_dest(as, ir, RSET_GPR); 2275 dest = ra_dest(as, ir, RSET_GPR);
1997 shift = irr->i & (irt_is64(ir->t) ? 63 : 31); 2276 shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
2277 if (!xv && shift && (as->flags & JIT_F_BMI2)) {
2278 Reg left = asm_fuseloadm(as, ir->op1, RSET_GPR, irt_is64(ir->t));
2279 if (left != dest) { /* BMI2 rotate right by constant. */
2280 emit_i8(as, xs == XOg_ROL ? -shift : shift);
2281 emit_mrm(as, VEX_64IR(ir, XV_RORX), dest, left);
2282 return;
2283 }
2284 }
1998 switch (shift) { 2285 switch (shift) {
1999 case 0: break; 2286 case 0: break;
2000 case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; 2287 case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
2001 default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; 2288 default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
2002 } 2289 }
2290 } else if ((as->flags & JIT_F_BMI2) && xv) { /* BMI2 variable shifts. */
2291 Reg left, right;
2292 dest = ra_dest(as, ir, RSET_GPR);
2293 right = ra_alloc1(as, rref, RSET_GPR);
2294 left = asm_fuseloadm(as, ir->op1, rset_exclude(RSET_GPR, right),
2295 irt_is64(ir->t));
2296 emit_mrm(as, VEX_64IR(ir, xv) ^ (right << 19), dest, left);
2297 return;
2003 } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ 2298 } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
2004 Reg right; 2299 Reg right;
2005 dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); 2300 dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
@@ -2025,6 +2320,12 @@ static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
2025 */ 2320 */
2026} 2321}
2027 2322
2323#define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL, XV_SHLX)
2324#define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR, XV_SHRX)
2325#define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR, XV_SARX)
2326#define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL, 0)
2327#define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR, 0)
2328
2028/* -- Comparisons --------------------------------------------------------- */ 2329/* -- Comparisons --------------------------------------------------------- */
2029 2330
2030/* Virtual flags for unordered FP comparisons. */ 2331/* Virtual flags for unordered FP comparisons. */
@@ -2051,8 +2352,9 @@ static const uint16_t asm_compmap[IR_ABC+1] = {
2051}; 2352};
2052 2353
2053/* FP and integer comparisons. */ 2354/* FP and integer comparisons. */
2054static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc) 2355static void asm_comp(ASMState *as, IRIns *ir)
2055{ 2356{
2357 uint32_t cc = asm_compmap[ir->o];
2056 if (irt_isnum(ir->t)) { 2358 if (irt_isnum(ir->t)) {
2057 IRRef lref = ir->op1; 2359 IRRef lref = ir->op1;
2058 IRRef rref = ir->op2; 2360 IRRef rref = ir->op2;
@@ -2073,7 +2375,6 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
2073 cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ 2375 cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
2074 } 2376 }
2075 left = ra_alloc1(as, lref, RSET_FPR); 2377 left = ra_alloc1(as, lref, RSET_FPR);
2076 right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
2077 l_around = emit_label(as); 2378 l_around = emit_label(as);
2078 asm_guardcc(as, cc >> 4); 2379 asm_guardcc(as, cc >> 4);
2079 if (cc & VCC_P) { /* Extra CC_P branch required? */ 2380 if (cc & VCC_P) { /* Extra CC_P branch required? */
@@ -2090,14 +2391,16 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
2090 emit_jcc(as, CC_P, as->mcp); 2391 emit_jcc(as, CC_P, as->mcp);
2091 } 2392 }
2092 } 2393 }
2394 right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
2093 emit_mrm(as, XO_UCOMISD, left, right); 2395 emit_mrm(as, XO_UCOMISD, left, right);
2094 } else { 2396 } else {
2095 IRRef lref = ir->op1, rref = ir->op2; 2397 IRRef lref = ir->op1, rref = ir->op2;
2096 IROp leftop = (IROp)(IR(lref)->o); 2398 IROp leftop = (IROp)(IR(lref)->o);
2097 Reg r64 = REX_64IR(ir, 0); 2399 Reg r64 = REX_64IR(ir, 0);
2098 int32_t imm = 0; 2400 int32_t imm = 0;
2099 lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || 2401 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
2100 irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t)); 2402 irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
2403 "bad comparison data type %d", irt_type(ir->t));
2101 /* Swap constants (only for ABC) and fusable loads to the right. */ 2404 /* Swap constants (only for ABC) and fusable loads to the right. */
2102 if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { 2405 if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
2103 if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ 2406 if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
@@ -2179,7 +2482,7 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
2179 /* Use test r,r instead of cmp r,0. */ 2482 /* Use test r,r instead of cmp r,0. */
2180 x86Op xo = XO_TEST; 2483 x86Op xo = XO_TEST;
2181 if (irt_isu8(ir->t)) { 2484 if (irt_isu8(ir->t)) {
2182 lua_assert(ir->o == IR_EQ || ir->o == IR_NE); 2485 lj_assertA(ir->o == IR_EQ || ir->o == IR_NE, "bad usage");
2183 xo = XO_TESTb; 2486 xo = XO_TESTb;
2184 if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { 2487 if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
2185 if (LJ_64) { 2488 if (LJ_64) {
@@ -2207,6 +2510,8 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
2207 } 2510 }
2208} 2511}
2209 2512
2513#define asm_equal(as, ir) asm_comp(as, ir)
2514
2210#if LJ_32 && LJ_HASFFI 2515#if LJ_32 && LJ_HASFFI
2211/* 64 bit integer comparisons in 32 bit mode. */ 2516/* 64 bit integer comparisons in 32 bit mode. */
2212static void asm_comp_int64(ASMState *as, IRIns *ir) 2517static void asm_comp_int64(ASMState *as, IRIns *ir)
@@ -2289,13 +2594,9 @@ static void asm_hiop(ASMState *as, IRIns *ir)
2289 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ 2594 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
2290 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; 2595 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
2291 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ 2596 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
2292 if (usehi || uselo) {
2293 if (irt_isfp(ir->t))
2294 asm_conv_fp_int64(as, ir);
2295 else
2296 asm_conv_int64_fp(as, ir);
2297 }
2298 as->curins--; /* Always skip the CONV. */ 2597 as->curins--; /* Always skip the CONV. */
2598 if (usehi || uselo)
2599 asm_conv64(as, ir);
2299 return; 2600 return;
2300 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ 2601 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
2301 asm_comp_int64(as, ir); 2602 asm_comp_int64(as, ir);
@@ -2337,13 +2638,24 @@ static void asm_hiop(ASMState *as, IRIns *ir)
2337 case IR_CNEWI: 2638 case IR_CNEWI:
2338 /* Nothing to do here. Handled by CNEWI itself. */ 2639 /* Nothing to do here. Handled by CNEWI itself. */
2339 break; 2640 break;
2340 default: lua_assert(0); break; 2641 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
2341 } 2642 }
2342#else 2643#else
2343 UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ 2644 /* Unused on x64 or without FFI. */
2645 UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
2344#endif 2646#endif
2345} 2647}
2346 2648
2649/* -- Profiling ----------------------------------------------------------- */
2650
2651static void asm_prof(ASMState *as, IRIns *ir)
2652{
2653 UNUSED(ir);
2654 asm_guardcc(as, CC_NE);
2655 emit_i8(as, HOOK_PROFILE);
2656 emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask);
2657}
2658
2347/* -- Stack handling ------------------------------------------------------ */ 2659/* -- Stack handling ------------------------------------------------------ */
2348 2660
2349/* Check Lua stack size for overflow. Use exit handler as fallback. */ 2661/* Check Lua stack size for overflow. Use exit handler as fallback. */
@@ -2358,14 +2670,19 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
2358 emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); 2670 emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
2359 else 2671 else
2360 ra_modified(as, r); 2672 ra_modified(as, r);
2361 emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); 2673 emit_gri(as, XG_ARITHi(XOg_CMP), r|REX_GC64, (int32_t)(8*topslot));
2362 if (ra_hasreg(pbase) && pbase != r) 2674 if (ra_hasreg(pbase) && pbase != r)
2363 emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); 2675 emit_rr(as, XO_ARITH(XOg_SUB), r|REX_GC64, pbase);
2364 else 2676 else
2677#if LJ_GC64
2678 emit_rmro(as, XO_ARITH(XOg_SUB), r|REX_64, RID_DISPATCH,
2679 (int32_t)dispofs(as, &J2G(as->J)->jit_base));
2680#else
2365 emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, 2681 emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
2366 ptr2addr(&J2G(as->J)->jit_base)); 2682 ptr2addr(&J2G(as->J)->jit_base));
2367 emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); 2683#endif
2368 emit_getgl(as, r, jit_L); 2684 emit_rmro(as, XO_MOV, r|REX_GC64, r, offsetof(lua_State, maxstack));
2685 emit_getgl(as, r, cur_L);
2369 if (allow == RSET_EMPTY) /* Spill temp. register. */ 2686 if (allow == RSET_EMPTY) /* Spill temp. register. */
2370 emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); 2687 emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
2371} 2688}
@@ -2374,13 +2691,15 @@ static void asm_stack_check(ASMState *as, BCReg topslot,
2374static void asm_stack_restore(ASMState *as, SnapShot *snap) 2691static void asm_stack_restore(ASMState *as, SnapShot *snap)
2375{ 2692{
2376 SnapEntry *map = &as->T->snapmap[snap->mapofs]; 2693 SnapEntry *map = &as->T->snapmap[snap->mapofs];
2377 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; 2694#if !LJ_FR2 || defined(LUA_USE_ASSERT)
2695 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
2696#endif
2378 MSize n, nent = snap->nent; 2697 MSize n, nent = snap->nent;
2379 /* Store the value of all modified slots to the Lua stack. */ 2698 /* Store the value of all modified slots to the Lua stack. */
2380 for (n = 0; n < nent; n++) { 2699 for (n = 0; n < nent; n++) {
2381 SnapEntry sn = map[n]; 2700 SnapEntry sn = map[n];
2382 BCReg s = snap_slot(sn); 2701 BCReg s = snap_slot(sn);
2383 int32_t ofs = 8*((int32_t)s-1); 2702 int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
2384 IRRef ref = snap_ref(sn); 2703 IRRef ref = snap_ref(sn);
2385 IRIns *ir = IR(ref); 2704 IRIns *ir = IR(ref);
2386 if ((sn & SNAP_NORESTORE)) 2705 if ((sn & SNAP_NORESTORE))
@@ -2389,25 +2708,54 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
2389 Reg src = ra_alloc1(as, ref, RSET_FPR); 2708 Reg src = ra_alloc1(as, ref, RSET_FPR);
2390 emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); 2709 emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
2391 } else { 2710 } else {
2392 lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || 2711 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
2393 (LJ_DUALNUM && irt_isinteger(ir->t))); 2712 (LJ_DUALNUM && irt_isinteger(ir->t)),
2713 "restore of IR type %d", irt_type(ir->t));
2394 if (!irref_isk(ref)) { 2714 if (!irref_isk(ref)) {
2395 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); 2715 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
2716#if LJ_GC64
2717 if (irt_is64(ir->t)) {
2718 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
2719 emit_u32(as, irt_toitype(ir->t) << 15);
2720 emit_rmro(as, XO_ARITHi, XOg_OR, RID_BASE, ofs+4);
2721 } else if (LJ_DUALNUM && irt_isinteger(ir->t)) {
2722 emit_movmroi(as, RID_BASE, ofs+4, LJ_TISNUM << 15);
2723 } else {
2724 emit_movmroi(as, RID_BASE, ofs+4, (irt_toitype(ir->t)<<15)|0x7fff);
2725 }
2726#endif
2396 emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); 2727 emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
2728#if LJ_GC64
2729 } else {
2730 TValue k;
2731 lj_ir_kvalue(as->J->L, &k, ir);
2732 if (tvisnil(&k)) {
2733 emit_i32(as, -1);
2734 emit_rmro(as, XO_MOVmi, REX_64, RID_BASE, ofs);
2735 } else {
2736 emit_movmroi(as, RID_BASE, ofs+4, k.u32.hi);
2737 emit_movmroi(as, RID_BASE, ofs, k.u32.lo);
2738 }
2739#else
2397 } else if (!irt_ispri(ir->t)) { 2740 } else if (!irt_ispri(ir->t)) {
2398 emit_movmroi(as, RID_BASE, ofs, ir->i); 2741 emit_movmroi(as, RID_BASE, ofs, ir->i);
2742#endif
2399 } 2743 }
2400 if ((sn & (SNAP_CONT|SNAP_FRAME))) { 2744 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
2745#if !LJ_FR2
2401 if (s != 0) /* Do not overwrite link to previous frame. */ 2746 if (s != 0) /* Do not overwrite link to previous frame. */
2402 emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); 2747 emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
2748#endif
2749#if !LJ_GC64
2403 } else { 2750 } else {
2404 if (!(LJ_64 && irt_islightud(ir->t))) 2751 if (!(LJ_64 && irt_islightud(ir->t)))
2405 emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); 2752 emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
2753#endif
2406 } 2754 }
2407 } 2755 }
2408 checkmclim(as); 2756 checkmclim(as);
2409 } 2757 }
2410 lua_assert(map + nent == flinks); 2758 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
2411} 2759}
2412 2760
2413/* -- GC handling --------------------------------------------------------- */ 2761/* -- GC handling --------------------------------------------------------- */
@@ -2428,11 +2776,15 @@ static void asm_gc_check(ASMState *as)
2428 args[1] = ASMREF_TMP2; /* MSize steps */ 2776 args[1] = ASMREF_TMP2; /* MSize steps */
2429 asm_gencall(as, ci, args); 2777 asm_gencall(as, ci, args);
2430 tmp = ra_releasetmp(as, ASMREF_TMP1); 2778 tmp = ra_releasetmp(as, ASMREF_TMP1);
2779#if LJ_GC64
2780 emit_rmro(as, XO_LEA, tmp|REX_64, RID_DISPATCH, GG_DISP2G);
2781#else
2431 emit_loada(as, tmp, J2G(as->J)); 2782 emit_loada(as, tmp, J2G(as->J));
2783#endif
2432 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps); 2784 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
2433 /* Jump around GC step if GC total < GC threshold. */ 2785 /* Jump around GC step if GC total < GC threshold. */
2434 emit_sjcc(as, CC_B, l_end); 2786 emit_sjcc(as, CC_B, l_end);
2435 emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); 2787 emit_opgl(as, XO_ARITH(XOg_CMP), tmp|REX_GC64, gc.threshold);
2436 emit_getgl(as, tmp, gc.total); 2788 emit_getgl(as, tmp, gc.total);
2437 as->gcsteps = 0; 2789 as->gcsteps = 0;
2438 checkmclim(as); 2790 checkmclim(as);
@@ -2447,16 +2799,16 @@ static void asm_loop_fixup(ASMState *as)
2447 MCode *target = as->mcp; 2799 MCode *target = as->mcp;
2448 if (as->realign) { /* Realigned loops use short jumps. */ 2800 if (as->realign) { /* Realigned loops use short jumps. */
2449 as->realign = NULL; /* Stop another retry. */ 2801 as->realign = NULL; /* Stop another retry. */
2450 lua_assert(((intptr_t)target & 15) == 0); 2802 lj_assertA(((intptr_t)target & 15) == 0, "loop realign failed");
2451 if (as->loopinv) { /* Inverted loop branch? */ 2803 if (as->loopinv) { /* Inverted loop branch? */
2452 p -= 5; 2804 p -= 5;
2453 p[0] = XI_JMP; 2805 p[0] = XI_JMP;
2454 lua_assert(target - p >= -128); 2806 lj_assertA(target - p >= -128, "loop realign failed");
2455 p[-1] = (MCode)(target - p); /* Patch sjcc. */ 2807 p[-1] = (MCode)(target - p); /* Patch sjcc. */
2456 if (as->loopinv == 2) 2808 if (as->loopinv == 2)
2457 p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ 2809 p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
2458 } else { 2810 } else {
2459 lua_assert(target - p >= -128); 2811 lj_assertA(target - p >= -128, "loop realign failed");
2460 p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ 2812 p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
2461 p[-2] = XI_JMPs; 2813 p[-2] = XI_JMPs;
2462 } 2814 }
@@ -2497,7 +2849,7 @@ static void asm_head_root_base(ASMState *as)
2497 if (rset_test(as->modset, r) || irt_ismarked(ir->t)) 2849 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2498 ir->r = RID_INIT; /* No inheritance for modified BASE register. */ 2850 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2499 if (r != RID_BASE) 2851 if (r != RID_BASE)
2500 emit_rr(as, XO_MOV, r, RID_BASE); 2852 emit_rr(as, XO_MOV, r|REX_GC64, RID_BASE);
2501 } 2853 }
2502} 2854}
2503 2855
@@ -2513,8 +2865,9 @@ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
2513 if (irp->r == r) { 2865 if (irp->r == r) {
2514 rset_clear(allow, r); /* Mark same BASE register as coalesced. */ 2866 rset_clear(allow, r); /* Mark same BASE register as coalesced. */
2515 } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { 2867 } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
2868 /* Move from coalesced parent reg. */
2516 rset_clear(allow, irp->r); 2869 rset_clear(allow, irp->r);
2517 emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */ 2870 emit_rr(as, XO_MOV, r|REX_GC64, irp->r);
2518 } else { 2871 } else {
2519 emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ 2872 emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
2520 } 2873 }
@@ -2532,7 +2885,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2532 MCode *target, *q; 2885 MCode *target, *q;
2533 int32_t spadj = as->T->spadjust; 2886 int32_t spadj = as->T->spadjust;
2534 if (spadj == 0) { 2887 if (spadj == 0) {
2535 p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); 2888 p -= LJ_64 ? 7 : 6;
2536 } else { 2889 } else {
2537 MCode *p1; 2890 MCode *p1;
2538 /* Patch stack adjustment. */ 2891 /* Patch stack adjustment. */
@@ -2544,24 +2897,15 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2544 p1 = p-9; 2897 p1 = p-9;
2545 *(int32_t *)p1 = spadj; 2898 *(int32_t *)p1 = spadj;
2546 } 2899 }
2547 if ((as->flags & JIT_F_LEA_AGU)) {
2548#if LJ_64 2900#if LJ_64
2549 p1[-4] = 0x48; 2901 p1[-3] = 0x48;
2550#endif 2902#endif
2551 p1[-3] = (MCode)XI_LEA; 2903 p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
2552 p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); 2904 p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
2553 p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
2554 } else {
2555#if LJ_64
2556 p1[-3] = 0x48;
2557#endif
2558 p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
2559 p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
2560 }
2561 } 2905 }
2562 /* Patch exit branch. */ 2906 /* Patch exit branch. */
2563 target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; 2907 target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
2564 *(int32_t *)(p-4) = jmprel(p, target); 2908 *(int32_t *)(p-4) = jmprel(as->J, p, target);
2565 p[-5] = XI_JMP; 2909 p[-5] = XI_JMP;
2566 /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ 2910 /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
2567 for (q = as->mctop-1; q >= p; q--) 2911 for (q = as->mctop-1; q >= p; q--)
@@ -2588,168 +2932,11 @@ static void asm_tail_prep(ASMState *as)
2588 as->invmcp = as->mcp = p; 2932 as->invmcp = as->mcp = p;
2589 } else { 2933 } else {
2590 /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ 2934 /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
2591 as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); 2935 as->mcp = p - (LJ_64 ? 7 : 6);
2592 as->invmcp = NULL; 2936 as->invmcp = NULL;
2593 } 2937 }
2594} 2938}
2595 2939
2596/* -- Instruction dispatch ------------------------------------------------ */
2597
2598/* Assemble a single instruction. */
2599static void asm_ir(ASMState *as, IRIns *ir)
2600{
2601 switch ((IROp)ir->o) {
2602 /* Miscellaneous ops. */
2603 case IR_LOOP: asm_loop(as); break;
2604 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
2605 case IR_USE:
2606 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
2607 case IR_PHI: asm_phi(as, ir); break;
2608 case IR_HIOP: asm_hiop(as, ir); break;
2609 case IR_GCSTEP: asm_gcstep(as, ir); break;
2610
2611 /* Guarded assertions. */
2612 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
2613 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
2614 case IR_EQ: case IR_NE: case IR_ABC:
2615 asm_comp(as, ir, asm_compmap[ir->o]);
2616 break;
2617
2618 case IR_RETF: asm_retf(as, ir); break;
2619
2620 /* Bit ops. */
2621 case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
2622 case IR_BSWAP: asm_bitswap(as, ir); break;
2623
2624 case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
2625 case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
2626 case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
2627
2628 case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
2629 case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
2630 case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
2631 case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
2632 case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
2633
2634 /* Arithmetic ops. */
2635 case IR_ADD: asm_add(as, ir); break;
2636 case IR_SUB:
2637 if (irt_isnum(ir->t))
2638 asm_fparith(as, ir, XO_SUBSD);
2639 else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
2640 asm_intarith(as, ir, XOg_SUB);
2641 break;
2642 case IR_MUL:
2643 if (irt_isnum(ir->t))
2644 asm_fparith(as, ir, XO_MULSD);
2645 else
2646 asm_intarith(as, ir, XOg_X_IMUL);
2647 break;
2648 case IR_DIV:
2649#if LJ_64 && LJ_HASFFI
2650 if (!irt_isnum(ir->t))
2651 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
2652 IRCALL_lj_carith_divu64);
2653 else
2654#endif
2655 asm_fparith(as, ir, XO_DIVSD);
2656 break;
2657 case IR_MOD:
2658#if LJ_64 && LJ_HASFFI
2659 if (!irt_isint(ir->t))
2660 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
2661 IRCALL_lj_carith_modu64);
2662 else
2663#endif
2664 asm_intmod(as, ir);
2665 break;
2666
2667 case IR_NEG:
2668 if (irt_isnum(ir->t))
2669 asm_fparith(as, ir, XO_XORPS);
2670 else
2671 asm_neg_not(as, ir, XOg_NEG);
2672 break;
2673 case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
2674
2675 case IR_MIN:
2676 if (irt_isnum(ir->t))
2677 asm_fparith(as, ir, XO_MINSD);
2678 else
2679 asm_min_max(as, ir, CC_G);
2680 break;
2681 case IR_MAX:
2682 if (irt_isnum(ir->t))
2683 asm_fparith(as, ir, XO_MAXSD);
2684 else
2685 asm_min_max(as, ir, CC_L);
2686 break;
2687
2688 case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
2689 asm_fpmath(as, ir);
2690 break;
2691 case IR_POW:
2692#if LJ_64 && LJ_HASFFI
2693 if (!irt_isnum(ir->t))
2694 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
2695 IRCALL_lj_carith_powu64);
2696 else
2697#endif
2698 asm_fppowi(as, ir);
2699 break;
2700
2701 /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
2702 case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
2703 case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
2704 case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
2705
2706 /* Memory references. */
2707 case IR_AREF: asm_aref(as, ir); break;
2708 case IR_HREF: asm_href(as, ir); break;
2709 case IR_HREFK: asm_hrefk(as, ir); break;
2710 case IR_NEWREF: asm_newref(as, ir); break;
2711 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
2712 case IR_FREF: asm_fref(as, ir); break;
2713 case IR_STRREF: asm_strref(as, ir); break;
2714
2715 /* Loads and stores. */
2716 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2717 asm_ahuvload(as, ir);
2718 break;
2719 case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
2720 case IR_SLOAD: asm_sload(as, ir); break;
2721
2722 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
2723 case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
2724
2725 /* Allocations. */
2726 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
2727 case IR_TNEW: asm_tnew(as, ir); break;
2728 case IR_TDUP: asm_tdup(as, ir); break;
2729 case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
2730
2731 /* Write barriers. */
2732 case IR_TBAR: asm_tbar(as, ir); break;
2733 case IR_OBAR: asm_obar(as, ir); break;
2734
2735 /* Type conversions. */
2736 case IR_TOBIT: asm_tobit(as, ir); break;
2737 case IR_CONV: asm_conv(as, ir); break;
2738 case IR_TOSTR: asm_tostr(as, ir); break;
2739 case IR_STRTO: asm_strto(as, ir); break;
2740
2741 /* Calls. */
2742 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
2743 case IR_CALLXS: asm_callx(as, ir); break;
2744 case IR_CARG: break;
2745
2746 default:
2747 setintV(&as->J->errinfo, ir->o);
2748 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
2749 break;
2750 }
2751}
2752
2753/* -- Trace setup --------------------------------------------------------- */ 2940/* -- Trace setup --------------------------------------------------------- */
2754 2941
2755/* Ensure there are enough stack slots for call arguments. */ 2942/* Ensure there are enough stack slots for call arguments. */
@@ -2772,6 +2959,7 @@ static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2772static void asm_setup_target(ASMState *as) 2959static void asm_setup_target(ASMState *as)
2773{ 2960{
2774 asm_exitstub_setup(as, as->T->nsnap); 2961 asm_exitstub_setup(as, as->T->nsnap);
2962 as->mrm.base = 0;
2775} 2963}
2776 2964
2777/* -- Trace patching ------------------------------------------------------ */ 2965/* -- Trace patching ------------------------------------------------------ */
@@ -2884,17 +3072,23 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
2884 MSize len = T->szmcode; 3072 MSize len = T->szmcode;
2885 MCode *px = exitstub_addr(J, exitno) - 6; 3073 MCode *px = exitstub_addr(J, exitno) - 6;
2886 MCode *pe = p+len-6; 3074 MCode *pe = p+len-6;
2887 uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); 3075#if LJ_GC64
3076 uint32_t statei = (uint32_t)(GG_OFS(g.vmstate) - GG_OFS(dispatch));
3077#else
3078 uint32_t statei = u32ptr(&J2G(J)->vmstate);
3079#endif
2888 if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) 3080 if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
2889 *(int32_t *)(p+len-4) = jmprel(p+len, target); 3081 *(int32_t *)(p+len-4) = jmprel(J, p+len, target);
2890 /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ 3082 /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
2891 for (; p < pe; p += asm_x86_inslen(p)) 3083 for (; p < pe; p += asm_x86_inslen(p)) {
2892 if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) 3084 intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64;
3085 if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi)
2893 break; 3086 break;
2894 lua_assert(p < pe); 3087 }
3088 lj_assertJ(p < pe, "instruction length decoder failed");
2895 for (; p < pe; p += asm_x86_inslen(p)) 3089 for (; p < pe; p += asm_x86_inslen(p))
2896 if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) 3090 if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px)
2897 *(int32_t *)(p+2) = jmprel(p+6, target); 3091 *(int32_t *)(p+2) = jmprel(J, p+6, target);
2898 lj_mcode_sync(T->mcode, T->mcode + T->szmcode); 3092 lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
2899 lj_mcode_patch(J, mcarea, 1); 3093 lj_mcode_patch(J, mcarea, 1);
2900} 3094}
diff --git a/src/lj_assert.c b/src/lj_assert.c
new file mode 100644
index 00000000..7989dbe6
--- /dev/null
+++ b/src/lj_assert.c
@@ -0,0 +1,28 @@
1/*
2** Internal assertions.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#define lj_assert_c
7#define LUA_CORE
8
9#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
10
11#include <stdio.h>
12
13#include "lj_obj.h"
14
15void lj_assert_fail(global_State *g, const char *file, int line,
16 const char *func, const char *fmt, ...)
17{
18 va_list argp;
19 va_start(argp, fmt);
20 fprintf(stderr, "LuaJIT ASSERT %s:%d: %s: ", file, line, func);
21 vfprintf(stderr, fmt, argp);
22 fputc('\n', stderr);
23 va_end(argp);
24 UNUSED(g); /* May be NULL. TODO: optionally dump state. */
25 abort();
26}
27
28#endif
diff --git a/src/lj_bc.h b/src/lj_bc.h
index 8fd7a2ed..44c78f83 100644
--- a/src/lj_bc.h
+++ b/src/lj_bc.h
@@ -89,6 +89,8 @@
89 _(ISFC, dst, ___, var, ___) \ 89 _(ISFC, dst, ___, var, ___) \
90 _(IST, ___, ___, var, ___) \ 90 _(IST, ___, ___, var, ___) \
91 _(ISF, ___, ___, var, ___) \ 91 _(ISF, ___, ___, var, ___) \
92 _(ISTYPE, var, ___, lit, ___) \
93 _(ISNUM, var, ___, lit, ___) \
92 \ 94 \
93 /* Unary ops. */ \ 95 /* Unary ops. */ \
94 _(MOV, dst, ___, var, ___) \ 96 _(MOV, dst, ___, var, ___) \
@@ -143,10 +145,12 @@
143 _(TGETV, dst, var, var, index) \ 145 _(TGETV, dst, var, var, index) \
144 _(TGETS, dst, var, str, index) \ 146 _(TGETS, dst, var, str, index) \
145 _(TGETB, dst, var, lit, index) \ 147 _(TGETB, dst, var, lit, index) \
148 _(TGETR, dst, var, var, index) \
146 _(TSETV, var, var, var, newindex) \ 149 _(TSETV, var, var, var, newindex) \
147 _(TSETS, var, var, str, newindex) \ 150 _(TSETS, var, var, str, newindex) \
148 _(TSETB, var, var, lit, newindex) \ 151 _(TSETB, var, var, lit, newindex) \
149 _(TSETM, base, ___, num, newindex) \ 152 _(TSETM, base, ___, num, newindex) \
153 _(TSETR, var, var, var, newindex) \
150 \ 154 \
151 /* Calls and vararg handling. T = tail call. */ \ 155 /* Calls and vararg handling. T = tail call. */ \
152 _(CALLM, base, lit, lit, call) \ 156 _(CALLM, base, lit, lit, call) \
diff --git a/src/lj_bcdump.h b/src/lj_bcdump.h
index ad564619..f458d41f 100644
--- a/src/lj_bcdump.h
+++ b/src/lj_bcdump.h
@@ -36,14 +36,15 @@
36/* If you perform *any* kind of private modifications to the bytecode itself 36/* If you perform *any* kind of private modifications to the bytecode itself
37** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher. 37** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
38*/ 38*/
39#define BCDUMP_VERSION 1 39#define BCDUMP_VERSION 2
40 40
41/* Compatibility flags. */ 41/* Compatibility flags. */
42#define BCDUMP_F_BE 0x01 42#define BCDUMP_F_BE 0x01
43#define BCDUMP_F_STRIP 0x02 43#define BCDUMP_F_STRIP 0x02
44#define BCDUMP_F_FFI 0x04 44#define BCDUMP_F_FFI 0x04
45#define BCDUMP_F_FR2 0x08
45 46
46#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1) 47#define BCDUMP_F_KNOWN (BCDUMP_F_FR2*2-1)
47 48
48/* Type codes for the GC constants of a prototype. Plus length for strings. */ 49/* Type codes for the GC constants of a prototype. Plus length for strings. */
49enum { 50enum {
@@ -61,6 +62,7 @@ enum {
61 62
62LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, 63LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
63 void *data, int strip); 64 void *data, int strip);
65LJ_FUNC GCproto *lj_bcread_proto(LexState *ls);
64LJ_FUNC GCproto *lj_bcread(LexState *ls); 66LJ_FUNC GCproto *lj_bcread(LexState *ls);
65 67
66#endif 68#endif
diff --git a/src/lj_bcread.c b/src/lj_bcread.c
index b88794eb..1d9547be 100644
--- a/src/lj_bcread.c
+++ b/src/lj_bcread.c
@@ -9,6 +9,7 @@
9#include "lj_obj.h" 9#include "lj_obj.h"
10#include "lj_gc.h" 10#include "lj_gc.h"
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_buf.h"
12#include "lj_str.h" 13#include "lj_str.h"
13#include "lj_tab.h" 14#include "lj_tab.h"
14#include "lj_bc.h" 15#include "lj_bc.h"
@@ -20,6 +21,7 @@
20#include "lj_lex.h" 21#include "lj_lex.h"
21#include "lj_bcdump.h" 22#include "lj_bcdump.h"
22#include "lj_state.h" 23#include "lj_state.h"
24#include "lj_strfmt.h"
23 25
24/* Reuse some lexer fields for our own purposes. */ 26/* Reuse some lexer fields for our own purposes. */
25#define bcread_flags(ls) ls->level 27#define bcread_flags(ls) ls->level
@@ -38,85 +40,74 @@ static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em)
38 const char *name = ls->chunkarg; 40 const char *name = ls->chunkarg;
39 if (*name == BCDUMP_HEAD1) name = "(binary)"; 41 if (*name == BCDUMP_HEAD1) name = "(binary)";
40 else if (*name == '@' || *name == '=') name++; 42 else if (*name == '@' || *name == '=') name++;
41 lj_str_pushf(L, "%s: %s", name, err2msg(em)); 43 lj_strfmt_pushf(L, "%s: %s", name, err2msg(em));
42 lj_err_throw(L, LUA_ERRSYNTAX); 44 lj_err_throw(L, LUA_ERRSYNTAX);
43} 45}
44 46
45/* Resize input buffer. */ 47/* Refill buffer. */
46static void bcread_resize(LexState *ls, MSize len)
47{
48 if (ls->sb.sz < len) {
49 MSize sz = ls->sb.sz * 2;
50 while (len > sz) sz = sz * 2;
51 lj_str_resizebuf(ls->L, &ls->sb, sz);
52 /* Caveat: this may change ls->sb.buf which may affect ls->p. */
53 }
54}
55
56/* Refill buffer if needed. */
57static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) 48static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need)
58{ 49{
59 lua_assert(len != 0); 50 lj_assertLS(len != 0, "empty refill");
60 if (len > LJ_MAX_MEM || ls->current < 0) 51 if (len > LJ_MAX_BUF || ls->c < 0)
61 bcread_error(ls, LJ_ERR_BCBAD); 52 bcread_error(ls, LJ_ERR_BCBAD);
62 do { 53 do {
63 const char *buf; 54 const char *buf;
64 size_t size; 55 size_t sz;
65 if (ls->n) { /* Copy remainder to buffer. */ 56 char *p = sbufB(&ls->sb);
66 if (ls->sb.n) { /* Move down in buffer. */ 57 MSize n = (MSize)(ls->pe - ls->p);
67 lua_assert(ls->p + ls->n == ls->sb.buf + ls->sb.n); 58 if (n) { /* Copy remainder to buffer. */
68 if (ls->n != ls->sb.n) 59 if (sbuflen(&ls->sb)) { /* Move down in buffer. */
69 memmove(ls->sb.buf, ls->p, ls->n); 60 lj_assertLS(ls->pe == sbufP(&ls->sb), "bad buffer pointer");
61 if (ls->p != p) memmove(p, ls->p, n);
70 } else { /* Copy from buffer provided by reader. */ 62 } else { /* Copy from buffer provided by reader. */
71 bcread_resize(ls, len); 63 p = lj_buf_need(&ls->sb, len);
72 memcpy(ls->sb.buf, ls->p, ls->n); 64 memcpy(p, ls->p, n);
73 } 65 }
74 ls->p = ls->sb.buf; 66 ls->p = p;
67 ls->pe = p + n;
75 } 68 }
76 ls->sb.n = ls->n; 69 setsbufP(&ls->sb, p + n);
77 buf = ls->rfunc(ls->L, ls->rdata, &size); /* Get more data from reader. */ 70 buf = ls->rfunc(ls->L, ls->rdata, &sz); /* Get more data from reader. */
78 if (buf == NULL || size == 0) { /* EOF? */ 71 if (buf == NULL || sz == 0) { /* EOF? */
79 if (need) bcread_error(ls, LJ_ERR_BCBAD); 72 if (need) bcread_error(ls, LJ_ERR_BCBAD);
80 ls->current = -1; /* Only bad if we get called again. */ 73 ls->c = -1; /* Only bad if we get called again. */
81 break; 74 break;
82 } 75 }
83 if (size >= LJ_MAX_MEM - ls->sb.n) lj_err_mem(ls->L); 76 if (sz >= LJ_MAX_BUF - n) lj_err_mem(ls->L);
84 if (ls->sb.n) { /* Append to buffer. */ 77 if (n) { /* Append to buffer. */
85 MSize n = ls->sb.n + (MSize)size; 78 n += (MSize)sz;
86 bcread_resize(ls, n < len ? len : n); 79 p = lj_buf_need(&ls->sb, n < len ? len : n);
87 memcpy(ls->sb.buf + ls->sb.n, buf, size); 80 memcpy(sbufP(&ls->sb), buf, sz);
88 ls->n = ls->sb.n = n; 81 setsbufP(&ls->sb, p + n);
89 ls->p = ls->sb.buf; 82 ls->p = p;
83 ls->pe = p + n;
90 } else { /* Return buffer provided by reader. */ 84 } else { /* Return buffer provided by reader. */
91 ls->n = (MSize)size;
92 ls->p = buf; 85 ls->p = buf;
86 ls->pe = buf + sz;
93 } 87 }
94 } while (ls->n < len); 88 } while ((MSize)(ls->pe - ls->p) < len);
95} 89}
96 90
97/* Need a certain number of bytes. */ 91/* Need a certain number of bytes. */
98static LJ_AINLINE void bcread_need(LexState *ls, MSize len) 92static LJ_AINLINE void bcread_need(LexState *ls, MSize len)
99{ 93{
100 if (LJ_UNLIKELY(ls->n < len)) 94 if (LJ_UNLIKELY((MSize)(ls->pe - ls->p) < len))
101 bcread_fill(ls, len, 1); 95 bcread_fill(ls, len, 1);
102} 96}
103 97
104/* Want to read up to a certain number of bytes, but may need less. */ 98/* Want to read up to a certain number of bytes, but may need less. */
105static LJ_AINLINE void bcread_want(LexState *ls, MSize len) 99static LJ_AINLINE void bcread_want(LexState *ls, MSize len)
106{ 100{
107 if (LJ_UNLIKELY(ls->n < len)) 101 if (LJ_UNLIKELY((MSize)(ls->pe - ls->p) < len))
108 bcread_fill(ls, len, 0); 102 bcread_fill(ls, len, 0);
109} 103}
110 104
111#define bcread_dec(ls) check_exp(ls->n > 0, ls->n--)
112#define bcread_consume(ls, len) check_exp(ls->n >= (len), ls->n -= (len))
113
114/* Return memory block from buffer. */ 105/* Return memory block from buffer. */
115static uint8_t *bcread_mem(LexState *ls, MSize len) 106static LJ_AINLINE uint8_t *bcread_mem(LexState *ls, MSize len)
116{ 107{
117 uint8_t *p = (uint8_t *)ls->p; 108 uint8_t *p = (uint8_t *)ls->p;
118 bcread_consume(ls, len); 109 ls->p += len;
119 ls->p = (char *)p + len; 110 lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
120 return p; 111 return p;
121} 112}
122 113
@@ -129,25 +120,15 @@ static void bcread_block(LexState *ls, void *q, MSize len)
129/* Read byte from buffer. */ 120/* Read byte from buffer. */
130static LJ_AINLINE uint32_t bcread_byte(LexState *ls) 121static LJ_AINLINE uint32_t bcread_byte(LexState *ls)
131{ 122{
132 bcread_dec(ls); 123 lj_assertLS(ls->p < ls->pe, "buffer read overflow");
133 return (uint32_t)(uint8_t)*ls->p++; 124 return (uint32_t)(uint8_t)*ls->p++;
134} 125}
135 126
136/* Read ULEB128 value from buffer. */ 127/* Read ULEB128 value from buffer. */
137static uint32_t bcread_uleb128(LexState *ls) 128static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls)
138{ 129{
139 const uint8_t *p = (const uint8_t *)ls->p; 130 uint32_t v = lj_buf_ruleb128(&ls->p);
140 uint32_t v = *p++; 131 lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
141 if (LJ_UNLIKELY(v >= 0x80)) {
142 int sh = 0;
143 v &= 0x7f;
144 do {
145 v |= ((*p & 0x7f) << (sh += 7));
146 bcread_dec(ls);
147 } while (*p++ >= 0x80);
148 }
149 bcread_dec(ls);
150 ls->p = (char *)p;
151 return v; 132 return v;
152} 133}
153 134
@@ -161,11 +142,10 @@ static uint32_t bcread_uleb128_33(LexState *ls)
161 v &= 0x3f; 142 v &= 0x3f;
162 do { 143 do {
163 v |= ((*p & 0x7f) << (sh += 7)); 144 v |= ((*p & 0x7f) << (sh += 7));
164 bcread_dec(ls);
165 } while (*p++ >= 0x80); 145 } while (*p++ >= 0x80);
166 } 146 }
167 bcread_dec(ls);
168 ls->p = (char *)p; 147 ls->p = (char *)p;
148 lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
169 return v; 149 return v;
170} 150}
171 151
@@ -212,8 +192,8 @@ static void bcread_ktabk(LexState *ls, TValue *o)
212 o->u32.lo = bcread_uleb128(ls); 192 o->u32.lo = bcread_uleb128(ls);
213 o->u32.hi = bcread_uleb128(ls); 193 o->u32.hi = bcread_uleb128(ls);
214 } else { 194 } else {
215 lua_assert(tp <= BCDUMP_KTAB_TRUE); 195 lj_assertLS(tp <= BCDUMP_KTAB_TRUE, "bad constant type %d", tp);
216 setitype(o, ~tp); 196 setpriV(o, ~tp);
217 } 197 }
218} 198}
219 199
@@ -234,7 +214,7 @@ static GCtab *bcread_ktab(LexState *ls)
234 for (i = 0; i < nhash; i++) { 214 for (i = 0; i < nhash; i++) {
235 TValue key; 215 TValue key;
236 bcread_ktabk(ls, &key); 216 bcread_ktabk(ls, &key);
237 lua_assert(!tvisnil(&key)); 217 lj_assertLS(!tvisnil(&key), "nil key");
238 bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); 218 bcread_ktabk(ls, lj_tab_set(ls->L, t, &key));
239 } 219 }
240 } 220 }
@@ -271,7 +251,7 @@ static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc)
271#endif 251#endif
272 } else { 252 } else {
273 lua_State *L = ls->L; 253 lua_State *L = ls->L;
274 lua_assert(tp == BCDUMP_KGC_CHILD); 254 lj_assertLS(tp == BCDUMP_KGC_CHILD, "bad constant type %d", tp);
275 if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */ 255 if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */
276 bcread_error(ls, LJ_ERR_BCBAD); 256 bcread_error(ls, LJ_ERR_BCBAD);
277 L->top--; 257 L->top--;
@@ -327,25 +307,13 @@ static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv)
327} 307}
328 308
329/* Read a prototype. */ 309/* Read a prototype. */
330static GCproto *bcread_proto(LexState *ls) 310GCproto *lj_bcread_proto(LexState *ls)
331{ 311{
332 GCproto *pt; 312 GCproto *pt;
333 MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept; 313 MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept;
334 MSize ofsk, ofsuv, ofsdbg; 314 MSize ofsk, ofsuv, ofsdbg;
335 MSize sizedbg = 0; 315 MSize sizedbg = 0;
336 BCLine firstline = 0, numline = 0; 316 BCLine firstline = 0, numline = 0;
337 MSize len, startn;
338
339 /* Read length. */
340 if (ls->n > 0 && ls->p[0] == 0) { /* Shortcut EOF. */
341 ls->n--; ls->p++;
342 return NULL;
343 }
344 bcread_want(ls, 5);
345 len = bcread_uleb128(ls);
346 if (!len) return NULL; /* EOF */
347 bcread_need(ls, len);
348 startn = ls->n;
349 317
350 /* Read prototype header. */ 318 /* Read prototype header. */
351 flags = bcread_byte(ls); 319 flags = bcread_byte(ls);
@@ -414,9 +382,6 @@ static GCproto *bcread_proto(LexState *ls)
414 setmref(pt->uvinfo, NULL); 382 setmref(pt->uvinfo, NULL);
415 setmref(pt->varinfo, NULL); 383 setmref(pt->varinfo, NULL);
416 } 384 }
417
418 if (len != startn - ls->n)
419 bcread_error(ls, LJ_ERR_BCBAD);
420 return pt; 385 return pt;
421} 386}
422 387
@@ -430,6 +395,7 @@ static int bcread_header(LexState *ls)
430 bcread_byte(ls) != BCDUMP_VERSION) return 0; 395 bcread_byte(ls) != BCDUMP_VERSION) return 0;
431 bcread_flags(ls) = flags = bcread_uleb128(ls); 396 bcread_flags(ls) = flags = bcread_uleb128(ls);
432 if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0; 397 if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0;
398 if ((flags & BCDUMP_F_FR2) != LJ_FR2*BCDUMP_F_FR2) return 0;
433 if ((flags & BCDUMP_F_FFI)) { 399 if ((flags & BCDUMP_F_FFI)) {
434#if LJ_HASFFI 400#if LJ_HASFFI
435 lua_State *L = ls->L; 401 lua_State *L = ls->L;
@@ -456,19 +422,33 @@ static int bcread_header(LexState *ls)
456GCproto *lj_bcread(LexState *ls) 422GCproto *lj_bcread(LexState *ls)
457{ 423{
458 lua_State *L = ls->L; 424 lua_State *L = ls->L;
459 lua_assert(ls->current == BCDUMP_HEAD1); 425 lj_assertLS(ls->c == BCDUMP_HEAD1, "bad bytecode header");
460 bcread_savetop(L, ls, L->top); 426 bcread_savetop(L, ls, L->top);
461 lj_str_resetbuf(&ls->sb); 427 lj_buf_reset(&ls->sb);
462 /* Check for a valid bytecode dump header. */ 428 /* Check for a valid bytecode dump header. */
463 if (!bcread_header(ls)) 429 if (!bcread_header(ls))
464 bcread_error(ls, LJ_ERR_BCFMT); 430 bcread_error(ls, LJ_ERR_BCFMT);
465 for (;;) { /* Process all prototypes in the bytecode dump. */ 431 for (;;) { /* Process all prototypes in the bytecode dump. */
466 GCproto *pt = bcread_proto(ls); 432 GCproto *pt;
467 if (!pt) break; 433 MSize len;
434 const char *startp;
435 /* Read length. */
436 if (ls->p < ls->pe && ls->p[0] == 0) { /* Shortcut EOF. */
437 ls->p++;
438 break;
439 }
440 bcread_want(ls, 5);
441 len = bcread_uleb128(ls);
442 if (!len) break; /* EOF */
443 bcread_need(ls, len);
444 startp = ls->p;
445 pt = lj_bcread_proto(ls);
446 if (ls->p != startp + len)
447 bcread_error(ls, LJ_ERR_BCBAD);
468 setprotoV(L, L->top, pt); 448 setprotoV(L, L->top, pt);
469 incr_top(L); 449 incr_top(L);
470 } 450 }
471 if ((ls->n && !ls->endmark) || L->top-1 != bcread_oldtop(L, ls)) 451 if ((ls->pe != ls->p && !ls->endmark) || L->top-1 != bcread_oldtop(L, ls))
472 bcread_error(ls, LJ_ERR_BCBAD); 452 bcread_error(ls, LJ_ERR_BCBAD);
473 /* Pop off last prototype. */ 453 /* Pop off last prototype. */
474 L->top--; 454 L->top--;
diff --git a/src/lj_bcwrite.c b/src/lj_bcwrite.c
index f57fcfd6..a8c310b8 100644
--- a/src/lj_bcwrite.c
+++ b/src/lj_bcwrite.c
@@ -8,7 +8,7 @@
8 8
9#include "lj_obj.h" 9#include "lj_obj.h"
10#include "lj_gc.h" 10#include "lj_gc.h"
11#include "lj_str.h" 11#include "lj_buf.h"
12#include "lj_bc.h" 12#include "lj_bc.h"
13#if LJ_HASFFI 13#if LJ_HASFFI
14#include "lj_ctype.h" 14#include "lj_ctype.h"
@@ -17,99 +17,67 @@
17#include "lj_dispatch.h" 17#include "lj_dispatch.h"
18#include "lj_jit.h" 18#include "lj_jit.h"
19#endif 19#endif
20#include "lj_strfmt.h"
20#include "lj_bcdump.h" 21#include "lj_bcdump.h"
21#include "lj_vm.h" 22#include "lj_vm.h"
22 23
23/* Context for bytecode writer. */ 24/* Context for bytecode writer. */
24typedef struct BCWriteCtx { 25typedef struct BCWriteCtx {
25 SBuf sb; /* Output buffer. */ 26 SBuf sb; /* Output buffer. */
26 lua_State *L; /* Lua state. */
27 GCproto *pt; /* Root prototype. */ 27 GCproto *pt; /* Root prototype. */
28 lua_Writer wfunc; /* Writer callback. */ 28 lua_Writer wfunc; /* Writer callback. */
29 void *wdata; /* Writer callback data. */ 29 void *wdata; /* Writer callback data. */
30 int strip; /* Strip debug info. */ 30 int strip; /* Strip debug info. */
31 int status; /* Status from writer callback. */ 31 int status; /* Status from writer callback. */
32#ifdef LUA_USE_ASSERT
33 global_State *g;
34#endif
32} BCWriteCtx; 35} BCWriteCtx;
33 36
34/* -- Output buffer handling ---------------------------------------------- */ 37#ifdef LUA_USE_ASSERT
35 38#define lj_assertBCW(c, ...) lj_assertG_(ctx->g, (c), __VA_ARGS__)
36/* Resize buffer if needed. */ 39#else
37static LJ_NOINLINE void bcwrite_resize(BCWriteCtx *ctx, MSize len) 40#define lj_assertBCW(c, ...) ((void)ctx)
38{ 41#endif
39 MSize sz = ctx->sb.sz * 2;
40 while (ctx->sb.n + len > sz) sz = sz * 2;
41 lj_str_resizebuf(ctx->L, &ctx->sb, sz);
42}
43
44/* Need a certain amount of buffer space. */
45static LJ_AINLINE void bcwrite_need(BCWriteCtx *ctx, MSize len)
46{
47 if (LJ_UNLIKELY(ctx->sb.n + len > ctx->sb.sz))
48 bcwrite_resize(ctx, len);
49}
50
51/* Add memory block to buffer. */
52static void bcwrite_block(BCWriteCtx *ctx, const void *p, MSize len)
53{
54 uint8_t *q = (uint8_t *)(ctx->sb.buf + ctx->sb.n);
55 MSize i;
56 ctx->sb.n += len;
57 for (i = 0; i < len; i++) q[i] = ((uint8_t *)p)[i];
58}
59
60/* Add byte to buffer. */
61static LJ_AINLINE void bcwrite_byte(BCWriteCtx *ctx, uint8_t b)
62{
63 ctx->sb.buf[ctx->sb.n++] = b;
64}
65
66/* Add ULEB128 value to buffer. */
67static void bcwrite_uleb128(BCWriteCtx *ctx, uint32_t v)
68{
69 MSize n = ctx->sb.n;
70 uint8_t *p = (uint8_t *)ctx->sb.buf;
71 for (; v >= 0x80; v >>= 7)
72 p[n++] = (uint8_t)((v & 0x7f) | 0x80);
73 p[n++] = (uint8_t)v;
74 ctx->sb.n = n;
75}
76 42
77/* -- Bytecode writer ----------------------------------------------------- */ 43/* -- Bytecode writer ----------------------------------------------------- */
78 44
79/* Write a single constant key/value of a template table. */ 45/* Write a single constant key/value of a template table. */
80static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow) 46static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow)
81{ 47{
82 bcwrite_need(ctx, 1+10); 48 char *p = lj_buf_more(&ctx->sb, 1+10);
83 if (tvisstr(o)) { 49 if (tvisstr(o)) {
84 const GCstr *str = strV(o); 50 const GCstr *str = strV(o);
85 MSize len = str->len; 51 MSize len = str->len;
86 bcwrite_need(ctx, 5+len); 52 p = lj_buf_more(&ctx->sb, 5+len);
87 bcwrite_uleb128(ctx, BCDUMP_KTAB_STR+len); 53 p = lj_strfmt_wuleb128(p, BCDUMP_KTAB_STR+len);
88 bcwrite_block(ctx, strdata(str), len); 54 p = lj_buf_wmem(p, strdata(str), len);
89 } else if (tvisint(o)) { 55 } else if (tvisint(o)) {
90 bcwrite_byte(ctx, BCDUMP_KTAB_INT); 56 *p++ = BCDUMP_KTAB_INT;
91 bcwrite_uleb128(ctx, intV(o)); 57 p = lj_strfmt_wuleb128(p, intV(o));
92 } else if (tvisnum(o)) { 58 } else if (tvisnum(o)) {
93 if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */ 59 if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */
94 lua_Number num = numV(o); 60 lua_Number num = numV(o);
95 int32_t k = lj_num2int(num); 61 int32_t k = lj_num2int(num);
96 if (num == (lua_Number)k) { /* -0 is never a constant. */ 62 if (num == (lua_Number)k) { /* -0 is never a constant. */
97 bcwrite_byte(ctx, BCDUMP_KTAB_INT); 63 *p++ = BCDUMP_KTAB_INT;
98 bcwrite_uleb128(ctx, k); 64 p = lj_strfmt_wuleb128(p, k);
65 setsbufP(&ctx->sb, p);
99 return; 66 return;
100 } 67 }
101 } 68 }
102 bcwrite_byte(ctx, BCDUMP_KTAB_NUM); 69 *p++ = BCDUMP_KTAB_NUM;
103 bcwrite_uleb128(ctx, o->u32.lo); 70 p = lj_strfmt_wuleb128(p, o->u32.lo);
104 bcwrite_uleb128(ctx, o->u32.hi); 71 p = lj_strfmt_wuleb128(p, o->u32.hi);
105 } else { 72 } else {
106 lua_assert(tvispri(o)); 73 lj_assertBCW(tvispri(o), "unhandled type %d", itype(o));
107 bcwrite_byte(ctx, BCDUMP_KTAB_NIL+~itype(o)); 74 *p++ = BCDUMP_KTAB_NIL+~itype(o);
108 } 75 }
76 setsbufP(&ctx->sb, p);
109} 77}
110 78
111/* Write a template table. */ 79/* Write a template table. */
112static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t) 80static void bcwrite_ktab(BCWriteCtx *ctx, char *p, const GCtab *t)
113{ 81{
114 MSize narray = 0, nhash = 0; 82 MSize narray = 0, nhash = 0;
115 if (t->asize > 0) { /* Determine max. length of array part. */ 83 if (t->asize > 0) { /* Determine max. length of array part. */
@@ -127,8 +95,9 @@ static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t)
127 nhash += !tvisnil(&node[i].val); 95 nhash += !tvisnil(&node[i].val);
128 } 96 }
129 /* Write number of array slots and hash slots. */ 97 /* Write number of array slots and hash slots. */
130 bcwrite_uleb128(ctx, narray); 98 p = lj_strfmt_wuleb128(p, narray);
131 bcwrite_uleb128(ctx, nhash); 99 p = lj_strfmt_wuleb128(p, nhash);
100 setsbufP(&ctx->sb, p);
132 if (narray) { /* Write array entries (may contain nil). */ 101 if (narray) { /* Write array entries (may contain nil). */
133 MSize i; 102 MSize i;
134 TValue *o = tvref(t->array); 103 TValue *o = tvref(t->array);
@@ -155,12 +124,13 @@ static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
155 for (i = 0; i < sizekgc; i++, kr++) { 124 for (i = 0; i < sizekgc; i++, kr++) {
156 GCobj *o = gcref(*kr); 125 GCobj *o = gcref(*kr);
157 MSize tp, need = 1; 126 MSize tp, need = 1;
127 char *p;
158 /* Determine constant type and needed size. */ 128 /* Determine constant type and needed size. */
159 if (o->gch.gct == ~LJ_TSTR) { 129 if (o->gch.gct == ~LJ_TSTR) {
160 tp = BCDUMP_KGC_STR + gco2str(o)->len; 130 tp = BCDUMP_KGC_STR + gco2str(o)->len;
161 need = 5+gco2str(o)->len; 131 need = 5+gco2str(o)->len;
162 } else if (o->gch.gct == ~LJ_TPROTO) { 132 } else if (o->gch.gct == ~LJ_TPROTO) {
163 lua_assert((pt->flags & PROTO_CHILD)); 133 lj_assertBCW((pt->flags & PROTO_CHILD), "prototype has unexpected child");
164 tp = BCDUMP_KGC_CHILD; 134 tp = BCDUMP_KGC_CHILD;
165#if LJ_HASFFI 135#if LJ_HASFFI
166 } else if (o->gch.gct == ~LJ_TCDATA) { 136 } else if (o->gch.gct == ~LJ_TCDATA) {
@@ -171,34 +141,38 @@ static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
171 } else if (id == CTID_UINT64) { 141 } else if (id == CTID_UINT64) {
172 tp = BCDUMP_KGC_U64; 142 tp = BCDUMP_KGC_U64;
173 } else { 143 } else {
174 lua_assert(id == CTID_COMPLEX_DOUBLE); 144 lj_assertBCW(id == CTID_COMPLEX_DOUBLE,
145 "bad cdata constant CTID %d", id);
175 tp = BCDUMP_KGC_COMPLEX; 146 tp = BCDUMP_KGC_COMPLEX;
176 } 147 }
177#endif 148#endif
178 } else { 149 } else {
179 lua_assert(o->gch.gct == ~LJ_TTAB); 150 lj_assertBCW(o->gch.gct == ~LJ_TTAB,
151 "bad constant GC type %d", o->gch.gct);
180 tp = BCDUMP_KGC_TAB; 152 tp = BCDUMP_KGC_TAB;
181 need = 1+2*5; 153 need = 1+2*5;
182 } 154 }
183 /* Write constant type. */ 155 /* Write constant type. */
184 bcwrite_need(ctx, need); 156 p = lj_buf_more(&ctx->sb, need);
185 bcwrite_uleb128(ctx, tp); 157 p = lj_strfmt_wuleb128(p, tp);
186 /* Write constant data (if any). */ 158 /* Write constant data (if any). */
187 if (tp >= BCDUMP_KGC_STR) { 159 if (tp >= BCDUMP_KGC_STR) {
188 bcwrite_block(ctx, strdata(gco2str(o)), gco2str(o)->len); 160 p = lj_buf_wmem(p, strdata(gco2str(o)), gco2str(o)->len);
189 } else if (tp == BCDUMP_KGC_TAB) { 161 } else if (tp == BCDUMP_KGC_TAB) {
190 bcwrite_ktab(ctx, gco2tab(o)); 162 bcwrite_ktab(ctx, p, gco2tab(o));
163 continue;
191#if LJ_HASFFI 164#if LJ_HASFFI
192 } else if (tp != BCDUMP_KGC_CHILD) { 165 } else if (tp != BCDUMP_KGC_CHILD) {
193 cTValue *p = (TValue *)cdataptr(gco2cd(o)); 166 cTValue *q = (TValue *)cdataptr(gco2cd(o));
194 bcwrite_uleb128(ctx, p[0].u32.lo); 167 p = lj_strfmt_wuleb128(p, q[0].u32.lo);
195 bcwrite_uleb128(ctx, p[0].u32.hi); 168 p = lj_strfmt_wuleb128(p, q[0].u32.hi);
196 if (tp == BCDUMP_KGC_COMPLEX) { 169 if (tp == BCDUMP_KGC_COMPLEX) {
197 bcwrite_uleb128(ctx, p[1].u32.lo); 170 p = lj_strfmt_wuleb128(p, q[1].u32.lo);
198 bcwrite_uleb128(ctx, p[1].u32.hi); 171 p = lj_strfmt_wuleb128(p, q[1].u32.hi);
199 } 172 }
200#endif 173#endif
201 } 174 }
175 setsbufP(&ctx->sb, p);
202 } 176 }
203} 177}
204 178
@@ -207,7 +181,7 @@ static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
207{ 181{
208 MSize i, sizekn = pt->sizekn; 182 MSize i, sizekn = pt->sizekn;
209 cTValue *o = mref(pt->k, TValue); 183 cTValue *o = mref(pt->k, TValue);
210 bcwrite_need(ctx, 10*sizekn); 184 char *p = lj_buf_more(&ctx->sb, 10*sizekn);
211 for (i = 0; i < sizekn; i++, o++) { 185 for (i = 0; i < sizekn; i++, o++) {
212 int32_t k; 186 int32_t k;
213 if (tvisint(o)) { 187 if (tvisint(o)) {
@@ -220,55 +194,55 @@ static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
220 k = lj_num2int(num); 194 k = lj_num2int(num);
221 if (num == (lua_Number)k) { /* -0 is never a constant. */ 195 if (num == (lua_Number)k) { /* -0 is never a constant. */
222 save_int: 196 save_int:
223 bcwrite_uleb128(ctx, 2*(uint32_t)k | ((uint32_t)k & 0x80000000u)); 197 p = lj_strfmt_wuleb128(p, 2*(uint32_t)k | ((uint32_t)k&0x80000000u));
224 if (k < 0) { 198 if (k < 0)
225 char *p = &ctx->sb.buf[ctx->sb.n-1]; 199 p[-1] = (p[-1] & 7) | ((k>>27) & 0x18);
226 *p = (*p & 7) | ((k>>27) & 0x18);
227 }
228 continue; 200 continue;
229 } 201 }
230 } 202 }
231 bcwrite_uleb128(ctx, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u))); 203 p = lj_strfmt_wuleb128(p, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u)));
232 if (o->u32.lo >= 0x80000000u) { 204 if (o->u32.lo >= 0x80000000u)
233 char *p = &ctx->sb.buf[ctx->sb.n-1]; 205 p[-1] = (p[-1] & 7) | ((o->u32.lo>>27) & 0x18);
234 *p = (*p & 7) | ((o->u32.lo>>27) & 0x18); 206 p = lj_strfmt_wuleb128(p, o->u32.hi);
235 }
236 bcwrite_uleb128(ctx, o->u32.hi);
237 } 207 }
238 } 208 }
209 setsbufP(&ctx->sb, p);
239} 210}
240 211
241/* Write bytecode instructions. */ 212/* Write bytecode instructions. */
242static void bcwrite_bytecode(BCWriteCtx *ctx, GCproto *pt) 213static char *bcwrite_bytecode(BCWriteCtx *ctx, char *p, GCproto *pt)
243{ 214{
244 MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */ 215 MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */
245#if LJ_HASJIT 216#if LJ_HASJIT
246 uint8_t *p = (uint8_t *)&ctx->sb.buf[ctx->sb.n]; 217 uint8_t *q = (uint8_t *)p;
247#endif 218#endif
248 bcwrite_block(ctx, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns)); 219 p = lj_buf_wmem(p, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns));
220 UNUSED(ctx);
249#if LJ_HASJIT 221#if LJ_HASJIT
250 /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */ 222 /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */
251 if ((pt->flags & PROTO_ILOOP) || pt->trace) { 223 if ((pt->flags & PROTO_ILOOP) || pt->trace) {
252 jit_State *J = L2J(ctx->L); 224 jit_State *J = L2J(sbufL(&ctx->sb));
253 MSize i; 225 MSize i;
254 for (i = 0; i < nbc; i++, p += sizeof(BCIns)) { 226 for (i = 0; i < nbc; i++, q += sizeof(BCIns)) {
255 BCOp op = (BCOp)p[LJ_ENDIAN_SELECT(0, 3)]; 227 BCOp op = (BCOp)q[LJ_ENDIAN_SELECT(0, 3)];
256 if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP || 228 if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP ||
257 op == BC_JFORI) { 229 op == BC_JFORI) {
258 p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL); 230 q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL);
259 } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) { 231 } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
260 BCReg rd = p[LJ_ENDIAN_SELECT(2, 1)] + (p[LJ_ENDIAN_SELECT(3, 0)] << 8); 232 BCReg rd = q[LJ_ENDIAN_SELECT(2, 1)] + (q[LJ_ENDIAN_SELECT(3, 0)] << 8);
261 memcpy(p, &traceref(J, rd)->startins, 4); 233 memcpy(q, &traceref(J, rd)->startins, 4);
262 } 234 }
263 } 235 }
264 } 236 }
265#endif 237#endif
238 return p;
266} 239}
267 240
268/* Write prototype. */ 241/* Write prototype. */
269static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt) 242static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
270{ 243{
271 MSize sizedbg = 0; 244 MSize sizedbg = 0;
245 char *p;
272 246
273 /* Recursively write children of prototype. */ 247 /* Recursively write children of prototype. */
274 if ((pt->flags & PROTO_CHILD)) { 248 if ((pt->flags & PROTO_CHILD)) {
@@ -282,31 +256,32 @@ static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
282 } 256 }
283 257
284 /* Start writing the prototype info to a buffer. */ 258 /* Start writing the prototype info to a buffer. */
285 lj_str_resetbuf(&ctx->sb); 259 p = lj_buf_need(&ctx->sb,
286 ctx->sb.n = 5; /* Leave room for final size. */ 260 5+4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2);
287 bcwrite_need(ctx, 4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2); 261 p += 5; /* Leave room for final size. */
288 262
289 /* Write prototype header. */ 263 /* Write prototype header. */
290 bcwrite_byte(ctx, (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI))); 264 *p++ = (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI));
291 bcwrite_byte(ctx, pt->numparams); 265 *p++ = pt->numparams;
292 bcwrite_byte(ctx, pt->framesize); 266 *p++ = pt->framesize;
293 bcwrite_byte(ctx, pt->sizeuv); 267 *p++ = pt->sizeuv;
294 bcwrite_uleb128(ctx, pt->sizekgc); 268 p = lj_strfmt_wuleb128(p, pt->sizekgc);
295 bcwrite_uleb128(ctx, pt->sizekn); 269 p = lj_strfmt_wuleb128(p, pt->sizekn);
296 bcwrite_uleb128(ctx, pt->sizebc-1); 270 p = lj_strfmt_wuleb128(p, pt->sizebc-1);
297 if (!ctx->strip) { 271 if (!ctx->strip) {
298 if (proto_lineinfo(pt)) 272 if (proto_lineinfo(pt))
299 sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt); 273 sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt);
300 bcwrite_uleb128(ctx, sizedbg); 274 p = lj_strfmt_wuleb128(p, sizedbg);
301 if (sizedbg) { 275 if (sizedbg) {
302 bcwrite_uleb128(ctx, pt->firstline); 276 p = lj_strfmt_wuleb128(p, pt->firstline);
303 bcwrite_uleb128(ctx, pt->numline); 277 p = lj_strfmt_wuleb128(p, pt->numline);
304 } 278 }
305 } 279 }
306 280
307 /* Write bytecode instructions and upvalue refs. */ 281 /* Write bytecode instructions and upvalue refs. */
308 bcwrite_bytecode(ctx, pt); 282 p = bcwrite_bytecode(ctx, p, pt);
309 bcwrite_block(ctx, proto_uv(pt), pt->sizeuv*2); 283 p = lj_buf_wmem(p, proto_uv(pt), pt->sizeuv*2);
284 setsbufP(&ctx->sb, p);
310 285
311 /* Write constants. */ 286 /* Write constants. */
312 bcwrite_kgc(ctx, pt); 287 bcwrite_kgc(ctx, pt);
@@ -314,18 +289,19 @@ static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
314 289
315 /* Write debug info, if not stripped. */ 290 /* Write debug info, if not stripped. */
316 if (sizedbg) { 291 if (sizedbg) {
317 bcwrite_need(ctx, sizedbg); 292 p = lj_buf_more(&ctx->sb, sizedbg);
318 bcwrite_block(ctx, proto_lineinfo(pt), sizedbg); 293 p = lj_buf_wmem(p, proto_lineinfo(pt), sizedbg);
294 setsbufP(&ctx->sb, p);
319 } 295 }
320 296
321 /* Pass buffer to writer function. */ 297 /* Pass buffer to writer function. */
322 if (ctx->status == 0) { 298 if (ctx->status == 0) {
323 MSize n = ctx->sb.n - 5; 299 MSize n = sbuflen(&ctx->sb) - 5;
324 MSize nn = (lj_fls(n)+8)*9 >> 6; 300 MSize nn = (lj_fls(n)+8)*9 >> 6;
325 ctx->sb.n = 5 - nn; 301 char *q = sbufB(&ctx->sb) + (5 - nn);
326 bcwrite_uleb128(ctx, n); /* Fill in final size. */ 302 p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */
327 lua_assert(ctx->sb.n == 5); 303 lj_assertBCW(p == sbufB(&ctx->sb) + 5, "bad ULEB128 write");
328 ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf+5-nn, nn+n, ctx->wdata); 304 ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata);
329 } 305 }
330} 306}
331 307
@@ -335,20 +311,21 @@ static void bcwrite_header(BCWriteCtx *ctx)
335 GCstr *chunkname = proto_chunkname(ctx->pt); 311 GCstr *chunkname = proto_chunkname(ctx->pt);
336 const char *name = strdata(chunkname); 312 const char *name = strdata(chunkname);
337 MSize len = chunkname->len; 313 MSize len = chunkname->len;
338 lj_str_resetbuf(&ctx->sb); 314 char *p = lj_buf_need(&ctx->sb, 5+5+len);
339 bcwrite_need(ctx, 5+5+len); 315 *p++ = BCDUMP_HEAD1;
340 bcwrite_byte(ctx, BCDUMP_HEAD1); 316 *p++ = BCDUMP_HEAD2;
341 bcwrite_byte(ctx, BCDUMP_HEAD2); 317 *p++ = BCDUMP_HEAD3;
342 bcwrite_byte(ctx, BCDUMP_HEAD3); 318 *p++ = BCDUMP_VERSION;
343 bcwrite_byte(ctx, BCDUMP_VERSION); 319 *p++ = (ctx->strip ? BCDUMP_F_STRIP : 0) +
344 bcwrite_byte(ctx, (ctx->strip ? BCDUMP_F_STRIP : 0) + 320 LJ_BE*BCDUMP_F_BE +
345 (LJ_BE ? BCDUMP_F_BE : 0) + 321 ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0) +
346 ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0)); 322 LJ_FR2*BCDUMP_F_FR2;
347 if (!ctx->strip) { 323 if (!ctx->strip) {
348 bcwrite_uleb128(ctx, len); 324 p = lj_strfmt_wuleb128(p, len);
349 bcwrite_block(ctx, name, len); 325 p = lj_buf_wmem(p, name, len);
350 } 326 }
351 ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf, ctx->sb.n, ctx->wdata); 327 ctx->status = ctx->wfunc(sbufL(&ctx->sb), sbufB(&ctx->sb),
328 (MSize)(p - sbufB(&ctx->sb)), ctx->wdata);
352} 329}
353 330
354/* Write footer of bytecode dump. */ 331/* Write footer of bytecode dump. */
@@ -356,7 +333,7 @@ static void bcwrite_footer(BCWriteCtx *ctx)
356{ 333{
357 if (ctx->status == 0) { 334 if (ctx->status == 0) {
358 uint8_t zero = 0; 335 uint8_t zero = 0;
359 ctx->status = ctx->wfunc(ctx->L, &zero, 1, ctx->wdata); 336 ctx->status = ctx->wfunc(sbufL(&ctx->sb), &zero, 1, ctx->wdata);
360 } 337 }
361} 338}
362 339
@@ -364,8 +341,8 @@ static void bcwrite_footer(BCWriteCtx *ctx)
364static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud) 341static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud)
365{ 342{
366 BCWriteCtx *ctx = (BCWriteCtx *)ud; 343 BCWriteCtx *ctx = (BCWriteCtx *)ud;
367 UNUSED(dummy); 344 UNUSED(L); UNUSED(dummy);
368 lj_str_resizebuf(L, &ctx->sb, 1024); /* Avoids resize for most prototypes. */ 345 lj_buf_need(&ctx->sb, 1024); /* Avoids resize for most prototypes. */
369 bcwrite_header(ctx); 346 bcwrite_header(ctx);
370 bcwrite_proto(ctx, ctx->pt); 347 bcwrite_proto(ctx, ctx->pt);
371 bcwrite_footer(ctx); 348 bcwrite_footer(ctx);
@@ -378,16 +355,18 @@ int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data,
378{ 355{
379 BCWriteCtx ctx; 356 BCWriteCtx ctx;
380 int status; 357 int status;
381 ctx.L = L;
382 ctx.pt = pt; 358 ctx.pt = pt;
383 ctx.wfunc = writer; 359 ctx.wfunc = writer;
384 ctx.wdata = data; 360 ctx.wdata = data;
385 ctx.strip = strip; 361 ctx.strip = strip;
386 ctx.status = 0; 362 ctx.status = 0;
387 lj_str_initbuf(&ctx.sb); 363#ifdef LUA_USE_ASSERT
364 ctx.g = G(L);
365#endif
366 lj_buf_init(L, &ctx.sb);
388 status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); 367 status = lj_vm_cpcall(L, NULL, &ctx, cpwriter);
389 if (status == 0) status = ctx.status; 368 if (status == 0) status = ctx.status;
390 lj_str_freebuf(G(ctx.L), &ctx.sb); 369 lj_buf_free(G(sbufL(&ctx.sb)), &ctx.sb);
391 return status; 370 return status;
392} 371}
393 372
diff --git a/src/lj_buf.c b/src/lj_buf.c
new file mode 100644
index 00000000..935ae488
--- /dev/null
+++ b/src/lj_buf.c
@@ -0,0 +1,232 @@
1/*
2** Buffer handling.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#define lj_buf_c
7#define LUA_CORE
8
9#include "lj_obj.h"
10#include "lj_gc.h"
11#include "lj_err.h"
12#include "lj_buf.h"
13#include "lj_str.h"
14#include "lj_tab.h"
15#include "lj_strfmt.h"
16
17/* -- Buffer management --------------------------------------------------- */
18
19static void buf_grow(SBuf *sb, MSize sz)
20{
21 MSize osz = sbufsz(sb), len = sbuflen(sb), nsz = osz;
22 char *b;
23 if (nsz < LJ_MIN_SBUF) nsz = LJ_MIN_SBUF;
24 while (nsz < sz) nsz += nsz;
25 b = (char *)lj_mem_realloc(sbufL(sb), sbufB(sb), osz, nsz);
26 setmref(sb->b, b);
27 setmref(sb->p, b + len);
28 setmref(sb->e, b + nsz);
29}
30
31LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz)
32{
33 lj_assertG_(G(sbufL(sb)), sz > sbufsz(sb), "SBuf overflow");
34 if (LJ_UNLIKELY(sz > LJ_MAX_BUF))
35 lj_err_mem(sbufL(sb));
36 buf_grow(sb, sz);
37 return sbufB(sb);
38}
39
40LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz)
41{
42 MSize len = sbuflen(sb);
43 lj_assertG_(G(sbufL(sb)), sz > sbufleft(sb), "SBuf overflow");
44 if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF))
45 lj_err_mem(sbufL(sb));
46 buf_grow(sb, len + sz);
47 return sbufP(sb);
48}
49
50void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb)
51{
52 char *b = sbufB(sb);
53 MSize osz = (MSize)(sbufE(sb) - b);
54 if (osz > 2*LJ_MIN_SBUF) {
55 MSize n = (MSize)(sbufP(sb) - b);
56 b = lj_mem_realloc(L, b, osz, (osz >> 1));
57 setmref(sb->b, b);
58 setmref(sb->p, b + n);
59 setmref(sb->e, b + (osz >> 1));
60 }
61}
62
63char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz)
64{
65 SBuf *sb = &G(L)->tmpbuf;
66 setsbufL(sb, L);
67 return lj_buf_need(sb, sz);
68}
69
70/* -- Low-level buffer put operations ------------------------------------- */
71
72SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len)
73{
74 char *p = lj_buf_more(sb, len);
75 p = lj_buf_wmem(p, q, len);
76 setsbufP(sb, p);
77 return sb;
78}
79
80SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c)
81{
82 char *p = lj_buf_more(sb, 1);
83 *p++ = (char)c;
84 setsbufP(sb, p);
85 return sb;
86}
87
88SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s)
89{
90 MSize len = s->len;
91 char *p = lj_buf_more(sb, len);
92 p = lj_buf_wmem(p, strdata(s), len);
93 setsbufP(sb, p);
94 return sb;
95}
96
97/* -- High-level buffer put operations ------------------------------------ */
98
99SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s)
100{
101 MSize len = s->len;
102 char *p = lj_buf_more(sb, len), *e = p+len;
103 const char *q = strdata(s)+len-1;
104 while (p < e)
105 *p++ = *q--;
106 setsbufP(sb, p);
107 return sb;
108}
109
110SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s)
111{
112 MSize len = s->len;
113 char *p = lj_buf_more(sb, len), *e = p+len;
114 const char *q = strdata(s);
115 for (; p < e; p++, q++) {
116 uint32_t c = *(unsigned char *)q;
117#if LJ_TARGET_PPC
118 *p = c + ((c >= 'A' && c <= 'Z') << 5);
119#else
120 if (c >= 'A' && c <= 'Z') c += 0x20;
121 *p = c;
122#endif
123 }
124 setsbufP(sb, p);
125 return sb;
126}
127
128SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s)
129{
130 MSize len = s->len;
131 char *p = lj_buf_more(sb, len), *e = p+len;
132 const char *q = strdata(s);
133 for (; p < e; p++, q++) {
134 uint32_t c = *(unsigned char *)q;
135#if LJ_TARGET_PPC
136 *p = c - ((c >= 'a' && c <= 'z') << 5);
137#else
138 if (c >= 'a' && c <= 'z') c -= 0x20;
139 *p = c;
140#endif
141 }
142 setsbufP(sb, p);
143 return sb;
144}
145
146SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep)
147{
148 MSize len = s->len;
149 if (rep > 0 && len) {
150 uint64_t tlen = (uint64_t)rep * len;
151 char *p;
152 if (LJ_UNLIKELY(tlen > LJ_MAX_STR))
153 lj_err_mem(sbufL(sb));
154 p = lj_buf_more(sb, (MSize)tlen);
155 if (len == 1) { /* Optimize a common case. */
156 uint32_t c = strdata(s)[0];
157 do { *p++ = c; } while (--rep > 0);
158 } else {
159 const char *e = strdata(s) + len;
160 do {
161 const char *q = strdata(s);
162 do { *p++ = *q++; } while (q < e);
163 } while (--rep > 0);
164 }
165 setsbufP(sb, p);
166 }
167 return sb;
168}
169
170SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, int32_t i, int32_t e)
171{
172 MSize seplen = sep ? sep->len : 0;
173 if (i <= e) {
174 for (;;) {
175 cTValue *o = lj_tab_getint(t, i);
176 char *p;
177 if (!o) {
178 badtype: /* Error: bad element type. */
179 setsbufP(sb, (void *)(intptr_t)i); /* Store failing index. */
180 return NULL;
181 } else if (tvisstr(o)) {
182 MSize len = strV(o)->len;
183 p = lj_buf_wmem(lj_buf_more(sb, len + seplen), strVdata(o), len);
184 } else if (tvisint(o)) {
185 p = lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT+seplen), intV(o));
186 } else if (tvisnum(o)) {
187 p = lj_buf_more(lj_strfmt_putfnum(sb, STRFMT_G14, numV(o)), seplen);
188 } else {
189 goto badtype;
190 }
191 if (i++ == e) {
192 setsbufP(sb, p);
193 break;
194 }
195 if (seplen) p = lj_buf_wmem(p, strdata(sep), seplen);
196 setsbufP(sb, p);
197 }
198 }
199 return sb;
200}
201
202/* -- Miscellaneous buffer operations ------------------------------------- */
203
204GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb)
205{
206 return lj_str_new(sbufL(sb), sbufB(sb), sbuflen(sb));
207}
208
209/* Concatenate two strings. */
210GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2)
211{
212 MSize len1 = s1->len, len2 = s2->len;
213 char *buf = lj_buf_tmp(L, len1 + len2);
214 memcpy(buf, strdata(s1), len1);
215 memcpy(buf+len1, strdata(s2), len2);
216 return lj_str_new(L, buf, len1 + len2);
217}
218
219/* Read ULEB128 from buffer. */
220uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp)
221{
222 const uint8_t *p = (const uint8_t *)*pp;
223 uint32_t v = *p++;
224 if (LJ_UNLIKELY(v >= 0x80)) {
225 int sh = 0;
226 v &= 0x7f;
227 do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
228 }
229 *pp = (const char *)p;
230 return v;
231}
232
diff --git a/src/lj_buf.h b/src/lj_buf.h
new file mode 100644
index 00000000..dab13bd2
--- /dev/null
+++ b/src/lj_buf.h
@@ -0,0 +1,103 @@
1/*
2** Buffer handling.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#ifndef _LJ_BUF_H
7#define _LJ_BUF_H
8
9#include "lj_obj.h"
10#include "lj_gc.h"
11#include "lj_str.h"
12
13/* Resizable string buffers. Struct definition in lj_obj.h. */
14#define sbufB(sb) (mref((sb)->b, char))
15#define sbufP(sb) (mref((sb)->p, char))
16#define sbufE(sb) (mref((sb)->e, char))
17#define sbufL(sb) (mref((sb)->L, lua_State))
18#define sbufsz(sb) ((MSize)(sbufE((sb)) - sbufB((sb))))
19#define sbuflen(sb) ((MSize)(sbufP((sb)) - sbufB((sb))))
20#define sbufleft(sb) ((MSize)(sbufE((sb)) - sbufP((sb))))
21#define setsbufP(sb, q) (setmref((sb)->p, (q)))
22#define setsbufL(sb, l) (setmref((sb)->L, (l)))
23
24/* Buffer management */
25LJ_FUNC char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz);
26LJ_FUNC char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz);
27LJ_FUNC void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb);
28LJ_FUNC char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz);
29
30static LJ_AINLINE void lj_buf_init(lua_State *L, SBuf *sb)
31{
32 setsbufL(sb, L);
33 setmref(sb->p, NULL); setmref(sb->e, NULL); setmref(sb->b, NULL);
34}
35
36static LJ_AINLINE void lj_buf_reset(SBuf *sb)
37{
38 setmrefr(sb->p, sb->b);
39}
40
41static LJ_AINLINE SBuf *lj_buf_tmp_(lua_State *L)
42{
43 SBuf *sb = &G(L)->tmpbuf;
44 setsbufL(sb, L);
45 lj_buf_reset(sb);
46 return sb;
47}
48
49static LJ_AINLINE void lj_buf_free(global_State *g, SBuf *sb)
50{
51 lj_mem_free(g, sbufB(sb), sbufsz(sb));
52}
53
54static LJ_AINLINE char *lj_buf_need(SBuf *sb, MSize sz)
55{
56 if (LJ_UNLIKELY(sz > sbufsz(sb)))
57 return lj_buf_need2(sb, sz);
58 return sbufB(sb);
59}
60
61static LJ_AINLINE char *lj_buf_more(SBuf *sb, MSize sz)
62{
63 if (LJ_UNLIKELY(sz > sbufleft(sb)))
64 return lj_buf_more2(sb, sz);
65 return sbufP(sb);
66}
67
68/* Low-level buffer put operations */
69LJ_FUNC SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len);
70LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c);
71LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s);
72
73static LJ_AINLINE char *lj_buf_wmem(char *p, const void *q, MSize len)
74{
75 return (char *)memcpy(p, q, len) + len;
76}
77
78static LJ_AINLINE void lj_buf_putb(SBuf *sb, int c)
79{
80 char *p = lj_buf_more(sb, 1);
81 *p++ = (char)c;
82 setsbufP(sb, p);
83}
84
85/* High-level buffer put operations */
86LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s);
87LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s);
88LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s);
89LJ_FUNC SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep);
90LJ_FUNC SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep,
91 int32_t i, int32_t e);
92
93/* Miscellaneous buffer operations */
94LJ_FUNCA GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb);
95LJ_FUNC GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2);
96LJ_FUNC uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp);
97
98static LJ_AINLINE GCstr *lj_buf_str(lua_State *L, SBuf *sb)
99{
100 return lj_str_new(L, sbufB(sb), sbuflen(sb));
101}
102
103#endif
diff --git a/src/lj_carith.c b/src/lj_carith.c
index 530d5ddb..65ad2c10 100644
--- a/src/lj_carith.c
+++ b/src/lj_carith.c
@@ -11,10 +11,12 @@
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_tab.h" 12#include "lj_tab.h"
13#include "lj_meta.h" 13#include "lj_meta.h"
14#include "lj_ir.h"
14#include "lj_ctype.h" 15#include "lj_ctype.h"
15#include "lj_cconv.h" 16#include "lj_cconv.h"
16#include "lj_cdata.h" 17#include "lj_cdata.h"
17#include "lj_carith.h" 18#include "lj_carith.h"
19#include "lj_strscan.h"
18 20
19/* -- C data arithmetic --------------------------------------------------- */ 21/* -- C data arithmetic --------------------------------------------------- */
20 22
@@ -120,7 +122,7 @@ static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
120 setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); 122 setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2));
121 return 1; 123 return 1;
122 } else { 124 } else {
123 lua_assert(mm == MM_le); 125 lj_assertL(mm == MM_le, "bad metamethod %d", mm);
124 setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); 126 setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2));
125 return 1; 127 return 1;
126 } 128 }
@@ -206,7 +208,9 @@ static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
206 *up = lj_carith_powu64(u0, u1); 208 *up = lj_carith_powu64(u0, u1);
207 break; 209 break;
208 case MM_unm: *up = (uint64_t)-(int64_t)u0; break; 210 case MM_unm: *up = (uint64_t)-(int64_t)u0; break;
209 default: lua_assert(0); break; 211 default:
212 lj_assertL(0, "bad metamethod %d", mm);
213 break;
210 } 214 }
211 lj_gc_check(L); 215 lj_gc_check(L);
212 return 1; 216 return 1;
@@ -272,6 +276,81 @@ int lj_carith_op(lua_State *L, MMS mm)
272 return lj_carith_meta(L, cts, &ca, mm); 276 return lj_carith_meta(L, cts, &ca, mm);
273} 277}
274 278
279/* -- 64 bit bit operations helpers --------------------------------------- */
280
281#if LJ_64
282#define B64DEF(name) \
283 static LJ_AINLINE uint64_t lj_carith_##name(uint64_t x, int32_t sh)
284#else
285/* Not inlined on 32 bit archs, since some of these are quite lengthy. */
286#define B64DEF(name) \
287 uint64_t LJ_NOINLINE lj_carith_##name(uint64_t x, int32_t sh)
288#endif
289
290B64DEF(shl64) { return x << (sh&63); }
291B64DEF(shr64) { return x >> (sh&63); }
292B64DEF(sar64) { return (uint64_t)((int64_t)x >> (sh&63)); }
293B64DEF(rol64) { return lj_rol(x, (sh&63)); }
294B64DEF(ror64) { return lj_ror(x, (sh&63)); }
295
296#undef B64DEF
297
298uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op)
299{
300 switch (op) {
301 case IR_BSHL-IR_BSHL: x = lj_carith_shl64(x, sh); break;
302 case IR_BSHR-IR_BSHL: x = lj_carith_shr64(x, sh); break;
303 case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break;
304 case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break;
305 case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break;
306 default:
307 lj_assertX(0, "bad shift op %d", op);
308 break;
309 }
310 return x;
311}
312
313/* Equivalent to lj_lib_checkbit(), but handles cdata. */
314uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id)
315{
316 TValue *o = L->base + narg-1;
317 if (o >= L->top) {
318 err:
319 lj_err_argt(L, narg, LUA_TNUMBER);
320 } else if (LJ_LIKELY(tvisnumber(o))) {
321 /* Handled below. */
322 } else if (tviscdata(o)) {
323 CTState *cts = ctype_cts(L);
324 uint8_t *sp = (uint8_t *)cdataptr(cdataV(o));
325 CTypeID sid = cdataV(o)->ctypeid;
326 CType *s = ctype_get(cts, sid);
327 uint64_t x;
328 if (ctype_isref(s->info)) {
329 sp = *(void **)sp;
330 sid = ctype_cid(s->info);
331 }
332 s = ctype_raw(cts, sid);
333 if (ctype_isenum(s->info)) s = ctype_child(cts, s);
334 if ((s->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) ==
335 CTINFO(CT_NUM, CTF_UNSIGNED) && s->size == 8)
336 *id = CTID_UINT64; /* Use uint64_t, since it has the highest rank. */
337 else if (!*id)
338 *id = CTID_INT64; /* Use int64_t, unless already set. */
339 lj_cconv_ct_ct(cts, ctype_get(cts, *id), s,
340 (uint8_t *)&x, sp, CCF_ARG(narg));
341 return x;
342 } else if (!(tvisstr(o) && lj_strscan_number(strV(o), o))) {
343 goto err;
344 }
345 if (LJ_LIKELY(tvisint(o))) {
346 return (uint32_t)intV(o);
347 } else {
348 int32_t i = lj_num2bit(numV(o));
349 if (LJ_DUALNUM) setintV(o, i);
350 return (uint32_t)i;
351 }
352}
353
275/* -- 64 bit integer arithmetic helpers ----------------------------------- */ 354/* -- 64 bit integer arithmetic helpers ----------------------------------- */
276 355
277#if LJ_32 && LJ_HASJIT 356#if LJ_32 && LJ_HASJIT
diff --git a/src/lj_carith.h b/src/lj_carith.h
index 207809df..788718d9 100644
--- a/src/lj_carith.h
+++ b/src/lj_carith.h
@@ -12,6 +12,16 @@
12 12
13LJ_FUNC int lj_carith_op(lua_State *L, MMS mm); 13LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
14 14
15#if LJ_32
16LJ_FUNC uint64_t lj_carith_shl64(uint64_t x, int32_t sh);
17LJ_FUNC uint64_t lj_carith_shr64(uint64_t x, int32_t sh);
18LJ_FUNC uint64_t lj_carith_sar64(uint64_t x, int32_t sh);
19LJ_FUNC uint64_t lj_carith_rol64(uint64_t x, int32_t sh);
20LJ_FUNC uint64_t lj_carith_ror64(uint64_t x, int32_t sh);
21#endif
22LJ_FUNC uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op);
23LJ_FUNC uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id);
24
15#if LJ_32 && LJ_HASJIT 25#if LJ_32 && LJ_HASJIT
16LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k); 26LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
17#endif 27#endif
diff --git a/src/lj_ccall.c b/src/lj_ccall.c
index fe1e0a3a..5ac1b4da 100644
--- a/src/lj_ccall.c
+++ b/src/lj_ccall.c
@@ -9,7 +9,6 @@
9 9
10#include "lj_gc.h" 10#include "lj_gc.h"
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_str.h"
13#include "lj_tab.h" 12#include "lj_tab.h"
14#include "lj_ctype.h" 13#include "lj_ctype.h"
15#include "lj_cconv.h" 14#include "lj_cconv.h"
@@ -291,56 +290,85 @@
291#define CCALL_HANDLE_RET \ 290#define CCALL_HANDLE_RET \
292 if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0]; 291 if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0];
293 292
294#elif LJ_TARGET_PPC 293#elif LJ_TARGET_ARM64
295/* -- PPC calling conventions --------------------------------------------- */ 294/* -- ARM64 calling conventions ------------------------------------------- */
296 295
297#define CCALL_HANDLE_STRUCTRET \ 296#define CCALL_HANDLE_STRUCTRET \
298 cc->retref = 1; /* Return all structs by reference. */ \ 297 cc->retref = !ccall_classify_struct(cts, ctr); \
299 cc->gpr[ngpr++] = (GPRArg)dp; 298 if (cc->retref) cc->retp = dp;
299
300#define CCALL_HANDLE_STRUCTRET2 \
301 unsigned int cl = ccall_classify_struct(cts, ctr); \
302 if ((cl & 4)) { /* Combine float HFA from separate registers. */ \
303 CTSize i = (cl >> 8) - 1; \
304 do { ((uint32_t *)dp)[i] = cc->fpr[i].lo; } while (i--); \
305 } else { \
306 if (cl > 1) sp = (uint8_t *)&cc->fpr[0]; \
307 memcpy(dp, sp, ctr->size); \
308 }
300 309
301#define CCALL_HANDLE_COMPLEXRET \ 310#define CCALL_HANDLE_COMPLEXRET \
302 /* Complex values are returned in 2 or 4 GPRs. */ \ 311 /* Complex values are returned in one or two FPRs. */ \
303 cc->retref = 0; 312 cc->retref = 0;
304 313
305#define CCALL_HANDLE_COMPLEXRET2 \ 314#define CCALL_HANDLE_COMPLEXRET2 \
306 memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */ 315 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
316 ((float *)dp)[0] = cc->fpr[0].f; \
317 ((float *)dp)[1] = cc->fpr[1].f; \
318 } else { /* Copy complex double from FPRs. */ \
319 ((double *)dp)[0] = cc->fpr[0].d; \
320 ((double *)dp)[1] = cc->fpr[1].d; \
321 }
307 322
308#define CCALL_HANDLE_STRUCTARG \ 323#define CCALL_HANDLE_STRUCTARG \
309 rp = cdataptr(lj_cdata_new(cts, did, sz)); \ 324 unsigned int cl = ccall_classify_struct(cts, d); \
310 sz = CTSIZE_PTR; /* Pass all structs by reference. */ 325 if (cl == 0) { /* Pass struct by reference. */ \
326 rp = cdataptr(lj_cdata_new(cts, did, sz)); \
327 sz = CTSIZE_PTR; \
328 } else if (cl > 1) { /* Pass struct in FPRs or on stack. */ \
329 isfp = (cl & 4) ? 2 : 1; \
330 } /* else: Pass struct in GPRs or on stack. */
311 331
312#define CCALL_HANDLE_COMPLEXARG \ 332#define CCALL_HANDLE_COMPLEXARG \
313 /* Pass complex by value in 2 or 4 GPRs. */ 333 /* Pass complex by value in separate (!) FPRs or on stack. */ \
334 isfp = sz == 2*sizeof(float) ? 2 : 1;
314 335
315#define CCALL_HANDLE_REGARG \ 336#define CCALL_HANDLE_REGARG \
316 if (isfp) { /* Try to pass argument in FPRs. */ \ 337 if (LJ_TARGET_IOS && isva) { \
317 if (nfpr + 1 <= CCALL_NARG_FPR) { \ 338 /* IOS: All variadic arguments are on the stack. */ \
339 } else if (isfp) { /* Try to pass argument in FPRs. */ \
340 int n2 = ctype_isvector(d->info) ? 1 : \
341 isfp == 1 ? n : (d->size >> (4-isfp)); \
342 if (nfpr + n2 <= CCALL_NARG_FPR) { \
318 dp = &cc->fpr[nfpr]; \ 343 dp = &cc->fpr[nfpr]; \
319 nfpr += 1; \ 344 nfpr += n2; \
320 d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
321 goto done; \ 345 goto done; \
346 } else { \
347 nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \
348 if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \
322 } \ 349 } \
323 } else { /* Try to pass argument in GPRs. */ \ 350 } else { /* Try to pass argument in GPRs. */ \
324 if (n > 1) { \ 351 if (!LJ_TARGET_IOS && (d->info & CTF_ALIGN) > CTALIGN_PTR) \
325 lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \ 352 ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
326 if (ctype_isinteger(d->info)) \
327 ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
328 else if (ngpr + n > maxgpr) \
329 ngpr = maxgpr; /* Prevent reordering. */ \
330 } \
331 if (ngpr + n <= maxgpr) { \ 353 if (ngpr + n <= maxgpr) { \
332 dp = &cc->gpr[ngpr]; \ 354 dp = &cc->gpr[ngpr]; \
333 ngpr += n; \ 355 ngpr += n; \
334 goto done; \ 356 goto done; \
357 } else { \
358 ngpr = maxgpr; /* Prevent reordering. */ \
359 if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \
335 } \ 360 } \
336 } 361 }
337 362
363#if LJ_BE
338#define CCALL_HANDLE_RET \ 364#define CCALL_HANDLE_RET \
339 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ 365 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
340 ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ 366 sp = (uint8_t *)&cc->fpr[0].f;
367#endif
341 368
342#elif LJ_TARGET_PPCSPE 369
343/* -- PPC/SPE calling conventions ----------------------------------------- */ 370#elif LJ_TARGET_PPC
371/* -- PPC calling conventions --------------------------------------------- */
344 372
345#define CCALL_HANDLE_STRUCTRET \ 373#define CCALL_HANDLE_STRUCTRET \
346 cc->retref = 1; /* Return all structs by reference. */ \ 374 cc->retref = 1; /* Return all structs by reference. */ \
@@ -360,12 +388,13 @@
360#define CCALL_HANDLE_COMPLEXARG \ 388#define CCALL_HANDLE_COMPLEXARG \
361 /* Pass complex by value in 2 or 4 GPRs. */ 389 /* Pass complex by value in 2 or 4 GPRs. */
362 390
363/* PPC/SPE has a softfp ABI. */ 391#define CCALL_HANDLE_GPR \
364#define CCALL_HANDLE_REGARG \ 392 /* Try to pass argument in GPRs. */ \
365 if (n > 1) { /* Doesn't fit in a single GPR? */ \ 393 if (n > 1) { \
366 lua_assert(n == 2 || n == 4); /* int64_t, double or complex (float). */ \ 394 /* int64_t or complex (float). */ \
367 if (n == 2) \ 395 lj_assertL(n == 2 || n == 4, "bad GPR size %d", n); \
368 ngpr = (ngpr + 1u) & ~1u; /* Only align 64 bit value to regpair. */ \ 396 if (ctype_isinteger(d->info) || ctype_isfp(d->info)) \
397 ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
369 else if (ngpr + n > maxgpr) \ 398 else if (ngpr + n > maxgpr) \
370 ngpr = maxgpr; /* Prevent reordering. */ \ 399 ngpr = maxgpr; /* Prevent reordering. */ \
371 } \ 400 } \
@@ -373,10 +402,32 @@
373 dp = &cc->gpr[ngpr]; \ 402 dp = &cc->gpr[ngpr]; \
374 ngpr += n; \ 403 ngpr += n; \
375 goto done; \ 404 goto done; \
405 } \
406
407#if LJ_ABI_SOFTFP
408#define CCALL_HANDLE_REGARG CCALL_HANDLE_GPR
409#else
410#define CCALL_HANDLE_REGARG \
411 if (isfp) { /* Try to pass argument in FPRs. */ \
412 if (nfpr + 1 <= CCALL_NARG_FPR) { \
413 dp = &cc->fpr[nfpr]; \
414 nfpr += 1; \
415 d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
416 goto done; \
417 } \
418 } else { \
419 CCALL_HANDLE_GPR \
376 } 420 }
421#endif
377 422
378#elif LJ_TARGET_MIPS 423#if !LJ_ABI_SOFTFP
379/* -- MIPS calling conventions -------------------------------------------- */ 424#define CCALL_HANDLE_RET \
425 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
426 ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */
427#endif
428
429#elif LJ_TARGET_MIPS32
430/* -- MIPS o32 calling conventions ---------------------------------------- */
380 431
381#define CCALL_HANDLE_STRUCTRET \ 432#define CCALL_HANDLE_STRUCTRET \
382 cc->retref = 1; /* Return all structs by reference. */ \ 433 cc->retref = 1; /* Return all structs by reference. */ \
@@ -386,6 +437,18 @@
386 /* Complex values are returned in 1 or 2 FPRs. */ \ 437 /* Complex values are returned in 1 or 2 FPRs. */ \
387 cc->retref = 0; 438 cc->retref = 0;
388 439
440#if LJ_ABI_SOFTFP
441#define CCALL_HANDLE_COMPLEXRET2 \
442 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from GPRs. */ \
443 ((intptr_t *)dp)[0] = cc->gpr[0]; \
444 ((intptr_t *)dp)[1] = cc->gpr[1]; \
445 } else { /* Copy complex double from GPRs. */ \
446 ((intptr_t *)dp)[0] = cc->gpr[0]; \
447 ((intptr_t *)dp)[1] = cc->gpr[1]; \
448 ((intptr_t *)dp)[2] = cc->gpr[2]; \
449 ((intptr_t *)dp)[3] = cc->gpr[3]; \
450 }
451#else
389#define CCALL_HANDLE_COMPLEXRET2 \ 452#define CCALL_HANDLE_COMPLEXRET2 \
390 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \ 453 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
391 ((float *)dp)[0] = cc->fpr[0].f; \ 454 ((float *)dp)[0] = cc->fpr[0].f; \
@@ -394,6 +457,7 @@
394 ((double *)dp)[0] = cc->fpr[0].d; \ 457 ((double *)dp)[0] = cc->fpr[0].d; \
395 ((double *)dp)[1] = cc->fpr[1].d; \ 458 ((double *)dp)[1] = cc->fpr[1].d; \
396 } 459 }
460#endif
397 461
398#define CCALL_HANDLE_STRUCTARG \ 462#define CCALL_HANDLE_STRUCTARG \
399 /* Pass all structs by value in registers and/or on the stack. */ 463 /* Pass all structs by value in registers and/or on the stack. */
@@ -401,6 +465,22 @@
401#define CCALL_HANDLE_COMPLEXARG \ 465#define CCALL_HANDLE_COMPLEXARG \
402 /* Pass complex by value in 2 or 4 GPRs. */ 466 /* Pass complex by value in 2 or 4 GPRs. */
403 467
468#define CCALL_HANDLE_GPR \
469 if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
470 ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
471 if (ngpr < maxgpr) { \
472 dp = &cc->gpr[ngpr]; \
473 if (ngpr + n > maxgpr) { \
474 nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
475 if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
476 ngpr = maxgpr; \
477 } else { \
478 ngpr += n; \
479 } \
480 goto done; \
481 }
482
483#if !LJ_ABI_SOFTFP /* MIPS32 hard-float */
404#define CCALL_HANDLE_REGARG \ 484#define CCALL_HANDLE_REGARG \
405 if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \ 485 if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
406 /* Try to pass argument in FPRs. */ \ 486 /* Try to pass argument in FPRs. */ \
@@ -409,25 +489,91 @@
409 goto done; \ 489 goto done; \
410 } else { /* Try to pass argument in GPRs. */ \ 490 } else { /* Try to pass argument in GPRs. */ \
411 nfpr = CCALL_NARG_FPR; \ 491 nfpr = CCALL_NARG_FPR; \
412 if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \ 492 CCALL_HANDLE_GPR \
413 ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ 493 }
414 if (ngpr < maxgpr) { \ 494#else /* MIPS32 soft-float */
415 dp = &cc->gpr[ngpr]; \ 495#define CCALL_HANDLE_REGARG CCALL_HANDLE_GPR
416 if (ngpr + n > maxgpr) { \ 496#endif
417 nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \ 497
418 if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \ 498#if !LJ_ABI_SOFTFP
419 ngpr = maxgpr; \ 499/* On MIPS64 soft-float, position of float return values is endian-dependant. */
420 } else { \ 500#define CCALL_HANDLE_RET \
421 ngpr += n; \ 501 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
422 } \ 502 sp = (uint8_t *)&cc->fpr[0].f;
423 goto done; \ 503#endif
424 } \ 504
505#elif LJ_TARGET_MIPS64
506/* -- MIPS n64 calling conventions ---------------------------------------- */
507
508#define CCALL_HANDLE_STRUCTRET \
509 cc->retref = !(sz <= 16); \
510 if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
511
512#define CCALL_HANDLE_STRUCTRET2 \
513 ccall_copy_struct(cc, ctr, dp, sp, ccall_classify_struct(cts, ctr, ct));
514
515#define CCALL_HANDLE_COMPLEXRET \
516 /* Complex values are returned in 1 or 2 FPRs. */ \
517 cc->retref = 0;
518
519#if LJ_ABI_SOFTFP /* MIPS64 soft-float */
520
521#define CCALL_HANDLE_COMPLEXRET2 \
522 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from GPRs. */ \
523 ((intptr_t *)dp)[0] = cc->gpr[0]; \
524 } else { /* Copy complex double from GPRs. */ \
525 ((intptr_t *)dp)[0] = cc->gpr[0]; \
526 ((intptr_t *)dp)[1] = cc->gpr[1]; \
527 }
528
529#define CCALL_HANDLE_COMPLEXARG \
530 /* Pass complex by value in 2 or 4 GPRs. */
531
532/* Position of soft-float 'float' return value depends on endianess. */
533#define CCALL_HANDLE_RET \
534 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
535 sp = (uint8_t *)cc->gpr + LJ_ENDIAN_SELECT(0, 4);
536
537#else /* MIPS64 hard-float */
538
539#define CCALL_HANDLE_COMPLEXRET2 \
540 if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
541 ((float *)dp)[0] = cc->fpr[0].f; \
542 ((float *)dp)[1] = cc->fpr[1].f; \
543 } else { /* Copy complex double from FPRs. */ \
544 ((double *)dp)[0] = cc->fpr[0].d; \
545 ((double *)dp)[1] = cc->fpr[1].d; \
546 }
547
548#define CCALL_HANDLE_COMPLEXARG \
549 if (sz == 2*sizeof(float)) { \
550 isfp = 2; \
551 if (ngpr < maxgpr) \
552 sz *= 2; \
425 } 553 }
426 554
427#define CCALL_HANDLE_RET \ 555#define CCALL_HANDLE_RET \
428 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ 556 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
429 sp = (uint8_t *)&cc->fpr[0].f; 557 sp = (uint8_t *)&cc->fpr[0].f;
430 558
559#endif
560
561#define CCALL_HANDLE_STRUCTARG \
562 /* Pass all structs by value in registers and/or on the stack. */
563
564#define CCALL_HANDLE_REGARG \
565 if (ngpr < maxgpr) { \
566 dp = &cc->gpr[ngpr]; \
567 if (ngpr + n > maxgpr) { \
568 nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
569 if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
570 ngpr = maxgpr; \
571 } else { \
572 ngpr += n; \
573 } \
574 goto done; \
575 }
576
431#else 577#else
432#error "Missing calling convention definitions for this architecture" 578#error "Missing calling convention definitions for this architecture"
433#endif 579#endif
@@ -497,7 +643,8 @@ static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
497 ccall_classify_struct(cts, ct, rcl, ofs); 643 ccall_classify_struct(cts, ct, rcl, ofs);
498 } else { 644 } else {
499 int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; 645 int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
500 lua_assert(ctype_hassize(ct->info)); 646 lj_assertCTS(ctype_hassize(ct->info),
647 "classify ctype %08x without size", ct->info);
501 if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ 648 if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */
502 rcl[(ofs >= 8)] |= cl; 649 rcl[(ofs >= 8)] |= cl;
503 } 650 }
@@ -522,12 +669,13 @@ static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
522} 669}
523 670
524/* Try to split up a small struct into registers. */ 671/* Try to split up a small struct into registers. */
525static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl) 672static int ccall_struct_reg(CCallState *cc, CTState *cts, GPRArg *dp, int *rcl)
526{ 673{
527 MSize ngpr = cc->ngpr, nfpr = cc->nfpr; 674 MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
528 uint32_t i; 675 uint32_t i;
676 UNUSED(cts);
529 for (i = 0; i < 2; i++) { 677 for (i = 0; i < 2; i++) {
530 lua_assert(!(rcl[i] & CCALL_RCL_MEM)); 678 lj_assertCTS(!(rcl[i] & CCALL_RCL_MEM), "pass mem struct in reg");
531 if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ 679 if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
532 if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */ 680 if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */
533 cc->gpr[ngpr++] = dp[i]; 681 cc->gpr[ngpr++] = dp[i];
@@ -548,7 +696,8 @@ static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
548 dp[0] = dp[1] = 0; 696 dp[0] = dp[1] = 0;
549 /* Convert to temp. struct. */ 697 /* Convert to temp. struct. */
550 lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); 698 lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
551 if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */ 699 if (ccall_struct_reg(cc, cts, dp, rcl)) {
700 /* Register overflow? Pass on stack. */
552 MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; 701 MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
553 if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ 702 if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */
554 cc->nsp = nsp + n; 703 cc->nsp = nsp + n;
@@ -621,6 +770,125 @@ noth: /* Not a homogeneous float/double aggregate. */
621 770
622#endif 771#endif
623 772
773/* -- ARM64 ABI struct classification ------------------------------------- */
774
775#if LJ_TARGET_ARM64
776
777/* Classify a struct based on its fields. */
778static unsigned int ccall_classify_struct(CTState *cts, CType *ct)
779{
780 CTSize sz = ct->size;
781 unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
782 while (ct->sib) {
783 CType *sct;
784 ct = ctype_get(cts, ct->sib);
785 if (ctype_isfield(ct->info)) {
786 sct = ctype_rawchild(cts, ct);
787 if (ctype_isfp(sct->info)) {
788 r |= sct->size;
789 if (!isu) n++; else if (n == 0) n = 1;
790 } else if (ctype_iscomplex(sct->info)) {
791 r |= (sct->size >> 1);
792 if (!isu) n += 2; else if (n < 2) n = 2;
793 } else if (ctype_isstruct(sct->info)) {
794 goto substruct;
795 } else {
796 goto noth;
797 }
798 } else if (ctype_isbitfield(ct->info)) {
799 goto noth;
800 } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
801 sct = ctype_rawchild(cts, ct);
802 substruct:
803 if (sct->size > 0) {
804 unsigned int s = ccall_classify_struct(cts, sct);
805 if (s <= 1) goto noth;
806 r |= (s & 255);
807 if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8);
808 }
809 }
810 }
811 if ((r == 4 || r == 8) && n <= 4)
812 return r + (n << 8);
813noth: /* Not a homogeneous float/double aggregate. */
814 return (sz <= 16); /* Return structs of size <= 16 in GPRs. */
815}
816
817#endif
818
819/* -- MIPS64 ABI struct classification ---------------------------- */
820
821#if LJ_TARGET_MIPS64
822
823#define FTYPE_FLOAT 1
824#define FTYPE_DOUBLE 2
825
826/* Classify FP fields (max. 2) and their types. */
827static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
828{
829 int n = 0, ft = 0;
830 if ((ctf->info & CTF_VARARG) || (ct->info & CTF_UNION))
831 goto noth;
832 while (ct->sib) {
833 CType *sct;
834 ct = ctype_get(cts, ct->sib);
835 if (n == 2) {
836 goto noth;
837 } else if (ctype_isfield(ct->info)) {
838 sct = ctype_rawchild(cts, ct);
839 if (ctype_isfp(sct->info)) {
840 ft |= (sct->size == 4 ? FTYPE_FLOAT : FTYPE_DOUBLE) << 2*n;
841 n++;
842 } else {
843 goto noth;
844 }
845 } else if (ctype_isbitfield(ct->info) ||
846 ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
847 goto noth;
848 }
849 }
850 if (n <= 2)
851 return ft;
852noth: /* Not a homogeneous float/double aggregate. */
853 return 0; /* Struct is in GPRs. */
854}
855
856static void ccall_copy_struct(CCallState *cc, CType *ctr, void *dp, void *sp,
857 int ft)
858{
859 if (LJ_ABI_SOFTFP ? ft :
860 ((ft & 3) == FTYPE_FLOAT || (ft >> 2) == FTYPE_FLOAT)) {
861 int i, ofs = 0;
862 for (i = 0; ft != 0; i++, ft >>= 2) {
863 if ((ft & 3) == FTYPE_FLOAT) {
864#if LJ_ABI_SOFTFP
865 /* The 2nd FP struct result is in CARG1 (gpr[2]) and not CRET2. */
866 memcpy((uint8_t *)dp + ofs,
867 (uint8_t *)&cc->gpr[2*i] + LJ_ENDIAN_SELECT(0, 4), 4);
868#else
869 *(float *)((uint8_t *)dp + ofs) = cc->fpr[i].f;
870#endif
871 ofs += 4;
872 } else {
873 ofs = (ofs + 7) & ~7; /* 64 bit alignment. */
874#if LJ_ABI_SOFTFP
875 *(intptr_t *)((uint8_t *)dp + ofs) = cc->gpr[2*i];
876#else
877 *(double *)((uint8_t *)dp + ofs) = cc->fpr[i].d;
878#endif
879 ofs += 8;
880 }
881 }
882 } else {
883#if !LJ_ABI_SOFTFP
884 if (ft) sp = (uint8_t *)&cc->fpr[0];
885#endif
886 memcpy(dp, sp, ctr->size);
887 }
888}
889
890#endif
891
624/* -- Common C call handling ---------------------------------------------- */ 892/* -- Common C call handling ---------------------------------------------- */
625 893
626/* Infer the destination CTypeID for a vararg argument. */ 894/* Infer the destination CTypeID for a vararg argument. */
@@ -726,7 +994,7 @@ static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
726 if (fid) { /* Get argument type from field. */ 994 if (fid) { /* Get argument type from field. */
727 CType *ctf = ctype_get(cts, fid); 995 CType *ctf = ctype_get(cts, fid);
728 fid = ctf->sib; 996 fid = ctf->sib;
729 lua_assert(ctype_isfield(ctf->info)); 997 lj_assertL(ctype_isfield(ctf->info), "field expected");
730 did = ctype_cid(ctf->info); 998 did = ctype_cid(ctf->info);
731 } else { 999 } else {
732 if (!(ct->info & CTF_VARARG)) 1000 if (!(ct->info & CTF_VARARG))
@@ -788,6 +1056,19 @@ static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
788 *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp : 1056 *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
789 (int32_t)*(int16_t *)dp; 1057 (int32_t)*(int16_t *)dp;
790 } 1058 }
1059#if LJ_TARGET_ARM64 && LJ_BE
1060 if (isfp && d->size == sizeof(float))
1061 ((float *)dp)[1] = ((float *)dp)[0]; /* Floats occupy high slot. */
1062#endif
1063#if LJ_TARGET_MIPS64 || (LJ_TARGET_ARM64 && LJ_BE)
1064 if ((ctype_isinteger_or_bool(d->info) || ctype_isenum(d->info)
1065#if LJ_TARGET_MIPS64
1066 || (isfp && nsp == 0)
1067#endif
1068 ) && d->size <= 4) {
1069 *(int64_t *)dp = (int64_t)*(int32_t *)dp; /* Sign-extend to 64 bit. */
1070 }
1071#endif
791#if LJ_TARGET_X64 && LJ_ABI_WIN 1072#if LJ_TARGET_X64 && LJ_ABI_WIN
792 if (isva) { /* Windows/x64 mirrors varargs in both register sets. */ 1073 if (isva) { /* Windows/x64 mirrors varargs in both register sets. */
793 if (nfpr == ngpr) 1074 if (nfpr == ngpr)
@@ -803,13 +1084,19 @@ static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
803 cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */ 1084 cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */
804 cc->fpr[nfpr-2].d[1] = 0; 1085 cc->fpr[nfpr-2].d[1] = 0;
805 } 1086 }
1087#elif LJ_TARGET_ARM64 || (LJ_TARGET_MIPS64 && !LJ_ABI_SOFTFP)
1088 if (isfp == 2 && (uint8_t *)dp < (uint8_t *)cc->stack) {
1089 /* Split float HFA or complex float into separate registers. */
1090 CTSize i = (sz >> 2) - 1;
1091 do { ((uint64_t *)dp)[i] = ((uint32_t *)dp)[i]; } while (i--);
1092 }
806#else 1093#else
807 UNUSED(isfp); 1094 UNUSED(isfp);
808#endif 1095#endif
809 } 1096 }
810 if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */ 1097 if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */
811 1098
812#if LJ_TARGET_X64 || LJ_TARGET_PPC 1099#if LJ_TARGET_X64 || (LJ_TARGET_PPC && !LJ_ABI_SOFTFP)
813 cc->nfpr = nfpr; /* Required for vararg functions. */ 1100 cc->nfpr = nfpr; /* Required for vararg functions. */
814#endif 1101#endif
815 cc->nsp = nsp; 1102 cc->nsp = nsp;
@@ -844,7 +1131,8 @@ static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
844 CCALL_HANDLE_COMPLEXRET2 1131 CCALL_HANDLE_COMPLEXRET2
845 return 1; /* One GC step. */ 1132 return 1; /* One GC step. */
846 } 1133 }
847 if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR) 1134 if (LJ_BE && ctr->size < CTSIZE_PTR &&
1135 (ctype_isinteger_or_bool(ctr->info) || ctype_isenum(ctr->info)))
848 sp += (CTSIZE_PTR - ctr->size); 1136 sp += (CTSIZE_PTR - ctr->size);
849#if CCALL_NUM_FPR 1137#if CCALL_NUM_FPR
850 if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info)) 1138 if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
@@ -854,7 +1142,8 @@ static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
854 CCALL_HANDLE_RET 1142 CCALL_HANDLE_RET
855#endif 1143#endif
856 /* No reference types end up here, so there's no need for the CTypeID. */ 1144 /* No reference types end up here, so there's no need for the CTypeID. */
857 lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info))); 1145 lj_assertL(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)),
1146 "unexpected reference ctype");
858 return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp); 1147 return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
859} 1148}
860 1149
diff --git a/src/lj_ccall.h b/src/lj_ccall.h
index 5f6d5101..8b3abdf9 100644
--- a/src/lj_ccall.h
+++ b/src/lj_ccall.h
@@ -68,35 +68,56 @@ typedef union FPRArg {
68 float f[2]; 68 float f[2];
69} FPRArg; 69} FPRArg;
70 70
71#elif LJ_TARGET_PPC 71#elif LJ_TARGET_ARM64
72 72
73#define CCALL_NARG_GPR 8 73#define CCALL_NARG_GPR 8
74#define CCALL_NRET_GPR 2
74#define CCALL_NARG_FPR 8 75#define CCALL_NARG_FPR 8
76#define CCALL_NRET_FPR 4
77#define CCALL_SPS_FREE 0
78
79typedef intptr_t GPRArg;
80typedef union FPRArg {
81 double d;
82 struct { LJ_ENDIAN_LOHI(float f; , float g;) };
83 struct { LJ_ENDIAN_LOHI(uint32_t lo; , uint32_t hi;) };
84} FPRArg;
85
86#elif LJ_TARGET_PPC
87
88#define CCALL_NARG_GPR 8
89#define CCALL_NARG_FPR (LJ_ABI_SOFTFP ? 0 : 8)
75#define CCALL_NRET_GPR 4 /* For complex double. */ 90#define CCALL_NRET_GPR 4 /* For complex double. */
76#define CCALL_NRET_FPR 1 91#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 1)
77#define CCALL_SPS_EXTRA 4 92#define CCALL_SPS_EXTRA 4
78#define CCALL_SPS_FREE 0 93#define CCALL_SPS_FREE 0
79 94
80typedef intptr_t GPRArg; 95typedef intptr_t GPRArg;
81typedef double FPRArg; 96typedef double FPRArg;
82 97
83#elif LJ_TARGET_PPCSPE 98#elif LJ_TARGET_MIPS32
84 99
85#define CCALL_NARG_GPR 8 100#define CCALL_NARG_GPR 4
86#define CCALL_NARG_FPR 0 101#define CCALL_NARG_FPR (LJ_ABI_SOFTFP ? 0 : 2)
87#define CCALL_NRET_GPR 4 /* For softfp complex double. */ 102#define CCALL_NRET_GPR (LJ_ABI_SOFTFP ? 4 : 2)
88#define CCALL_NRET_FPR 0 103#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 2)
89#define CCALL_SPS_FREE 0 /* NYI */ 104#define CCALL_SPS_EXTRA 7
105#define CCALL_SPS_FREE 1
90 106
91typedef intptr_t GPRArg; 107typedef intptr_t GPRArg;
108typedef union FPRArg {
109 double d;
110 struct { LJ_ENDIAN_LOHI(float f; , float g;) };
111} FPRArg;
92 112
93#elif LJ_TARGET_MIPS 113#elif LJ_TARGET_MIPS64
94 114
95#define CCALL_NARG_GPR 4 115/* FP args are positional and overlay the GPR array. */
96#define CCALL_NARG_FPR 2 116#define CCALL_NARG_GPR 8
117#define CCALL_NARG_FPR 0
97#define CCALL_NRET_GPR 2 118#define CCALL_NRET_GPR 2
98#define CCALL_NRET_FPR 2 119#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 2)
99#define CCALL_SPS_EXTRA 7 120#define CCALL_SPS_EXTRA 3
100#define CCALL_SPS_FREE 1 121#define CCALL_SPS_FREE 1
101 122
102typedef intptr_t GPRArg; 123typedef intptr_t GPRArg;
@@ -145,6 +166,8 @@ typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState {
145 uint8_t nfpr; /* Number of arguments in FPRs. */ 166 uint8_t nfpr; /* Number of arguments in FPRs. */
146#elif LJ_TARGET_X86 167#elif LJ_TARGET_X86
147 uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */ 168 uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
169#elif LJ_TARGET_ARM64
170 void *retp; /* Aggregate return pointer in x8. */
148#elif LJ_TARGET_PPC 171#elif LJ_TARGET_PPC
149 uint8_t nfpr; /* Number of arguments in FPRs. */ 172 uint8_t nfpr; /* Number of arguments in FPRs. */
150#endif 173#endif
diff --git a/src/lj_ccallback.c b/src/lj_ccallback.c
index 26377f82..49775d2b 100644
--- a/src/lj_ccallback.c
+++ b/src/lj_ccallback.c
@@ -27,7 +27,7 @@
27 27
28#if LJ_OS_NOJIT 28#if LJ_OS_NOJIT
29 29
30/* Disabled callback support. */ 30/* Callbacks disabled. */
31#define CALLBACK_SLOT2OFS(slot) (0*(slot)) 31#define CALLBACK_SLOT2OFS(slot) (0*(slot))
32#define CALLBACK_OFS2SLOT(ofs) (0*(ofs)) 32#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
33#define CALLBACK_MAX_SLOT 0 33#define CALLBACK_MAX_SLOT 0
@@ -35,7 +35,7 @@
35#elif LJ_TARGET_X86ORX64 35#elif LJ_TARGET_X86ORX64
36 36
37#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0) 37#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0)
38#define CALLBACK_MCODE_GROUP (-2+1+2+5+(LJ_64 ? 6 : 5)) 38#define CALLBACK_MCODE_GROUP (-2+1+2+(LJ_GC64 ? 10 : 5)+(LJ_64 ? 6 : 5))
39 39
40#define CALLBACK_SLOT2OFS(slot) \ 40#define CALLBACK_SLOT2OFS(slot) \
41 (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot)) 41 (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot))
@@ -54,23 +54,22 @@ static MSize CALLBACK_OFS2SLOT(MSize ofs)
54#elif LJ_TARGET_ARM 54#elif LJ_TARGET_ARM
55 55
56#define CALLBACK_MCODE_HEAD 32 56#define CALLBACK_MCODE_HEAD 32
57#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot)) 57
58#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8) 58#elif LJ_TARGET_ARM64
59#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE)) 59
60#define CALLBACK_MCODE_HEAD 32
60 61
61#elif LJ_TARGET_PPC 62#elif LJ_TARGET_PPC
62 63
63#define CALLBACK_MCODE_HEAD 24 64#define CALLBACK_MCODE_HEAD 24
64#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
65#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
66#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
67 65
68#elif LJ_TARGET_MIPS 66#elif LJ_TARGET_MIPS32
69 67
70#define CALLBACK_MCODE_HEAD 24 68#define CALLBACK_MCODE_HEAD 20
71#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot)) 69
72#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8) 70#elif LJ_TARGET_MIPS64
73#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE)) 71
72#define CALLBACK_MCODE_HEAD 52
74 73
75#else 74#else
76 75
@@ -81,6 +80,12 @@ static MSize CALLBACK_OFS2SLOT(MSize ofs)
81 80
82#endif 81#endif
83 82
83#ifndef CALLBACK_SLOT2OFS
84#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
85#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
86#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
87#endif
88
84/* Convert callback slot number to callback function pointer. */ 89/* Convert callback slot number to callback function pointer. */
85static void *callback_slot2ptr(CTState *cts, MSize slot) 90static void *callback_slot2ptr(CTState *cts, MSize slot)
86{ 91{
@@ -102,9 +107,9 @@ MSize lj_ccallback_ptr2slot(CTState *cts, void *p)
102/* Initialize machine code for callback function pointers. */ 107/* Initialize machine code for callback function pointers. */
103#if LJ_OS_NOJIT 108#if LJ_OS_NOJIT
104/* Disabled callback support. */ 109/* Disabled callback support. */
105#define callback_mcode_init(g, p) UNUSED(p) 110#define callback_mcode_init(g, p) (p)
106#elif LJ_TARGET_X86ORX64 111#elif LJ_TARGET_X86ORX64
107static void callback_mcode_init(global_State *g, uint8_t *page) 112static void *callback_mcode_init(global_State *g, uint8_t *page)
108{ 113{
109 uint8_t *p = page; 114 uint8_t *p = page;
110 uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback; 115 uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback;
@@ -119,8 +124,13 @@ static void callback_mcode_init(global_State *g, uint8_t *page)
119 /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */ 124 /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */
120 *p++ = XI_PUSH + RID_EBP; 125 *p++ = XI_PUSH + RID_EBP;
121 *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8); 126 *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8);
127#if LJ_GC64
128 *p++ = 0x48; *p++ = XI_MOVri | RID_EBP;
129 *(uint64_t *)p = (uint64_t)(g); p += 8;
130#else
122 *p++ = XI_MOVri | RID_EBP; 131 *p++ = XI_MOVri | RID_EBP;
123 *(int32_t *)p = i32ptr(g); p += 4; 132 *(int32_t *)p = i32ptr(g); p += 4;
133#endif
124#if LJ_64 134#if LJ_64
125 /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */ 135 /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */
126 *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP; 136 *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP;
@@ -133,10 +143,10 @@ static void callback_mcode_init(global_State *g, uint8_t *page)
133 *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2); 143 *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2);
134 } 144 }
135 } 145 }
136 lua_assert(p - page <= CALLBACK_MCODE_SIZE); 146 return p;
137} 147}
138#elif LJ_TARGET_ARM 148#elif LJ_TARGET_ARM
139static void callback_mcode_init(global_State *g, uint32_t *page) 149static void *callback_mcode_init(global_State *g, uint32_t *page)
140{ 150{
141 uint32_t *p = page; 151 uint32_t *p = page;
142 void *target = (void *)lj_vm_ffi_callback; 152 void *target = (void *)lj_vm_ffi_callback;
@@ -155,10 +165,30 @@ static void callback_mcode_init(global_State *g, uint32_t *page)
155 *p = ARMI_B | ((page-p-2) & 0x00ffffffu); 165 *p = ARMI_B | ((page-p-2) & 0x00ffffffu);
156 p++; 166 p++;
157 } 167 }
158 lua_assert(p - page <= CALLBACK_MCODE_SIZE); 168 return p;
169}
170#elif LJ_TARGET_ARM64
171static void *callback_mcode_init(global_State *g, uint32_t *page)
172{
173 uint32_t *p = page;
174 void *target = (void *)lj_vm_ffi_callback;
175 MSize slot;
176 *p++ = A64I_LE(A64I_LDRLx | A64F_D(RID_X11) | A64F_S19(4));
177 *p++ = A64I_LE(A64I_LDRLx | A64F_D(RID_X10) | A64F_S19(5));
178 *p++ = A64I_LE(A64I_BR | A64F_N(RID_X11));
179 *p++ = A64I_LE(A64I_NOP);
180 ((void **)p)[0] = target;
181 ((void **)p)[1] = g;
182 p += 4;
183 for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
184 *p++ = A64I_LE(A64I_MOVZw | A64F_D(RID_X9) | A64F_U16(slot));
185 *p = A64I_LE(A64I_B | A64F_S26((page-p) & 0x03ffffffu));
186 p++;
187 }
188 return p;
159} 189}
160#elif LJ_TARGET_PPC 190#elif LJ_TARGET_PPC
161static void callback_mcode_init(global_State *g, uint32_t *page) 191static void *callback_mcode_init(global_State *g, uint32_t *page)
162{ 192{
163 uint32_t *p = page; 193 uint32_t *p = page;
164 void *target = (void *)lj_vm_ffi_callback; 194 void *target = (void *)lj_vm_ffi_callback;
@@ -174,30 +204,43 @@ static void callback_mcode_init(global_State *g, uint32_t *page)
174 *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2); 204 *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2);
175 p++; 205 p++;
176 } 206 }
177 lua_assert(p - page <= CALLBACK_MCODE_SIZE); 207 return p;
178} 208}
179#elif LJ_TARGET_MIPS 209#elif LJ_TARGET_MIPS
180static void callback_mcode_init(global_State *g, uint32_t *page) 210static void *callback_mcode_init(global_State *g, uint32_t *page)
181{ 211{
182 uint32_t *p = page; 212 uint32_t *p = page;
183 void *target = (void *)lj_vm_ffi_callback; 213 uintptr_t target = (uintptr_t)(void *)lj_vm_ffi_callback;
214 uintptr_t ug = (uintptr_t)(void *)g;
184 MSize slot; 215 MSize slot;
185 *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0; 216#if LJ_TARGET_MIPS32
186 *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16); 217 *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (target >> 16);
187 *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16); 218 *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (ug >> 16);
188 *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff); 219#else
220 *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (target >> 48);
221 *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (ug >> 48);
222 *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | ((target >> 32) & 0xffff);
223 *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | ((ug >> 32) & 0xffff);
224 *p++ = MIPSI_DSLL | MIPSF_D(RID_R3)|MIPSF_T(RID_R3) | MIPSF_A(16);
225 *p++ = MIPSI_DSLL | MIPSF_D(RID_R2)|MIPSF_T(RID_R2) | MIPSF_A(16);
226 *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | ((target >> 16) & 0xffff);
227 *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | ((ug >> 16) & 0xffff);
228 *p++ = MIPSI_DSLL | MIPSF_D(RID_R3)|MIPSF_T(RID_R3) | MIPSF_A(16);
229 *p++ = MIPSI_DSLL | MIPSF_D(RID_R2)|MIPSF_T(RID_R2) | MIPSF_A(16);
230#endif
231 *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | (target & 0xffff);
189 *p++ = MIPSI_JR | MIPSF_S(RID_R3); 232 *p++ = MIPSI_JR | MIPSF_S(RID_R3);
190 *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff); 233 *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (ug & 0xffff);
191 for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) { 234 for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
192 *p = MIPSI_B | ((page-p-1) & 0x0000ffffu); 235 *p = MIPSI_B | ((page-p-1) & 0x0000ffffu);
193 p++; 236 p++;
194 *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot; 237 *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot;
195 } 238 }
196 lua_assert(p - page <= CALLBACK_MCODE_SIZE); 239 return p;
197} 240}
198#else 241#else
199/* Missing support for this architecture. */ 242/* Missing support for this architecture. */
200#define callback_mcode_init(g, p) UNUSED(p) 243#define callback_mcode_init(g, p) (p)
201#endif 244#endif
202 245
203/* -- Machine code management --------------------------------------------- */ 246/* -- Machine code management --------------------------------------------- */
@@ -220,11 +263,11 @@ static void callback_mcode_init(global_State *g, uint32_t *page)
220static void callback_mcode_new(CTState *cts) 263static void callback_mcode_new(CTState *cts)
221{ 264{
222 size_t sz = (size_t)CALLBACK_MCODE_SIZE; 265 size_t sz = (size_t)CALLBACK_MCODE_SIZE;
223 void *p; 266 void *p, *pe;
224 if (CALLBACK_MAX_SLOT == 0) 267 if (CALLBACK_MAX_SLOT == 0)
225 lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); 268 lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
226#if LJ_TARGET_WINDOWS 269#if LJ_TARGET_WINDOWS
227 p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); 270 p = LJ_WIN_VALLOC(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
228 if (!p) 271 if (!p)
229 lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); 272 lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
230#elif LJ_TARGET_POSIX 273#elif LJ_TARGET_POSIX
@@ -237,12 +280,15 @@ static void callback_mcode_new(CTState *cts)
237 p = lj_mem_new(cts->L, sz); 280 p = lj_mem_new(cts->L, sz);
238#endif 281#endif
239 cts->cb.mcode = p; 282 cts->cb.mcode = p;
240 callback_mcode_init(cts->g, p); 283 pe = callback_mcode_init(cts->g, p);
284 UNUSED(pe);
285 lj_assertCTS((size_t)((char *)pe - (char *)p) <= sz,
286 "miscalculated CALLBACK_MAX_SLOT");
241 lj_mcode_sync(p, (char *)p + sz); 287 lj_mcode_sync(p, (char *)p + sz);
242#if LJ_TARGET_WINDOWS 288#if LJ_TARGET_WINDOWS
243 { 289 {
244 DWORD oprot; 290 DWORD oprot;
245 VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot); 291 LJ_WIN_VPROTECT(p, sz, PAGE_EXECUTE_READ, &oprot);
246 } 292 }
247#elif LJ_TARGET_POSIX 293#elif LJ_TARGET_POSIX
248 mprotect(p, sz, (PROT_READ|PROT_EXEC)); 294 mprotect(p, sz, (PROT_READ|PROT_EXEC));
@@ -351,33 +397,78 @@ void lj_ccallback_mcode_free(CTState *cts)
351 goto done; \ 397 goto done; \
352 } CALLBACK_HANDLE_REGARG_FP2 398 } CALLBACK_HANDLE_REGARG_FP2
353 399
354#elif LJ_TARGET_PPC 400#elif LJ_TARGET_ARM64
355 401
356#define CALLBACK_HANDLE_REGARG \ 402#define CALLBACK_HANDLE_REGARG \
357 if (isfp) { \ 403 if (isfp) { \
358 if (nfpr + 1 <= CCALL_NARG_FPR) { \ 404 if (nfpr + n <= CCALL_NARG_FPR) { \
359 sp = &cts->cb.fpr[nfpr++]; \ 405 sp = &cts->cb.fpr[nfpr]; \
360 cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \ 406 nfpr += n; \
361 goto done; \ 407 goto done; \
408 } else { \
409 nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \
362 } \ 410 } \
363 } else { /* Try to pass argument in GPRs. */ \ 411 } else { \
364 if (n > 1) { \ 412 if (!LJ_TARGET_IOS && n > 1) \
365 lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \ 413 ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
366 ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
367 } \
368 if (ngpr + n <= maxgpr) { \ 414 if (ngpr + n <= maxgpr) { \
369 sp = &cts->cb.gpr[ngpr]; \ 415 sp = &cts->cb.gpr[ngpr]; \
370 ngpr += n; \ 416 ngpr += n; \
371 goto done; \ 417 goto done; \
418 } else { \
419 ngpr = CCALL_NARG_GPR; /* Prevent reordering. */ \
420 } \
421 }
422
423#elif LJ_TARGET_PPC
424
425#define CALLBACK_HANDLE_GPR \
426 if (n > 1) { \
427 lj_assertCTS(((LJ_ABI_SOFTFP && ctype_isnum(cta->info)) || /* double. */ \
428 ctype_isinteger(cta->info)) && n == 2, /* int64_t. */ \
429 "bad GPR type"); \
430 ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
431 } \
432 if (ngpr + n <= maxgpr) { \
433 sp = &cts->cb.gpr[ngpr]; \
434 ngpr += n; \
435 goto done; \
436 }
437
438#if LJ_ABI_SOFTFP
439#define CALLBACK_HANDLE_REGARG \
440 CALLBACK_HANDLE_GPR \
441 UNUSED(isfp);
442#else
443#define CALLBACK_HANDLE_REGARG \
444 if (isfp) { \
445 if (nfpr + 1 <= CCALL_NARG_FPR) { \
446 sp = &cts->cb.fpr[nfpr++]; \
447 cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
448 goto done; \
372 } \ 449 } \
450 } else { /* Try to pass argument in GPRs. */ \
451 CALLBACK_HANDLE_GPR \
373 } 452 }
453#endif
374 454
455#if !LJ_ABI_SOFTFP
375#define CALLBACK_HANDLE_RET \ 456#define CALLBACK_HANDLE_RET \
376 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ 457 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
377 *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */ 458 *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */
459#endif
378 460
379#elif LJ_TARGET_MIPS 461#elif LJ_TARGET_MIPS32
380 462
463#define CALLBACK_HANDLE_GPR \
464 if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
465 if (ngpr + n <= maxgpr) { \
466 sp = &cts->cb.gpr[ngpr]; \
467 ngpr += n; \
468 goto done; \
469 }
470
471#if !LJ_ABI_SOFTFP /* MIPS32 hard-float */
381#define CALLBACK_HANDLE_REGARG \ 472#define CALLBACK_HANDLE_REGARG \
382 if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \ 473 if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \
383 sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \ 474 sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \
@@ -385,13 +476,36 @@ void lj_ccallback_mcode_free(CTState *cts)
385 goto done; \ 476 goto done; \
386 } else { /* Try to pass argument in GPRs. */ \ 477 } else { /* Try to pass argument in GPRs. */ \
387 nfpr = CCALL_NARG_FPR; \ 478 nfpr = CCALL_NARG_FPR; \
388 if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ 479 CALLBACK_HANDLE_GPR \
389 if (ngpr + n <= maxgpr) { \
390 sp = &cts->cb.gpr[ngpr]; \
391 ngpr += n; \
392 goto done; \
393 } \
394 } 480 }
481#else /* MIPS32 soft-float */
482#define CALLBACK_HANDLE_REGARG \
483 CALLBACK_HANDLE_GPR \
484 UNUSED(isfp);
485#endif
486
487#define CALLBACK_HANDLE_RET \
488 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
489 ((float *)dp)[1] = *(float *)dp;
490
491#elif LJ_TARGET_MIPS64
492
493#if !LJ_ABI_SOFTFP /* MIPS64 hard-float */
494#define CALLBACK_HANDLE_REGARG \
495 if (ngpr + n <= maxgpr) { \
496 sp = isfp ? (void*) &cts->cb.fpr[ngpr] : (void*) &cts->cb.gpr[ngpr]; \
497 ngpr += n; \
498 goto done; \
499 }
500#else /* MIPS64 soft-float */
501#define CALLBACK_HANDLE_REGARG \
502 if (ngpr + n <= maxgpr) { \
503 UNUSED(isfp); \
504 sp = (void*) &cts->cb.gpr[ngpr]; \
505 ngpr += n; \
506 goto done; \
507 }
508#endif
395 509
396#define CALLBACK_HANDLE_RET \ 510#define CALLBACK_HANDLE_RET \
397 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \ 511 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
@@ -411,6 +525,7 @@ static void callback_conv_args(CTState *cts, lua_State *L)
411 int gcsteps = 0; 525 int gcsteps = 0;
412 CType *ct; 526 CType *ct;
413 GCfunc *fn; 527 GCfunc *fn;
528 int fntp;
414 MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR; 529 MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR;
415#if CCALL_NARG_FPR 530#if CCALL_NARG_FPR
416 MSize nfpr = 0; 531 MSize nfpr = 0;
@@ -421,18 +536,27 @@ static void callback_conv_args(CTState *cts, lua_State *L)
421 536
422 if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) { 537 if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) {
423 ct = ctype_get(cts, id); 538 ct = ctype_get(cts, id);
424 rid = ctype_cid(ct->info); 539 rid = ctype_cid(ct->info); /* Return type. x86: +(spadj<<16). */
425 fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot)); 540 fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot));
541 fntp = LJ_TFUNC;
426 } else { /* Must set up frame first, before throwing the error. */ 542 } else { /* Must set up frame first, before throwing the error. */
427 ct = NULL; 543 ct = NULL;
428 rid = 0; 544 rid = 0;
429 fn = (GCfunc *)L; 545 fn = (GCfunc *)L;
546 fntp = LJ_TTHREAD;
547 }
548 /* Continuation returns from callback. */
549 if (LJ_FR2) {
550 (o++)->u64 = LJ_CONT_FFI_CALLBACK;
551 (o++)->u64 = rid;
552 o++;
553 } else {
554 o->u32.lo = LJ_CONT_FFI_CALLBACK;
555 o->u32.hi = rid;
556 o++;
430 } 557 }
431 o->u32.lo = LJ_CONT_FFI_CALLBACK; /* Continuation returns from callback. */ 558 setframe_gc(o, obj2gco(fn), fntp);
432 o->u32.hi = rid; /* Return type. x86: +(spadj<<16). */ 559 setframe_ftsz(o, ((char *)(o+1) - (char *)L->base) + FRAME_CONT);
433 o++;
434 setframe_gc(o, obj2gco(fn));
435 setframe_ftsz(o, (int)((char *)(o+1) - (char *)L->base) + FRAME_CONT);
436 L->top = L->base = ++o; 560 L->top = L->base = ++o;
437 if (!ct) 561 if (!ct)
438 lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK); 562 lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK);
@@ -459,7 +583,7 @@ static void callback_conv_args(CTState *cts, lua_State *L)
459 CTSize sz; 583 CTSize sz;
460 int isfp; 584 int isfp;
461 MSize n; 585 MSize n;
462 lua_assert(ctype_isfield(ctf->info)); 586 lj_assertCTS(ctype_isfield(ctf->info), "field expected");
463 cta = ctype_rawchild(cts, ctf); 587 cta = ctype_rawchild(cts, ctf);
464 isfp = ctype_isfp(cta->info); 588 isfp = ctype_isfp(cta->info);
465 sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); 589 sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
@@ -474,7 +598,11 @@ static void callback_conv_args(CTState *cts, lua_State *L)
474 nsp += n; 598 nsp += n;
475 599
476 done: 600 done:
477 if (LJ_BE && cta->size < CTSIZE_PTR) 601 if (LJ_BE && cta->size < CTSIZE_PTR
602#if LJ_TARGET_MIPS64
603 && !(isfp && nsp)
604#endif
605 )
478 sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size); 606 sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size);
479 gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp); 607 gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp);
480 } 608 }
@@ -483,9 +611,14 @@ static void callback_conv_args(CTState *cts, lua_State *L)
483 L->top = o; 611 L->top = o;
484#if LJ_TARGET_X86 612#if LJ_TARGET_X86
485 /* Store stack adjustment for returns from non-cdecl callbacks. */ 613 /* Store stack adjustment for returns from non-cdecl callbacks. */
486 if (ctype_cconv(ct->info) != CTCC_CDECL) 614 if (ctype_cconv(ct->info) != CTCC_CDECL) {
615#if LJ_FR2
616 (L->base-3)->u64 |= (nsp << (16+2));
617#else
487 (L->base-2)->u32.hi |= (nsp << (16+2)); 618 (L->base-2)->u32.hi |= (nsp << (16+2));
488#endif 619#endif
620 }
621#endif
489 while (gcsteps-- > 0) 622 while (gcsteps-- > 0)
490 lj_gc_check(L); 623 lj_gc_check(L);
491} 624}
@@ -493,7 +626,11 @@ static void callback_conv_args(CTState *cts, lua_State *L)
493/* Convert Lua object to callback result. */ 626/* Convert Lua object to callback result. */
494static void callback_conv_result(CTState *cts, lua_State *L, TValue *o) 627static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
495{ 628{
629#if LJ_FR2
630 CType *ctr = ctype_raw(cts, (uint16_t)(L->base-3)->u64);
631#else
496 CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi); 632 CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi);
633#endif
497#if LJ_TARGET_X86 634#if LJ_TARGET_X86
498 cts->cb.gpr[2] = 0; 635 cts->cb.gpr[2] = 0;
499#endif 636#endif
@@ -503,6 +640,10 @@ static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
503 if (ctype_isfp(ctr->info)) 640 if (ctype_isfp(ctr->info))
504 dp = (uint8_t *)&cts->cb.fpr[0]; 641 dp = (uint8_t *)&cts->cb.fpr[0];
505#endif 642#endif
643#if LJ_TARGET_ARM64 && LJ_BE
644 if (ctype_isfp(ctr->info) && ctr->size == sizeof(float))
645 dp = (uint8_t *)&cts->cb.fpr[0].f[1];
646#endif
506 lj_cconv_ct_tv(cts, ctr, dp, o, 0); 647 lj_cconv_ct_tv(cts, ctr, dp, o, 0);
507#ifdef CALLBACK_HANDLE_RET 648#ifdef CALLBACK_HANDLE_RET
508 CALLBACK_HANDLE_RET 649 CALLBACK_HANDLE_RET
@@ -516,6 +657,12 @@ static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
516 *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp : 657 *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp :
517 (int32_t)*(int16_t *)dp; 658 (int32_t)*(int16_t *)dp;
518 } 659 }
660#if LJ_TARGET_MIPS64 || (LJ_TARGET_ARM64 && LJ_BE)
661 /* Always sign-extend results to 64 bits. Even a soft-fp 'float'. */
662 if (ctr->size <= 4 &&
663 (LJ_ABI_SOFTFP || ctype_isinteger_or_bool(ctr->info)))
664 *(int64_t *)dp = (int64_t)*(int32_t *)dp;
665#endif
519#if LJ_TARGET_X86 666#if LJ_TARGET_X86
520 if (ctype_isfp(ctr->info)) 667 if (ctype_isfp(ctr->info))
521 cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2; 668 cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2;
@@ -528,8 +675,8 @@ lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf)
528{ 675{
529 lua_State *L = cts->L; 676 lua_State *L = cts->L;
530 global_State *g = cts->g; 677 global_State *g = cts->g;
531 lua_assert(L != NULL); 678 lj_assertG(L != NULL, "uninitialized cts->L in callback");
532 if (gcref(g->jit_L)) { 679 if (tvref(g->jit_base)) {
533 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK)); 680 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK));
534 if (g->panic) g->panic(L); 681 if (g->panic) g->panic(L);
535 exit(EXIT_FAILURE); 682 exit(EXIT_FAILURE);
@@ -562,9 +709,9 @@ void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o)
562 } 709 }
563 callback_conv_result(cts, L, o); 710 callback_conv_result(cts, L, o);
564 /* Finally drop C frame and continuation frame. */ 711 /* Finally drop C frame and continuation frame. */
565 L->cframe = cframe_prev(L->cframe); 712 L->top -= 2+2*LJ_FR2;
566 L->top -= 2;
567 L->base = obase; 713 L->base = obase;
714 L->cframe = cframe_prev(L->cframe);
568 cts->cb.slot = 0; /* Blacklist C function that called the callback. */ 715 cts->cb.slot = 0; /* Blacklist C function that called the callback. */
569} 716}
570 717
@@ -613,7 +760,7 @@ static CType *callback_checkfunc(CTState *cts, CType *ct)
613 CType *ctf = ctype_get(cts, fid); 760 CType *ctf = ctype_get(cts, fid);
614 if (!ctype_isattrib(ctf->info)) { 761 if (!ctype_isattrib(ctf->info)) {
615 CType *cta; 762 CType *cta;
616 lua_assert(ctype_isfield(ctf->info)); 763 lj_assertCTS(ctype_isfield(ctf->info), "field expected");
617 cta = ctype_rawchild(cts, ctf); 764 cta = ctype_rawchild(cts, ctf);
618 if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) || 765 if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) ||
619 (ctype_isnum(cta->info) && cta->size <= 8)) || 766 (ctype_isnum(cta->info) && cta->size <= 8)) ||
diff --git a/src/lj_cconv.c b/src/lj_cconv.c
index 99776b0e..400c2ae6 100644
--- a/src/lj_cconv.c
+++ b/src/lj_cconv.c
@@ -122,19 +122,25 @@ void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
122 CTInfo dinfo = d->info, sinfo = s->info; 122 CTInfo dinfo = d->info, sinfo = s->info;
123 void *tmpptr; 123 void *tmpptr;
124 124
125 lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo)); 125 lj_assertCTS(!ctype_isenum(dinfo) && !ctype_isenum(sinfo),
126 lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo)); 126 "unresolved enum");
127 lj_assertCTS(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo),
128 "unstripped attribute");
127 129
128 if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) 130 if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
129 goto err_conv; 131 goto err_conv;
130 132
131 /* Some basic sanity checks. */ 133 /* Some basic sanity checks. */
132 lua_assert(!ctype_isnum(dinfo) || dsize > 0); 134 lj_assertCTS(!ctype_isnum(dinfo) || dsize > 0, "bad size for number type");
133 lua_assert(!ctype_isnum(sinfo) || ssize > 0); 135 lj_assertCTS(!ctype_isnum(sinfo) || ssize > 0, "bad size for number type");
134 lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4); 136 lj_assertCTS(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4,
135 lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4); 137 "bad size for bool type");
136 lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize); 138 lj_assertCTS(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4,
137 lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize); 139 "bad size for bool type");
140 lj_assertCTS(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize,
141 "bad size for integer type");
142 lj_assertCTS(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize,
143 "bad size for integer type");
138 144
139 switch (cconv_idx2(dinfo, sinfo)) { 145 switch (cconv_idx2(dinfo, sinfo)) {
140 /* Destination is a bool. */ 146 /* Destination is a bool. */
@@ -357,7 +363,7 @@ void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
357 if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) 363 if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s)
358 goto err_conv; /* Must be exact same type. */ 364 goto err_conv; /* Must be exact same type. */
359copyval: /* Copy value. */ 365copyval: /* Copy value. */
360 lua_assert(dsize == ssize); 366 lj_assertCTS(dsize == ssize, "value copy with different sizes");
361 memcpy(dp, sp, dsize); 367 memcpy(dp, sp, dsize);
362 break; 368 break;
363 369
@@ -389,7 +395,7 @@ int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
389 lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, 395 lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s,
390 (uint8_t *)&o->n, sp, 0); 396 (uint8_t *)&o->n, sp, 0);
391 /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ 397 /* Numbers are NOT canonicalized here! Beware of uninitialized data. */
392 lua_assert(tvisnum(o)); 398 lj_assertCTS(tvisnum(o), "non-canonical NaN passed");
393 } 399 }
394 } else { 400 } else {
395 uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0); 401 uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0);
@@ -406,7 +412,7 @@ int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
406 CTSize sz; 412 CTSize sz;
407 copyval: /* Copy value. */ 413 copyval: /* Copy value. */
408 sz = s->size; 414 sz = s->size;
409 lua_assert(sz != CTSIZE_INVALID); 415 lj_assertCTS(sz != CTSIZE_INVALID, "value copy with invalid size");
410 /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ 416 /* Attributes are stripped, qualifiers are kept (but mostly ignored). */
411 cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); 417 cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz);
412 setcdataV(cts->L, o, cd); 418 setcdataV(cts->L, o, cd);
@@ -421,19 +427,22 @@ int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
421 CTInfo info = s->info; 427 CTInfo info = s->info;
422 CTSize pos, bsz; 428 CTSize pos, bsz;
423 uint32_t val; 429 uint32_t val;
424 lua_assert(ctype_isbitfield(info)); 430 lj_assertCTS(ctype_isbitfield(info), "bitfield expected");
425 /* NYI: packed bitfields may cause misaligned reads. */ 431 /* NYI: packed bitfields may cause misaligned reads. */
426 switch (ctype_bitcsz(info)) { 432 switch (ctype_bitcsz(info)) {
427 case 4: val = *(uint32_t *)sp; break; 433 case 4: val = *(uint32_t *)sp; break;
428 case 2: val = *(uint16_t *)sp; break; 434 case 2: val = *(uint16_t *)sp; break;
429 case 1: val = *(uint8_t *)sp; break; 435 case 1: val = *(uint8_t *)sp; break;
430 default: lua_assert(0); val = 0; break; 436 default:
437 lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info));
438 val = 0;
439 break;
431 } 440 }
432 /* Check if a packed bitfield crosses a container boundary. */ 441 /* Check if a packed bitfield crosses a container boundary. */
433 pos = ctype_bitpos(info); 442 pos = ctype_bitpos(info);
434 bsz = ctype_bitbsz(info); 443 bsz = ctype_bitbsz(info);
435 lua_assert(pos < 8*ctype_bitcsz(info)); 444 lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position");
436 lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); 445 lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size");
437 if (pos + bsz > 8*ctype_bitcsz(info)) 446 if (pos + bsz > 8*ctype_bitcsz(info))
438 lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); 447 lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
439 if (!(info & CTF_BOOL)) { 448 if (!(info & CTF_BOOL)) {
@@ -448,8 +457,10 @@ int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
448 setintV(o, (int32_t)val); 457 setintV(o, (int32_t)val);
449 } 458 }
450 } else { 459 } else {
451 lua_assert(bsz == 1); 460 uint32_t b = (val >> pos) & 1;
452 setboolV(o, (val >> pos) & 1); 461 lj_assertCTS(bsz == 1, "bad bool bitfield size");
462 setboolV(o, b);
463 setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
453 } 464 }
454 return 0; /* No GC step needed. */ 465 return 0; /* No GC step needed. */
455} 466}
@@ -551,7 +562,7 @@ void lj_cconv_ct_tv(CTState *cts, CType *d,
551 sid = cdataV(o)->ctypeid; 562 sid = cdataV(o)->ctypeid;
552 s = ctype_get(cts, sid); 563 s = ctype_get(cts, sid);
553 if (ctype_isref(s->info)) { /* Resolve reference for value. */ 564 if (ctype_isref(s->info)) { /* Resolve reference for value. */
554 lua_assert(s->size == CTSIZE_PTR); 565 lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized");
555 sp = *(void **)sp; 566 sp = *(void **)sp;
556 sid = ctype_cid(s->info); 567 sid = ctype_cid(s->info);
557 } 568 }
@@ -569,7 +580,7 @@ void lj_cconv_ct_tv(CTState *cts, CType *d,
569 CType *cct = lj_ctype_getfield(cts, d, str, &ofs); 580 CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
570 if (!cct || !ctype_isconstval(cct->info)) 581 if (!cct || !ctype_isconstval(cct->info))
571 goto err_conv; 582 goto err_conv;
572 lua_assert(d->size == 4); 583 lj_assertCTS(d->size == 4, "only 32 bit enum supported"); /* NYI */
573 sp = (uint8_t *)&cct->size; 584 sp = (uint8_t *)&cct->size;
574 sid = ctype_cid(cct->info); 585 sid = ctype_cid(cct->info);
575 } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ 586 } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
@@ -633,10 +644,10 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
633 CTInfo info = d->info; 644 CTInfo info = d->info;
634 CTSize pos, bsz; 645 CTSize pos, bsz;
635 uint32_t val, mask; 646 uint32_t val, mask;
636 lua_assert(ctype_isbitfield(info)); 647 lj_assertCTS(ctype_isbitfield(info), "bitfield expected");
637 if ((info & CTF_BOOL)) { 648 if ((info & CTF_BOOL)) {
638 uint8_t tmpbool; 649 uint8_t tmpbool;
639 lua_assert(ctype_bitbsz(info) == 1); 650 lj_assertCTS(ctype_bitbsz(info) == 1, "bad bool bitfield size");
640 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); 651 lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0);
641 val = tmpbool; 652 val = tmpbool;
642 } else { 653 } else {
@@ -645,8 +656,8 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
645 } 656 }
646 pos = ctype_bitpos(info); 657 pos = ctype_bitpos(info);
647 bsz = ctype_bitbsz(info); 658 bsz = ctype_bitbsz(info);
648 lua_assert(pos < 8*ctype_bitcsz(info)); 659 lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position");
649 lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); 660 lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size");
650 /* Check if a packed bitfield crosses a container boundary. */ 661 /* Check if a packed bitfield crosses a container boundary. */
651 if (pos + bsz > 8*ctype_bitcsz(info)) 662 if (pos + bsz > 8*ctype_bitcsz(info))
652 lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); 663 lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
@@ -657,7 +668,9 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
657 case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; 668 case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break;
658 case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; 669 case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break;
659 case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; 670 case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break;
660 default: lua_assert(0); break; 671 default:
672 lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info));
673 break;
661 } 674 }
662} 675}
663 676
diff --git a/src/lj_cconv.h b/src/lj_cconv.h
index 2fd5a71c..1f716d2a 100644
--- a/src/lj_cconv.h
+++ b/src/lj_cconv.h
@@ -27,13 +27,14 @@ enum {
27static LJ_AINLINE uint32_t cconv_idx(CTInfo info) 27static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
28{ 28{
29 uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ 29 uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
30 lua_assert(ctype_type(info) <= CT_MAYCONVERT); 30 lj_assertX(ctype_type(info) <= CT_MAYCONVERT,
31 "cannot convert ctype %08x", info);
31#if LJ_64 32#if LJ_64
32 idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u); 33 idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
33#else 34#else
34 idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); 35 idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u);
35#endif 36#endif
36 lua_assert(idx < 8); 37 lj_assertX(idx < 8, "cannot convert ctype %08x", info);
37 return idx; 38 return idx;
38} 39}
39 40
diff --git a/src/lj_cdata.c b/src/lj_cdata.c
index 4aeb0ce3..a827d1ec 100644
--- a/src/lj_cdata.c
+++ b/src/lj_cdata.c
@@ -9,7 +9,6 @@
9 9
10#include "lj_gc.h" 10#include "lj_gc.h"
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_str.h"
13#include "lj_tab.h" 12#include "lj_tab.h"
14#include "lj_ctype.h" 13#include "lj_ctype.h"
15#include "lj_cconv.h" 14#include "lj_cconv.h"
@@ -27,20 +26,20 @@ GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id)
27} 26}
28 27
29/* Allocate variable-sized or specially aligned C data object. */ 28/* Allocate variable-sized or specially aligned C data object. */
30GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align) 29GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align)
31{ 30{
32 global_State *g; 31 global_State *g;
33 MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) + 32 MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
34 (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0); 33 (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
35 char *p = lj_mem_newt(cts->L, extra + sz, char); 34 char *p = lj_mem_newt(L, extra + sz, char);
36 uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata); 35 uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
37 uintptr_t almask = (1u << align) - 1u; 36 uintptr_t almask = (1u << align) - 1u;
38 GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata)); 37 GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
39 lua_assert((char *)cd - p < 65536); 38 lj_assertL((char *)cd - p < 65536, "excessive cdata alignment");
40 cdatav(cd)->offset = (uint16_t)((char *)cd - p); 39 cdatav(cd)->offset = (uint16_t)((char *)cd - p);
41 cdatav(cd)->extra = extra; 40 cdatav(cd)->extra = extra;
42 cdatav(cd)->len = sz; 41 cdatav(cd)->len = sz;
43 g = cts->g; 42 g = G(L);
44 setgcrefr(cd->nextgc, g->gc.root); 43 setgcrefr(cd->nextgc, g->gc.root);
45 setgcref(g->gc.root, obj2gco(cd)); 44 setgcref(g->gc.root, obj2gco(cd));
46 newwhite(g, obj2gco(cd)); 45 newwhite(g, obj2gco(cd));
@@ -50,6 +49,15 @@ GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align)
50 return cd; 49 return cd;
51} 50}
52 51
52/* Allocate arbitrary C data object. */
53GCcdata *lj_cdata_newx(CTState *cts, CTypeID id, CTSize sz, CTInfo info)
54{
55 if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN)
56 return lj_cdata_new(cts, id, sz);
57 else
58 return lj_cdata_newv(cts->L, id, sz, ctype_align(info));
59}
60
53/* Free a C data object. */ 61/* Free a C data object. */
54void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd) 62void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
55{ 63{
@@ -68,29 +76,30 @@ void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
68 } else if (LJ_LIKELY(!cdataisv(cd))) { 76 } else if (LJ_LIKELY(!cdataisv(cd))) {
69 CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid); 77 CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid);
70 CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR; 78 CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
71 lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) || 79 lj_assertG(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
72 ctype_isextern(ct->info)); 80 ctype_isextern(ct->info), "free of ctype without a size");
73 lj_mem_free(g, cd, sizeof(GCcdata) + sz); 81 lj_mem_free(g, cd, sizeof(GCcdata) + sz);
74 } else { 82 } else {
75 lj_mem_free(g, memcdatav(cd), sizecdatav(cd)); 83 lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
76 } 84 }
77} 85}
78 86
79TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd) 87void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, uint32_t it)
80{ 88{
81 global_State *g = G(L); 89 GCtab *t = ctype_ctsG(G(L))->finalizer;
82 GCtab *t = ctype_ctsG(g)->finalizer;
83 if (gcref(t->metatable)) { 90 if (gcref(t->metatable)) {
84 /* Add cdata to finalizer table, if still enabled. */ 91 /* Add cdata to finalizer table, if still enabled. */
85 TValue *tv, tmp; 92 TValue *tv, tmp;
86 setcdataV(L, &tmp, cd); 93 setcdataV(L, &tmp, cd);
87 lj_gc_anybarriert(L, t); 94 lj_gc_anybarriert(L, t);
88 tv = lj_tab_set(L, t, &tmp); 95 tv = lj_tab_set(L, t, &tmp);
89 cd->marked |= LJ_GC_CDATA_FIN; 96 if (it == LJ_TNIL) {
90 return tv; 97 setnilV(tv);
91 } else { 98 cd->marked &= ~LJ_GC_CDATA_FIN;
92 /* Otherwise return dummy TValue. */ 99 } else {
93 return &g->tmptv; 100 setgcV(L, tv, obj, it);
101 cd->marked |= LJ_GC_CDATA_FIN;
102 }
94 } 103 }
95} 104}
96 105
@@ -106,7 +115,7 @@ CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
106 115
107 /* Resolve reference for cdata object. */ 116 /* Resolve reference for cdata object. */
108 if (ctype_isref(ct->info)) { 117 if (ctype_isref(ct->info)) {
109 lua_assert(ct->size == CTSIZE_PTR); 118 lj_assertCTS(ct->size == CTSIZE_PTR, "ref is not pointer-sized");
110 p = *(uint8_t **)p; 119 p = *(uint8_t **)p;
111 ct = ctype_child(cts, ct); 120 ct = ctype_child(cts, ct);
112 } 121 }
@@ -117,13 +126,19 @@ collect_attrib:
117 if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; 126 if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
118 ct = ctype_child(cts, ct); 127 ct = ctype_child(cts, ct);
119 } 128 }
120 lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */ 129 /* Interning rejects refs to refs. */
130 lj_assertCTS(!ctype_isref(ct->info), "bad ref of ref");
121 131
122 if (tvisint(key)) { 132 if (tvisint(key)) {
123 idx = (ptrdiff_t)intV(key); 133 idx = (ptrdiff_t)intV(key);
124 goto integer_key; 134 goto integer_key;
125 } else if (tvisnum(key)) { /* Numeric key. */ 135 } else if (tvisnum(key)) { /* Numeric key. */
126 idx = LJ_64 ? (ptrdiff_t)numV(key) : (ptrdiff_t)lj_num2int(numV(key)); 136#ifdef _MSC_VER
137 /* Workaround for MSVC bug. */
138 volatile
139#endif
140 lua_Number n = numV(key);
141 idx = LJ_64 ? (ptrdiff_t)n : (ptrdiff_t)lj_num2int(n);
127 integer_key: 142 integer_key:
128 if (ctype_ispointer(ct->info)) { 143 if (ctype_ispointer(ct->info)) {
129 CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */ 144 CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */
@@ -198,7 +213,8 @@ collect_attrib:
198static void cdata_getconst(CTState *cts, TValue *o, CType *ct) 213static void cdata_getconst(CTState *cts, TValue *o, CType *ct)
199{ 214{
200 CType *ctt = ctype_child(cts, ct); 215 CType *ctt = ctype_child(cts, ct);
201 lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); 216 lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4,
217 "only 32 bit const supported"); /* NYI */
202 /* Constants are already zero-extended/sign-extended to 32 bits. */ 218 /* Constants are already zero-extended/sign-extended to 32 bits. */
203 if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) 219 if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
204 setnumV(o, (lua_Number)(uint32_t)ct->size); 220 setnumV(o, (lua_Number)(uint32_t)ct->size);
@@ -219,13 +235,14 @@ int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp)
219 } 235 }
220 236
221 /* Get child type of pointer/array/field. */ 237 /* Get child type of pointer/array/field. */
222 lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info)); 238 lj_assertCTS(ctype_ispointer(s->info) || ctype_isfield(s->info),
239 "pointer or field expected");
223 sid = ctype_cid(s->info); 240 sid = ctype_cid(s->info);
224 s = ctype_get(cts, sid); 241 s = ctype_get(cts, sid);
225 242
226 /* Resolve reference for field. */ 243 /* Resolve reference for field. */
227 if (ctype_isref(s->info)) { 244 if (ctype_isref(s->info)) {
228 lua_assert(s->size == CTSIZE_PTR); 245 lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized");
229 sp = *(uint8_t **)sp; 246 sp = *(uint8_t **)sp;
230 sid = ctype_cid(s->info); 247 sid = ctype_cid(s->info);
231 s = ctype_get(cts, sid); 248 s = ctype_get(cts, sid);
@@ -252,12 +269,13 @@ void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
252 } 269 }
253 270
254 /* Get child type of pointer/array/field. */ 271 /* Get child type of pointer/array/field. */
255 lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info)); 272 lj_assertCTS(ctype_ispointer(d->info) || ctype_isfield(d->info),
273 "pointer or field expected");
256 d = ctype_child(cts, d); 274 d = ctype_child(cts, d);
257 275
258 /* Resolve reference for field. */ 276 /* Resolve reference for field. */
259 if (ctype_isref(d->info)) { 277 if (ctype_isref(d->info)) {
260 lua_assert(d->size == CTSIZE_PTR); 278 lj_assertCTS(d->size == CTSIZE_PTR, "ref is not pointer-sized");
261 dp = *(uint8_t **)dp; 279 dp = *(uint8_t **)dp;
262 d = ctype_child(cts, d); 280 d = ctype_child(cts, d);
263 } 281 }
@@ -272,7 +290,8 @@ void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
272 d = ctype_child(cts, d); 290 d = ctype_child(cts, d);
273 } 291 }
274 292
275 lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info)); 293 lj_assertCTS(ctype_hassize(d->info), "store to ctype without size");
294 lj_assertCTS(!ctype_isvoid(d->info), "store to void type");
276 295
277 if (((d->info|qual) & CTF_CONST)) { 296 if (((d->info|qual) & CTF_CONST)) {
278 err_const: 297 err_const:
diff --git a/src/lj_cdata.h b/src/lj_cdata.h
index 2ce90bdf..c3df8ba0 100644
--- a/src/lj_cdata.h
+++ b/src/lj_cdata.h
@@ -18,7 +18,7 @@ static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
18 if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ 18 if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
19 return ((void *)(uintptr_t)*(uint32_t *)p); 19 return ((void *)(uintptr_t)*(uint32_t *)p);
20 } else { 20 } else {
21 lua_assert(sz == CTSIZE_PTR); 21 lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz);
22 return *(void **)p; 22 return *(void **)p;
23 } 23 }
24} 24}
@@ -29,7 +29,7 @@ static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
29 if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ 29 if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
30 *(uint32_t *)p = (uint32_t)(uintptr_t)v; 30 *(uint32_t *)p = (uint32_t)(uintptr_t)v;
31 } else { 31 } else {
32 lua_assert(sz == CTSIZE_PTR); 32 lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz);
33 *(void **)p = (void *)v; 33 *(void **)p = (void *)v;
34 } 34 }
35} 35}
@@ -40,7 +40,8 @@ static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
40 GCcdata *cd; 40 GCcdata *cd;
41#ifdef LUA_USE_ASSERT 41#ifdef LUA_USE_ASSERT
42 CType *ct = ctype_raw(cts, id); 42 CType *ct = ctype_raw(cts, id);
43 lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz); 43 lj_assertCTS((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz,
44 "inconsistent size of fixed-size cdata alloc");
44#endif 45#endif
45 cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); 46 cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
46 cd->gct = ~LJ_TCDATA; 47 cd->gct = ~LJ_TCDATA;
@@ -58,11 +59,14 @@ static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
58} 59}
59 60
60LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id); 61LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
61LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, 62LJ_FUNC GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz,
62 CTSize align); 63 CTSize align);
64LJ_FUNC GCcdata *lj_cdata_newx(CTState *cts, CTypeID id, CTSize sz,
65 CTInfo info);
63 66
64LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd); 67LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
65LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd); 68LJ_FUNC void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj,
69 uint32_t it);
66 70
67LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, 71LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
68 uint8_t **pp, CTInfo *qual); 72 uint8_t **pp, CTInfo *qual);
diff --git a/src/lj_clib.c b/src/lj_clib.c
index df20aca3..8da41a83 100644
--- a/src/lj_clib.c
+++ b/src/lj_clib.c
@@ -16,6 +16,7 @@
16#include "lj_cconv.h" 16#include "lj_cconv.h"
17#include "lj_cdata.h" 17#include "lj_cdata.h"
18#include "lj_clib.h" 18#include "lj_clib.h"
19#include "lj_strfmt.h"
19 20
20/* -- OS-specific functions ----------------------------------------------- */ 21/* -- OS-specific functions ----------------------------------------------- */
21 22
@@ -61,7 +62,7 @@ static const char *clib_extname(lua_State *L, const char *name)
61#endif 62#endif
62 ) { 63 ) {
63 if (!strchr(name, '.')) { 64 if (!strchr(name, '.')) {
64 name = lj_str_pushf(L, CLIB_SOEXT, name); 65 name = lj_strfmt_pushf(L, CLIB_SOEXT, name);
65 L->top--; 66 L->top--;
66#if LJ_TARGET_CYGWIN 67#if LJ_TARGET_CYGWIN
67 } else { 68 } else {
@@ -70,7 +71,7 @@ static const char *clib_extname(lua_State *L, const char *name)
70 } 71 }
71 if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] && 72 if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
72 name[2] == CLIB_SOPREFIX[2])) { 73 name[2] == CLIB_SOPREFIX[2])) {
73 name = lj_str_pushf(L, CLIB_SOPREFIX "%s", name); 74 name = lj_strfmt_pushf(L, CLIB_SOPREFIX "%s", name);
74 L->top--; 75 L->top--;
75 } 76 }
76 } 77 }
@@ -158,11 +159,13 @@ BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
158/* Default libraries. */ 159/* Default libraries. */
159enum { 160enum {
160 CLIB_HANDLE_EXE, 161 CLIB_HANDLE_EXE,
162#if !LJ_TARGET_UWP
161 CLIB_HANDLE_DLL, 163 CLIB_HANDLE_DLL,
162 CLIB_HANDLE_CRT, 164 CLIB_HANDLE_CRT,
163 CLIB_HANDLE_KERNEL32, 165 CLIB_HANDLE_KERNEL32,
164 CLIB_HANDLE_USER32, 166 CLIB_HANDLE_USER32,
165 CLIB_HANDLE_GDI32, 167 CLIB_HANDLE_GDI32,
168#endif
166 CLIB_HANDLE_MAX 169 CLIB_HANDLE_MAX
167}; 170};
168 171
@@ -172,11 +175,19 @@ LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
172 const char *name) 175 const char *name)
173{ 176{
174 DWORD err = GetLastError(); 177 DWORD err = GetLastError();
178#if LJ_TARGET_XBOXONE
179 wchar_t wbuf[128];
180 char buf[128*2];
181 if (!FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
182 NULL, err, 0, wbuf, sizeof(wbuf)/sizeof(wchar_t), NULL) ||
183 !WideCharToMultiByte(CP_ACP, 0, wbuf, 128, buf, 128*2, NULL, NULL))
184#else
175 char buf[128]; 185 char buf[128];
176 if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM, 186 if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
177 NULL, err, 0, buf, sizeof(buf), NULL)) 187 NULL, err, 0, buf, sizeof(buf), NULL))
188#endif
178 buf[0] = '\0'; 189 buf[0] = '\0';
179 lj_err_callermsg(L, lj_str_pushf(L, fmt, name, buf)); 190 lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, buf));
180} 191}
181 192
182static int clib_needext(const char *s) 193static int clib_needext(const char *s)
@@ -191,7 +202,7 @@ static int clib_needext(const char *s)
191static const char *clib_extname(lua_State *L, const char *name) 202static const char *clib_extname(lua_State *L, const char *name)
192{ 203{
193 if (clib_needext(name)) { 204 if (clib_needext(name)) {
194 name = lj_str_pushf(L, "%s.dll", name); 205 name = lj_strfmt_pushf(L, "%s.dll", name);
195 L->top--; 206 L->top--;
196 } 207 }
197 return name; 208 return name;
@@ -200,7 +211,7 @@ static const char *clib_extname(lua_State *L, const char *name)
200static void *clib_loadlib(lua_State *L, const char *name, int global) 211static void *clib_loadlib(lua_State *L, const char *name, int global)
201{ 212{
202 DWORD oldwerr = GetLastError(); 213 DWORD oldwerr = GetLastError();
203 void *h = (void *)LoadLibraryA(clib_extname(L, name)); 214 void *h = LJ_WIN_LOADLIBA(clib_extname(L, name));
204 if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name); 215 if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name);
205 SetLastError(oldwerr); 216 SetLastError(oldwerr);
206 UNUSED(global); 217 UNUSED(global);
@@ -210,6 +221,7 @@ static void *clib_loadlib(lua_State *L, const char *name, int global)
210static void clib_unloadlib(CLibrary *cl) 221static void clib_unloadlib(CLibrary *cl)
211{ 222{
212 if (cl->handle == CLIB_DEFHANDLE) { 223 if (cl->handle == CLIB_DEFHANDLE) {
224#if !LJ_TARGET_UWP
213 MSize i; 225 MSize i;
214 for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) { 226 for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) {
215 void *h = clib_def_handle[i]; 227 void *h = clib_def_handle[i];
@@ -218,11 +230,16 @@ static void clib_unloadlib(CLibrary *cl)
218 FreeLibrary((HINSTANCE)h); 230 FreeLibrary((HINSTANCE)h);
219 } 231 }
220 } 232 }
233#endif
221 } else if (cl->handle) { 234 } else if (cl->handle) {
222 FreeLibrary((HINSTANCE)cl->handle); 235 FreeLibrary((HINSTANCE)cl->handle);
223 } 236 }
224} 237}
225 238
239#if LJ_TARGET_UWP
240EXTERN_C IMAGE_DOS_HEADER __ImageBase;
241#endif
242
226static void *clib_getsym(CLibrary *cl, const char *name) 243static void *clib_getsym(CLibrary *cl, const char *name)
227{ 244{
228 void *p = NULL; 245 void *p = NULL;
@@ -231,6 +248,9 @@ static void *clib_getsym(CLibrary *cl, const char *name)
231 for (i = 0; i < CLIB_HANDLE_MAX; i++) { 248 for (i = 0; i < CLIB_HANDLE_MAX; i++) {
232 HINSTANCE h = (HINSTANCE)clib_def_handle[i]; 249 HINSTANCE h = (HINSTANCE)clib_def_handle[i];
233 if (!(void *)h) { /* Resolve default library handles (once). */ 250 if (!(void *)h) { /* Resolve default library handles (once). */
251#if LJ_TARGET_UWP
252 h = (HINSTANCE)&__ImageBase;
253#else
234 switch (i) { 254 switch (i) {
235 case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break; 255 case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break;
236 case CLIB_HANDLE_DLL: 256 case CLIB_HANDLE_DLL:
@@ -241,11 +261,12 @@ static void *clib_getsym(CLibrary *cl, const char *name)
241 GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, 261 GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
242 (const char *)&_fmode, &h); 262 (const char *)&_fmode, &h);
243 break; 263 break;
244 case CLIB_HANDLE_KERNEL32: h = LoadLibraryA("kernel32.dll"); break; 264 case CLIB_HANDLE_KERNEL32: h = LJ_WIN_LOADLIBA("kernel32.dll"); break;
245 case CLIB_HANDLE_USER32: h = LoadLibraryA("user32.dll"); break; 265 case CLIB_HANDLE_USER32: h = LJ_WIN_LOADLIBA("user32.dll"); break;
246 case CLIB_HANDLE_GDI32: h = LoadLibraryA("gdi32.dll"); break; 266 case CLIB_HANDLE_GDI32: h = LJ_WIN_LOADLIBA("gdi32.dll"); break;
247 } 267 }
248 if (!h) continue; 268 if (!h) continue;
269#endif
249 clib_def_handle[i] = (void *)h; 270 clib_def_handle[i] = (void *)h;
250 } 271 }
251 p = (void *)GetProcAddress(h, name); 272 p = (void *)GetProcAddress(h, name);
@@ -264,7 +285,7 @@ static void *clib_getsym(CLibrary *cl, const char *name)
264LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt, 285LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
265 const char *name) 286 const char *name)
266{ 287{
267 lj_err_callermsg(L, lj_str_pushf(L, fmt, name, "no support for this OS")); 288 lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, "no support for this OS"));
268} 289}
269 290
270static void *clib_loadlib(lua_State *L, const char *name, int global) 291static void *clib_loadlib(lua_State *L, const char *name, int global)
@@ -329,7 +350,8 @@ TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
329 lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); 350 lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name));
330 if (ctype_isconstval(ct->info)) { 351 if (ctype_isconstval(ct->info)) {
331 CType *ctt = ctype_child(cts, ct); 352 CType *ctt = ctype_child(cts, ct);
332 lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); 353 lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4,
354 "only 32 bit const supported"); /* NYI */
333 if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) 355 if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
334 setnumV(tv, (lua_Number)(uint32_t)ct->size); 356 setnumV(tv, (lua_Number)(uint32_t)ct->size);
335 else 357 else
@@ -341,14 +363,15 @@ TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
341#endif 363#endif
342 void *p = clib_getsym(cl, sym); 364 void *p = clib_getsym(cl, sym);
343 GCcdata *cd; 365 GCcdata *cd;
344 lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info)); 366 lj_assertCTS(ctype_isfunc(ct->info) || ctype_isextern(ct->info),
367 "unexpected ctype %08x in clib", ct->info);
345#if LJ_TARGET_X86 && LJ_ABI_WIN 368#if LJ_TARGET_X86 && LJ_ABI_WIN
346 /* Retry with decorated name for fastcall/stdcall functions. */ 369 /* Retry with decorated name for fastcall/stdcall functions. */
347 if (!p && ctype_isfunc(ct->info)) { 370 if (!p && ctype_isfunc(ct->info)) {
348 CTInfo cconv = ctype_cconv(ct->info); 371 CTInfo cconv = ctype_cconv(ct->info);
349 if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) { 372 if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) {
350 CTSize sz = clib_func_argsize(cts, ct); 373 CTSize sz = clib_func_argsize(cts, ct);
351 const char *symd = lj_str_pushf(L, 374 const char *symd = lj_strfmt_pushf(L,
352 cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d", 375 cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d",
353 sym, sz); 376 sym, sz);
354 L->top--; 377 L->top--;
diff --git a/src/lj_cparse.c b/src/lj_cparse.c
index 50bb76ad..78628bba 100644
--- a/src/lj_cparse.c
+++ b/src/lj_cparse.c
@@ -9,13 +9,14 @@
9 9
10#include "lj_gc.h" 10#include "lj_gc.h"
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_str.h" 12#include "lj_buf.h"
13#include "lj_ctype.h" 13#include "lj_ctype.h"
14#include "lj_cparse.h" 14#include "lj_cparse.h"
15#include "lj_frame.h" 15#include "lj_frame.h"
16#include "lj_vm.h" 16#include "lj_vm.h"
17#include "lj_char.h" 17#include "lj_char.h"
18#include "lj_strscan.h" 18#include "lj_strscan.h"
19#include "lj_strfmt.h"
19 20
20/* 21/*
21** Important note: this is NOT a validating C parser! This is a minimal 22** Important note: this is NOT a validating C parser! This is a minimal
@@ -27,6 +28,30 @@
27** If in doubt, please check the input against your favorite C compiler. 28** If in doubt, please check the input against your favorite C compiler.
28*/ 29*/
29 30
31#ifdef LUA_USE_ASSERT
32#define lj_assertCP(c, ...) (lj_assertG_(G(cp->L), (c), __VA_ARGS__))
33#else
34#define lj_assertCP(c, ...) ((void)cp)
35#endif
36
37/* -- Miscellaneous ------------------------------------------------------- */
38
39/* Match string against a C literal. */
40#define cp_str_is(str, k) \
41 ((str)->len == sizeof(k)-1 && !memcmp(strdata(str), k, sizeof(k)-1))
42
43/* Check string against a linear list of matches. */
44int lj_cparse_case(GCstr *str, const char *match)
45{
46 MSize len;
47 int n;
48 for (n = 0; (len = (MSize)*match++); n++, match += len) {
49 if (str->len == len && !memcmp(match, strdata(str), len))
50 return n;
51 }
52 return -1;
53}
54
30/* -- C lexer ------------------------------------------------------------- */ 55/* -- C lexer ------------------------------------------------------------- */
31 56
32/* C lexer token names. */ 57/* C lexer token names. */
@@ -42,13 +67,13 @@ LJ_NORET static void cp_err(CPState *cp, ErrMsg em);
42 67
43static const char *cp_tok2str(CPState *cp, CPToken tok) 68static const char *cp_tok2str(CPState *cp, CPToken tok)
44{ 69{
45 lua_assert(tok < CTOK_FIRSTDECL); 70 lj_assertCP(tok < CTOK_FIRSTDECL, "bad CPToken %d", tok);
46 if (tok > CTOK_OFS) 71 if (tok > CTOK_OFS)
47 return ctoknames[tok-CTOK_OFS-1]; 72 return ctoknames[tok-CTOK_OFS-1];
48 else if (!lj_char_iscntrl(tok)) 73 else if (!lj_char_iscntrl(tok))
49 return lj_str_pushf(cp->L, "%c", tok); 74 return lj_strfmt_pushf(cp->L, "%c", tok);
50 else 75 else
51 return lj_str_pushf(cp->L, "char(%d)", tok); 76 return lj_strfmt_pushf(cp->L, "char(%d)", tok);
52} 77}
53 78
54/* End-of-line? */ 79/* End-of-line? */
@@ -85,24 +110,10 @@ static LJ_NOINLINE CPChar cp_get_bs(CPState *cp)
85 return cp_get(cp); 110 return cp_get(cp);
86} 111}
87 112
88/* Grow save buffer. */
89static LJ_NOINLINE void cp_save_grow(CPState *cp, CPChar c)
90{
91 MSize newsize;
92 if (cp->sb.sz >= CPARSE_MAX_BUF/2)
93 cp_err(cp, LJ_ERR_XELEM);
94 newsize = cp->sb.sz * 2;
95 lj_str_resizebuf(cp->L, &cp->sb, newsize);
96 cp->sb.buf[cp->sb.n++] = (char)c;
97}
98
99/* Save character in buffer. */ 113/* Save character in buffer. */
100static LJ_AINLINE void cp_save(CPState *cp, CPChar c) 114static LJ_AINLINE void cp_save(CPState *cp, CPChar c)
101{ 115{
102 if (LJ_UNLIKELY(cp->sb.n + 1 > cp->sb.sz)) 116 lj_buf_putb(&cp->sb, c);
103 cp_save_grow(cp, c);
104 else
105 cp->sb.buf[cp->sb.n++] = (char)c;
106} 117}
107 118
108/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */ 119/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */
@@ -122,20 +133,20 @@ LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...)
122 tokstr = NULL; 133 tokstr = NULL;
123 } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING || 134 } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING ||
124 tok >= CTOK_FIRSTDECL) { 135 tok >= CTOK_FIRSTDECL) {
125 if (cp->sb.n == 0) cp_save(cp, '$'); 136 if (sbufP(&cp->sb) == sbufB(&cp->sb)) cp_save(cp, '$');
126 cp_save(cp, '\0'); 137 cp_save(cp, '\0');
127 tokstr = cp->sb.buf; 138 tokstr = sbufB(&cp->sb);
128 } else { 139 } else {
129 tokstr = cp_tok2str(cp, tok); 140 tokstr = cp_tok2str(cp, tok);
130 } 141 }
131 L = cp->L; 142 L = cp->L;
132 va_start(argp, em); 143 va_start(argp, em);
133 msg = lj_str_pushvf(L, err2msg(em), argp); 144 msg = lj_strfmt_pushvf(L, err2msg(em), argp);
134 va_end(argp); 145 va_end(argp);
135 if (tokstr) 146 if (tokstr)
136 msg = lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr); 147 msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr);
137 if (cp->linenumber > 1) 148 if (cp->linenumber > 1)
138 msg = lj_str_pushf(L, "%s at line %d", msg, cp->linenumber); 149 msg = lj_strfmt_pushf(L, "%s at line %d", msg, cp->linenumber);
139 lj_err_callermsg(L, msg); 150 lj_err_callermsg(L, msg);
140} 151}
141 152
@@ -164,7 +175,8 @@ static CPToken cp_number(CPState *cp)
164 TValue o; 175 TValue o;
165 do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); 176 do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp)));
166 cp_save(cp, '\0'); 177 cp_save(cp, '\0');
167 fmt = lj_strscan_scan((const uint8_t *)cp->sb.buf, &o, STRSCAN_OPT_C); 178 fmt = lj_strscan_scan((const uint8_t *)sbufB(&cp->sb), sbuflen(&cp->sb)-1,
179 &o, STRSCAN_OPT_C);
168 if (fmt == STRSCAN_INT) cp->val.id = CTID_INT32; 180 if (fmt == STRSCAN_INT) cp->val.id = CTID_INT32;
169 else if (fmt == STRSCAN_U32) cp->val.id = CTID_UINT32; 181 else if (fmt == STRSCAN_U32) cp->val.id = CTID_UINT32;
170 else if (!(cp->mode & CPARSE_MODE_SKIP)) 182 else if (!(cp->mode & CPARSE_MODE_SKIP))
@@ -177,7 +189,7 @@ static CPToken cp_number(CPState *cp)
177static CPToken cp_ident(CPState *cp) 189static CPToken cp_ident(CPState *cp)
178{ 190{
179 do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp))); 191 do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp)));
180 cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n); 192 cp->str = lj_buf_str(cp->L, &cp->sb);
181 cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask); 193 cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask);
182 if (ctype_type(cp->ct->info) == CT_KW) 194 if (ctype_type(cp->ct->info) == CT_KW)
183 return ctype_cid(cp->ct->info); 195 return ctype_cid(cp->ct->info);
@@ -263,11 +275,11 @@ static CPToken cp_string(CPState *cp)
263 } 275 }
264 cp_get(cp); 276 cp_get(cp);
265 if (delim == '"') { 277 if (delim == '"') {
266 cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n); 278 cp->str = lj_buf_str(cp->L, &cp->sb);
267 return CTOK_STRING; 279 return CTOK_STRING;
268 } else { 280 } else {
269 if (cp->sb.n != 1) cp_err_token(cp, '\''); 281 if (sbuflen(&cp->sb) != 1) cp_err_token(cp, '\'');
270 cp->val.i32 = (int32_t)(char)cp->sb.buf[0]; 282 cp->val.i32 = (int32_t)(char)*sbufB(&cp->sb);
271 cp->val.id = CTID_INT32; 283 cp->val.id = CTID_INT32;
272 return CTOK_INTEGER; 284 return CTOK_INTEGER;
273 } 285 }
@@ -296,7 +308,7 @@ static void cp_comment_cpp(CPState *cp)
296/* Lexical scanner for C. Only a minimal subset is implemented. */ 308/* Lexical scanner for C. Only a minimal subset is implemented. */
297static CPToken cp_next_(CPState *cp) 309static CPToken cp_next_(CPState *cp)
298{ 310{
299 lj_str_resetbuf(&cp->sb); 311 lj_buf_reset(&cp->sb);
300 for (;;) { 312 for (;;) {
301 if (lj_char_isident(cp->c)) 313 if (lj_char_isident(cp->c))
302 return lj_char_isdigit(cp->c) ? cp_number(cp) : cp_ident(cp); 314 return lj_char_isdigit(cp->c) ? cp_number(cp) : cp_ident(cp);
@@ -385,9 +397,8 @@ static void cp_init(CPState *cp)
385 cp->depth = 0; 397 cp->depth = 0;
386 cp->curpack = 0; 398 cp->curpack = 0;
387 cp->packstack[0] = 255; 399 cp->packstack[0] = 255;
388 lj_str_initbuf(&cp->sb); 400 lj_buf_init(cp->L, &cp->sb);
389 lj_str_resizebuf(cp->L, &cp->sb, LJ_MIN_SBUF); 401 lj_assertCP(cp->p != NULL, "uninitialized cp->p");
390 lua_assert(cp->p != NULL);
391 cp_get(cp); /* Read-ahead first char. */ 402 cp_get(cp); /* Read-ahead first char. */
392 cp->tok = 0; 403 cp->tok = 0;
393 cp->tmask = CPNS_DEFAULT; 404 cp->tmask = CPNS_DEFAULT;
@@ -398,7 +409,7 @@ static void cp_init(CPState *cp)
398static void cp_cleanup(CPState *cp) 409static void cp_cleanup(CPState *cp)
399{ 410{
400 global_State *g = G(cp->L); 411 global_State *g = G(cp->L);
401 lj_str_freebuf(g, &cp->sb); 412 lj_buf_free(g, &cp->sb);
402} 413}
403 414
404/* Check and consume optional token. */ 415/* Check and consume optional token. */
@@ -848,12 +859,13 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
848 /* The cid is already part of info for copies of pointers/functions. */ 859 /* The cid is already part of info for copies of pointers/functions. */
849 idx = ct->next; 860 idx = ct->next;
850 if (ctype_istypedef(info)) { 861 if (ctype_istypedef(info)) {
851 lua_assert(id == 0); 862 lj_assertCP(id == 0, "typedef not at toplevel");
852 id = ctype_cid(info); 863 id = ctype_cid(info);
853 /* Always refetch info/size, since struct/enum may have been completed. */ 864 /* Always refetch info/size, since struct/enum may have been completed. */
854 cinfo = ctype_get(cp->cts, id)->info; 865 cinfo = ctype_get(cp->cts, id)->info;
855 csize = ctype_get(cp->cts, id)->size; 866 csize = ctype_get(cp->cts, id)->size;
856 lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo)); 867 lj_assertCP(ctype_isstruct(cinfo) || ctype_isenum(cinfo),
868 "typedef of bad type");
857 } else if (ctype_isfunc(info)) { /* Intern function. */ 869 } else if (ctype_isfunc(info)) { /* Intern function. */
858 CType *fct; 870 CType *fct;
859 CTypeID fid; 871 CTypeID fid;
@@ -886,7 +898,7 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
886 /* Inherit csize/cinfo from original type. */ 898 /* Inherit csize/cinfo from original type. */
887 } else { 899 } else {
888 if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ 900 if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */
889 lua_assert(id == 0); 901 lj_assertCP(id == 0, "number not at toplevel");
890 if (!(info & CTF_BOOL)) { 902 if (!(info & CTF_BOOL)) {
891 CTSize msize = ctype_msizeP(decl->attr); 903 CTSize msize = ctype_msizeP(decl->attr);
892 CTSize vsize = ctype_vsizeP(decl->attr); 904 CTSize vsize = ctype_vsizeP(decl->attr);
@@ -941,7 +953,7 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
941 info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN); 953 info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN);
942 info |= (cinfo & CTF_QUAL); /* Inherit qual. */ 954 info |= (cinfo & CTF_QUAL); /* Inherit qual. */
943 } else { 955 } else {
944 lua_assert(ctype_isvoid(info)); 956 lj_assertCP(ctype_isvoid(info), "bad ctype %08x", info);
945 } 957 }
946 csize = size; 958 csize = size;
947 cinfo = info+id; 959 cinfo = info+id;
@@ -953,8 +965,6 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
953 965
954/* -- C declaration parser ------------------------------------------------ */ 966/* -- C declaration parser ------------------------------------------------ */
955 967
956#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
957
958/* Reset declaration state to declaration specifier. */ 968/* Reset declaration state to declaration specifier. */
959static void cp_decl_reset(CPDecl *decl) 969static void cp_decl_reset(CPDecl *decl)
960{ 970{
@@ -1031,7 +1041,7 @@ static void cp_decl_asm(CPState *cp, CPDecl *decl)
1031 if (cp->tok == CTOK_STRING) { 1041 if (cp->tok == CTOK_STRING) {
1032 GCstr *str = cp->str; 1042 GCstr *str = cp->str;
1033 while (cp_next(cp) == CTOK_STRING) { 1043 while (cp_next(cp) == CTOK_STRING) {
1034 lj_str_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str)); 1044 lj_strfmt_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str));
1035 cp->L->top--; 1045 cp->L->top--;
1036 str = strV(cp->L->top); 1046 str = strV(cp->L->top);
1037 } 1047 }
@@ -1083,44 +1093,57 @@ static void cp_decl_gccattribute(CPState *cp, CPDecl *decl)
1083 if (cp->tok == CTOK_IDENT) { 1093 if (cp->tok == CTOK_IDENT) {
1084 GCstr *attrstr = cp->str; 1094 GCstr *attrstr = cp->str;
1085 cp_next(cp); 1095 cp_next(cp);
1086 switch (attrstr->hash) { 1096 switch (lj_cparse_case(attrstr,
1087 case H_(64a9208e,8ce14319): case H_(8e6331b2,95a282af): /* aligned */ 1097 "\007aligned" "\013__aligned__"
1098 "\006packed" "\012__packed__"
1099 "\004mode" "\010__mode__"
1100 "\013vector_size" "\017__vector_size__"
1101#if LJ_TARGET_X86
1102 "\007regparm" "\013__regparm__"
1103 "\005cdecl" "\011__cdecl__"
1104 "\010thiscall" "\014__thiscall__"
1105 "\010fastcall" "\014__fastcall__"
1106 "\007stdcall" "\013__stdcall__"
1107 "\012sseregparm" "\016__sseregparm__"
1108#endif
1109 )) {
1110 case 0: case 1: /* aligned */
1088 cp_decl_align(cp, decl); 1111 cp_decl_align(cp, decl);
1089 break; 1112 break;
1090 case H_(42eb47de,f0ede26c): case H_(29f48a09,cf383e0c): /* packed */ 1113 case 2: case 3: /* packed */
1091 decl->attr |= CTFP_PACKED; 1114 decl->attr |= CTFP_PACKED;
1092 break; 1115 break;
1093 case H_(0a84eef6,8dfab04c): case H_(995cf92c,d5696591): /* mode */ 1116 case 4: case 5: /* mode */
1094 cp_decl_mode(cp, decl); 1117 cp_decl_mode(cp, decl);
1095 break; 1118 break;
1096 case H_(0ab31997,2d5213fa): case H_(bf875611,200e9990): /* vector_size */ 1119 case 6: case 7: /* vector_size */
1097 { 1120 {
1098 CTSize vsize = cp_decl_sizeattr(cp); 1121 CTSize vsize = cp_decl_sizeattr(cp);
1099 if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize)); 1122 if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize));
1100 } 1123 }
1101 break; 1124 break;
1102#if LJ_TARGET_X86 1125#if LJ_TARGET_X86
1103 case H_(5ad22db8,c689b848): case H_(439150fa,65ea78cb): /* regparm */ 1126 case 8: case 9: /* regparm */
1104 CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp)); 1127 CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp));
1105 decl->fattr |= CTFP_CCONV; 1128 decl->fattr |= CTFP_CCONV;
1106 break; 1129 break;
1107 case H_(18fc0b98,7ff4c074): case H_(4e62abed,0a747424): /* cdecl */ 1130 case 10: case 11: /* cdecl */
1108 CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL); 1131 CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL);
1109 decl->fattr |= CTFP_CCONV; 1132 decl->fattr |= CTFP_CCONV;
1110 break; 1133 break;
1111 case H_(72b2e41b,494c5a44): case H_(f2356d59,f25fc9bd): /* thiscall */ 1134 case 12: case 13: /* thiscall */
1112 CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL); 1135 CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL);
1113 decl->fattr |= CTFP_CCONV; 1136 decl->fattr |= CTFP_CCONV;
1114 break; 1137 break;
1115 case H_(0d0ffc42,ab746f88): case H_(21c54ba1,7f0ca7e3): /* fastcall */ 1138 case 14: case 15: /* fastcall */
1116 CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL); 1139 CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL);
1117 decl->fattr |= CTFP_CCONV; 1140 decl->fattr |= CTFP_CCONV;
1118 break; 1141 break;
1119 case H_(ef76b040,9412e06a): case H_(de56697b,c750e6e1): /* stdcall */ 1142 case 16: case 17: /* stdcall */
1120 CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL); 1143 CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL);
1121 decl->fattr |= CTFP_CCONV; 1144 decl->fattr |= CTFP_CCONV;
1122 break; 1145 break;
1123 case H_(ea78b622,f234bd8e): case H_(252ffb06,8d50f34b): /* sseregparm */ 1146 case 18: case 19: /* sseregparm */
1124 decl->fattr |= CTF_SSEREGPARM; 1147 decl->fattr |= CTF_SSEREGPARM;
1125 decl->fattr |= CTFP_CCONV; 1148 decl->fattr |= CTFP_CCONV;
1126 break; 1149 break;
@@ -1152,16 +1175,13 @@ static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl)
1152 while (cp->tok == CTOK_IDENT) { 1175 while (cp->tok == CTOK_IDENT) {
1153 GCstr *attrstr = cp->str; 1176 GCstr *attrstr = cp->str;
1154 cp_next(cp); 1177 cp_next(cp);
1155 switch (attrstr->hash) { 1178 if (cp_str_is(attrstr, "align")) {
1156 case H_(bc2395fa,98f267f8): /* align */
1157 cp_decl_align(cp, decl); 1179 cp_decl_align(cp, decl);
1158 break; 1180 } else { /* Ignore all other attributes. */
1159 default: /* Ignore all other attributes. */
1160 if (cp_opt(cp, '(')) { 1181 if (cp_opt(cp, '(')) {
1161 while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp); 1182 while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp);
1162 cp_check(cp, ')'); 1183 cp_check(cp, ')');
1163 } 1184 }
1164 break;
1165 } 1185 }
1166 } 1186 }
1167 cp_check(cp, ')'); 1187 cp_check(cp, ')');
@@ -1572,7 +1592,7 @@ end_decl:
1572 cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); 1592 cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC);
1573 sz = sizeof(int); 1593 sz = sizeof(int);
1574 } 1594 }
1575 lua_assert(sz != 0); 1595 lj_assertCP(sz != 0, "basic ctype with zero size");
1576 info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ 1596 info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */
1577 info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ 1597 info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */
1578 cp_push(decl, info, sz); 1598 cp_push(decl, info, sz);
@@ -1741,17 +1761,16 @@ static CTypeID cp_decl_abstract(CPState *cp)
1741static void cp_pragma(CPState *cp, BCLine pragmaline) 1761static void cp_pragma(CPState *cp, BCLine pragmaline)
1742{ 1762{
1743 cp_next(cp); 1763 cp_next(cp);
1744 if (cp->tok == CTOK_IDENT && 1764 if (cp->tok == CTOK_IDENT && cp_str_is(cp->str, "pack")) {
1745 cp->str->hash == H_(e79b999f,42ca3e85)) { /* pack */
1746 cp_next(cp); 1765 cp_next(cp);
1747 cp_check(cp, '('); 1766 cp_check(cp, '(');
1748 if (cp->tok == CTOK_IDENT) { 1767 if (cp->tok == CTOK_IDENT) {
1749 if (cp->str->hash == H_(738e923c,a1b65954)) { /* push */ 1768 if (cp_str_is(cp->str, "push")) {
1750 if (cp->curpack < CPARSE_MAX_PACKSTACK) { 1769 if (cp->curpack < CPARSE_MAX_PACKSTACK) {
1751 cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack]; 1770 cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack];
1752 cp->curpack++; 1771 cp->curpack++;
1753 } 1772 }
1754 } else if (cp->str->hash == H_(6c71cf27,6c71cf27)) { /* pop */ 1773 } else if (cp_str_is(cp->str, "pop")) {
1755 if (cp->curpack > 0) cp->curpack--; 1774 if (cp->curpack > 0) cp->curpack--;
1756 } else { 1775 } else {
1757 cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); 1776 cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
@@ -1773,6 +1792,16 @@ static void cp_pragma(CPState *cp, BCLine pragmaline)
1773 } 1792 }
1774} 1793}
1775 1794
1795/* Handle line number. */
1796static void cp_line(CPState *cp, BCLine hashline)
1797{
1798 BCLine newline = cp->val.u32;
1799 /* TODO: Handle file name and include it in error messages. */
1800 while (cp->tok != CTOK_EOF && cp->linenumber == hashline)
1801 cp_next(cp);
1802 cp->linenumber = newline;
1803}
1804
1776/* Parse multiple C declarations of types or extern identifiers. */ 1805/* Parse multiple C declarations of types or extern identifiers. */
1777static void cp_decl_multi(CPState *cp) 1806static void cp_decl_multi(CPState *cp)
1778{ 1807{
@@ -1785,12 +1814,21 @@ static void cp_decl_multi(CPState *cp)
1785 continue; 1814 continue;
1786 } 1815 }
1787 if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */ 1816 if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */
1788 BCLine pragmaline = cp->linenumber; 1817 BCLine hashline = cp->linenumber;
1789 if (!(cp_next(cp) == CTOK_IDENT && 1818 CPToken tok = cp_next(cp);
1790 cp->str->hash == H_(f5e6b4f8,1d509107))) /* pragma */ 1819 if (tok == CTOK_INTEGER) {
1820 cp_line(cp, hashline);
1821 continue;
1822 } else if (tok == CTOK_IDENT && cp_str_is(cp->str, "line")) {
1823 if (cp_next(cp) != CTOK_INTEGER) cp_err_token(cp, tok);
1824 cp_line(cp, hashline);
1825 continue;
1826 } else if (tok == CTOK_IDENT && cp_str_is(cp->str, "pragma")) {
1827 cp_pragma(cp, hashline);
1828 continue;
1829 } else {
1791 cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL); 1830 cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
1792 cp_pragma(cp, pragmaline); 1831 }
1793 continue;
1794 } 1832 }
1795 scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC); 1833 scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC);
1796 if ((cp->tok == ';' || cp->tok == CTOK_EOF) && 1834 if ((cp->tok == ';' || cp->tok == CTOK_EOF) &&
@@ -1814,7 +1852,7 @@ static void cp_decl_multi(CPState *cp)
1814 /* Treat both static and extern function declarations as extern. */ 1852 /* Treat both static and extern function declarations as extern. */
1815 ct = ctype_get(cp->cts, ctypeid); 1853 ct = ctype_get(cp->cts, ctypeid);
1816 /* We always get new anonymous functions (typedefs are copied). */ 1854 /* We always get new anonymous functions (typedefs are copied). */
1817 lua_assert(gcref(ct->name) == NULL); 1855 lj_assertCP(gcref(ct->name) == NULL, "unexpected named function");
1818 id = ctypeid; /* Just name it. */ 1856 id = ctypeid; /* Just name it. */
1819 } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ 1857 } else if ((scl & CDF_STATIC)) { /* Accept static constants. */
1820 id = cp_decl_constinit(cp, &ct, ctypeid); 1858 id = cp_decl_constinit(cp, &ct, ctypeid);
@@ -1856,8 +1894,6 @@ static void cp_decl_single(CPState *cp)
1856 if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF); 1894 if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF);
1857} 1895}
1858 1896
1859#undef H_
1860
1861/* ------------------------------------------------------------------------ */ 1897/* ------------------------------------------------------------------------ */
1862 1898
1863/* Protected callback for C parser. */ 1899/* Protected callback for C parser. */
@@ -1873,7 +1909,7 @@ static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud)
1873 cp_decl_single(cp); 1909 cp_decl_single(cp);
1874 if (cp->param && cp->param != cp->L->top) 1910 if (cp->param && cp->param != cp->L->top)
1875 cp_err(cp, LJ_ERR_FFI_NUMPARAM); 1911 cp_err(cp, LJ_ERR_FFI_NUMPARAM);
1876 lua_assert(cp->depth == 0); 1912 lj_assertCP(cp->depth == 0, "unbalanced cparser declaration depth");
1877 return NULL; 1913 return NULL;
1878} 1914}
1879 1915
diff --git a/src/lj_cparse.h b/src/lj_cparse.h
index 87eb3ff4..5f667a7c 100644
--- a/src/lj_cparse.h
+++ b/src/lj_cparse.h
@@ -60,6 +60,8 @@ typedef struct CPState {
60 60
61LJ_FUNC int lj_cparse(CPState *cp); 61LJ_FUNC int lj_cparse(CPState *cp);
62 62
63LJ_FUNC int lj_cparse_case(GCstr *str, const char *match);
64
63#endif 65#endif
64 66
65#endif 67#endif
diff --git a/src/lj_crecord.c b/src/lj_crecord.c
index 6e3c36a1..7ae1479e 100644
--- a/src/lj_crecord.c
+++ b/src/lj_crecord.c
@@ -11,13 +11,13 @@
11#if LJ_HASJIT && LJ_HASFFI 11#if LJ_HASJIT && LJ_HASFFI
12 12
13#include "lj_err.h" 13#include "lj_err.h"
14#include "lj_str.h"
15#include "lj_tab.h" 14#include "lj_tab.h"
16#include "lj_frame.h" 15#include "lj_frame.h"
17#include "lj_ctype.h" 16#include "lj_ctype.h"
18#include "lj_cdata.h" 17#include "lj_cdata.h"
19#include "lj_cparse.h" 18#include "lj_cparse.h"
20#include "lj_cconv.h" 19#include "lj_cconv.h"
20#include "lj_carith.h"
21#include "lj_clib.h" 21#include "lj_clib.h"
22#include "lj_ccall.h" 22#include "lj_ccall.h"
23#include "lj_ff.h" 23#include "lj_ff.h"
@@ -31,6 +31,7 @@
31#include "lj_snap.h" 31#include "lj_snap.h"
32#include "lj_crecord.h" 32#include "lj_crecord.h"
33#include "lj_dispatch.h" 33#include "lj_dispatch.h"
34#include "lj_strfmt.h"
34 35
35/* Some local macros to save typing. Undef'd at the end. */ 36/* Some local macros to save typing. Undef'd at the end. */
36#define IR(ref) (&J->cur.ir[(ref)]) 37#define IR(ref) (&J->cur.ir[(ref)])
@@ -60,7 +61,8 @@ static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o)
60static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) 61static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr)
61{ 62{
62 CTypeID id; 63 CTypeID id;
63 lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID); 64 lj_assertJ(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID,
65 "expected CTypeID cdata");
64 id = *(CTypeID *)cdataptr(cd); 66 id = *(CTypeID *)cdataptr(cd);
65 tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT); 67 tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT);
66 emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); 68 emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id));
@@ -211,7 +213,7 @@ static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp,
211 ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0); 213 ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0);
212 ml[i].trofs = trofs; 214 ml[i].trofs = trofs;
213 i++; 215 i++;
214 rwin += (LJ_SOFTFP && ml[i].tp == IRT_NUM) ? 2 : 1; 216 rwin += (LJ_SOFTFP32 && ml[i].tp == IRT_NUM) ? 2 : 1;
215 if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. */ 217 if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. */
216 rwin = 0; 218 rwin = 0;
217 for ( ; j < i; j++) { 219 for ( ; j < i; j++) {
@@ -236,13 +238,14 @@ static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen,
236 if (len > CREC_COPY_MAXLEN) goto fallback; 238 if (len > CREC_COPY_MAXLEN) goto fallback;
237 if (ct) { 239 if (ct) {
238 CTState *cts = ctype_ctsG(J2G(J)); 240 CTState *cts = ctype_ctsG(J2G(J));
239 lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info)); 241 lj_assertJ(ctype_isarray(ct->info) || ctype_isstruct(ct->info),
242 "copy of non-aggregate");
240 if (ctype_isarray(ct->info)) { 243 if (ctype_isarray(ct->info)) {
241 CType *cct = ctype_rawchild(cts, ct); 244 CType *cct = ctype_rawchild(cts, ct);
242 tp = crec_ct2irt(cts, cct); 245 tp = crec_ct2irt(cts, cct);
243 if (tp == IRT_CDATA) goto rawcopy; 246 if (tp == IRT_CDATA) goto rawcopy;
244 step = lj_ir_type_size[tp]; 247 step = lj_ir_type_size[tp];
245 lua_assert((len & (step-1)) == 0); 248 lj_assertJ((len & (step-1)) == 0, "copy of fractional size");
246 } else if ((ct->info & CTF_UNION)) { 249 } else if ((ct->info & CTF_UNION)) {
247 step = (1u << ctype_align(ct->info)); 250 step = (1u << ctype_align(ct->info));
248 goto rawcopy; 251 goto rawcopy;
@@ -441,7 +444,7 @@ static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
441 /* fallthrough */ 444 /* fallthrough */
442 case CCX(I, F): 445 case CCX(I, F):
443 if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi; 446 if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
444 sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_TRUNC|IRCONV_ANY); 447 sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_ANY);
445 goto xstore; 448 goto xstore;
446 case CCX(I, P): 449 case CCX(I, P):
447 case CCX(I, A): 450 case CCX(I, A):
@@ -521,7 +524,7 @@ static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
521 if (st == IRT_CDATA) goto err_nyi; 524 if (st == IRT_CDATA) goto err_nyi;
522 /* The signed conversion is cheaper. x64 really has 47 bit pointers. */ 525 /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
523 sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32, 526 sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32,
524 st, IRCONV_TRUNC|IRCONV_ANY); 527 st, IRCONV_ANY);
525 goto xstore; 528 goto xstore;
526 529
527 /* Destination is an array. */ 530 /* Destination is an array. */
@@ -628,7 +631,8 @@ static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
628 /* Specialize to the name of the enum constant. */ 631 /* Specialize to the name of the enum constant. */
629 emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); 632 emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str));
630 if (cct && ctype_isconstval(cct->info)) { 633 if (cct && ctype_isconstval(cct->info)) {
631 lua_assert(ctype_child(cts, cct)->size == 4); 634 lj_assertJ(ctype_child(cts, cct)->size == 4,
635 "only 32 bit const supported"); /* NYI */
632 svisnz = (void *)(intptr_t)(ofs != 0); 636 svisnz = (void *)(intptr_t)(ofs != 0);
633 sp = lj_ir_kint(J, (int32_t)ofs); 637 sp = lj_ir_kint(J, (int32_t)ofs);
634 sid = ctype_cid(cct->info); 638 sid = ctype_cid(cct->info);
@@ -640,12 +644,23 @@ static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
640 sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr))); 644 sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr)));
641 sid = CTID_A_CCHAR; 645 sid = CTID_A_CCHAR;
642 } 646 }
643 } else { /* NYI: tref_istab(sp), tref_islightud(sp). */ 647 } else if (tref_islightud(sp)) {
648#if LJ_64
649 sp = emitir(IRT(IR_BAND, IRT_P64), sp,
650 lj_ir_kint64(J, U64x(00007fff,ffffffff)));
651#endif
652 } else { /* NYI: tref_istab(sp). */
644 IRType t; 653 IRType t;
645 sid = argv2cdata(J, sp, sval)->ctypeid; 654 sid = argv2cdata(J, sp, sval)->ctypeid;
646 s = ctype_raw(cts, sid); 655 s = ctype_raw(cts, sid);
647 svisnz = cdataptr(cdataV(sval)); 656 svisnz = cdataptr(cdataV(sval));
648 t = crec_ct2irt(cts, s); 657 if (ctype_isfunc(s->info)) {
658 sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
659 s = ctype_get(cts, sid);
660 t = IRT_PTR;
661 } else {
662 t = crec_ct2irt(cts, s);
663 }
649 if (ctype_isptr(s->info)) { 664 if (ctype_isptr(s->info)) {
650 sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR); 665 sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR);
651 if (ctype_isref(s->info)) { 666 if (ctype_isref(s->info)) {
@@ -700,6 +715,19 @@ static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz)
700 return tr; 715 return tr;
701} 716}
702 717
718/* Tailcall to function. */
719static void crec_tailcall(jit_State *J, RecordFFData *rd, cTValue *tv)
720{
721 TRef kfunc = lj_ir_kfunc(J, funcV(tv));
722#if LJ_FR2
723 J->base[-2] = kfunc;
724 J->base[-1] = TREF_FRAME;
725#else
726 J->base[-1] = kfunc | TREF_FRAME;
727#endif
728 rd->nres = -1; /* Pending tailcall. */
729}
730
703/* Record ctype __index/__newindex metamethods. */ 731/* Record ctype __index/__newindex metamethods. */
704static void crec_index_meta(jit_State *J, CTState *cts, CType *ct, 732static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
705 RecordFFData *rd) 733 RecordFFData *rd)
@@ -709,8 +737,7 @@ static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
709 if (!tv) 737 if (!tv)
710 lj_trace_err(J, LJ_TRERR_BADTYPE); 738 lj_trace_err(J, LJ_TRERR_BADTYPE);
711 if (tvisfunc(tv)) { 739 if (tvisfunc(tv)) {
712 J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; 740 crec_tailcall(J, rd, tv);
713 rd->nres = -1; /* Pending tailcall. */
714 } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) { 741 } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) {
715 /* Specialize to result of __index lookup. */ 742 /* Specialize to result of __index lookup. */
716 cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]); 743 cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]);
@@ -727,6 +754,48 @@ static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
727 } 754 }
728} 755}
729 756
757/* Record bitfield load/store. */
758static void crec_index_bf(jit_State *J, RecordFFData *rd, TRef ptr, CTInfo info)
759{
760 IRType t = IRT_I8 + 2*lj_fls(ctype_bitcsz(info)) + ((info&CTF_UNSIGNED)?1:0);
761 TRef tr = emitir(IRT(IR_XLOAD, t), ptr, 0);
762 CTSize pos = ctype_bitpos(info), bsz = ctype_bitbsz(info), shift = 32 - bsz;
763 lj_assertJ(t <= IRT_U32, "only 32 bit bitfields supported"); /* NYI */
764 if (rd->data == 0) { /* __index metamethod. */
765 if ((info & CTF_BOOL)) {
766 tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << pos))));
767 /* Assume not equal to zero. Fixup and emit pending guard later. */
768 lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
769 J->postproc = LJ_POST_FIXGUARD;
770 tr = TREF_TRUE;
771 } else if (!(info & CTF_UNSIGNED)) {
772 tr = emitir(IRTI(IR_BSHL), tr, lj_ir_kint(J, shift - pos));
773 tr = emitir(IRTI(IR_BSAR), tr, lj_ir_kint(J, shift));
774 } else {
775 lj_assertJ(bsz < 32, "unexpected full bitfield index");
776 tr = emitir(IRTI(IR_BSHR), tr, lj_ir_kint(J, pos));
777 tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << bsz)-1)));
778 /* We can omit the U32 to NUM conversion, since bsz < 32. */
779 }
780 J->base[0] = tr;
781 } else { /* __newindex metamethod. */
782 CTState *cts = ctype_ctsG(J2G(J));
783 CType *ct = ctype_get(cts,
784 (info & CTF_BOOL) ? CTID_BOOL :
785 (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32);
786 int32_t mask = (int32_t)(((1u << bsz)-1) << pos);
787 TRef sp = crec_ct_tv(J, ct, 0, J->base[2], &rd->argv[2]);
788 sp = emitir(IRTI(IR_BSHL), sp, lj_ir_kint(J, pos));
789 /* Use of the target type avoids forwarding conversions. */
790 sp = emitir(IRT(IR_BAND, t), sp, lj_ir_kint(J, mask));
791 tr = emitir(IRT(IR_BAND, t), tr, lj_ir_kint(J, (int32_t)~mask));
792 tr = emitir(IRT(IR_BOR, t), tr, sp);
793 emitir(IRT(IR_XSTORE, t), ptr, tr);
794 rd->nres = 0;
795 J->needsnap = 1;
796 }
797}
798
730void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd) 799void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd)
731{ 800{
732 TRef idx, ptr = J->base[0]; 801 TRef idx, ptr = J->base[0];
@@ -801,6 +870,7 @@ again:
801 CType *fct; 870 CType *fct;
802 fct = lj_ctype_getfield(cts, ct, name, &fofs); 871 fct = lj_ctype_getfield(cts, ct, name, &fofs);
803 if (fct) { 872 if (fct) {
873 ofs += (ptrdiff_t)fofs;
804 /* Always specialize to the field name. */ 874 /* Always specialize to the field name. */
805 emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name)); 875 emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
806 if (ctype_isconstval(fct->info)) { 876 if (ctype_isconstval(fct->info)) {
@@ -812,12 +882,14 @@ again:
812 J->base[0] = lj_ir_kint(J, (int32_t)fct->size); 882 J->base[0] = lj_ir_kint(J, (int32_t)fct->size);
813 return; /* Interpreter will throw for newindex. */ 883 return; /* Interpreter will throw for newindex. */
814 } else if (ctype_isbitfield(fct->info)) { 884 } else if (ctype_isbitfield(fct->info)) {
815 lj_trace_err(J, LJ_TRERR_NYICONV); 885 if (ofs)
886 ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
887 crec_index_bf(J, rd, ptr, fct->info);
888 return;
816 } else { 889 } else {
817 lua_assert(ctype_isfield(fct->info)); 890 lj_assertJ(ctype_isfield(fct->info), "field expected");
818 sid = ctype_cid(fct->info); 891 sid = ctype_cid(fct->info);
819 } 892 }
820 ofs += (ptrdiff_t)fofs;
821 } 893 }
822 } else if (ctype_iscomplex(ct->info)) { 894 } else if (ctype_iscomplex(ct->info)) {
823 if (name->len == 2 && 895 if (name->len == 2 &&
@@ -867,21 +939,17 @@ again:
867} 939}
868 940
869/* Record setting a finalizer. */ 941/* Record setting a finalizer. */
870static void crec_finalizer(jit_State *J, TRef trcd, cTValue *fin) 942static void crec_finalizer(jit_State *J, TRef trcd, TRef trfin, cTValue *fin)
871{ 943{
872 TRef trlo = lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd); 944 if (tvisgcv(fin)) {
873 TRef trhi = emitir(IRT(IR_ADD, IRT_P32), trlo, lj_ir_kint(J, 4)); 945 if (!trfin) trfin = lj_ir_kptr(J, gcval(fin));
874 if (LJ_BE) { TRef tmp = trlo; trlo = trhi; trhi = tmp; } 946 } else if (tvisnil(fin)) {
875 if (tvisfunc(fin)) { 947 trfin = lj_ir_kptr(J, NULL);
876 emitir(IRT(IR_XSTORE, IRT_P32), trlo, lj_ir_kfunc(J, funcV(fin)));
877 emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TFUNC));
878 } else if (tviscdata(fin)) {
879 emitir(IRT(IR_XSTORE, IRT_P32), trlo,
880 lj_ir_kgc(J, obj2gco(cdataV(fin)), IRT_CDATA));
881 emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TCDATA));
882 } else { 948 } else {
883 lj_trace_err(J, LJ_TRERR_BADTYPE); 949 lj_trace_err(J, LJ_TRERR_BADTYPE);
884 } 950 }
951 lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd,
952 trfin, lj_ir_kint(J, (int32_t)itype(fin)));
885 J->needsnap = 1; 953 J->needsnap = 1;
886} 954}
887 955
@@ -892,10 +960,8 @@ static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
892 CTSize sz; 960 CTSize sz;
893 CTInfo info = lj_ctype_info(cts, id, &sz); 961 CTInfo info = lj_ctype_info(cts, id, &sz);
894 CType *d = ctype_raw(cts, id); 962 CType *d = ctype_raw(cts, id);
895 TRef trid; 963 TRef trcd, trid = lj_ir_kint(J, id);
896 if (!sz || sz > 128 || (info & CTF_VLA) || ctype_align(info) > CT_MEMALIGN) 964 cTValue *fin;
897 lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: large/special allocations. */
898 trid = lj_ir_kint(J, id);
899 /* Use special instruction to box pointer or 32/64 bit integer. */ 965 /* Use special instruction to box pointer or 32/64 bit integer. */
900 if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) { 966 if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) {
901 TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) : 967 TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) :
@@ -903,11 +969,36 @@ static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
903 sz == 4 ? lj_ir_kint(J, 0) : 969 sz == 4 ? lj_ir_kint(J, 0) :
904 (lj_needsplit(J), lj_ir_kint64(J, 0)); 970 (lj_needsplit(J), lj_ir_kint64(J, 0));
905 J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp); 971 J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp);
972 return;
906 } else { 973 } else {
907 TRef trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, TREF_NIL); 974 TRef trsz = TREF_NIL;
908 cTValue *fin; 975 if ((info & CTF_VLA)) { /* Calculate VLA/VLS size at runtime. */
909 J->base[0] = trcd; 976 CTSize sz0, sz1;
910 if (J->base[1] && !J->base[2] && 977 if (!J->base[1] || J->base[2])
978 lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init VLA/VLS. */
979 trsz = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0,
980 J->base[1], &rd->argv[1]);
981 sz0 = lj_ctype_vlsize(cts, d, 0);
982 sz1 = lj_ctype_vlsize(cts, d, 1);
983 trsz = emitir(IRTGI(IR_MULOV), trsz, lj_ir_kint(J, (int32_t)(sz1-sz0)));
984 trsz = emitir(IRTGI(IR_ADDOV), trsz, lj_ir_kint(J, (int32_t)sz0));
985 J->base[1] = 0; /* Simplify logic below. */
986 } else if (ctype_align(info) > CT_MEMALIGN) {
987 trsz = lj_ir_kint(J, sz);
988 }
989 trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, trsz);
990 if (sz > 128 || (info & CTF_VLA)) {
991 TRef dp;
992 CTSize align;
993 special: /* Only handle bulk zero-fill for large/VLA/VLS types. */
994 if (J->base[1])
995 lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init large/VLA/VLS types. */
996 dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
997 if (trsz == TREF_NIL) trsz = lj_ir_kint(J, sz);
998 align = ctype_align(info);
999 if (align < CT_MEMALIGN) align = CT_MEMALIGN;
1000 crec_fill(J, dp, trsz, lj_ir_kint(J, 0), (1u << align));
1001 } else if (J->base[1] && !J->base[2] &&
911 !lj_cconv_multi_init(cts, d, &rd->argv[1])) { 1002 !lj_cconv_multi_init(cts, d, &rd->argv[1])) {
912 goto single_init; 1003 goto single_init;
913 } else if (ctype_isarray(d->info)) { 1004 } else if (ctype_isarray(d->info)) {
@@ -918,8 +1009,9 @@ static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
918 TValue *sval = &tv; 1009 TValue *sval = &tv;
919 MSize i; 1010 MSize i;
920 tv.u64 = 0; 1011 tv.u64 = 0;
921 if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info))) 1012 if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)) ||
922 lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init array of aggregates. */ 1013 esize * CREC_FILL_MAXUNROLL < sz)
1014 goto special;
923 for (i = 1, ofs = 0; ofs < sz; ofs += esize) { 1015 for (i = 1, ofs = 0; ofs < sz; ofs += esize) {
924 TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, 1016 TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
925 lj_ir_kintp(J, ofs + sizeof(GCcdata))); 1017 lj_ir_kintp(J, ofs + sizeof(GCcdata)));
@@ -976,11 +1068,12 @@ static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
976 crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv); 1068 crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv);
977 } 1069 }
978 } 1070 }
979 /* Handle __gc metamethod. */
980 fin = lj_ctype_meta(cts, id, MM_gc);
981 if (fin)
982 crec_finalizer(J, trcd, fin);
983 } 1071 }
1072 J->base[0] = trcd;
1073 /* Handle __gc metamethod. */
1074 fin = lj_ctype_meta(cts, id, MM_gc);
1075 if (fin)
1076 crec_finalizer(J, trcd, 0, fin);
984} 1077}
985 1078
986/* Record argument conversions. */ 1079/* Record argument conversions. */
@@ -1021,7 +1114,7 @@ static TRef crec_call_args(jit_State *J, RecordFFData *rd,
1021 if (fid) { /* Get argument type from field. */ 1114 if (fid) { /* Get argument type from field. */
1022 CType *ctf = ctype_get(cts, fid); 1115 CType *ctf = ctype_get(cts, fid);
1023 fid = ctf->sib; 1116 fid = ctf->sib;
1024 lua_assert(ctype_isfield(ctf->info)); 1117 lj_assertJ(ctype_isfield(ctf->info), "field expected");
1025 did = ctype_cid(ctf->info); 1118 did = ctype_cid(ctf->info);
1026 } else { 1119 } else {
1027 if (!(ct->info & CTF_VARARG)) 1120 if (!(ct->info & CTF_VARARG))
@@ -1040,7 +1133,7 @@ static TRef crec_call_args(jit_State *J, RecordFFData *rd,
1040 else 1133 else
1041 tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT); 1134 tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT);
1042 } 1135 }
1043 } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) { 1136 } else if (LJ_SOFTFP32 && ctype_isfp(d->info) && d->size > 4) {
1044 lj_needsplit(J); 1137 lj_needsplit(J);
1045 } 1138 }
1046#if LJ_TARGET_X86 1139#if LJ_TARGET_X86
@@ -1086,20 +1179,20 @@ static void crec_snap_caller(jit_State *J)
1086 lua_State *L = J->L; 1179 lua_State *L = J->L;
1087 TValue *base = L->base, *top = L->top; 1180 TValue *base = L->base, *top = L->top;
1088 const BCIns *pc = J->pc; 1181 const BCIns *pc = J->pc;
1089 TRef ftr = J->base[-1]; 1182 TRef ftr = J->base[-1-LJ_FR2];
1090 ptrdiff_t delta; 1183 ptrdiff_t delta;
1091 if (!frame_islua(base-1) || J->framedepth <= 0) 1184 if (!frame_islua(base-1) || J->framedepth <= 0)
1092 lj_trace_err(J, LJ_TRERR_NYICALL); 1185 lj_trace_err(J, LJ_TRERR_NYICALL);
1093 J->pc = frame_pc(base-1); delta = 1+bc_a(J->pc[-1]); 1186 J->pc = frame_pc(base-1); delta = 1+LJ_FR2+bc_a(J->pc[-1]);
1094 L->top = base; L->base = base - delta; 1187 L->top = base; L->base = base - delta;
1095 J->base[-1] = TREF_FALSE; 1188 J->base[-1-LJ_FR2] = TREF_FALSE;
1096 J->base -= delta; J->baseslot -= (BCReg)delta; 1189 J->base -= delta; J->baseslot -= (BCReg)delta;
1097 J->maxslot = (BCReg)delta; J->framedepth--; 1190 J->maxslot = (BCReg)delta-LJ_FR2; J->framedepth--;
1098 lj_snap_add(J); 1191 lj_snap_add(J);
1099 L->base = base; L->top = top; 1192 L->base = base; L->top = top;
1100 J->framedepth++; J->maxslot = 1; 1193 J->framedepth++; J->maxslot = 1;
1101 J->base += delta; J->baseslot += (BCReg)delta; 1194 J->base += delta; J->baseslot += (BCReg)delta;
1102 J->base[-1] = ftr; J->pc = pc; 1195 J->base[-1-LJ_FR2] = ftr; J->pc = pc;
1103} 1196}
1104 1197
1105/* Record function call. */ 1198/* Record function call. */
@@ -1191,8 +1284,7 @@ void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd)
1191 tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm); 1284 tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm);
1192 if (tv) { 1285 if (tv) {
1193 if (tvisfunc(tv)) { 1286 if (tvisfunc(tv)) {
1194 J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; 1287 crec_tailcall(J, rd, tv);
1195 rd->nres = -1; /* Pending tailcall. */
1196 return; 1288 return;
1197 } 1289 }
1198 } else if (mm == MM_new) { 1290 } else if (mm == MM_new) {
@@ -1233,7 +1325,7 @@ static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm)
1233 for (i = 0; i < 2; i++) { 1325 for (i = 0; i < 2; i++) {
1234 IRType st = tref_type(sp[i]); 1326 IRType st = tref_type(sp[i]);
1235 if (st == IRT_NUM || st == IRT_FLOAT) 1327 if (st == IRT_NUM || st == IRT_FLOAT)
1236 sp[i] = emitconv(sp[i], dt, st, IRCONV_TRUNC|IRCONV_ANY); 1328 sp[i] = emitconv(sp[i], dt, st, IRCONV_ANY);
1237 else if (!(st == IRT_I64 || st == IRT_U64)) 1329 else if (!(st == IRT_I64 || st == IRT_U64))
1238 sp[i] = emitconv(sp[i], dt, IRT_INT, 1330 sp[i] = emitconv(sp[i], dt, IRT_INT,
1239 (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT); 1331 (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
@@ -1302,15 +1394,14 @@ static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm)
1302 CTypeID id; 1394 CTypeID id;
1303#if LJ_64 1395#if LJ_64
1304 if (t == IRT_NUM || t == IRT_FLOAT) 1396 if (t == IRT_NUM || t == IRT_FLOAT)
1305 tr = emitconv(tr, IRT_INTP, t, IRCONV_TRUNC|IRCONV_ANY); 1397 tr = emitconv(tr, IRT_INTP, t, IRCONV_ANY);
1306 else if (!(t == IRT_I64 || t == IRT_U64)) 1398 else if (!(t == IRT_I64 || t == IRT_U64))
1307 tr = emitconv(tr, IRT_INTP, IRT_INT, 1399 tr = emitconv(tr, IRT_INTP, IRT_INT,
1308 ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT); 1400 ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
1309#else 1401#else
1310 if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) { 1402 if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) {
1311 tr = emitconv(tr, IRT_INTP, t, 1403 tr = emitconv(tr, IRT_INTP, t,
1312 (t == IRT_NUM || t == IRT_FLOAT) ? 1404 (t == IRT_NUM || t == IRT_FLOAT) ? IRCONV_ANY : 0);
1313 IRCONV_TRUNC|IRCONV_ANY : 0);
1314 } 1405 }
1315#endif 1406#endif
1316 tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz)); 1407 tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz));
@@ -1342,8 +1433,7 @@ static TRef crec_arith_meta(jit_State *J, TRef *sp, CType **s, CTState *cts,
1342 } 1433 }
1343 if (tv) { 1434 if (tv) {
1344 if (tvisfunc(tv)) { 1435 if (tvisfunc(tv)) {
1345 J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME; 1436 crec_tailcall(J, rd, tv);
1346 rd->nres = -1; /* Pending tailcall. */
1347 return 0; 1437 return 0;
1348 } /* NYI: non-function metamethods. */ 1438 } /* NYI: non-function metamethods. */
1349 } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */ 1439 } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */
@@ -1455,8 +1545,7 @@ void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd)
1455 !irt_isguard(J->guardemit)) { 1545 !irt_isguard(J->guardemit)) {
1456 const BCIns *pc = frame_contpc(J->L->base-1) - 1; 1546 const BCIns *pc = frame_contpc(J->L->base-1) - 1;
1457 if (bc_op(*pc) <= BC_ISNEP) { 1547 if (bc_op(*pc) <= BC_ISNEP) {
1458 setframe_pc(&J2G(J)->tmptv, pc); 1548 J2G(J)->tmptv.u64 = (uint64_t)(uintptr_t)pc;
1459 J2G(J)->tmptv.u32.lo = ((tref_istrue(tr) ^ bc_op(*pc)) & 1);
1460 J->postproc = LJ_POST_FIXCOMP; 1549 J->postproc = LJ_POST_FIXCOMP;
1461 } 1550 }
1462 } 1551 }
@@ -1645,7 +1734,139 @@ void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd)
1645void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd) 1734void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd)
1646{ 1735{
1647 argv2cdata(J, J->base[0], &rd->argv[0]); 1736 argv2cdata(J, J->base[0], &rd->argv[0]);
1648 crec_finalizer(J, J->base[0], &rd->argv[1]); 1737 if (!J->base[1])
1738 lj_trace_err(J, LJ_TRERR_BADTYPE);
1739 crec_finalizer(J, J->base[0], J->base[1], &rd->argv[1]);
1740}
1741
1742/* -- 64 bit bit.* library functions -------------------------------------- */
1743
1744/* Determine bit operation type from argument type. */
1745static CTypeID crec_bit64_type(CTState *cts, cTValue *tv)
1746{
1747 if (tviscdata(tv)) {
1748 CType *ct = lj_ctype_rawref(cts, cdataV(tv)->ctypeid);
1749 if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
1750 if ((ct->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) ==
1751 CTINFO(CT_NUM, CTF_UNSIGNED) && ct->size == 8)
1752 return CTID_UINT64; /* Use uint64_t, since it has the highest rank. */
1753 return CTID_INT64; /* Otherwise use int64_t. */
1754 }
1755 return 0; /* Use regular 32 bit ops. */
1756}
1757
1758void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd)
1759{
1760 CTState *cts = ctype_ctsG(J2G(J));
1761 TRef tr = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0,
1762 J->base[0], &rd->argv[0]);
1763 if (!tref_isinteger(tr))
1764 tr = emitconv(tr, IRT_INT, tref_type(tr), 0);
1765 J->base[0] = tr;
1766}
1767
1768int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd)
1769{
1770 CTState *cts = ctype_ctsG(J2G(J));
1771 CTypeID id = crec_bit64_type(cts, &rd->argv[0]);
1772 if (id) {
1773 TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
1774 tr = emitir(IRT(rd->data, id-CTID_INT64+IRT_I64), tr, 0);
1775 J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
1776 return 1;
1777 }
1778 return 0;
1779}
1780
1781int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd)
1782{
1783 CTState *cts = ctype_ctsG(J2G(J));
1784 CTypeID id = 0;
1785 MSize i;
1786 for (i = 0; J->base[i] != 0; i++) {
1787 CTypeID aid = crec_bit64_type(cts, &rd->argv[i]);
1788 if (id < aid) id = aid; /* Determine highest type rank of all arguments. */
1789 }
1790 if (id) {
1791 CType *ct = ctype_get(cts, id);
1792 uint32_t ot = IRT(rd->data, id-CTID_INT64+IRT_I64);
1793 TRef tr = crec_ct_tv(J, ct, 0, J->base[0], &rd->argv[0]);
1794 for (i = 1; J->base[i] != 0; i++) {
1795 TRef tr2 = crec_ct_tv(J, ct, 0, J->base[i], &rd->argv[i]);
1796 tr = emitir(ot, tr, tr2);
1797 }
1798 J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
1799 return 1;
1800 }
1801 return 0;
1802}
1803
1804int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd)
1805{
1806 CTState *cts = ctype_ctsG(J2G(J));
1807 CTypeID id;
1808 TRef tsh = 0;
1809 if (J->base[0] && tref_iscdata(J->base[1])) {
1810 tsh = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0,
1811 J->base[1], &rd->argv[1]);
1812 if (!tref_isinteger(tsh))
1813 tsh = emitconv(tsh, IRT_INT, tref_type(tsh), 0);
1814 J->base[1] = tsh;
1815 }
1816 id = crec_bit64_type(cts, &rd->argv[0]);
1817 if (id) {
1818 TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
1819 uint32_t op = rd->data;
1820 if (!tsh) tsh = lj_opt_narrow_tobit(J, J->base[1]);
1821 if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
1822 !tref_isk(tsh))
1823 tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 63));
1824#ifdef LJ_TARGET_UNIFYROT
1825 if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) {
1826 op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR;
1827 tsh = emitir(IRTI(IR_NEG), tsh, tsh);
1828 }
1829#endif
1830 tr = emitir(IRT(op, id-CTID_INT64+IRT_I64), tr, tsh);
1831 J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
1832 return 1;
1833 }
1834 return 0;
1835}
1836
1837TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr)
1838{
1839 CTState *cts = ctype_ctsG(J2G(J));
1840 CTypeID id = crec_bit64_type(cts, &rd->argv[0]);
1841 TRef tr, trsf = J->base[1];
1842 SFormat sf = (STRFMT_UINT|STRFMT_T_HEX);
1843 int32_t n;
1844 if (trsf) {
1845 CTypeID id2 = 0;
1846 n = (int32_t)lj_carith_check64(J->L, 2, &id2);
1847 if (id2)
1848 trsf = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, trsf, &rd->argv[1]);
1849 else
1850 trsf = lj_opt_narrow_tobit(J, trsf);
1851 emitir(IRTGI(IR_EQ), trsf, lj_ir_kint(J, n)); /* Specialize to n. */
1852 } else {
1853 n = id ? 16 : 8;
1854 }
1855 if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; }
1856 sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC);
1857 if (id) {
1858 tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
1859 if (n < 16)
1860 tr = emitir(IRT(IR_BAND, IRT_U64), tr,
1861 lj_ir_kint64(J, ((uint64_t)1 << 4*n)-1));
1862 } else {
1863 tr = lj_opt_narrow_tobit(J, J->base[0]);
1864 if (n < 8)
1865 tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << 4*n)-1)));
1866 tr = emitconv(tr, IRT_U64, IRT_INT, 0); /* No sign-extension. */
1867 lj_needsplit(J);
1868 }
1869 return lj_ir_call(J, IRCALL_lj_strfmt_putfxint, hdr, lj_ir_kint(J, sf), tr);
1649} 1870}
1650 1871
1651/* -- Miscellaneous library functions ------------------------------------- */ 1872/* -- Miscellaneous library functions ------------------------------------- */
diff --git a/src/lj_crecord.h b/src/lj_crecord.h
index 941c8adb..4a8465ad 100644
--- a/src/lj_crecord.h
+++ b/src/lj_crecord.h
@@ -25,6 +25,13 @@ LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
25LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd); 25LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
26LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd); 26LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd);
27LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd); 27LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd);
28
29LJ_FUNC void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd);
30LJ_FUNC int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd);
31LJ_FUNC int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd);
32LJ_FUNC int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd);
33LJ_FUNC TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr);
34
28LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd); 35LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
29#endif 36#endif
30 37
diff --git a/src/lj_ctype.c b/src/lj_ctype.c
index a338e8e6..0e3f8855 100644
--- a/src/lj_ctype.c
+++ b/src/lj_ctype.c
@@ -11,8 +11,10 @@
11#include "lj_err.h" 11#include "lj_err.h"
12#include "lj_str.h" 12#include "lj_str.h"
13#include "lj_tab.h" 13#include "lj_tab.h"
14#include "lj_strfmt.h"
14#include "lj_ctype.h" 15#include "lj_ctype.h"
15#include "lj_ccallback.h" 16#include "lj_ccallback.h"
17#include "lj_buf.h"
16 18
17/* -- C type definitions -------------------------------------------------- */ 19/* -- C type definitions -------------------------------------------------- */
18 20
@@ -37,6 +39,8 @@
37 _("uint64_t", UINT64) \ 39 _("uint64_t", UINT64) \
38 _("intptr_t", INT_PSZ) \ 40 _("intptr_t", INT_PSZ) \
39 _("uintptr_t", UINT_PSZ) \ 41 _("uintptr_t", UINT_PSZ) \
42 /* From POSIX. */ \
43 _("ssize_t", INT_PSZ) \
40 /* End of typedef list. */ 44 /* End of typedef list. */
41 45
42/* Keywords (only the ones we actually care for). */ 46/* Keywords (only the ones we actually care for). */
@@ -149,7 +153,7 @@ CTypeID lj_ctype_new(CTState *cts, CType **ctp)
149{ 153{
150 CTypeID id = cts->top; 154 CTypeID id = cts->top;
151 CType *ct; 155 CType *ct;
152 lua_assert(cts->L); 156 lj_assertCTS(cts->L, "uninitialized cts->L");
153 if (LJ_UNLIKELY(id >= cts->sizetab)) { 157 if (LJ_UNLIKELY(id >= cts->sizetab)) {
154 if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); 158 if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
155#ifdef LUAJIT_CTYPE_CHECK_ANCHOR 159#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
@@ -178,7 +182,7 @@ CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size)
178{ 182{
179 uint32_t h = ct_hashtype(info, size); 183 uint32_t h = ct_hashtype(info, size);
180 CTypeID id = cts->hash[h]; 184 CTypeID id = cts->hash[h];
181 lua_assert(cts->L); 185 lj_assertCTS(cts->L, "uninitialized cts->L");
182 while (id) { 186 while (id) {
183 CType *ct = ctype_get(cts, id); 187 CType *ct = ctype_get(cts, id);
184 if (ct->info == info && ct->size == size) 188 if (ct->info == info && ct->size == size)
@@ -294,9 +298,9 @@ CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem)
294 } 298 }
295 ct = ctype_raw(cts, arrid); 299 ct = ctype_raw(cts, arrid);
296 } 300 }
297 lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */ 301 lj_assertCTS(ctype_isvlarray(ct->info), "VLA expected");
298 ct = ctype_rawchild(cts, ct); /* Get array element. */ 302 ct = ctype_rawchild(cts, ct); /* Get array element. */
299 lua_assert(ctype_hassize(ct->info)); 303 lj_assertCTS(ctype_hassize(ct->info), "bad VLA without size");
300 /* Calculate actual size of VLA and check for overflow. */ 304 /* Calculate actual size of VLA and check for overflow. */
301 xsz += (uint64_t)ct->size * nelem; 305 xsz += (uint64_t)ct->size * nelem;
302 return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; 306 return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID;
@@ -319,7 +323,8 @@ CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp)
319 } else { 323 } else {
320 if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); 324 if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN);
321 qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); 325 qual |= (info & ~(CTF_ALIGN|CTMASK_CID));
322 lua_assert(ctype_hassize(info) || ctype_isfunc(info)); 326 lj_assertCTS(ctype_hassize(info) || ctype_isfunc(info),
327 "ctype without size");
323 *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; 328 *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size;
324 break; 329 break;
325 } 330 }
@@ -524,7 +529,7 @@ static void ctype_repr(CTRepr *ctr, CTypeID id)
524 ctype_appc(ctr, ')'); 529 ctype_appc(ctr, ')');
525 break; 530 break;
526 default: 531 default:
527 lua_assert(0); 532 lj_assertG_(ctr->cts->g, 0, "bad ctype %08x", info);
528 break; 533 break;
529 } 534 }
530 ct = ctype_get(ctr->cts, ctype_cid(info)); 535 ct = ctype_get(ctr->cts, ctype_cid(info));
@@ -568,19 +573,18 @@ GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned)
568/* Convert complex to string with 'i' or 'I' suffix. */ 573/* Convert complex to string with 'i' or 'I' suffix. */
569GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size) 574GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size)
570{ 575{
571 char buf[2*LJ_STR_NUMBUF+2+1]; 576 SBuf *sb = lj_buf_tmp_(L);
572 TValue re, im; 577 TValue re, im;
573 size_t len;
574 if (size == 2*sizeof(double)) { 578 if (size == 2*sizeof(double)) {
575 re.n = *(double *)sp; im.n = ((double *)sp)[1]; 579 re.n = *(double *)sp; im.n = ((double *)sp)[1];
576 } else { 580 } else {
577 re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1]; 581 re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1];
578 } 582 }
579 len = lj_str_bufnum(buf, &re); 583 lj_strfmt_putfnum(sb, STRFMT_G14, re.n);
580 if (!(im.u32.hi & 0x80000000u) || im.n != im.n) buf[len++] = '+'; 584 if (!(im.u32.hi & 0x80000000u) || im.n != im.n) lj_buf_putchar(sb, '+');
581 len += lj_str_bufnum(buf+len, &im); 585 lj_strfmt_putfnum(sb, STRFMT_G14, im.n);
582 buf[len] = buf[len-1] >= 'a' ? 'I' : 'i'; 586 lj_buf_putchar(sb, sbufP(sb)[-1] >= 'a' ? 'I' : 'i');
583 return lj_str_new(L, buf, len+1); 587 return lj_buf_str(L, sb);
584} 588}
585 589
586/* -- C type state -------------------------------------------------------- */ 590/* -- C type state -------------------------------------------------------- */
diff --git a/src/lj_ctype.h b/src/lj_ctype.h
index 8066214f..8c8fc790 100644
--- a/src/lj_ctype.h
+++ b/src/lj_ctype.h
@@ -260,10 +260,16 @@ typedef struct CTState {
260 260
261#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */ 261#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */
262 262
263#ifdef LUA_USE_ASSERT
264#define lj_assertCTS(c, ...) (lj_assertG_(cts->g, (c), __VA_ARGS__))
265#else
266#define lj_assertCTS(c, ...) ((void)cts)
267#endif
268
263/* -- Predefined types ---------------------------------------------------- */ 269/* -- Predefined types ---------------------------------------------------- */
264 270
265/* Target-dependent types. */ 271/* Target-dependent types. */
266#if LJ_TARGET_PPC || LJ_TARGET_PPCSPE 272#if LJ_TARGET_PPC
267#define CTTYDEFP(_) \ 273#define CTTYDEFP(_) \
268 _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2)) 274 _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2))
269#else 275#else
@@ -392,7 +398,8 @@ static LJ_AINLINE CTState *ctype_cts(lua_State *L)
392/* Check C type ID for validity when assertions are enabled. */ 398/* Check C type ID for validity when assertions are enabled. */
393static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) 399static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
394{ 400{
395 lua_assert(id > 0 && id < cts->top); UNUSED(cts); 401 UNUSED(cts);
402 lj_assertCTS(id > 0 && id < cts->top, "bad CTID %d", id);
396 return id; 403 return id;
397} 404}
398 405
@@ -408,8 +415,9 @@ static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
408/* Get child C type. */ 415/* Get child C type. */
409static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) 416static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
410{ 417{
411 lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || 418 lj_assertCTS(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
412 ctype_isbitfield(ct->info))); /* These don't have children. */ 419 ctype_isbitfield(ct->info)),
420 "ctype %08x has no children", ct->info);
413 return ctype_get(cts, ctype_cid(ct->info)); 421 return ctype_get(cts, ctype_cid(ct->info));
414} 422}
415 423
diff --git a/src/lj_debug.c b/src/lj_debug.c
index 6863cffd..273dd078 100644
--- a/src/lj_debug.c
+++ b/src/lj_debug.c
@@ -9,12 +9,12 @@
9#include "lj_obj.h" 9#include "lj_obj.h"
10#include "lj_err.h" 10#include "lj_err.h"
11#include "lj_debug.h" 11#include "lj_debug.h"
12#include "lj_str.h" 12#include "lj_buf.h"
13#include "lj_tab.h" 13#include "lj_tab.h"
14#include "lj_state.h" 14#include "lj_state.h"
15#include "lj_frame.h" 15#include "lj_frame.h"
16#include "lj_bc.h" 16#include "lj_bc.h"
17#include "lj_vm.h" 17#include "lj_strfmt.h"
18#if LJ_HASJIT 18#if LJ_HASJIT
19#include "lj_jit.h" 19#include "lj_jit.h"
20#endif 20#endif
@@ -24,11 +24,11 @@
24/* Get frame corresponding to a level. */ 24/* Get frame corresponding to a level. */
25cTValue *lj_debug_frame(lua_State *L, int level, int *size) 25cTValue *lj_debug_frame(lua_State *L, int level, int *size)
26{ 26{
27 cTValue *frame, *nextframe, *bot = tvref(L->stack); 27 cTValue *frame, *nextframe, *bot = tvref(L->stack)+LJ_FR2;
28 /* Traverse frames backwards. */ 28 /* Traverse frames backwards. */
29 for (nextframe = frame = L->base-1; frame > bot; ) { 29 for (nextframe = frame = L->base-1; frame > bot; ) {
30 if (frame_gc(frame) == obj2gco(L)) 30 if (frame_gc(frame) == obj2gco(L))
31 level++; /* Skip dummy frames. See lj_meta_call(). */ 31 level++; /* Skip dummy frames. See lj_err_optype_call(). */
32 if (level-- == 0) { 32 if (level-- == 0) {
33 *size = (int)(nextframe - frame); 33 *size = (int)(nextframe - frame);
34 return frame; /* Level found. */ 34 return frame; /* Level found. */
@@ -55,7 +55,8 @@ static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
55 const BCIns *ins; 55 const BCIns *ins;
56 GCproto *pt; 56 GCproto *pt;
57 BCPos pos; 57 BCPos pos;
58 lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD); 58 lj_assertL(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD,
59 "function or frame expected");
59 if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ 60 if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */
60 return NO_BCPOS; 61 return NO_BCPOS;
61 } else if (nextframe == NULL) { /* Lua function on top. */ 62 } else if (nextframe == NULL) { /* Lua function on top. */
@@ -87,8 +88,7 @@ static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
87 if (frame_islua(f)) { 88 if (frame_islua(f)) {
88 f = frame_prevl(f); 89 f = frame_prevl(f);
89 } else { 90 } else {
90 if (frame_isc(f) || (LJ_HASFFI && frame_iscont(f) && 91 if (frame_isc(f) || (frame_iscont(f) && frame_iscont_fficb(f)))
91 (f-1)->u32.lo == LJ_CONT_FFI_CALLBACK))
92 cf = cframe_raw(cframe_prev(cf)); 92 cf = cframe_raw(cframe_prev(cf));
93 f = frame_prevd(f); 93 f = frame_prevd(f);
94 } 94 }
@@ -102,7 +102,7 @@ static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
102#if LJ_HASJIT 102#if LJ_HASJIT
103 if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */ 103 if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */
104 GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins)); 104 GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins));
105 lua_assert(bc_isret(bc_op(ins[-1]))); 105 lj_assertL(bc_isret(bc_op(ins[-1])), "return bytecode expected");
106 pos = proto_bcpos(pt, mref(T->startpc, const BCIns)); 106 pos = proto_bcpos(pt, mref(T->startpc, const BCIns));
107 } 107 }
108#endif 108#endif
@@ -135,7 +135,7 @@ static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
135 BCPos pc = debug_framepc(L, fn, nextframe); 135 BCPos pc = debug_framepc(L, fn, nextframe);
136 if (pc != NO_BCPOS) { 136 if (pc != NO_BCPOS) {
137 GCproto *pt = funcproto(fn); 137 GCproto *pt = funcproto(fn);
138 lua_assert(pc <= pt->sizebc); 138 lj_assertL(pc <= pt->sizebc, "PC out of range");
139 return lj_debug_line(pt, pc); 139 return lj_debug_line(pt, pc);
140 } 140 }
141 return -1; 141 return -1;
@@ -143,38 +143,25 @@ static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
143 143
144/* -- Variable names ------------------------------------------------------ */ 144/* -- Variable names ------------------------------------------------------ */
145 145
146/* Read ULEB128 value. */
147static uint32_t debug_read_uleb128(const uint8_t **pp)
148{
149 const uint8_t *p = *pp;
150 uint32_t v = *p++;
151 if (LJ_UNLIKELY(v >= 0x80)) {
152 int sh = 0;
153 v &= 0x7f;
154 do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
155 }
156 *pp = p;
157 return v;
158}
159
160/* Get name of a local variable from slot number and PC. */ 146/* Get name of a local variable from slot number and PC. */
161static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot) 147static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot)
162{ 148{
163 const uint8_t *p = proto_varinfo(pt); 149 const char *p = (const char *)proto_varinfo(pt);
164 if (p) { 150 if (p) {
165 BCPos lastpc = 0; 151 BCPos lastpc = 0;
166 for (;;) { 152 for (;;) {
167 const char *name = (const char *)p; 153 const char *name = p;
168 uint32_t vn = *p++; 154 uint32_t vn = *(const uint8_t *)p;
169 BCPos startpc, endpc; 155 BCPos startpc, endpc;
170 if (vn < VARNAME__MAX) { 156 if (vn < VARNAME__MAX) {
171 if (vn == VARNAME_END) break; /* End of varinfo. */ 157 if (vn == VARNAME_END) break; /* End of varinfo. */
172 } else { 158 } else {
173 while (*p++) ; /* Skip over variable name string. */ 159 do { p++; } while (*(const uint8_t *)p); /* Skip over variable name. */
174 } 160 }
175 lastpc = startpc = lastpc + debug_read_uleb128(&p); 161 p++;
162 lastpc = startpc = lastpc + lj_buf_ruleb128(&p);
176 if (startpc > pc) break; 163 if (startpc > pc) break;
177 endpc = startpc + debug_read_uleb128(&p); 164 endpc = startpc + lj_buf_ruleb128(&p);
178 if (pc < endpc && slot-- == 0) { 165 if (pc < endpc && slot-- == 0) {
179 if (vn < VARNAME__MAX) { 166 if (vn < VARNAME__MAX) {
180#define VARNAMESTR(name, str) str "\0" 167#define VARNAMESTR(name, str) str "\0"
@@ -199,7 +186,7 @@ static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
199 TValue *nextframe = size ? frame + size : NULL; 186 TValue *nextframe = size ? frame + size : NULL;
200 GCfunc *fn = frame_func(frame); 187 GCfunc *fn = frame_func(frame);
201 BCPos pc = debug_framepc(L, fn, nextframe); 188 BCPos pc = debug_framepc(L, fn, nextframe);
202 if (!nextframe) nextframe = L->top; 189 if (!nextframe) nextframe = L->top+LJ_FR2;
203 if ((int)slot1 < 0) { /* Negative slot number is for varargs. */ 190 if ((int)slot1 < 0) { /* Negative slot number is for varargs. */
204 if (pc != NO_BCPOS) { 191 if (pc != NO_BCPOS) {
205 GCproto *pt = funcproto(fn); 192 GCproto *pt = funcproto(fn);
@@ -209,7 +196,7 @@ static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
209 nextframe = frame; 196 nextframe = frame;
210 frame = frame_prevd(frame); 197 frame = frame_prevd(frame);
211 } 198 }
212 if (frame + slot1 < nextframe) { 199 if (frame + slot1+LJ_FR2 < nextframe) {
213 *name = "(*vararg)"; 200 *name = "(*vararg)";
214 return frame+slot1; 201 return frame+slot1;
215 } 202 }
@@ -220,7 +207,7 @@ static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
220 if (pc != NO_BCPOS && 207 if (pc != NO_BCPOS &&
221 (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL) 208 (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL)
222 ; 209 ;
223 else if (slot1 > 0 && frame + slot1 < nextframe) 210 else if (slot1 > 0 && frame + slot1+LJ_FR2 < nextframe)
224 *name = "(*temporary)"; 211 *name = "(*temporary)";
225 return frame+slot1; 212 return frame+slot1;
226} 213}
@@ -229,7 +216,7 @@ static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
229const char *lj_debug_uvname(GCproto *pt, uint32_t idx) 216const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
230{ 217{
231 const uint8_t *p = proto_uvinfo(pt); 218 const uint8_t *p = proto_uvinfo(pt);
232 lua_assert(idx < pt->sizeuv); 219 lj_assertX(idx < pt->sizeuv, "bad upvalue index");
233 if (!p) return ""; 220 if (!p) return "";
234 if (idx) while (*p++ || --idx) ; 221 if (idx) while (*p++ || --idx) ;
235 return (const char *)p; 222 return (const char *)p;
@@ -286,7 +273,7 @@ restart:
286 *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins)))); 273 *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins))));
287 if (ip > proto_bc(pt)) { 274 if (ip > proto_bc(pt)) {
288 BCIns insp = ip[-1]; 275 BCIns insp = ip[-1];
289 if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1 && 276 if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1+LJ_FR2 &&
290 bc_d(insp) == bc_b(ins)) 277 bc_d(insp) == bc_b(ins))
291 return "method"; 278 return "method";
292 } 279 }
@@ -303,12 +290,12 @@ restart:
303} 290}
304 291
305/* Deduce function name from caller of a frame. */ 292/* Deduce function name from caller of a frame. */
306const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name) 293const char *lj_debug_funcname(lua_State *L, cTValue *frame, const char **name)
307{ 294{
308 TValue *pframe; 295 cTValue *pframe;
309 GCfunc *fn; 296 GCfunc *fn;
310 BCPos pc; 297 BCPos pc;
311 if (frame <= tvref(L->stack)) 298 if (frame <= tvref(L->stack)+LJ_FR2)
312 return NULL; 299 return NULL;
313 if (frame_isvarg(frame)) 300 if (frame_isvarg(frame))
314 frame = frame_prevd(frame); 301 frame = frame_prevd(frame);
@@ -334,7 +321,7 @@ const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name)
334/* -- Source code locations ----------------------------------------------- */ 321/* -- Source code locations ----------------------------------------------- */
335 322
336/* Generate shortened source name. */ 323/* Generate shortened source name. */
337void lj_debug_shortname(char *out, GCstr *str) 324void lj_debug_shortname(char *out, GCstr *str, BCLine line)
338{ 325{
339 const char *src = strdata(str); 326 const char *src = strdata(str);
340 if (*src == '=') { 327 if (*src == '=') {
@@ -348,11 +335,11 @@ void lj_debug_shortname(char *out, GCstr *str)
348 *out++ = '.'; *out++ = '.'; *out++ = '.'; 335 *out++ = '.'; *out++ = '.'; *out++ = '.';
349 } 336 }
350 strcpy(out, src); 337 strcpy(out, src);
351 } else { /* Output [string "string"]. */ 338 } else { /* Output [string "string"] or [builtin:name]. */
352 size_t len; /* Length, up to first control char. */ 339 size_t len; /* Length, up to first control char. */
353 for (len = 0; len < LUA_IDSIZE-12; len++) 340 for (len = 0; len < LUA_IDSIZE-12; len++)
354 if (((const unsigned char *)src)[len] < ' ') break; 341 if (((const unsigned char *)src)[len] < ' ') break;
355 strcpy(out, "[string \""); out += 9; 342 strcpy(out, line == ~(BCLine)0 ? "[builtin:" : "[string \""); out += 9;
356 if (src[len] != '\0') { /* Must truncate? */ 343 if (src[len] != '\0') { /* Must truncate? */
357 if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15; 344 if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15;
358 strncpy(out, src, len); out += len; 345 strncpy(out, src, len); out += len;
@@ -360,7 +347,7 @@ void lj_debug_shortname(char *out, GCstr *str)
360 } else { 347 } else {
361 strcpy(out, src); out += len; 348 strcpy(out, src); out += len;
362 } 349 }
363 strcpy(out, "\"]"); 350 strcpy(out, line == ~(BCLine)0 ? "]" : "\"]");
364 } 351 }
365} 352}
366 353
@@ -373,14 +360,15 @@ void lj_debug_addloc(lua_State *L, const char *msg,
373 if (isluafunc(fn)) { 360 if (isluafunc(fn)) {
374 BCLine line = debug_frameline(L, fn, nextframe); 361 BCLine line = debug_frameline(L, fn, nextframe);
375 if (line >= 0) { 362 if (line >= 0) {
363 GCproto *pt = funcproto(fn);
376 char buf[LUA_IDSIZE]; 364 char buf[LUA_IDSIZE];
377 lj_debug_shortname(buf, proto_chunkname(funcproto(fn))); 365 lj_debug_shortname(buf, proto_chunkname(pt), pt->firstline);
378 lj_str_pushf(L, "%s:%d: %s", buf, line, msg); 366 lj_strfmt_pushf(L, "%s:%d: %s", buf, line, msg);
379 return; 367 return;
380 } 368 }
381 } 369 }
382 } 370 }
383 lj_str_pushf(L, "%s", msg); 371 lj_strfmt_pushf(L, "%s", msg);
384} 372}
385 373
386/* Push location string for a bytecode position to Lua stack. */ 374/* Push location string for a bytecode position to Lua stack. */
@@ -390,20 +378,22 @@ void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc)
390 const char *s = strdata(name); 378 const char *s = strdata(name);
391 MSize i, len = name->len; 379 MSize i, len = name->len;
392 BCLine line = lj_debug_line(pt, pc); 380 BCLine line = lj_debug_line(pt, pc);
393 if (*s == '@') { 381 if (pt->firstline == ~(BCLine)0) {
382 lj_strfmt_pushf(L, "builtin:%s", s);
383 } else if (*s == '@') {
394 s++; len--; 384 s++; len--;
395 for (i = len; i > 0; i--) 385 for (i = len; i > 0; i--)
396 if (s[i] == '/' || s[i] == '\\') { 386 if (s[i] == '/' || s[i] == '\\') {
397 s += i+1; 387 s += i+1;
398 break; 388 break;
399 } 389 }
400 lj_str_pushf(L, "%s:%d", s, line); 390 lj_strfmt_pushf(L, "%s:%d", s, line);
401 } else if (len > 40) { 391 } else if (len > 40) {
402 lj_str_pushf(L, "%p:%d", pt, line); 392 lj_strfmt_pushf(L, "%p:%d", pt, line);
403 } else if (*s == '=') { 393 } else if (*s == '=') {
404 lj_str_pushf(L, "%s:%d", s+1, line); 394 lj_strfmt_pushf(L, "%s:%d", s+1, line);
405 } else { 395 } else {
406 lj_str_pushf(L, "\"%s\":%d", s, line); 396 lj_strfmt_pushf(L, "\"%s\":%d", s, line);
407 } 397 }
408} 398}
409 399
@@ -451,13 +441,14 @@ int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext)
451 } else { 441 } else {
452 uint32_t offset = (uint32_t)ar->i_ci & 0xffff; 442 uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
453 uint32_t size = (uint32_t)ar->i_ci >> 16; 443 uint32_t size = (uint32_t)ar->i_ci >> 16;
454 lua_assert(offset != 0); 444 lj_assertL(offset != 0, "bad frame offset");
455 frame = tvref(L->stack) + offset; 445 frame = tvref(L->stack) + offset;
456 if (size) nextframe = frame + size; 446 if (size) nextframe = frame + size;
457 lua_assert(frame <= tvref(L->maxstack) && 447 lj_assertL(frame <= tvref(L->maxstack) &&
458 (!nextframe || nextframe <= tvref(L->maxstack))); 448 (!nextframe || nextframe <= tvref(L->maxstack)),
449 "broken frame chain");
459 fn = frame_func(frame); 450 fn = frame_func(frame);
460 lua_assert(fn->c.gct == ~LJ_TFUNC); 451 lj_assertL(fn->c.gct == ~LJ_TFUNC, "bad frame function");
461 } 452 }
462 for (; *what; what++) { 453 for (; *what; what++) {
463 if (*what == 'S') { 454 if (*what == 'S') {
@@ -466,7 +457,7 @@ int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext)
466 BCLine firstline = pt->firstline; 457 BCLine firstline = pt->firstline;
467 GCstr *name = proto_chunkname(pt); 458 GCstr *name = proto_chunkname(pt);
468 ar->source = strdata(name); 459 ar->source = strdata(name);
469 lj_debug_shortname(ar->short_src, name); 460 lj_debug_shortname(ar->short_src, name, pt->firstline);
470 ar->linedefined = (int)firstline; 461 ar->linedefined = (int)firstline;
471 ar->lastlinedefined = (int)(firstline + pt->numline); 462 ar->lastlinedefined = (int)(firstline + pt->numline);
472 ar->what = (firstline || !pt->numline) ? "Lua" : "main"; 463 ar->what = (firstline || !pt->numline) ? "Lua" : "main";
@@ -556,6 +547,111 @@ LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar)
556 } 547 }
557} 548}
558 549
550#if LJ_HASPROFILE
551/* Put the chunkname into a buffer. */
552static int debug_putchunkname(SBuf *sb, GCproto *pt, int pathstrip)
553{
554 GCstr *name = proto_chunkname(pt);
555 const char *p = strdata(name);
556 if (pt->firstline == ~(BCLine)0) {
557 lj_buf_putmem(sb, "[builtin:", 9);
558 lj_buf_putstr(sb, name);
559 lj_buf_putb(sb, ']');
560 return 0;
561 }
562 if (*p == '=' || *p == '@') {
563 MSize len = name->len-1;
564 p++;
565 if (pathstrip) {
566 int i;
567 for (i = len-1; i >= 0; i--)
568 if (p[i] == '/' || p[i] == '\\') {
569 len -= i+1;
570 p = p+i+1;
571 break;
572 }
573 }
574 lj_buf_putmem(sb, p, len);
575 } else {
576 lj_buf_putmem(sb, "[string]", 8);
577 }
578 return 1;
579}
580
581/* Put a compact stack dump into a buffer. */
582void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, int depth)
583{
584 int level = 0, dir = 1, pathstrip = 1;
585 MSize lastlen = 0;
586 if (depth < 0) { level = ~depth; depth = dir = -1; } /* Reverse frames. */
587 while (level != depth) { /* Loop through all frame. */
588 int size;
589 cTValue *frame = lj_debug_frame(L, level, &size);
590 if (frame) {
591 cTValue *nextframe = size ? frame+size : NULL;
592 GCfunc *fn = frame_func(frame);
593 const uint8_t *p = (const uint8_t *)fmt;
594 int c;
595 while ((c = *p++)) {
596 switch (c) {
597 case 'p': /* Preserve full path. */
598 pathstrip = 0;
599 break;
600 case 'F': case 'f': { /* Dump function name. */
601 const char *name;
602 const char *what = lj_debug_funcname(L, frame, &name);
603 if (what) {
604 if (c == 'F' && isluafunc(fn)) { /* Dump module:name for 'F'. */
605 GCproto *pt = funcproto(fn);
606 if (pt->firstline != ~(BCLine)0) { /* Not a bytecode builtin. */
607 debug_putchunkname(sb, pt, pathstrip);
608 lj_buf_putb(sb, ':');
609 }
610 }
611 lj_buf_putmem(sb, name, (MSize)strlen(name));
612 break;
613 } /* else: can't derive a name, dump module:line. */
614 }
615 /* fallthrough */
616 case 'l': /* Dump module:line. */
617 if (isluafunc(fn)) {
618 GCproto *pt = funcproto(fn);
619 if (debug_putchunkname(sb, pt, pathstrip)) {
620 /* Regular Lua function. */
621 BCLine line = c == 'l' ? debug_frameline(L, fn, nextframe) :
622 pt->firstline;
623 lj_buf_putb(sb, ':');
624 lj_strfmt_putint(sb, line >= 0 ? line : pt->firstline);
625 }
626 } else if (isffunc(fn)) { /* Dump numbered builtins. */
627 lj_buf_putmem(sb, "[builtin#", 9);
628 lj_strfmt_putint(sb, fn->c.ffid);
629 lj_buf_putb(sb, ']');
630 } else { /* Dump C function address. */
631 lj_buf_putb(sb, '@');
632 lj_strfmt_putptr(sb, fn->c.f);
633 }
634 break;
635 case 'Z': /* Zap trailing separator. */
636 lastlen = sbuflen(sb);
637 break;
638 default:
639 lj_buf_putb(sb, c);
640 break;
641 }
642 }
643 } else if (dir == 1) {
644 break;
645 } else {
646 level -= size; /* Reverse frame order: quickly skip missing level. */
647 }
648 level += dir;
649 }
650 if (lastlen)
651 setsbufP(sb, sbufB(sb) + lastlen); /* Zap trailing separator. */
652}
653#endif
654
559/* Number of frames for the leading and trailing part of a traceback. */ 655/* Number of frames for the leading and trailing part of a traceback. */
560#define TRACEBACK_LEVELS1 12 656#define TRACEBACK_LEVELS1 12
561#define TRACEBACK_LEVELS2 10 657#define TRACEBACK_LEVELS2 10
diff --git a/src/lj_debug.h b/src/lj_debug.h
index 43fb9c19..a8be19e8 100644
--- a/src/lj_debug.h
+++ b/src/lj_debug.h
@@ -33,14 +33,18 @@ LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp,
33 GCobj **op); 33 GCobj **op);
34LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc, 34LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
35 BCReg slot, const char **name); 35 BCReg slot, const char **name);
36LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame, 36LJ_FUNC const char *lj_debug_funcname(lua_State *L, cTValue *frame,
37 const char **name); 37 const char **name);
38LJ_FUNC void lj_debug_shortname(char *out, GCstr *str); 38LJ_FUNC void lj_debug_shortname(char *out, GCstr *str, BCLine line);
39LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg, 39LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
40 cTValue *frame, cTValue *nextframe); 40 cTValue *frame, cTValue *nextframe);
41LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc); 41LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
42LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, 42LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar,
43 int ext); 43 int ext);
44#if LJ_HASPROFILE
45LJ_FUNC void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt,
46 int depth);
47#endif
44 48
45/* Fixed internal variable names. */ 49/* Fixed internal variable names. */
46#define VARNAMEDEF(_) \ 50#define VARNAMEDEF(_) \
diff --git a/src/lj_def.h b/src/lj_def.h
index b5e26d69..cfe18c48 100644
--- a/src/lj_def.h
+++ b/src/lj_def.h
@@ -46,10 +46,14 @@ typedef unsigned int uintptr_t;
46#include <stdlib.h> 46#include <stdlib.h>
47 47
48/* Various VM limits. */ 48/* Various VM limits. */
49#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */ 49#define LJ_MAX_MEM32 0x7fffff00 /* Max. 32 bit memory allocation. */
50#define LJ_MAX_MEM64 ((uint64_t)1<<47) /* Max. 64 bit memory allocation. */
51/* Max. total memory allocation. */
52#define LJ_MAX_MEM (LJ_GC64 ? LJ_MAX_MEM64 : LJ_MAX_MEM32)
50#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */ 53#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
51#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */ 54#define LJ_MAX_STR LJ_MAX_MEM32 /* Max. string length. */
52#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */ 55#define LJ_MAX_BUF LJ_MAX_MEM32 /* Max. buffer length. */
56#define LJ_MAX_UDATA LJ_MAX_MEM32 /* Max. userdata length. */
53 57
54#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */ 58#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
55#define LJ_MAX_HBITS 26 /* Max. hash bits. */ 59#define LJ_MAX_HBITS 26 /* Max. hash bits. */
@@ -57,7 +61,7 @@ typedef unsigned int uintptr_t;
57#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */ 61#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
58#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */ 62#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
59 63
60#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */ 64#define LJ_MAX_LINE LJ_MAX_MEM32 /* Max. source code line number. */
61#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */ 65#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
62#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */ 66#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
63#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */ 67#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
@@ -65,7 +69,7 @@ typedef unsigned int uintptr_t;
65#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */ 69#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
66 70
67#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */ 71#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
68#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */ 72#define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */
69 73
70#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */ 74#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
71 75
@@ -76,7 +80,6 @@ typedef unsigned int uintptr_t;
76#define LJ_MIN_SBUF 32 /* Min. string buffer length. */ 80#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
77#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */ 81#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
78#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */ 82#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
79#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */
80 83
81/* JIT compiler limits. */ 84/* JIT compiler limits. */
82#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */ 85#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
@@ -91,6 +94,9 @@ typedef unsigned int uintptr_t;
91#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo) 94#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
92#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p)) 95#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
93#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p)) 96#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
97#define i64ptr(p) ((int64_t)(intptr_t)(void *)(p))
98#define u64ptr(p) ((uint64_t)(intptr_t)(void *)(p))
99#define igcptr(p) (LJ_GC64 ? i64ptr(p) : i32ptr(p))
94 100
95#define checki8(x) ((x) == (int32_t)(int8_t)(x)) 101#define checki8(x) ((x) == (int32_t)(int8_t)(x))
96#define checku8(x) ((x) == (int32_t)(uint8_t)(x)) 102#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
@@ -98,7 +104,10 @@ typedef unsigned int uintptr_t;
98#define checku16(x) ((x) == (int32_t)(uint16_t)(x)) 104#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
99#define checki32(x) ((x) == (int32_t)(x)) 105#define checki32(x) ((x) == (int32_t)(x))
100#define checku32(x) ((x) == (uint32_t)(x)) 106#define checku32(x) ((x) == (uint32_t)(x))
107#define checkptr31(x) (((uint64_t)(uintptr_t)(x) >> 31) == 0)
101#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x)) 108#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
109#define checkptr47(x) (((uint64_t)(uintptr_t)(x) >> 47) == 0)
110#define checkptrGC(x) (LJ_GC64 ? checkptr47((x)) : LJ_64 ? checkptr31((x)) :1)
102 111
103/* Every half-decent C compiler transforms this into a rotate instruction. */ 112/* Every half-decent C compiler transforms this into a rotate instruction. */
104#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1)))) 113#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1))))
@@ -111,7 +120,7 @@ typedef uintptr_t BloomFilter;
111#define bloomset(b, x) ((b) |= bloombit((x))) 120#define bloomset(b, x) ((b) |= bloombit((x)))
112#define bloomtest(b, x) ((b) & bloombit((x))) 121#define bloomtest(b, x) ((b) & bloombit((x)))
113 122
114#if defined(__GNUC__) || defined(__psp2__) 123#if defined(__GNUC__) || defined(__clang__) || defined(__psp2__)
115 124
116#define LJ_NORET __attribute__((noreturn)) 125#define LJ_NORET __attribute__((noreturn))
117#define LJ_ALIGN(n) __attribute__((aligned(n))) 126#define LJ_ALIGN(n) __attribute__((aligned(n)))
@@ -173,7 +182,7 @@ static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
173{ 182{
174 return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32)); 183 return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
175} 184}
176#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) 185#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __clang__
177static LJ_AINLINE uint32_t lj_bswap(uint32_t x) 186static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
178{ 187{
179 return (uint32_t)__builtin_bswap32((int32_t)x); 188 return (uint32_t)__builtin_bswap32((int32_t)x);
@@ -329,14 +338,28 @@ static LJ_AINLINE uint32_t lj_getu32(const void *v)
329#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET 338#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
330#define LJ_ASMF_NORET LJ_ASMF LJ_NORET 339#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
331 340
332/* Runtime assertions. */ 341/* Internal assertions. */
333#ifdef lua_assert 342#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
334#define check_exp(c, e) (lua_assert(c), (e)) 343#define lj_assert_check(g, c, ...) \
335#define api_check(l, e) lua_assert(e) 344 ((c) ? (void)0 : \
345 (lj_assert_fail((g), __FILE__, __LINE__, __func__, __VA_ARGS__), 0))
346#define lj_checkapi(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__)
336#else 347#else
337#define lua_assert(c) ((void)0) 348#define lj_checkapi(c, ...) ((void)L)
349#endif
350
351#ifdef LUA_USE_ASSERT
352#define lj_assertG_(g, c, ...) lj_assert_check((g), (c), __VA_ARGS__)
353#define lj_assertG(c, ...) lj_assert_check(g, (c), __VA_ARGS__)
354#define lj_assertL(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__)
355#define lj_assertX(c, ...) lj_assert_check(NULL, (c), __VA_ARGS__)
356#define check_exp(c, e) (lj_assertX((c), #c), (e))
357#else
358#define lj_assertG_(g, c, ...) ((void)0)
359#define lj_assertG(c, ...) ((void)g)
360#define lj_assertL(c, ...) ((void)L)
361#define lj_assertX(c, ...) ((void)0)
338#define check_exp(c, e) (e) 362#define check_exp(c, e) (e)
339#define api_check luai_apicheck
340#endif 363#endif
341 364
342/* Static assertions. */ 365/* Static assertions. */
@@ -350,4 +373,9 @@ static LJ_AINLINE uint32_t lj_getu32(const void *v)
350 extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1]) 373 extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
351#endif 374#endif
352 375
376/* PRNG state. Need this here, details in lj_prng.h. */
377typedef struct PRNGState {
378 uint64_t u[4];
379} PRNGState;
380
353#endif 381#endif
diff --git a/src/lj_dispatch.c b/src/lj_dispatch.c
index 644e9028..c608e223 100644
--- a/src/lj_dispatch.c
+++ b/src/lj_dispatch.c
@@ -8,6 +8,7 @@
8 8
9#include "lj_obj.h" 9#include "lj_obj.h"
10#include "lj_err.h" 10#include "lj_err.h"
11#include "lj_buf.h"
11#include "lj_func.h" 12#include "lj_func.h"
12#include "lj_str.h" 13#include "lj_str.h"
13#include "lj_tab.h" 14#include "lj_tab.h"
@@ -17,6 +18,7 @@
17#include "lj_frame.h" 18#include "lj_frame.h"
18#include "lj_bc.h" 19#include "lj_bc.h"
19#include "lj_ff.h" 20#include "lj_ff.h"
21#include "lj_strfmt.h"
20#if LJ_HASJIT 22#if LJ_HASJIT
21#include "lj_jit.h" 23#include "lj_jit.h"
22#endif 24#endif
@@ -25,6 +27,9 @@
25#endif 27#endif
26#include "lj_trace.h" 28#include "lj_trace.h"
27#include "lj_dispatch.h" 29#include "lj_dispatch.h"
30#if LJ_HASPROFILE
31#include "lj_profile.h"
32#endif
28#include "lj_vm.h" 33#include "lj_vm.h"
29#include "luajit.h" 34#include "luajit.h"
30 35
@@ -37,6 +42,12 @@ LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
37#include <math.h> 42#include <math.h>
38LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, 43LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
39 lua_State *co); 44 lua_State *co);
45#if !LJ_HASJIT
46#define lj_dispatch_stitch lj_dispatch_ins
47#endif
48#if !LJ_HASPROFILE
49#define lj_dispatch_profile lj_dispatch_ins
50#endif
40 51
41#define GOTFUNC(name) (ASMFunction)name, 52#define GOTFUNC(name) (ASMFunction)name,
42static const ASMFunction dispatch_got[] = { 53static const ASMFunction dispatch_got[] = {
@@ -64,7 +75,7 @@ void lj_dispatch_init(GG_State *GG)
64 for (i = 0; i < GG_NUM_ASMFF; i++) 75 for (i = 0; i < GG_NUM_ASMFF; i++)
65 GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0); 76 GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
66#if LJ_TARGET_MIPS 77#if LJ_TARGET_MIPS
67 memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4); 78 memcpy(GG->got, dispatch_got, LJ_GOT__MAX*sizeof(ASMFunction *));
68#endif 79#endif
69} 80}
70 81
@@ -82,11 +93,12 @@ void lj_dispatch_init_hotcount(global_State *g)
82#endif 93#endif
83 94
84/* Internal dispatch mode bits. */ 95/* Internal dispatch mode bits. */
85#define DISPMODE_JIT 0x01 /* JIT compiler on. */ 96#define DISPMODE_CALL 0x01 /* Override call dispatch. */
86#define DISPMODE_REC 0x02 /* Recording active. */ 97#define DISPMODE_RET 0x02 /* Override return dispatch. */
87#define DISPMODE_INS 0x04 /* Override instruction dispatch. */ 98#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
88#define DISPMODE_CALL 0x08 /* Override call dispatch. */ 99#define DISPMODE_JIT 0x10 /* JIT compiler on. */
89#define DISPMODE_RET 0x10 /* Override return dispatch. */ 100#define DISPMODE_REC 0x20 /* Recording active. */
101#define DISPMODE_PROF 0x40 /* Profiling active. */
90 102
91/* Update dispatch table depending on various flags. */ 103/* Update dispatch table depending on various flags. */
92void lj_dispatch_update(global_State *g) 104void lj_dispatch_update(global_State *g)
@@ -98,6 +110,9 @@ void lj_dispatch_update(global_State *g)
98 mode |= G2J(g)->state != LJ_TRACE_IDLE ? 110 mode |= G2J(g)->state != LJ_TRACE_IDLE ?
99 (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0; 111 (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
100#endif 112#endif
113#if LJ_HASPROFILE
114 mode |= (g->hookmask & HOOK_PROFILE) ? (DISPMODE_PROF|DISPMODE_INS) : 0;
115#endif
101 mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0; 116 mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
102 mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0; 117 mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
103 mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0; 118 mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
@@ -126,9 +141,9 @@ void lj_dispatch_update(global_State *g)
126 disp[GG_LEN_DDISP+BC_LOOP] = f_loop; 141 disp[GG_LEN_DDISP+BC_LOOP] = f_loop;
127 142
128 /* Set dynamic instruction dispatch. */ 143 /* Set dynamic instruction dispatch. */
129 if ((oldmode ^ mode) & (DISPMODE_REC|DISPMODE_INS)) { 144 if ((oldmode ^ mode) & (DISPMODE_PROF|DISPMODE_REC|DISPMODE_INS)) {
130 /* Need to update the whole table. */ 145 /* Need to update the whole table. */
131 if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { /* No ins dispatch? */ 146 if (!(mode & DISPMODE_INS)) { /* No ins dispatch? */
132 /* Copy static dispatch table to dynamic dispatch table. */ 147 /* Copy static dispatch table to dynamic dispatch table. */
133 memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction)); 148 memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction));
134 /* Overwrite with dynamic return dispatch. */ 149 /* Overwrite with dynamic return dispatch. */
@@ -140,12 +155,13 @@ void lj_dispatch_update(global_State *g)
140 } 155 }
141 } else { 156 } else {
142 /* The recording dispatch also checks for hooks. */ 157 /* The recording dispatch also checks for hooks. */
143 ASMFunction f = (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook; 158 ASMFunction f = (mode & DISPMODE_PROF) ? lj_vm_profhook :
159 (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook;
144 uint32_t i; 160 uint32_t i;
145 for (i = 0; i < GG_LEN_SDISP; i++) 161 for (i = 0; i < GG_LEN_SDISP; i++)
146 disp[i] = f; 162 disp[i] = f;
147 } 163 }
148 } else if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { 164 } else if (!(mode & DISPMODE_INS)) {
149 /* Otherwise set dynamic counting ins. */ 165 /* Otherwise set dynamic counting ins. */
150 disp[BC_FORL] = f_forl; 166 disp[BC_FORL] = f_forl;
151 disp[BC_ITERL] = f_iterl; 167 disp[BC_ITERL] = f_iterl;
@@ -236,22 +252,15 @@ int luaJIT_setmode(lua_State *L, int idx, int mode)
236 } else { 252 } else {
237 if (!(mode & LUAJIT_MODE_ON)) 253 if (!(mode & LUAJIT_MODE_ON))
238 G2J(g)->flags &= ~(uint32_t)JIT_F_ON; 254 G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
239#if LJ_TARGET_X86ORX64
240 else if ((G2J(g)->flags & JIT_F_SSE2))
241 G2J(g)->flags |= (uint32_t)JIT_F_ON;
242 else
243 return 0; /* Don't turn on JIT compiler without SSE2 support. */
244#else
245 else 255 else
246 G2J(g)->flags |= (uint32_t)JIT_F_ON; 256 G2J(g)->flags |= (uint32_t)JIT_F_ON;
247#endif
248 lj_dispatch_update(g); 257 lj_dispatch_update(g);
249 } 258 }
250 break; 259 break;
251 case LUAJIT_MODE_FUNC: 260 case LUAJIT_MODE_FUNC:
252 case LUAJIT_MODE_ALLFUNC: 261 case LUAJIT_MODE_ALLFUNC:
253 case LUAJIT_MODE_ALLSUBFUNC: { 262 case LUAJIT_MODE_ALLSUBFUNC: {
254 cTValue *tv = idx == 0 ? frame_prev(L->base-1) : 263 cTValue *tv = idx == 0 ? frame_prev(L->base-1)-LJ_FR2 :
255 idx > 0 ? L->base + (idx-1) : L->top + idx; 264 idx > 0 ? L->base + (idx-1) : L->top + idx;
256 GCproto *pt; 265 GCproto *pt;
257 if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn)) 266 if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn))
@@ -352,10 +361,19 @@ static void callhook(lua_State *L, int event, BCLine line)
352 /* Top frame, nextframe = NULL. */ 361 /* Top frame, nextframe = NULL. */
353 ar.i_ci = (int)((L->base-1) - tvref(L->stack)); 362 ar.i_ci = (int)((L->base-1) - tvref(L->stack));
354 lj_state_checkstack(L, 1+LUA_MINSTACK); 363 lj_state_checkstack(L, 1+LUA_MINSTACK);
364#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF
365 lj_profile_hook_enter(g);
366#else
355 hook_enter(g); 367 hook_enter(g);
368#endif
356 hookf(L, &ar); 369 hookf(L, &ar);
357 lua_assert(hook_active(g)); 370 lj_assertG(hook_active(g), "active hook flag removed");
371 setgcref(g->cur_L, obj2gco(L));
372#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF
373 lj_profile_hook_leave(g);
374#else
358 hook_leave(g); 375 hook_leave(g);
376#endif
359 } 377 }
360} 378}
361 379
@@ -368,7 +386,7 @@ static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
368 if (bc_op(ins) == BC_UCLO) 386 if (bc_op(ins) == BC_UCLO)
369 ins = pc[bc_j(ins)]; 387 ins = pc[bc_j(ins)];
370 switch (bc_op(ins)) { 388 switch (bc_op(ins)) {
371 case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1; 389 case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1+LJ_FR2;
372 case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1; 390 case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1;
373 case BC_TSETM: return bc_a(ins) + nres-1; 391 case BC_TSETM: return bc_a(ins) + nres-1;
374 default: return pt->framesize; 392 default: return pt->framesize;
@@ -397,7 +415,8 @@ void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
397#endif 415#endif
398 J->L = L; 416 J->L = L;
399 lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ 417 lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
400 lua_assert(L->top - L->base == delta); 418 lj_assertG(L->top - L->base == delta,
419 "unbalanced stack after tracing of instruction");
401 } 420 }
402 } 421 }
403#endif 422#endif
@@ -457,7 +476,8 @@ ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
457#endif 476#endif
458 pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); 477 pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
459 lj_trace_hot(J, pc); 478 lj_trace_hot(J, pc);
460 lua_assert(L->top - L->base == delta); 479 lj_assertG(L->top - L->base == delta,
480 "unbalanced stack after hot call");
461 goto out; 481 goto out;
462 } else if (J->state != LJ_TRACE_IDLE && 482 } else if (J->state != LJ_TRACE_IDLE &&
463 !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { 483 !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
@@ -466,7 +486,8 @@ ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
466#endif 486#endif
467 /* Record the FUNC* bytecodes, too. */ 487 /* Record the FUNC* bytecodes, too. */
468 lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ 488 lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
469 lua_assert(L->top - L->base == delta); 489 lj_assertG(L->top - L->base == delta,
490 "unbalanced stack after hot instruction");
470 } 491 }
471#endif 492#endif
472 if ((g->hookmask & LUA_MASKCALL)) { 493 if ((g->hookmask & LUA_MASKCALL)) {
@@ -492,3 +513,41 @@ out:
492 return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */ 513 return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */
493} 514}
494 515
516#if LJ_HASJIT
517/* Stitch a new trace. */
518void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc)
519{
520 ERRNO_SAVE
521 lua_State *L = J->L;
522 void *cf = cframe_raw(L->cframe);
523 const BCIns *oldpc = cframe_pc(cf);
524 setcframe_pc(cf, pc);
525 /* Before dispatch, have to bias PC by 1. */
526 L->top = L->base + cur_topslot(curr_proto(L), pc+1, cframe_multres_n(cf));
527 lj_trace_stitch(J, pc-1); /* Point to the CALL instruction. */
528 setcframe_pc(cf, oldpc);
529 ERRNO_RESTORE
530}
531#endif
532
533#if LJ_HASPROFILE
534/* Profile dispatch. */
535void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc)
536{
537 ERRNO_SAVE
538 GCfunc *fn = curr_func(L);
539 GCproto *pt = funcproto(fn);
540 void *cf = cframe_raw(L->cframe);
541 const BCIns *oldpc = cframe_pc(cf);
542 global_State *g;
543 setcframe_pc(cf, pc);
544 L->top = L->base + cur_topslot(pt, pc, cframe_multres_n(cf));
545 lj_profile_interpreter(L);
546 setcframe_pc(cf, oldpc);
547 g = G(L);
548 setgcref(g->cur_L, obj2gco(L));
549 setvmstate(g, INTERP);
550 ERRNO_RESTORE
551}
552#endif
553
diff --git a/src/lj_dispatch.h b/src/lj_dispatch.h
index 372de014..187bd5ca 100644
--- a/src/lj_dispatch.h
+++ b/src/lj_dispatch.h
@@ -14,6 +14,22 @@
14 14
15#if LJ_TARGET_MIPS 15#if LJ_TARGET_MIPS
16/* Need our own global offset table for the dreaded MIPS calling conventions. */ 16/* Need our own global offset table for the dreaded MIPS calling conventions. */
17
18#ifndef _LJ_VM_H
19LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b);
20#endif
21
22#if LJ_SOFTFP
23#ifndef _LJ_IRCALL_H
24extern double __adddf3(double a, double b);
25extern double __subdf3(double a, double b);
26extern double __muldf3(double a, double b);
27extern double __divdf3(double a, double b);
28#endif
29#define SFGOTDEF(_) _(sqrt) _(__adddf3) _(__subdf3) _(__muldf3) _(__divdf3)
30#else
31#define SFGOTDEF(_)
32#endif
17#if LJ_HASJIT 33#if LJ_HASJIT
18#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot) 34#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot)
19#else 35#else
@@ -28,16 +44,19 @@
28#define GOTDEF(_) \ 44#define GOTDEF(_) \
29 _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \ 45 _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
30 _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \ 46 _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
31 _(pow) _(fmod) _(ldexp) \ 47 _(pow) _(fmod) _(ldexp) _(lj_vm_modi) \
32 _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_err_throw) _(lj_err_run) \ 48 _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \
49 _(lj_dispatch_profile) _(lj_err_throw) _(lj_err_run) \
33 _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \ 50 _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
34 _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \ 51 _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
35 _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \ 52 _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
36 _(lj_meta_for) _(lj_meta_len) _(lj_meta_tget) _(lj_meta_tset) \ 53 _(lj_meta_for) _(lj_meta_istype) _(lj_meta_len) _(lj_meta_tget) \
37 _(lj_state_growstack) _(lj_str_fromnum) _(lj_str_fromnumber) _(lj_str_new) \ 54 _(lj_meta_tset) _(lj_state_growstack) _(lj_strfmt_number) \
38 _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) _(lj_tab_new) \ 55 _(lj_str_new) _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) \
39 _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \ 56 _(lj_tab_new) _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
40 JITGOTDEF(_) FFIGOTDEF(_) 57 _(lj_tab_setinth) _(lj_buf_putstr_reverse) _(lj_buf_putstr_lower) \
58 _(lj_buf_putstr_upper) _(lj_buf_tostr) \
59 JITGOTDEF(_) FFIGOTDEF(_) SFGOTDEF(_)
41 60
42enum { 61enum {
43#define GOTENUM(name) LJ_GOT_##name, 62#define GOTENUM(name) LJ_GOT_##name,
@@ -60,7 +79,7 @@ typedef uint16_t HotCount;
60#define HOTCOUNT_CALL 1 79#define HOTCOUNT_CALL 1
61 80
62/* This solves a circular dependency problem -- bump as needed. Sigh. */ 81/* This solves a circular dependency problem -- bump as needed. Sigh. */
63#define GG_NUM_ASMFF 62 82#define GG_NUM_ASMFF 57
64 83
65#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF) 84#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
66#define GG_LEN_SDISP BC_FUNCF 85#define GG_LEN_SDISP BC_FUNCF
@@ -96,6 +115,7 @@ typedef struct GG_State {
96#define J2G(J) (&J2GG(J)->g) 115#define J2G(J) (&J2GG(J)->g)
97#define G2J(gl) (&G2GG(gl)->J) 116#define G2J(gl) (&G2GG(gl)->J)
98#define L2J(L) (&L2GG(L)->J) 117#define L2J(L) (&L2GG(L)->J)
118#define GG_G2J (GG_OFS(J) - GG_OFS(g))
99#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g)) 119#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
100#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch)) 120#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
101#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch)) 121#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
@@ -117,7 +137,12 @@ LJ_FUNC void lj_dispatch_update(global_State *g);
117/* Instruction dispatch callback for hooks or when recording. */ 137/* Instruction dispatch callback for hooks or when recording. */
118LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc); 138LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
119LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc); 139LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
120LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc); 140#if LJ_HASJIT
141LJ_FUNCA void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc);
142#endif
143#if LJ_HASPROFILE
144LJ_FUNCA void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc);
145#endif
121 146
122#if LJ_HASFFI && !defined(_BUILDVM_H) 147#if LJ_HASFFI && !defined(_BUILDVM_H)
123/* Save/restore errno and GetLastError() around hooks, exits and recording. */ 148/* Save/restore errno and GetLastError() around hooks, exits and recording. */
diff --git a/src/lj_emit_arm.h b/src/lj_emit_arm.h
index 6a136e51..165d546d 100644
--- a/src/lj_emit_arm.h
+++ b/src/lj_emit_arm.h
@@ -81,7 +81,8 @@ static void emit_m(ASMState *as, ARMIns ai, Reg rm)
81 81
82static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) 82static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
83{ 83{
84 lua_assert(ofs >= -255 && ofs <= 255); 84 lj_assertA(ofs >= -255 && ofs <= 255,
85 "load/store offset %d out of range", ofs);
85 if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; 86 if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
86 *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | 87 *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
87 ((ofs & 0xf0) << 4) | (ofs & 0x0f); 88 ((ofs & 0xf0) << 4) | (ofs & 0x0f);
@@ -89,7 +90,8 @@ static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
89 90
90static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) 91static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
91{ 92{
92 lua_assert(ofs >= -4095 && ofs <= 4095); 93 lj_assertA(ofs >= -4095 && ofs <= 4095,
94 "load/store offset %d out of range", ofs);
93 /* Combine LDR/STR pairs to LDRD/STRD. */ 95 /* Combine LDR/STR pairs to LDRD/STRD. */
94 if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && 96 if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
95 (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && 97 (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
@@ -106,7 +108,8 @@ static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
106#if !LJ_SOFTFP 108#if !LJ_SOFTFP
107static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) 109static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
108{ 110{
109 lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0); 111 lj_assertA(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0,
112 "load/store offset %d out of range", ofs);
110 if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; 113 if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
111 *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2); 114 *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2);
112} 115}
@@ -124,7 +127,7 @@ static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
124 while (work) { 127 while (work) {
125 Reg r = rset_picktop(work); 128 Reg r = rset_picktop(work);
126 IRRef ref = regcost_ref(as->cost[r]); 129 IRRef ref = regcost_ref(as->cost[r]);
127 lua_assert(r != d); 130 lj_assertA(r != d, "dest reg not free");
128 if (emit_canremat(ref)) { 131 if (emit_canremat(ref)) {
129 int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); 132 int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
130 uint32_t k = emit_isk12(ARMI_ADD, delta); 133 uint32_t k = emit_isk12(ARMI_ADD, delta);
@@ -142,13 +145,13 @@ static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
142} 145}
143 146
144/* Try to find a two step delta relative to another constant. */ 147/* Try to find a two step delta relative to another constant. */
145static int emit_kdelta2(ASMState *as, Reg d, int32_t i) 148static int emit_kdelta2(ASMState *as, Reg rd, int32_t i)
146{ 149{
147 RegSet work = ~as->freeset & RSET_GPR; 150 RegSet work = ~as->freeset & RSET_GPR;
148 while (work) { 151 while (work) {
149 Reg r = rset_picktop(work); 152 Reg r = rset_picktop(work);
150 IRRef ref = regcost_ref(as->cost[r]); 153 IRRef ref = regcost_ref(as->cost[r]);
151 lua_assert(r != d); 154 lj_assertA(r != rd, "dest reg %d not free", rd);
152 if (emit_canremat(ref)) { 155 if (emit_canremat(ref)) {
153 int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i; 156 int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i;
154 if (other) { 157 if (other) {
@@ -159,8 +162,8 @@ static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
159 k2 = emit_isk12(0, delta & (255 << sh)); 162 k2 = emit_isk12(0, delta & (255 << sh));
160 k = emit_isk12(0, delta & ~(255 << sh)); 163 k = emit_isk12(0, delta & ~(255 << sh));
161 if (k) { 164 if (k) {
162 emit_dn(as, ARMI_ADD^k2^inv, d, d); 165 emit_dn(as, ARMI_ADD^k2^inv, rd, rd);
163 emit_dn(as, ARMI_ADD^k^inv, d, r); 166 emit_dn(as, ARMI_ADD^k^inv, rd, r);
164 return 1; 167 return 1;
165 } 168 }
166 } 169 }
@@ -171,23 +174,24 @@ static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
171} 174}
172 175
173/* Load a 32 bit constant into a GPR. */ 176/* Load a 32 bit constant into a GPR. */
174static void emit_loadi(ASMState *as, Reg r, int32_t i) 177static void emit_loadi(ASMState *as, Reg rd, int32_t i)
175{ 178{
176 uint32_t k = emit_isk12(ARMI_MOV, i); 179 uint32_t k = emit_isk12(ARMI_MOV, i);
177 lua_assert(rset_test(as->freeset, r) || r == RID_TMP); 180 lj_assertA(rset_test(as->freeset, rd) || rd == RID_TMP,
181 "dest reg %d not free", rd);
178 if (k) { 182 if (k) {
179 /* Standard K12 constant. */ 183 /* Standard K12 constant. */
180 emit_d(as, ARMI_MOV^k, r); 184 emit_d(as, ARMI_MOV^k, rd);
181 } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { 185 } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
182 /* 16 bit loword constant for ARMv6T2. */ 186 /* 16 bit loword constant for ARMv6T2. */
183 emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); 187 emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd);
184 } else if (emit_kdelta1(as, r, i)) { 188 } else if (emit_kdelta1(as, rd, i)) {
185 /* One step delta relative to another constant. */ 189 /* One step delta relative to another constant. */
186 } else if ((as->flags & JIT_F_ARMV6T2)) { 190 } else if ((as->flags & JIT_F_ARMV6T2)) {
187 /* 32 bit hiword/loword constant for ARMv6T2. */ 191 /* 32 bit hiword/loword constant for ARMv6T2. */
188 emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r); 192 emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), rd);
189 emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); 193 emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd);
190 } else if (emit_kdelta2(as, r, i)) { 194 } else if (emit_kdelta2(as, rd, i)) {
191 /* Two step delta relative to another constant. */ 195 /* Two step delta relative to another constant. */
192 } else { 196 } else {
193 /* Otherwise construct the constant with up to 4 instructions. */ 197 /* Otherwise construct the constant with up to 4 instructions. */
@@ -197,17 +201,17 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i)
197 int32_t m = i & (255 << sh); 201 int32_t m = i & (255 << sh);
198 i &= ~(255 << sh); 202 i &= ~(255 << sh);
199 if (i == 0) { 203 if (i == 0) {
200 emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r); 204 emit_d(as, ARMI_MOV ^ emit_isk12(0, m), rd);
201 break; 205 break;
202 } 206 }
203 emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r); 207 emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), rd, rd);
204 } 208 }
205 } 209 }
206} 210}
207 211
208#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) 212#define emit_loada(as, rd, addr) emit_loadi(as, (rd), i32ptr((addr)))
209 213
210static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); 214static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
211 215
212/* Get/set from constant pointer. */ 216/* Get/set from constant pointer. */
213static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p) 217static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
@@ -219,8 +223,9 @@ static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
219 223
220#if !LJ_SOFTFP 224#if !LJ_SOFTFP
221/* Load a number constant into an FPR. */ 225/* Load a number constant into an FPR. */
222static void emit_loadn(ASMState *as, Reg r, cTValue *tv) 226static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
223{ 227{
228 cTValue *tv = ir_knum(ir);
224 int32_t i; 229 int32_t i;
225 if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) { 230 if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) {
226 uint32_t hi = tv->u32.hi; 231 uint32_t hi = tv->u32.hi;
@@ -260,7 +265,7 @@ static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
260{ 265{
261 MCode *p = as->mcp; 266 MCode *p = as->mcp;
262 ptrdiff_t delta = (target - p) - 1; 267 ptrdiff_t delta = (target - p) - 1;
263 lua_assert(((delta + 0x00800000) >> 24) == 0); 268 lj_assertA(((delta + 0x00800000) >> 24) == 0, "branch target out of range");
264 *--p = ai | ((uint32_t)delta & 0x00ffffffu); 269 *--p = ai | ((uint32_t)delta & 0x00ffffffu);
265 as->mcp = p; 270 as->mcp = p;
266} 271}
@@ -288,7 +293,7 @@ static void emit_call(ASMState *as, void *target)
288static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) 293static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
289{ 294{
290#if LJ_SOFTFP 295#if LJ_SOFTFP
291 lua_assert(!irt_isnum(ir->t)); UNUSED(ir); 296 lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
292#else 297#else
293 if (dst >= RID_MAX_GPR) { 298 if (dst >= RID_MAX_GPR) {
294 emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S, 299 emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S,
@@ -308,30 +313,30 @@ static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
308 emit_dm(as, ARMI_MOV, dst, src); 313 emit_dm(as, ARMI_MOV, dst, src);
309} 314}
310 315
311/* Generic load of register from stack slot. */ 316/* Generic load of register with base and (small) offset address. */
312static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 317static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
313{ 318{
314#if LJ_SOFTFP 319#if LJ_SOFTFP
315 lua_assert(!irt_isnum(ir->t)); UNUSED(ir); 320 lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
316#else 321#else
317 if (r >= RID_MAX_GPR) 322 if (r >= RID_MAX_GPR)
318 emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, RID_SP, ofs); 323 emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs);
319 else 324 else
320#endif 325#endif
321 emit_lso(as, ARMI_LDR, r, RID_SP, ofs); 326 emit_lso(as, ARMI_LDR, r, base, ofs);
322} 327}
323 328
324/* Generic store of register to stack slot. */ 329/* Generic store of register with base and (small) offset address. */
325static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 330static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
326{ 331{
327#if LJ_SOFTFP 332#if LJ_SOFTFP
328 lua_assert(!irt_isnum(ir->t)); UNUSED(ir); 333 lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
329#else 334#else
330 if (r >= RID_MAX_GPR) 335 if (r >= RID_MAX_GPR)
331 emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, RID_SP, ofs); 336 emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs);
332 else 337 else
333#endif 338#endif
334 emit_lso(as, ARMI_STR, r, RID_SP, ofs); 339 emit_lso(as, ARMI_STR, r, base, ofs);
335} 340}
336 341
337/* Emit an arithmetic/logic operation with a constant operand. */ 342/* Emit an arithmetic/logic operation with a constant operand. */
diff --git a/src/lj_emit_arm64.h b/src/lj_emit_arm64.h
new file mode 100644
index 00000000..61a2df82
--- /dev/null
+++ b/src/lj_emit_arm64.h
@@ -0,0 +1,422 @@
1/*
2** ARM64 instruction emitter.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4**
5** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
6** Sponsored by Cisco Systems, Inc.
7*/
8
9/* -- Constant encoding --------------------------------------------------- */
10
11static uint64_t get_k64val(ASMState *as, IRRef ref)
12{
13 IRIns *ir = IR(ref);
14 if (ir->o == IR_KINT64) {
15 return ir_kint64(ir)->u64;
16 } else if (ir->o == IR_KGC) {
17 return (uint64_t)ir_kgc(ir);
18 } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
19 return (uint64_t)ir_kptr(ir);
20 } else {
21 lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL,
22 "bad 64 bit const IR op %d", ir->o);
23 return ir->i; /* Sign-extended. */
24 }
25}
26
27/* Encode constant in K12 format for data processing instructions. */
28static uint32_t emit_isk12(int64_t n)
29{
30 uint64_t k = (n < 0) ? -n : n;
31 uint32_t m = (n < 0) ? 0x40000000 : 0;
32 if (k < 0x1000) {
33 return A64I_K12|m|A64F_U12(k);
34 } else if ((k & 0xfff000) == k) {
35 return A64I_K12|m|0x400000|A64F_U12(k>>12);
36 }
37 return 0;
38}
39
40#define emit_clz64(n) __builtin_clzll(n)
41#define emit_ctz64(n) __builtin_ctzll(n)
42
43/* Encode constant in K13 format for logical data processing instructions. */
44static uint32_t emit_isk13(uint64_t n, int is64)
45{
46 int inv = 0, w = 128, lz, tz;
47 if (n & 1) { n = ~n; w = 64; inv = 1; } /* Avoid wrap-around of ones. */
48 if (!n) return 0; /* Neither all-zero nor all-ones are allowed. */
49 do { /* Find the repeat width. */
50 if (is64 && (uint32_t)(n^(n>>32))) break;
51 n = (uint32_t)n;
52 if (!n) return 0; /* Ditto when passing n=0xffffffff and is64=0. */
53 w = 32; if ((n^(n>>16)) & 0xffff) break;
54 n = n & 0xffff; w = 16; if ((n^(n>>8)) & 0xff) break;
55 n = n & 0xff; w = 8; if ((n^(n>>4)) & 0xf) break;
56 n = n & 0xf; w = 4; if ((n^(n>>2)) & 0x3) break;
57 n = n & 0x3; w = 2;
58 } while (0);
59 lz = emit_clz64(n);
60 tz = emit_ctz64(n);
61 if ((int64_t)(n << lz) >> (lz+tz) != -1ll) return 0; /* Non-contiguous? */
62 if (inv)
63 return A64I_K13 | (((lz-w) & 127) << 16) | (((lz+tz-w-1) & 63) << 10);
64 else
65 return A64I_K13 | ((w-tz) << 16) | (((63-lz-tz-w-w) & 63) << 10);
66}
67
68static uint32_t emit_isfpk64(uint64_t n)
69{
70 uint64_t etop9 = ((n >> 54) & 0x1ff);
71 if ((n << 16) == 0 && (etop9 == 0x100 || etop9 == 0x0ff)) {
72 return (uint32_t)(((n >> 48) & 0x7f) | ((n >> 56) & 0x80));
73 }
74 return ~0u;
75}
76
77/* -- Emit basic instructions --------------------------------------------- */
78
79static void emit_dnma(ASMState *as, A64Ins ai, Reg rd, Reg rn, Reg rm, Reg ra)
80{
81 *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_M(rm) | A64F_A(ra);
82}
83
84static void emit_dnm(ASMState *as, A64Ins ai, Reg rd, Reg rn, Reg rm)
85{
86 *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_M(rm);
87}
88
89static void emit_dm(ASMState *as, A64Ins ai, Reg rd, Reg rm)
90{
91 *--as->mcp = ai | A64F_D(rd) | A64F_M(rm);
92}
93
94static void emit_dn(ASMState *as, A64Ins ai, Reg rd, Reg rn)
95{
96 *--as->mcp = ai | A64F_D(rd) | A64F_N(rn);
97}
98
99static void emit_nm(ASMState *as, A64Ins ai, Reg rn, Reg rm)
100{
101 *--as->mcp = ai | A64F_N(rn) | A64F_M(rm);
102}
103
104static void emit_d(ASMState *as, A64Ins ai, Reg rd)
105{
106 *--as->mcp = ai | A64F_D(rd);
107}
108
109static void emit_n(ASMState *as, A64Ins ai, Reg rn)
110{
111 *--as->mcp = ai | A64F_N(rn);
112}
113
114static int emit_checkofs(A64Ins ai, int64_t ofs)
115{
116 int scale = (ai >> 30) & 3;
117 if (ofs < 0 || (ofs & ((1<<scale)-1))) {
118 return (ofs >= -256 && ofs <= 255) ? -1 : 0;
119 } else {
120 return (ofs < (4096<<scale)) ? 1 : 0;
121 }
122}
123
124static void emit_lso(ASMState *as, A64Ins ai, Reg rd, Reg rn, int64_t ofs)
125{
126 int ot = emit_checkofs(ai, ofs), sc = (ai >> 30) & 3;
127 lj_assertA(ot, "load/store offset %d out of range", ofs);
128 /* Combine LDR/STR pairs to LDP/STP. */
129 if ((sc == 2 || sc == 3) &&
130 (!(ai & 0x400000) || rd != rn) &&
131 as->mcp != as->mcloop) {
132 uint32_t prev = *as->mcp & ~A64F_D(31);
133 int ofsm = ofs - (1<<sc), ofsp = ofs + (1<<sc);
134 A64Ins aip;
135 if (prev == (ai | A64F_N(rn) | A64F_U12(ofsm>>sc)) ||
136 prev == ((ai^A64I_LS_U) | A64F_N(rn) | A64F_S9(ofsm&0x1ff))) {
137 aip = (A64F_A(rd) | A64F_D(*as->mcp & 31));
138 } else if (prev == (ai | A64F_N(rn) | A64F_U12(ofsp>>sc)) ||
139 prev == ((ai^A64I_LS_U) | A64F_N(rn) | A64F_S9(ofsp&0x1ff))) {
140 aip = (A64F_D(rd) | A64F_A(*as->mcp & 31));
141 ofsm = ofs;
142 } else {
143 goto nopair;
144 }
145 if (ofsm >= (int)((unsigned int)-64<<sc) && ofsm <= (63<<sc)) {
146 *as->mcp = aip | A64F_N(rn) | ((ofsm >> sc) << 15) |
147 (ai ^ ((ai == A64I_LDRx || ai == A64I_STRx) ? 0x50000000 : 0x90000000));
148 return;
149 }
150 }
151nopair:
152 if (ot == 1)
153 *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_U12(ofs >> sc);
154 else
155 *--as->mcp = (ai^A64I_LS_U) | A64F_D(rd) | A64F_N(rn) | A64F_S9(ofs & 0x1ff);
156}
157
158/* -- Emit loads/stores --------------------------------------------------- */
159
160/* Prefer rematerialization of BASE/L from global_State over spills. */
161#define emit_canremat(ref) ((ref) <= ASMREF_L)
162
163/* Try to find an N-step delta relative to other consts with N < lim. */
164static int emit_kdelta(ASMState *as, Reg rd, uint64_t k, int lim)
165{
166 RegSet work = ~as->freeset & RSET_GPR;
167 if (lim <= 1) return 0; /* Can't beat that. */
168 while (work) {
169 Reg r = rset_picktop(work);
170 IRRef ref = regcost_ref(as->cost[r]);
171 lj_assertA(r != rd, "dest reg %d not free", rd);
172 if (ref < REF_TRUE) {
173 uint64_t kx = ra_iskref(ref) ? (uint64_t)ra_krefk(as, ref) :
174 get_k64val(as, ref);
175 int64_t delta = (int64_t)(k - kx);
176 if (delta == 0) {
177 emit_dm(as, A64I_MOVx, rd, r);
178 return 1;
179 } else {
180 uint32_t k12 = emit_isk12(delta < 0 ? -delta : delta);
181 if (k12) {
182 emit_dn(as, (delta < 0 ? A64I_SUBx : A64I_ADDx)^k12, rd, r);
183 return 1;
184 }
185 /* Do other ops or multi-step deltas pay off? Probably not.
186 ** E.g. XOR rarely helps with pointer consts.
187 */
188 }
189 }
190 rset_clear(work, r);
191 }
192 return 0; /* Failed. */
193}
194
195static void emit_loadk(ASMState *as, Reg rd, uint64_t u64, int is64)
196{
197 uint32_t k13 = emit_isk13(u64, is64);
198 if (k13) { /* Can the constant be represented as a bitmask immediate? */
199 emit_dn(as, (is64|A64I_ORRw)^k13, rd, RID_ZERO);
200 } else {
201 int i, zeros = 0, ones = 0, neg;
202 if (!is64) u64 = (int64_t)(int32_t)u64; /* Sign-extend. */
203 /* Count homogeneous 16 bit fragments. */
204 for (i = 0; i < 4; i++) {
205 uint64_t frag = (u64 >> i*16) & 0xffff;
206 zeros += (frag == 0);
207 ones += (frag == 0xffff);
208 }
209 neg = ones > zeros; /* Use MOVN if it pays off. */
210 if (!emit_kdelta(as, rd, u64, 4 - (neg ? ones : zeros))) {
211 int shift = 0, lshift = 0;
212 uint64_t n64 = neg ? ~u64 : u64;
213 if (n64 != 0) {
214 /* Find first/last fragment to be filled. */
215 shift = (63-emit_clz64(n64)) & ~15;
216 lshift = emit_ctz64(n64) & ~15;
217 }
218 /* MOVK requires the original value (u64). */
219 while (shift > lshift) {
220 uint32_t u16 = (u64 >> shift) & 0xffff;
221 /* Skip fragments that are correctly filled by MOVN/MOVZ. */
222 if (u16 != (neg ? 0xffff : 0))
223 emit_d(as, is64 | A64I_MOVKw | A64F_U16(u16) | A64F_LSL16(shift), rd);
224 shift -= 16;
225 }
226 /* But MOVN needs an inverted value (n64). */
227 emit_d(as, (neg ? A64I_MOVNx : A64I_MOVZx) |
228 A64F_U16((n64 >> lshift) & 0xffff) | A64F_LSL16(lshift), rd);
229 }
230 }
231}
232
233/* Load a 32 bit constant into a GPR. */
234#define emit_loadi(as, rd, i) emit_loadk(as, rd, i, 0)
235
236/* Load a 64 bit constant into a GPR. */
237#define emit_loadu64(as, rd, i) emit_loadk(as, rd, i, A64I_X)
238
239#define emit_loada(as, r, addr) emit_loadu64(as, (r), (uintptr_t)(addr))
240
241#define glofs(as, k) \
242 ((intptr_t)((uintptr_t)(k) - (uintptr_t)&J2GG(as->J)->g))
243#define mcpofs(as, k) \
244 ((intptr_t)((uintptr_t)(k) - (uintptr_t)(as->mcp - 1)))
245#define checkmcpofs(as, k) \
246 (A64F_S_OK(mcpofs(as, k)>>2, 19))
247
248static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
249
250/* Get/set from constant pointer. */
251static void emit_lsptr(ASMState *as, A64Ins ai, Reg r, void *p)
252{
253 /* First, check if ip + offset is in range. */
254 if ((ai & 0x00400000) && checkmcpofs(as, p)) {
255 emit_d(as, A64I_LDRLx | A64F_S19(mcpofs(as, p)>>2), r);
256 } else {
257 Reg base = RID_GL; /* Next, try GL + offset. */
258 int64_t ofs = glofs(as, p);
259 if (!emit_checkofs(ai, ofs)) { /* Else split up into base reg + offset. */
260 int64_t i64 = i64ptr(p);
261 base = ra_allock(as, (i64 & ~0x7fffull), rset_exclude(RSET_GPR, r));
262 ofs = i64 & 0x7fffull;
263 }
264 emit_lso(as, ai, r, base, ofs);
265 }
266}
267
268/* Load 64 bit IR constant into register. */
269static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
270{
271 const uint64_t *k = &ir_k64(ir)->u64;
272 int64_t ofs;
273 if (r >= RID_MAX_GPR) {
274 uint32_t fpk = emit_isfpk64(*k);
275 if (fpk != ~0u) {
276 emit_d(as, A64I_FMOV_DI | A64F_FP8(fpk), (r & 31));
277 return;
278 }
279 }
280 ofs = glofs(as, k);
281 if (emit_checkofs(A64I_LDRx, ofs)) {
282 emit_lso(as, r >= RID_MAX_GPR ? A64I_LDRd : A64I_LDRx,
283 (r & 31), RID_GL, ofs);
284 } else {
285 if (r >= RID_MAX_GPR) {
286 emit_dn(as, A64I_FMOV_D_R, (r & 31), RID_TMP);
287 r = RID_TMP;
288 }
289 if (checkmcpofs(as, k))
290 emit_d(as, A64I_LDRLx | A64F_S19(mcpofs(as, k)>>2), r);
291 else
292 emit_loadu64(as, r, *k);
293 }
294}
295
296/* Get/set global_State fields. */
297#define emit_getgl(as, r, field) \
298 emit_lsptr(as, A64I_LDRx, (r), (void *)&J2G(as->J)->field)
299#define emit_setgl(as, r, field) \
300 emit_lsptr(as, A64I_STRx, (r), (void *)&J2G(as->J)->field)
301
302/* Trace number is determined from pc of exit instruction. */
303#define emit_setvmstate(as, i) UNUSED(i)
304
305/* -- Emit control-flow instructions -------------------------------------- */
306
307/* Label for internal jumps. */
308typedef MCode *MCLabel;
309
310/* Return label pointing to current PC. */
311#define emit_label(as) ((as)->mcp)
312
313static void emit_cond_branch(ASMState *as, A64CC cond, MCode *target)
314{
315 MCode *p = --as->mcp;
316 ptrdiff_t delta = target - p;
317 lj_assertA(A64F_S_OK(delta, 19), "branch target out of range");
318 *p = A64I_BCC | A64F_S19(delta) | cond;
319}
320
321static void emit_branch(ASMState *as, A64Ins ai, MCode *target)
322{
323 MCode *p = --as->mcp;
324 ptrdiff_t delta = target - p;
325 lj_assertA(A64F_S_OK(delta, 26), "branch target out of range");
326 *p = ai | A64F_S26(delta);
327}
328
329static void emit_tnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit, MCode *target)
330{
331 MCode *p = --as->mcp;
332 ptrdiff_t delta = target - p;
333 lj_assertA(bit < 63, "bit number out of range");
334 lj_assertA(A64F_S_OK(delta, 14), "branch target out of range");
335 if (bit > 31) ai |= A64I_X;
336 *p = ai | A64F_BIT(bit & 31) | A64F_S14(delta) | r;
337}
338
339static void emit_cnb(ASMState *as, A64Ins ai, Reg r, MCode *target)
340{
341 MCode *p = --as->mcp;
342 ptrdiff_t delta = target - p;
343 lj_assertA(A64F_S_OK(delta, 19), "branch target out of range");
344 *p = ai | A64F_S19(delta) | r;
345}
346
347#define emit_jmp(as, target) emit_branch(as, A64I_B, (target))
348
349static void emit_call(ASMState *as, void *target)
350{
351 MCode *p = --as->mcp;
352 ptrdiff_t delta = (char *)target - (char *)p;
353 if (A64F_S_OK(delta>>2, 26)) {
354 *p = A64I_BL | A64F_S26(delta>>2);
355 } else { /* Target out of range: need indirect call. But don't use R0-R7. */
356 Reg r = ra_allock(as, i64ptr(target),
357 RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
358 *p = A64I_BLR | A64F_N(r);
359 }
360}
361
362/* -- Emit generic operations --------------------------------------------- */
363
364/* Generic move between two regs. */
365static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
366{
367 if (dst >= RID_MAX_GPR) {
368 emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D : A64I_FMOV_S,
369 (dst & 31), (src & 31));
370 return;
371 }
372 if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
373 MCode ins = *as->mcp, swp = (src^dst);
374 if ((ins & 0xbf800000) == 0xb9000000) {
375 if (!((ins ^ (dst << 5)) & 0x000003e0))
376 *as->mcp = ins ^ (swp << 5); /* Swap N in load/store. */
377 if (!(ins & 0x00400000) && !((ins ^ dst) & 0x0000001f))
378 *as->mcp = ins ^ swp; /* Swap D in store. */
379 }
380 }
381 emit_dm(as, A64I_MOVx, dst, src);
382}
383
384/* Generic load of register with base and (small) offset address. */
385static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
386{
387 if (r >= RID_MAX_GPR)
388 emit_lso(as, irt_isnum(ir->t) ? A64I_LDRd : A64I_LDRs, (r & 31), base, ofs);
389 else
390 emit_lso(as, irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw, r, base, ofs);
391}
392
393/* Generic store of register with base and (small) offset address. */
394static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
395{
396 if (r >= RID_MAX_GPR)
397 emit_lso(as, irt_isnum(ir->t) ? A64I_STRd : A64I_STRs, (r & 31), base, ofs);
398 else
399 emit_lso(as, irt_is64(ir->t) ? A64I_STRx : A64I_STRw, r, base, ofs);
400}
401
402/* Emit an arithmetic operation with a constant operand. */
403static void emit_opk(ASMState *as, A64Ins ai, Reg dest, Reg src,
404 int32_t i, RegSet allow)
405{
406 uint32_t k = emit_isk12(i);
407 if (k)
408 emit_dn(as, ai^k, dest, src);
409 else
410 emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
411}
412
413/* Add offset to pointer. */
414static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
415{
416 if (ofs)
417 emit_opk(as, ofs < 0 ? A64I_SUBx : A64I_ADDx, r, r,
418 ofs < 0 ? -ofs : ofs, rset_exclude(RSET_GPR, r));
419}
420
421#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
422
diff --git a/src/lj_emit_mips.h b/src/lj_emit_mips.h
index f3dcd1dd..3de5ff18 100644
--- a/src/lj_emit_mips.h
+++ b/src/lj_emit_mips.h
@@ -3,6 +3,32 @@
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h 3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/ 4*/
5 5
6#if LJ_64
7static intptr_t get_k64val(ASMState *as, IRRef ref)
8{
9 IRIns *ir = IR(ref);
10 if (ir->o == IR_KINT64) {
11 return (intptr_t)ir_kint64(ir)->u64;
12 } else if (ir->o == IR_KGC) {
13 return (intptr_t)ir_kgc(ir);
14 } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
15 return (intptr_t)ir_kptr(ir);
16 } else if (LJ_SOFTFP && ir->o == IR_KNUM) {
17 return (intptr_t)ir_knum(ir)->u64;
18 } else {
19 lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL,
20 "bad 64 bit const IR op %d", ir->o);
21 return ir->i; /* Sign-extended. */
22 }
23}
24#endif
25
26#if LJ_64
27#define get_kval(as, ref) get_k64val(as, ref)
28#else
29#define get_kval(as, ref) (IR((ref))->i)
30#endif
31
6/* -- Emit basic instructions --------------------------------------------- */ 32/* -- Emit basic instructions --------------------------------------------- */
7 33
8static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt) 34static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
@@ -35,7 +61,7 @@ static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
35 61
36static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift) 62static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
37{ 63{
38 if ((as->flags & JIT_F_MIPS32R2)) { 64 if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
39 emit_dta(as, MIPSI_ROTR, dest, src, shift); 65 emit_dta(as, MIPSI_ROTR, dest, src, shift);
40 } else { 66 } else {
41 emit_dst(as, MIPSI_OR, dest, dest, tmp); 67 emit_dst(as, MIPSI_OR, dest, dest, tmp);
@@ -44,23 +70,32 @@ static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
44 } 70 }
45} 71}
46 72
73#if LJ_64
74static void emit_tsml(ASMState *as, MIPSIns mi, Reg rt, Reg rs, uint32_t msb,
75 uint32_t lsb)
76{
77 *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | MIPSF_M(msb) | MIPSF_L(lsb);
78}
79#endif
80
47/* -- Emit loads/stores --------------------------------------------------- */ 81/* -- Emit loads/stores --------------------------------------------------- */
48 82
49/* Prefer rematerialization of BASE/L from global_State over spills. */ 83/* Prefer rematerialization of BASE/L from global_State over spills. */
50#define emit_canremat(ref) ((ref) <= REF_BASE) 84#define emit_canremat(ref) ((ref) <= REF_BASE)
51 85
52/* Try to find a one step delta relative to another constant. */ 86/* Try to find a one step delta relative to another constant. */
53static int emit_kdelta1(ASMState *as, Reg t, int32_t i) 87static int emit_kdelta1(ASMState *as, Reg rd, intptr_t i)
54{ 88{
55 RegSet work = ~as->freeset & RSET_GPR; 89 RegSet work = ~as->freeset & RSET_GPR;
56 while (work) { 90 while (work) {
57 Reg r = rset_picktop(work); 91 Reg r = rset_picktop(work);
58 IRRef ref = regcost_ref(as->cost[r]); 92 IRRef ref = regcost_ref(as->cost[r]);
59 lua_assert(r != t); 93 lj_assertA(r != rd, "dest reg %d not free", rd);
60 if (ref < ASMREF_L) { 94 if (ref < ASMREF_L) {
61 int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); 95 intptr_t delta = (intptr_t)((uintptr_t)i -
96 (uintptr_t)(ra_iskref(ref) ? ra_krefk(as, ref) : get_kval(as, ref)));
62 if (checki16(delta)) { 97 if (checki16(delta)) {
63 emit_tsi(as, MIPSI_ADDIU, t, r, delta); 98 emit_tsi(as, MIPSI_AADDIU, rd, r, delta);
64 return 1; 99 return 1;
65 } 100 }
66 } 101 }
@@ -76,8 +111,8 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i)
76 emit_ti(as, MIPSI_LI, r, i); 111 emit_ti(as, MIPSI_LI, r, i);
77 } else { 112 } else {
78 if ((i & 0xffff)) { 113 if ((i & 0xffff)) {
79 int32_t jgl = i32ptr(J2G(as->J)); 114 intptr_t jgl = (intptr_t)(void *)J2G(as->J);
80 if ((uint32_t)(i-jgl) < 65536) { 115 if ((uintptr_t)(i-jgl) < 65536) {
81 emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768); 116 emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
82 return; 117 return;
83 } else if (emit_kdelta1(as, r, i)) { 118 } else if (emit_kdelta1(as, r, i)) {
@@ -92,16 +127,49 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i)
92 } 127 }
93} 128}
94 129
130#if LJ_64
131/* Load a 64 bit constant into a GPR. */
132static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
133{
134 if (checki32((int64_t)u64)) {
135 emit_loadi(as, r, (int32_t)u64);
136 } else {
137 uint64_t delta = u64 - (uint64_t)(void *)J2G(as->J);
138 if (delta < 65536) {
139 emit_tsi(as, MIPSI_DADDIU, r, RID_JGL, (int32_t)(delta-32768));
140 } else if (emit_kdelta1(as, r, (intptr_t)u64)) {
141 return;
142 } else {
143 /* TODO MIPSR6: Use DAHI & DATI. Caveat: sign-extension. */
144 if ((u64 & 0xffff)) {
145 emit_tsi(as, MIPSI_ORI, r, r, u64 & 0xffff);
146 }
147 if (((u64 >> 16) & 0xffff)) {
148 emit_dta(as, MIPSI_DSLL, r, r, 16);
149 emit_tsi(as, MIPSI_ORI, r, r, (u64 >> 16) & 0xffff);
150 emit_dta(as, MIPSI_DSLL, r, r, 16);
151 } else {
152 emit_dta(as, MIPSI_DSLL32, r, r, 0);
153 }
154 emit_loadi(as, r, (int32_t)(u64 >> 32));
155 }
156 /* TODO: There are probably more optimization opportunities. */
157 }
158}
159
160#define emit_loada(as, r, addr) emit_loadu64(as, (r), u64ptr((addr)))
161#else
95#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) 162#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
163#endif
96 164
97static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); 165static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
98static void ra_allockreg(ASMState *as, int32_t k, Reg r); 166static void ra_allockreg(ASMState *as, intptr_t k, Reg r);
99 167
100/* Get/set from constant pointer. */ 168/* Get/set from constant pointer. */
101static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow) 169static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
102{ 170{
103 int32_t jgl = i32ptr(J2G(as->J)); 171 intptr_t jgl = (intptr_t)(J2G(as->J));
104 int32_t i = i32ptr(p); 172 intptr_t i = (intptr_t)(p);
105 Reg base; 173 Reg base;
106 if ((uint32_t)(i-jgl) < 65536) { 174 if ((uint32_t)(i-jgl) < 65536) {
107 i = i-jgl-32768; 175 i = i-jgl-32768;
@@ -112,8 +180,24 @@ static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
112 emit_tsi(as, mi, r, base, i); 180 emit_tsi(as, mi, r, base, i);
113} 181}
114 182
115#define emit_loadn(as, r, tv) \ 183#if LJ_64
116 emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR) 184static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
185{
186 const uint64_t *k = &ir_k64(ir)->u64;
187 Reg r64 = r;
188 if (rset_test(RSET_FPR, r)) {
189 r64 = RID_TMP;
190 emit_tg(as, MIPSI_DMTC1, r64, r);
191 }
192 if ((uint32_t)((intptr_t)k-(intptr_t)J2G(as->J)) < 65536)
193 emit_lsptr(as, MIPSI_LD, r64, (void *)k, 0);
194 else
195 emit_loadu64(as, r64, *k);
196}
197#else
198#define emit_loadk64(as, r, ir) \
199 emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)&ir_knum((ir))->u64, RSET_GPR)
200#endif
117 201
118/* Get/set global_State fields. */ 202/* Get/set global_State fields. */
119static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs) 203static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
@@ -122,9 +206,9 @@ static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
122} 206}
123 207
124#define emit_getgl(as, r, field) \ 208#define emit_getgl(as, r, field) \
125 emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field)) 209 emit_lsglptr(as, MIPSI_AL, (r), (int32_t)offsetof(global_State, field))
126#define emit_setgl(as, r, field) \ 210#define emit_setgl(as, r, field) \
127 emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field)) 211 emit_lsglptr(as, MIPSI_AS, (r), (int32_t)offsetof(global_State, field))
128 212
129/* Trace number is determined from per-trace exit stubs. */ 213/* Trace number is determined from per-trace exit stubs. */
130#define emit_setvmstate(as, i) UNUSED(i) 214#define emit_setvmstate(as, i) UNUSED(i)
@@ -141,7 +225,7 @@ static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
141{ 225{
142 MCode *p = as->mcp; 226 MCode *p = as->mcp;
143 ptrdiff_t delta = target - p; 227 ptrdiff_t delta = target - p;
144 lua_assert(((delta + 0x8000) >> 16) == 0); 228 lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range");
145 *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu); 229 *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
146 as->mcp = p; 230 as->mcp = p;
147} 231}
@@ -152,16 +236,31 @@ static void emit_jmp(ASMState *as, MCode *target)
152 emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target)); 236 emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
153} 237}
154 238
155static void emit_call(ASMState *as, void *target) 239static void emit_call(ASMState *as, void *target, int needcfa)
156{ 240{
157 MCode *p = as->mcp; 241 MCode *p = as->mcp;
158 *--p = MIPSI_NOP; 242#if LJ_TARGET_MIPSR6
159 if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0) 243 ptrdiff_t delta = (char *)target - (char *)p;
244 if ((((delta>>2) + 0x02000000) >> 26) == 0) { /* Try compact call first. */
245 *--p = MIPSI_BALC | (((uintptr_t)delta >>2) & 0x03ffffffu);
246 as->mcp = p;
247 return;
248 }
249#endif
250 *--p = MIPSI_NOP; /* Delay slot. */
251 if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0) {
252#if !LJ_TARGET_MIPSR6
253 *--p = (((uintptr_t)target & 1) ? MIPSI_JALX : MIPSI_JAL) |
254 (((uintptr_t)target >>2) & 0x03ffffffu);
255#else
160 *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu); 256 *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
161 else /* Target out of range: need indirect call. */ 257#endif
258 } else { /* Target out of range: need indirect call. */
162 *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR); 259 *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
260 needcfa = 1;
261 }
163 as->mcp = p; 262 as->mcp = p;
164 ra_allockreg(as, i32ptr(target), RID_CFUNCADDR); 263 if (needcfa) ra_allockreg(as, (intptr_t)target, RID_CFUNCADDR);
165} 264}
166 265
167/* -- Emit generic operations --------------------------------------------- */ 266/* -- Emit generic operations --------------------------------------------- */
@@ -178,32 +277,32 @@ static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
178 emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src); 277 emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
179} 278}
180 279
181/* Generic load of register from stack slot. */ 280/* Generic load of register with base and (small) offset address. */
182static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 281static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
183{ 282{
184 if (r < RID_MAX_GPR) 283 if (r < RID_MAX_GPR)
185 emit_tsi(as, MIPSI_LW, r, RID_SP, ofs); 284 emit_tsi(as, irt_is64(ir->t) ? MIPSI_LD : MIPSI_LW, r, base, ofs);
186 else 285 else
187 emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1, 286 emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
188 (r & 31), RID_SP, ofs); 287 (r & 31), base, ofs);
189} 288}
190 289
191/* Generic store of register to stack slot. */ 290/* Generic store of register with base and (small) offset address. */
192static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 291static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
193{ 292{
194 if (r < RID_MAX_GPR) 293 if (r < RID_MAX_GPR)
195 emit_tsi(as, MIPSI_SW, r, RID_SP, ofs); 294 emit_tsi(as, irt_is64(ir->t) ? MIPSI_SD : MIPSI_SW, r, base, ofs);
196 else 295 else
197 emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1, 296 emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
198 (r&31), RID_SP, ofs); 297 (r&31), base, ofs);
199} 298}
200 299
201/* Add offset to pointer. */ 300/* Add offset to pointer. */
202static void emit_addptr(ASMState *as, Reg r, int32_t ofs) 301static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
203{ 302{
204 if (ofs) { 303 if (ofs) {
205 lua_assert(checki16(ofs)); 304 lj_assertA(checki16(ofs), "offset %d out of range", ofs);
206 emit_tsi(as, MIPSI_ADDIU, r, r, ofs); 305 emit_tsi(as, MIPSI_AADDIU, r, r, ofs);
207 } 306 }
208} 307}
209 308
diff --git a/src/lj_emit_ppc.h b/src/lj_emit_ppc.h
index e993d294..6bc74c04 100644
--- a/src/lj_emit_ppc.h
+++ b/src/lj_emit_ppc.h
@@ -41,13 +41,13 @@ static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
41 41
42static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n) 42static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
43{ 43{
44 lua_assert(n >= 0 && n < 32); 44 lj_assertA(n >= 0 && n < 32, "shift out or range");
45 emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n); 45 emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
46} 46}
47 47
48static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) 48static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
49{ 49{
50 lua_assert(n >= 0 && n < 32); 50 lj_assertA(n >= 0 && n < 32, "shift out or range");
51 emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31); 51 emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
52} 52}
53 53
@@ -57,17 +57,17 @@ static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
57#define emit_canremat(ref) ((ref) <= REF_BASE) 57#define emit_canremat(ref) ((ref) <= REF_BASE)
58 58
59/* Try to find a one step delta relative to another constant. */ 59/* Try to find a one step delta relative to another constant. */
60static int emit_kdelta1(ASMState *as, Reg t, int32_t i) 60static int emit_kdelta1(ASMState *as, Reg rd, int32_t i)
61{ 61{
62 RegSet work = ~as->freeset & RSET_GPR; 62 RegSet work = ~as->freeset & RSET_GPR;
63 while (work) { 63 while (work) {
64 Reg r = rset_picktop(work); 64 Reg r = rset_picktop(work);
65 IRRef ref = regcost_ref(as->cost[r]); 65 IRRef ref = regcost_ref(as->cost[r]);
66 lua_assert(r != t); 66 lj_assertA(r != rd, "dest reg %d not free", rd);
67 if (ref < ASMREF_L) { 67 if (ref < ASMREF_L) {
68 int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); 68 int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
69 if (checki16(delta)) { 69 if (checki16(delta)) {
70 emit_tai(as, PPCI_ADDI, t, r, delta); 70 emit_tai(as, PPCI_ADDI, rd, r, delta);
71 return 1; 71 return 1;
72 } 72 }
73 } 73 }
@@ -98,7 +98,7 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i)
98 98
99#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) 99#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
100 100
101static Reg ra_allock(ASMState *as, int32_t k, RegSet allow); 101static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
102 102
103/* Get/set from constant pointer. */ 103/* Get/set from constant pointer. */
104static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow) 104static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
@@ -115,8 +115,8 @@ static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
115 emit_tai(as, pi, r, base, i); 115 emit_tai(as, pi, r, base, i);
116} 116}
117 117
118#define emit_loadn(as, r, tv) \ 118#define emit_loadk64(as, r, ir) \
119 emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR) 119 emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)&ir_knum((ir))->u64, RSET_GPR)
120 120
121/* Get/set global_State fields. */ 121/* Get/set global_State fields. */
122static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs) 122static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
@@ -144,7 +144,7 @@ static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
144{ 144{
145 MCode *p = --as->mcp; 145 MCode *p = --as->mcp;
146 ptrdiff_t delta = (char *)target - (char *)p; 146 ptrdiff_t delta = (char *)target - (char *)p;
147 lua_assert(((delta + 0x8000) >> 16) == 0); 147 lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range");
148 pi ^= (delta & 0x8000) * (PPCF_Y/0x8000); 148 pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
149 *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu); 149 *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
150} 150}
@@ -186,22 +186,22 @@ static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
186 emit_fb(as, PPCI_FMR, dst, src); 186 emit_fb(as, PPCI_FMR, dst, src);
187} 187}
188 188
189/* Generic load of register from stack slot. */ 189/* Generic load of register with base and (small) offset address. */
190static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 190static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
191{ 191{
192 if (r < RID_MAX_GPR) 192 if (r < RID_MAX_GPR)
193 emit_tai(as, PPCI_LWZ, r, RID_SP, ofs); 193 emit_tai(as, PPCI_LWZ, r, base, ofs);
194 else 194 else
195 emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, RID_SP, ofs); 195 emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, base, ofs);
196} 196}
197 197
198/* Generic store of register to stack slot. */ 198/* Generic store of register with base and (small) offset address. */
199static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 199static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
200{ 200{
201 if (r < RID_MAX_GPR) 201 if (r < RID_MAX_GPR)
202 emit_tai(as, PPCI_STW, r, RID_SP, ofs); 202 emit_tai(as, PPCI_STW, r, base, ofs);
203 else 203 else
204 emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, RID_SP, ofs); 204 emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, base, ofs);
205} 205}
206 206
207/* Emit a compare (for equality) with a constant operand. */ 207/* Emit a compare (for equality) with a constant operand. */
diff --git a/src/lj_emit_x86.h b/src/lj_emit_x86.h
index 7bbc695b..9173a299 100644
--- a/src/lj_emit_x86.h
+++ b/src/lj_emit_x86.h
@@ -13,10 +13,17 @@
13 if (rex != 0x40) *--(p) = rex; } 13 if (rex != 0x40) *--(p) = rex; }
14#define FORCE_REX 0x200 14#define FORCE_REX 0x200
15#define REX_64 (FORCE_REX|0x080000) 15#define REX_64 (FORCE_REX|0x080000)
16#define VEX_64 0x800000
16#else 17#else
17#define REXRB(p, rr, rb) ((void)0) 18#define REXRB(p, rr, rb) ((void)0)
18#define FORCE_REX 0 19#define FORCE_REX 0
19#define REX_64 0 20#define REX_64 0
21#define VEX_64 0
22#endif
23#if LJ_GC64
24#define REX_GC64 REX_64
25#else
26#define REX_GC64 0
20#endif 27#endif
21 28
22#define emit_i8(as, i) (*--as->mcp = (MCode)(i)) 29#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
@@ -31,7 +38,14 @@ static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
31 MCode *p, int delta) 38 MCode *p, int delta)
32{ 39{
33 int n = (int8_t)xo; 40 int n = (int8_t)xo;
34#if defined(__GNUC__) 41 if (n == -60) { /* VEX-encoded instruction */
42#if LJ_64
43 xo ^= (((rr>>1)&4)+((rx>>2)&2)+((rb>>3)&1))<<13;
44#endif
45 *(uint32_t *)(p+delta-5) = (uint32_t)xo;
46 return p+delta-5;
47 }
48#if defined(__GNUC__) || defined(__clang__)
35 if (__builtin_constant_p(xo) && n == -2) 49 if (__builtin_constant_p(xo) && n == -2)
36 p[delta-2] = (MCode)(xo >> 24); 50 p[delta-2] = (MCode)(xo >> 24);
37 else if (__builtin_constant_p(xo) && n == -3) 51 else if (__builtin_constant_p(xo) && n == -3)
@@ -78,33 +92,24 @@ static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
78/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */ 92/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
79static int32_t ptr2addr(const void *p) 93static int32_t ptr2addr(const void *p)
80{ 94{
81 lua_assert((uintptr_t)p < (uintptr_t)0x80000000); 95 lj_assertX((uintptr_t)p < (uintptr_t)0x80000000, "pointer outside 2G range");
82 return i32ptr(p); 96 return i32ptr(p);
83} 97}
84#else 98#else
85#define ptr2addr(p) (i32ptr((p))) 99#define ptr2addr(p) (i32ptr((p)))
86#endif 100#endif
87 101
88/* op r, [addr] */
89static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
90{
91 MCode *p = as->mcp;
92 *(int32_t *)(p-4) = ptr2addr(addr);
93#if LJ_64
94 p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
95 as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
96#else
97 as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
98#endif
99}
100
101/* op r, [base+ofs] */ 102/* op r, [base+ofs] */
102static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs) 103static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
103{ 104{
104 MCode *p = as->mcp; 105 MCode *p = as->mcp;
105 x86Mode mode; 106 x86Mode mode;
106 if (ra_hasreg(rb)) { 107 if (ra_hasreg(rb)) {
107 if (ofs == 0 && (rb&7) != RID_EBP) { 108 if (LJ_GC64 && rb == RID_RIP) {
109 mode = XM_OFS0;
110 p -= 4;
111 *(int32_t *)p = ofs;
112 } else if (ofs == 0 && (rb&7) != RID_EBP) {
108 mode = XM_OFS0; 113 mode = XM_OFS0;
109 } else if (checki8(ofs)) { 114 } else if (checki8(ofs)) {
110 *--p = (MCode)ofs; 115 *--p = (MCode)ofs;
@@ -202,6 +207,11 @@ static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
202 *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP); 207 *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
203 rb = RID_ESP; 208 rb = RID_ESP;
204#endif 209#endif
210 } else if (LJ_GC64 && rb == RID_RIP) {
211 lj_assertA(as->mrm.idx == RID_NONE, "RIP-rel mrm cannot have index");
212 mode = XM_OFS0;
213 p -= 4;
214 *(int32_t *)p = as->mrm.ofs;
205 } else { 215 } else {
206 if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) { 216 if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
207 mode = XM_OFS0; 217 mode = XM_OFS0;
@@ -241,10 +251,6 @@ static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
241 251
242/* -- Emit loads/stores --------------------------------------------------- */ 252/* -- Emit loads/stores --------------------------------------------------- */
243 253
244/* Instruction selection for XMM moves. */
245#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
246#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
247
248/* mov [base+ofs], i */ 254/* mov [base+ofs], i */
249static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i) 255static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
250{ 256{
@@ -259,8 +265,8 @@ static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
259/* Get/set global_State fields. */ 265/* Get/set global_State fields. */
260#define emit_opgl(as, xo, r, field) \ 266#define emit_opgl(as, xo, r, field) \
261 emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field) 267 emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
262#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field) 268#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r)|REX_GC64, field)
263#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field) 269#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r)|REX_GC64, field)
264 270
265#define emit_setvmstate(as, i) \ 271#define emit_setvmstate(as, i) \
266 (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate)) 272 (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
@@ -285,9 +291,21 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i)
285 } 291 }
286} 292}
287 293
294#if LJ_GC64
295#define dispofs(as, k) \
296 ((intptr_t)((uintptr_t)(k) - (uintptr_t)J2GG(as->J)->dispatch))
297#define mcpofs(as, k) \
298 ((intptr_t)((uintptr_t)(k) - (uintptr_t)as->mcp))
299#define mctopofs(as, k) \
300 ((intptr_t)((uintptr_t)(k) - (uintptr_t)as->mctop))
301/* mov r, addr */
302#define emit_loada(as, r, addr) \
303 emit_loadu64(as, (r), (uintptr_t)(addr))
304#else
288/* mov r, addr */ 305/* mov r, addr */
289#define emit_loada(as, r, addr) \ 306#define emit_loada(as, r, addr) \
290 emit_loadi(as, (r), ptr2addr((addr))) 307 emit_loadi(as, (r), ptr2addr((addr)))
308#endif
291 309
292#if LJ_64 310#if LJ_64
293/* mov r, imm64 or shorter 32 bit extended load. */ 311/* mov r, imm64 or shorter 32 bit extended load. */
@@ -299,6 +317,15 @@ static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
299 MCode *p = as->mcp; 317 MCode *p = as->mcp;
300 *(int32_t *)(p-4) = (int32_t)u64; 318 *(int32_t *)(p-4) = (int32_t)u64;
301 as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4); 319 as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
320#if LJ_GC64
321 } else if (checki32(dispofs(as, u64))) {
322 emit_rmro(as, XO_LEA, r|REX_64, RID_DISPATCH, (int32_t)dispofs(as, u64));
323 } else if (checki32(mcpofs(as, u64)) && checki32(mctopofs(as, u64))) {
324 /* Since as->realign assumes the code size doesn't change, check
325 ** RIP-relative addressing reachability for both as->mcp and as->mctop.
326 */
327 emit_rmro(as, XO_LEA, r|REX_64, RID_RIP, (int32_t)mcpofs(as, u64));
328#endif
302 } else { /* Full-size 64 bit load. */ 329 } else { /* Full-size 64 bit load. */
303 MCode *p = as->mcp; 330 MCode *p = as->mcp;
304 *(uint64_t *)(p-8) = u64; 331 *(uint64_t *)(p-8) = u64;
@@ -310,13 +337,90 @@ static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
310} 337}
311#endif 338#endif
312 339
313/* movsd r, [&tv->n] / xorps r, r */ 340/* op r, [addr] */
314static void emit_loadn(ASMState *as, Reg r, cTValue *tv) 341static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
315{ 342{
316 if (tvispzero(tv)) /* Use xor only for +0. */ 343#if LJ_GC64
317 emit_rr(as, XO_XORPS, r, r); 344 if (checki32(dispofs(as, addr))) {
318 else 345 emit_rmro(as, xo, rr, RID_DISPATCH, (int32_t)dispofs(as, addr));
319 emit_rma(as, XMM_MOVRM(as), r, &tv->n); 346 } else if (checki32(mcpofs(as, addr)) && checki32(mctopofs(as, addr))) {
347 emit_rmro(as, xo, rr, RID_RIP, (int32_t)mcpofs(as, addr));
348 } else if (!checki32((intptr_t)addr)) {
349 Reg ra = (rr & 15);
350 if (xo != XO_MOV) {
351 /* We can't allocate a register here. Use and restore DISPATCH. Ugly. */
352 uint64_t dispaddr = (uintptr_t)J2GG(as->J)->dispatch;
353 uint8_t i8 = xo == XO_GROUP3b ? *as->mcp++ : 0;
354 ra = RID_DISPATCH;
355 if (checku32(dispaddr)) {
356 emit_loadi(as, ra, (int32_t)dispaddr);
357 } else { /* Full-size 64 bit load. */
358 MCode *p = as->mcp;
359 *(uint64_t *)(p-8) = dispaddr;
360 p[-9] = (MCode)(XI_MOVri+(ra&7));
361 p[-10] = 0x48 + ((ra>>3)&1);
362 p -= 10;
363 as->mcp = p;
364 }
365 if (xo == XO_GROUP3b) emit_i8(as, i8);
366 }
367 emit_rmro(as, xo, rr, ra, 0);
368 emit_loadu64(as, ra, (uintptr_t)addr);
369 } else
370#endif
371 {
372 MCode *p = as->mcp;
373 *(int32_t *)(p-4) = ptr2addr(addr);
374#if LJ_64
375 p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
376 as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
377#else
378 as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
379#endif
380 }
381}
382
383/* Load 64 bit IR constant into register. */
384static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
385{
386 Reg r64;
387 x86Op xo;
388 const uint64_t *k = &ir_k64(ir)->u64;
389 if (rset_test(RSET_FPR, r)) {
390 r64 = r;
391 xo = XO_MOVSD;
392 } else {
393 r64 = r | REX_64;
394 xo = XO_MOV;
395 }
396 if (*k == 0) {
397 emit_rr(as, rset_test(RSET_FPR, r) ? XO_XORPS : XO_ARITH(XOg_XOR), r, r);
398#if LJ_GC64
399 } else if (checki32((intptr_t)k) || checki32(dispofs(as, k)) ||
400 (checki32(mcpofs(as, k)) && checki32(mctopofs(as, k)))) {
401 emit_rma(as, xo, r64, k);
402 } else {
403 if (ir->i) {
404 lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
405 "bad interned 64 bit constant");
406 } else if (as->curins <= as->stopins && rset_test(RSET_GPR, r)) {
407 emit_loadu64(as, r, *k);
408 return;
409 } else {
410 /* If all else fails, add the FP constant at the MCode area bottom. */
411 while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
412 *(uint64_t *)as->mcbot = *k;
413 ir->i = (int32_t)(as->mctop - as->mcbot);
414 as->mcbot += 8;
415 as->mclim = as->mcbot + MCLIM_REDZONE;
416 lj_mcode_commitbot(as->J, as->mcbot);
417 }
418 emit_rmro(as, xo, r64, RID_RIP, (int32_t)mcpofs(as, as->mctop - ir->i));
419#else
420 } else {
421 emit_rma(as, xo, r64, k);
422#endif
423 }
320} 424}
321 425
322/* -- Emit control-flow instructions -------------------------------------- */ 426/* -- Emit control-flow instructions -------------------------------------- */
@@ -330,7 +434,7 @@ static void emit_sjmp(ASMState *as, MCLabel target)
330{ 434{
331 MCode *p = as->mcp; 435 MCode *p = as->mcp;
332 ptrdiff_t delta = target - p; 436 ptrdiff_t delta = target - p;
333 lua_assert(delta == (int8_t)delta); 437 lj_assertA(delta == (int8_t)delta, "short jump target out of range");
334 p[-1] = (MCode)(int8_t)delta; 438 p[-1] = (MCode)(int8_t)delta;
335 p[-2] = XI_JMPs; 439 p[-2] = XI_JMPs;
336 as->mcp = p - 2; 440 as->mcp = p - 2;
@@ -342,7 +446,7 @@ static void emit_sjcc(ASMState *as, int cc, MCLabel target)
342{ 446{
343 MCode *p = as->mcp; 447 MCode *p = as->mcp;
344 ptrdiff_t delta = target - p; 448 ptrdiff_t delta = target - p;
345 lua_assert(delta == (int8_t)delta); 449 lj_assertA(delta == (int8_t)delta, "short jump target out of range");
346 p[-1] = (MCode)(int8_t)delta; 450 p[-1] = (MCode)(int8_t)delta;
347 p[-2] = (MCode)(XI_JCCs+(cc&15)); 451 p[-2] = (MCode)(XI_JCCs+(cc&15));
348 as->mcp = p - 2; 452 as->mcp = p - 2;
@@ -368,10 +472,11 @@ static void emit_sfixup(ASMState *as, MCLabel source)
368#define emit_label(as) ((as)->mcp) 472#define emit_label(as) ((as)->mcp)
369 473
370/* Compute relative 32 bit offset for jump and call instructions. */ 474/* Compute relative 32 bit offset for jump and call instructions. */
371static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) 475static LJ_AINLINE int32_t jmprel(jit_State *J, MCode *p, MCode *target)
372{ 476{
373 ptrdiff_t delta = target - p; 477 ptrdiff_t delta = target - p;
374 lua_assert(delta == (int32_t)delta); 478 UNUSED(J);
479 lj_assertJ(delta == (int32_t)delta, "jump target out of range");
375 return (int32_t)delta; 480 return (int32_t)delta;
376} 481}
377 482
@@ -379,7 +484,7 @@ static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
379static void emit_jcc(ASMState *as, int cc, MCode *target) 484static void emit_jcc(ASMState *as, int cc, MCode *target)
380{ 485{
381 MCode *p = as->mcp; 486 MCode *p = as->mcp;
382 *(int32_t *)(p-4) = jmprel(p, target); 487 *(int32_t *)(p-4) = jmprel(as->J, p, target);
383 p[-5] = (MCode)(XI_JCCn+(cc&15)); 488 p[-5] = (MCode)(XI_JCCn+(cc&15));
384 p[-6] = 0x0f; 489 p[-6] = 0x0f;
385 as->mcp = p - 6; 490 as->mcp = p - 6;
@@ -389,7 +494,7 @@ static void emit_jcc(ASMState *as, int cc, MCode *target)
389static void emit_jmp(ASMState *as, MCode *target) 494static void emit_jmp(ASMState *as, MCode *target)
390{ 495{
391 MCode *p = as->mcp; 496 MCode *p = as->mcp;
392 *(int32_t *)(p-4) = jmprel(p, target); 497 *(int32_t *)(p-4) = jmprel(as->J, p, target);
393 p[-5] = XI_JMP; 498 p[-5] = XI_JMP;
394 as->mcp = p - 5; 499 as->mcp = p - 5;
395} 500}
@@ -406,7 +511,7 @@ static void emit_call_(ASMState *as, MCode *target)
406 return; 511 return;
407 } 512 }
408#endif 513#endif
409 *(int32_t *)(p-4) = jmprel(p, target); 514 *(int32_t *)(p-4) = jmprel(as->J, p, target);
410 p[-5] = XI_CALL; 515 p[-5] = XI_CALL;
411 as->mcp = p - 5; 516 as->mcp = p - 5;
412} 517}
@@ -418,8 +523,10 @@ static void emit_call_(ASMState *as, MCode *target)
418/* Use 64 bit operations to handle 64 bit IR types. */ 523/* Use 64 bit operations to handle 64 bit IR types. */
419#if LJ_64 524#if LJ_64
420#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0)) 525#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
526#define VEX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? VEX_64 : 0))
421#else 527#else
422#define REX_64IR(ir, r) (r) 528#define REX_64IR(ir, r) (r)
529#define VEX_64IR(ir, r) (r)
423#endif 530#endif
424 531
425/* Generic move between two regs. */ 532/* Generic move between two regs. */
@@ -429,35 +536,32 @@ static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
429 if (dst < RID_MAX_GPR) 536 if (dst < RID_MAX_GPR)
430 emit_rr(as, XO_MOV, REX_64IR(ir, dst), src); 537 emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
431 else 538 else
432 emit_rr(as, XMM_MOVRR(as), dst, src); 539 emit_rr(as, XO_MOVAPS, dst, src);
433} 540}
434 541
435/* Generic load of register from stack slot. */ 542/* Generic load of register with base and (small) offset address. */
436static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 543static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
437{ 544{
438 if (r < RID_MAX_GPR) 545 if (r < RID_MAX_GPR)
439 emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs); 546 emit_rmro(as, XO_MOV, REX_64IR(ir, r), base, ofs);
440 else 547 else
441 emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs); 548 emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, r, base, ofs);
442} 549}
443 550
444/* Generic store of register to stack slot. */ 551/* Generic store of register with base and (small) offset address. */
445static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs) 552static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
446{ 553{
447 if (r < RID_MAX_GPR) 554 if (r < RID_MAX_GPR)
448 emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs); 555 emit_rmro(as, XO_MOVto, REX_64IR(ir, r), base, ofs);
449 else 556 else
450 emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs); 557 emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, base, ofs);
451} 558}
452 559
453/* Add offset to pointer. */ 560/* Add offset to pointer. */
454static void emit_addptr(ASMState *as, Reg r, int32_t ofs) 561static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
455{ 562{
456 if (ofs) { 563 if (ofs) {
457 if ((as->flags & JIT_F_LEA_AGU)) 564 emit_gri(as, XG_ARITHi(XOg_ADD), r|REX_GC64, ofs);
458 emit_rmro(as, XO_LEA, r, r, ofs);
459 else
460 emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
461 } 565 }
462} 566}
463 567
diff --git a/src/lj_err.c b/src/lj_err.c
index ad3394df..47f88740 100644
--- a/src/lj_err.c
+++ b/src/lj_err.c
@@ -16,6 +16,7 @@
16#include "lj_ff.h" 16#include "lj_ff.h"
17#include "lj_trace.h" 17#include "lj_trace.h"
18#include "lj_vm.h" 18#include "lj_vm.h"
19#include "lj_strfmt.h"
19 20
20/* 21/*
21** LuaJIT can either use internal or external frame unwinding: 22** LuaJIT can either use internal or external frame unwinding:
@@ -45,7 +46,8 @@
45** the wrapper function feature. Lua errors thrown through C++ frames 46** the wrapper function feature. Lua errors thrown through C++ frames
46** cannot be caught by C++ code and C++ destructors are not run. 47** cannot be caught by C++ code and C++ destructors are not run.
47** 48**
48** EXT is the default on x64 systems, INT is the default on all other systems. 49** EXT is the default on x64 systems and on Windows, INT is the default on all
50** other systems.
49** 51**
50** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack 52** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack
51** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled 53** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled
@@ -54,15 +56,14 @@
54** and all C libraries that have callbacks which may be used to call back 56** and all C libraries that have callbacks which may be used to call back
55** into Lua. C++ code must *not* be compiled with -fno-exceptions. 57** into Lua. C++ code must *not* be compiled with -fno-exceptions.
56** 58**
57** EXT cannot be enabled on WIN32 since system exceptions use code-driven SEH.
58** EXT is mandatory on WIN64 since the calling convention has an abundance 59** EXT is mandatory on WIN64 since the calling convention has an abundance
59** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15). 60** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15).
60** The POSIX/x64 interpreter only saves r12/r13 for INT (e.g. PS4). 61** The POSIX/x64 interpreter only saves r12/r13 for INT (e.g. PS4).
61*/ 62*/
62 63
63#if defined(__GNUC__) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) && !LJ_NO_UNWIND 64#if (defined(__GNUC__) || defined(__clang__)) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) && !LJ_NO_UNWIND
64#define LJ_UNWIND_EXT 1 65#define LJ_UNWIND_EXT 1
65#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS 66#elif LJ_TARGET_WINDOWS
66#define LJ_UNWIND_EXT 1 67#define LJ_UNWIND_EXT 1
67#endif 68#endif
68 69
@@ -98,14 +99,14 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
98 TValue *top = restorestack(L, -nres); 99 TValue *top = restorestack(L, -nres);
99 if (frame < top) { /* Frame reached? */ 100 if (frame < top) { /* Frame reached? */
100 if (errcode) { 101 if (errcode) {
101 L->cframe = cframe_prev(cf);
102 L->base = frame+1; 102 L->base = frame+1;
103 L->cframe = cframe_prev(cf);
103 unwindstack(L, top); 104 unwindstack(L, top);
104 } 105 }
105 return cf; 106 return cf;
106 } 107 }
107 } 108 }
108 if (frame <= tvref(L->stack)) 109 if (frame <= tvref(L->stack)+LJ_FR2)
109 break; 110 break;
110 switch (frame_typep(frame)) { 111 switch (frame_typep(frame)) {
111 case FRAME_LUA: /* Lua frame. */ 112 case FRAME_LUA: /* Lua frame. */
@@ -113,14 +114,12 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
113 frame = frame_prevl(frame); 114 frame = frame_prevl(frame);
114 break; 115 break;
115 case FRAME_C: /* C frame. */ 116 case FRAME_C: /* C frame. */
116#if LJ_HASFFI
117 unwind_c: 117 unwind_c:
118#endif
119#if LJ_UNWIND_EXT 118#if LJ_UNWIND_EXT
120 if (errcode) { 119 if (errcode) {
121 L->cframe = cframe_prev(cf);
122 L->base = frame_prevd(frame) + 1; 120 L->base = frame_prevd(frame) + 1;
123 unwindstack(L, frame); 121 L->cframe = cframe_prev(cf);
122 unwindstack(L, frame - LJ_FR2);
124 } else if (cf != stopcf) { 123 } else if (cf != stopcf) {
125 cf = cframe_prev(cf); 124 cf = cframe_prev(cf);
126 frame = frame_prevd(frame); 125 frame = frame_prevd(frame);
@@ -143,16 +142,14 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
143 return cf; 142 return cf;
144 } 143 }
145 if (errcode) { 144 if (errcode) {
146 L->cframe = cframe_prev(cf);
147 L->base = frame_prevd(frame) + 1; 145 L->base = frame_prevd(frame) + 1;
148 unwindstack(L, frame); 146 L->cframe = cframe_prev(cf);
147 unwindstack(L, frame - LJ_FR2);
149 } 148 }
150 return cf; 149 return cf;
151 case FRAME_CONT: /* Continuation frame. */ 150 case FRAME_CONT: /* Continuation frame. */
152#if LJ_HASFFI 151 if (frame_iscont_fficb(frame))
153 if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
154 goto unwind_c; 152 goto unwind_c;
155#endif
156 /* fallthrough */ 153 /* fallthrough */
157 case FRAME_VARG: /* Vararg frame. */ 154 case FRAME_VARG: /* Vararg frame. */
158 frame = frame_prevd(frame); 155 frame = frame_prevd(frame);
@@ -166,8 +163,8 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
166 } 163 }
167 if (frame_typep(frame) == FRAME_PCALL) 164 if (frame_typep(frame) == FRAME_PCALL)
168 hook_leave(G(L)); 165 hook_leave(G(L));
169 L->cframe = cf;
170 L->base = frame_prevd(frame) + 1; 166 L->base = frame_prevd(frame) + 1;
167 L->cframe = cf;
171 unwindstack(L, L->base); 168 unwindstack(L, L->base);
172 } 169 }
173 return (void *)((intptr_t)cf | CFRAME_UNWIND_FF); 170 return (void *)((intptr_t)cf | CFRAME_UNWIND_FF);
@@ -175,8 +172,8 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
175 } 172 }
176 /* No C frame. */ 173 /* No C frame. */
177 if (errcode) { 174 if (errcode) {
175 L->base = tvref(L->stack)+1+LJ_FR2;
178 L->cframe = NULL; 176 L->cframe = NULL;
179 L->base = tvref(L->stack)+1;
180 unwindstack(L, L->base); 177 unwindstack(L, L->base);
181 if (G(L)->panic) 178 if (G(L)->panic)
182 G(L)->panic(L); 179 G(L)->panic(L);
@@ -187,20 +184,13 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
187 184
188/* -- External frame unwinding -------------------------------------------- */ 185/* -- External frame unwinding -------------------------------------------- */
189 186
190#if defined(__GNUC__) && !LJ_NO_UNWIND && !LJ_ABI_WIN 187#if (defined(__GNUC__) || defined(__clang__)) && !LJ_NO_UNWIND && !LJ_ABI_WIN
191 188
192/* 189/*
193** We have to use our own definitions instead of the mandatory (!) unwind.h, 190** We have to use our own definitions instead of the mandatory (!) unwind.h,
194** since various OS, distros and compilers mess up the header installation. 191** since various OS, distros and compilers mess up the header installation.
195*/ 192*/
196 193
197typedef struct _Unwind_Exception
198{
199 uint64_t exclass;
200 void (*excleanup)(int, struct _Unwind_Exception *);
201 uintptr_t p1, p2;
202} __attribute__((__aligned__)) _Unwind_Exception;
203
204typedef struct _Unwind_Context _Unwind_Context; 194typedef struct _Unwind_Context _Unwind_Context;
205 195
206#define _URC_OK 0 196#define _URC_OK 0
@@ -210,8 +200,20 @@ typedef struct _Unwind_Context _Unwind_Context;
210#define _URC_CONTINUE_UNWIND 8 200#define _URC_CONTINUE_UNWIND 8
211#define _URC_FAILURE 9 201#define _URC_FAILURE 9
212 202
203#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */
204#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c))
205#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff)
206#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff))
207
213#if !LJ_TARGET_ARM 208#if !LJ_TARGET_ARM
214 209
210typedef struct _Unwind_Exception
211{
212 uint64_t exclass;
213 void (*excleanup)(int, struct _Unwind_Exception *);
214 uintptr_t p1, p2;
215} __attribute__((__aligned__)) _Unwind_Exception;
216
215extern uintptr_t _Unwind_GetCFA(_Unwind_Context *); 217extern uintptr_t _Unwind_GetCFA(_Unwind_Context *);
216extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t); 218extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t);
217extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t); 219extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t);
@@ -223,11 +225,6 @@ extern int _Unwind_RaiseException(_Unwind_Exception *);
223#define _UA_HANDLER_FRAME 4 225#define _UA_HANDLER_FRAME 4
224#define _UA_FORCE_UNWIND 8 226#define _UA_FORCE_UNWIND 8
225 227
226#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */
227#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c))
228#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff)
229#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff))
230
231/* DWARF2 personality handler referenced from interpreter .eh_frame. */ 228/* DWARF2 personality handler referenced from interpreter .eh_frame. */
232LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions, 229LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions,
233 uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx) 230 uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx)
@@ -301,10 +298,22 @@ static void err_raise_ext(int errcode)
301} 298}
302#endif 299#endif
303 300
304#else 301#else /* LJ_TARGET_ARM */
305 302
306extern void _Unwind_DeleteException(void *); 303#define _US_VIRTUAL_UNWIND_FRAME 0
307extern int __gnu_unwind_frame (void *, _Unwind_Context *); 304#define _US_UNWIND_FRAME_STARTING 1
305#define _US_ACTION_MASK 3
306#define _US_FORCE_UNWIND 8
307
308typedef struct _Unwind_Control_Block _Unwind_Control_Block;
309
310struct _Unwind_Control_Block {
311 uint64_t exclass;
312 uint32_t misc[20];
313};
314
315extern int _Unwind_RaiseException(_Unwind_Control_Block *);
316extern int __gnu_unwind_frame(_Unwind_Control_Block *, _Unwind_Context *);
308extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *); 317extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *);
309extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *); 318extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *);
310 319
@@ -320,35 +329,58 @@ static inline void _Unwind_SetGR(_Unwind_Context *ctx, int r, uint32_t v)
320 _Unwind_VRS_Set(ctx, 0, r, 0, &v); 329 _Unwind_VRS_Set(ctx, 0, r, 0, &v);
321} 330}
322 331
323#define _US_VIRTUAL_UNWIND_FRAME 0 332extern void lj_vm_unwind_ext(void);
324#define _US_UNWIND_FRAME_STARTING 1
325#define _US_ACTION_MASK 3
326#define _US_FORCE_UNWIND 8
327 333
328/* ARM unwinder personality handler referenced from interpreter .ARM.extab. */ 334/* ARM unwinder personality handler referenced from interpreter .ARM.extab. */
329LJ_FUNCA int lj_err_unwind_arm(int state, void *ucb, _Unwind_Context *ctx) 335LJ_FUNCA int lj_err_unwind_arm(int state, _Unwind_Control_Block *ucb,
336 _Unwind_Context *ctx)
330{ 337{
331 void *cf = (void *)_Unwind_GetGR(ctx, 13); 338 void *cf = (void *)_Unwind_GetGR(ctx, 13);
332 lua_State *L = cframe_L(cf); 339 lua_State *L = cframe_L(cf);
333 if ((state & _US_ACTION_MASK) == _US_VIRTUAL_UNWIND_FRAME) { 340 int errcode;
334 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); 341
342 switch ((state & _US_ACTION_MASK)) {
343 case _US_VIRTUAL_UNWIND_FRAME:
344 if ((state & _US_FORCE_UNWIND)) break;
335 return _URC_HANDLER_FOUND; 345 return _URC_HANDLER_FOUND;
336 } 346 case _US_UNWIND_FRAME_STARTING:
337 if ((state&(_US_ACTION_MASK|_US_FORCE_UNWIND)) == _US_UNWIND_FRAME_STARTING) { 347 if (LJ_UEXCLASS_CHECK(ucb->exclass)) {
338 _Unwind_DeleteException(ucb); 348 errcode = LJ_UEXCLASS_ERRCODE(ucb->exclass);
339 _Unwind_SetGR(ctx, 15, (uint32_t)(void *)lj_err_throw); 349 } else {
340 _Unwind_SetGR(ctx, 0, (uint32_t)L); 350 errcode = LUA_ERRRUN;
341 _Unwind_SetGR(ctx, 1, (uint32_t)LUA_ERRRUN); 351 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
352 }
353 cf = err_unwind(L, cf, errcode);
354 if ((state & _US_FORCE_UNWIND) || cf == NULL) break;
355 _Unwind_SetGR(ctx, 15, (uint32_t)lj_vm_unwind_ext);
356 _Unwind_SetGR(ctx, 0, (uint32_t)ucb);
357 _Unwind_SetGR(ctx, 1, (uint32_t)errcode);
358 _Unwind_SetGR(ctx, 2, cframe_unwind_ff(cf) ?
359 (uint32_t)lj_vm_unwind_ff_eh :
360 (uint32_t)lj_vm_unwind_c_eh);
342 return _URC_INSTALL_CONTEXT; 361 return _URC_INSTALL_CONTEXT;
362 default:
363 return _URC_FAILURE;
343 } 364 }
344 if (__gnu_unwind_frame(ucb, ctx) != _URC_OK) 365 if (__gnu_unwind_frame(ucb, ctx) != _URC_OK)
345 return _URC_FAILURE; 366 return _URC_FAILURE;
346 return _URC_CONTINUE_UNWIND; 367 return _URC_CONTINUE_UNWIND;
347} 368}
348 369
370#if LJ_UNWIND_EXT
371static __thread _Unwind_Control_Block static_uex;
372
373static void err_raise_ext(int errcode)
374{
375 memset(&static_uex, 0, sizeof(static_uex));
376 static_uex.exclass = LJ_UEXCLASS_MAKE(errcode);
377 _Unwind_RaiseException(&static_uex);
378}
349#endif 379#endif
350 380
351#elif LJ_TARGET_X64 && LJ_ABI_WIN 381#endif /* LJ_TARGET_ARM */
382
383#elif LJ_ABI_WIN
352 384
353/* 385/*
354** Someone in Redmond owes me several days of my life. A lot of this is 386** Someone in Redmond owes me several days of my life. A lot of this is
@@ -366,6 +398,7 @@ LJ_FUNCA int lj_err_unwind_arm(int state, void *ucb, _Unwind_Context *ctx)
366#define WIN32_LEAN_AND_MEAN 398#define WIN32_LEAN_AND_MEAN
367#include <windows.h> 399#include <windows.h>
368 400
401#if LJ_TARGET_X64
369/* Taken from: http://www.nynaeve.net/?p=99 */ 402/* Taken from: http://www.nynaeve.net/?p=99 */
370typedef struct UndocumentedDispatcherContext { 403typedef struct UndocumentedDispatcherContext {
371 ULONG64 ControlPc; 404 ULONG64 ControlPc;
@@ -380,11 +413,14 @@ typedef struct UndocumentedDispatcherContext {
380 ULONG ScopeIndex; 413 ULONG ScopeIndex;
381 ULONG Fill0; 414 ULONG Fill0;
382} UndocumentedDispatcherContext; 415} UndocumentedDispatcherContext;
416#else
417typedef void *UndocumentedDispatcherContext;
418#endif
383 419
384/* Another wild guess. */ 420/* Another wild guess. */
385extern void __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow); 421extern void __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow);
386 422
387#ifdef MINGW_SDK_INIT 423#if LJ_TARGET_X64 && defined(MINGW_SDK_INIT)
388/* Workaround for broken MinGW64 declaration. */ 424/* Workaround for broken MinGW64 declaration. */
389VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx"); 425VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx");
390#define RtlUnwindEx RtlUnwindEx_FIXED 426#define RtlUnwindEx RtlUnwindEx_FIXED
@@ -398,10 +434,15 @@ VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx");
398#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff) 434#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff)
399#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff)) 435#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff))
400 436
401/* Win64 exception handler for interpreter frame. */ 437/* Windows exception handler for interpreter frame. */
402LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec, 438LJ_FUNCA int lj_err_unwind_win(EXCEPTION_RECORD *rec,
403 void *cf, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch) 439 void *f, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch)
404{ 440{
441#if LJ_TARGET_X64
442 void *cf = f;
443#else
444 void *cf = (char *)f - CFRAME_OFS_SEH;
445#endif
405 lua_State *L = cframe_L(cf); 446 lua_State *L = cframe_L(cf);
406 int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ? 447 int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ?
407 LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN; 448 LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN;
@@ -419,8 +460,9 @@ LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec,
419 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP)); 460 setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
420 } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) { 461 } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) {
421 /* Don't catch access violations etc. */ 462 /* Don't catch access violations etc. */
422 return ExceptionContinueSearch; 463 return 1; /* ExceptionContinueSearch */
423 } 464 }
465#if LJ_TARGET_X64
424 /* Unwind the stack and call all handlers for all lower C frames 466 /* Unwind the stack and call all handlers for all lower C frames
425 ** (including ourselves) again with EH_UNWINDING set. Then set 467 ** (including ourselves) again with EH_UNWINDING set. Then set
426 ** rsp = cf, rax = errcode and jump to the specified target. 468 ** rsp = cf, rax = errcode and jump to the specified target.
@@ -430,9 +472,21 @@ LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec,
430 lj_vm_unwind_c_eh), 472 lj_vm_unwind_c_eh),
431 rec, (void *)(uintptr_t)errcode, ctx, dispatch->HistoryTable); 473 rec, (void *)(uintptr_t)errcode, ctx, dispatch->HistoryTable);
432 /* RtlUnwindEx should never return. */ 474 /* RtlUnwindEx should never return. */
475#else
476 UNUSED(ctx);
477 UNUSED(dispatch);
478 /* Call all handlers for all lower C frames (including ourselves) again
479 ** with EH_UNWINDING set. Then call the specified function, passing cf
480 ** and errcode.
481 */
482 lj_vm_rtlunwind(cf, (void *)rec,
483 (cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ?
484 (void *)lj_vm_unwind_ff : (void *)lj_vm_unwind_c, errcode);
485 /* lj_vm_rtlunwind does not return. */
486#endif
433 } 487 }
434 } 488 }
435 return ExceptionContinueSearch; 489 return 1; /* ExceptionContinueSearch */
436} 490}
437 491
438/* Raise Windows exception. */ 492/* Raise Windows exception. */
@@ -450,8 +504,8 @@ LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode)
450{ 504{
451 global_State *g = G(L); 505 global_State *g = G(L);
452 lj_trace_abort(g); 506 lj_trace_abort(g);
453 setgcrefnull(g->jit_L); 507 setmref(g->jit_base, NULL);
454 L->status = 0; 508 L->status = LUA_OK;
455#if LJ_UNWIND_EXT 509#if LJ_UNWIND_EXT
456 err_raise_ext(errcode); 510 err_raise_ext(errcode);
457 /* 511 /*
@@ -495,7 +549,7 @@ LJ_NOINLINE void lj_err_mem(lua_State *L)
495/* Find error function for runtime errors. Requires an extra stack traversal. */ 549/* Find error function for runtime errors. Requires an extra stack traversal. */
496static ptrdiff_t finderrfunc(lua_State *L) 550static ptrdiff_t finderrfunc(lua_State *L)
497{ 551{
498 cTValue *frame = L->base-1, *bot = tvref(L->stack); 552 cTValue *frame = L->base-1, *bot = tvref(L->stack)+LJ_FR2;
499 void *cf = L->cframe; 553 void *cf = L->cframe;
500 while (frame > bot && cf) { 554 while (frame > bot && cf) {
501 while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */ 555 while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
@@ -519,10 +573,8 @@ static ptrdiff_t finderrfunc(lua_State *L)
519 frame = frame_prevd(frame); 573 frame = frame_prevd(frame);
520 break; 574 break;
521 case FRAME_CONT: 575 case FRAME_CONT:
522#if LJ_HASFFI 576 if (frame_iscont_fficb(frame))
523 if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
524 cf = cframe_prev(cf); 577 cf = cframe_prev(cf);
525#endif
526 frame = frame_prevd(frame); 578 frame = frame_prevd(frame);
527 break; 579 break;
528 case FRAME_CP: 580 case FRAME_CP:
@@ -534,11 +586,11 @@ static ptrdiff_t finderrfunc(lua_State *L)
534 break; 586 break;
535 case FRAME_PCALL: 587 case FRAME_PCALL:
536 case FRAME_PCALLH: 588 case FRAME_PCALLH:
537 if (frame_ftsz(frame) >= (ptrdiff_t)(2*sizeof(TValue))) /* xpcall? */ 589 if (frame_func(frame_prevd(frame))->c.ffid == FF_xpcall)
538 return savestack(L, frame-1); /* Point to xpcall's errorfunc. */ 590 return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */
539 return 0; 591 return 0;
540 default: 592 default:
541 lua_assert(0); 593 lj_assertL(0, "bad frame type");
542 return 0; 594 return 0;
543 } 595 }
544 } 596 }
@@ -558,8 +610,9 @@ LJ_NOINLINE void LJ_FASTCALL lj_err_run(lua_State *L)
558 lj_err_throw(L, LUA_ERRERR); 610 lj_err_throw(L, LUA_ERRERR);
559 } 611 }
560 L->status = LUA_ERRERR; 612 L->status = LUA_ERRERR;
561 copyTV(L, top, top-1); 613 copyTV(L, top+LJ_FR2, top-1);
562 copyTV(L, top-1, errfunc); 614 copyTV(L, top-1, errfunc);
615 if (LJ_FR2) setnilV(top++);
563 L->top = top+1; 616 L->top = top+1;
564 lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */ 617 lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */
565 } 618 }
@@ -573,7 +626,7 @@ LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...)
573 va_list argp; 626 va_list argp;
574 va_start(argp, em); 627 va_start(argp, em);
575 if (curr_funcisL(L)) L->top = curr_topL(L); 628 if (curr_funcisL(L)) L->top = curr_topL(L);
576 msg = lj_str_pushvf(L, err2msg(em), argp); 629 msg = lj_strfmt_pushvf(L, err2msg(em), argp);
577 va_end(argp); 630 va_end(argp);
578 lj_debug_addloc(L, msg, L->base-1, NULL); 631 lj_debug_addloc(L, msg, L->base-1, NULL);
579 lj_err_run(L); 632 lj_err_run(L);
@@ -591,11 +644,11 @@ LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
591{ 644{
592 char buff[LUA_IDSIZE]; 645 char buff[LUA_IDSIZE];
593 const char *msg; 646 const char *msg;
594 lj_debug_shortname(buff, src); 647 lj_debug_shortname(buff, src, line);
595 msg = lj_str_pushvf(L, err2msg(em), argp); 648 msg = lj_strfmt_pushvf(L, err2msg(em), argp);
596 msg = lj_str_pushf(L, "%s:%d: %s", buff, line, msg); 649 msg = lj_strfmt_pushf(L, "%s:%d: %s", buff, line, msg);
597 if (tok) 650 if (tok)
598 lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok); 651 lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok);
599 lj_err_throw(L, LUA_ERRSYNTAX); 652 lj_err_throw(L, LUA_ERRSYNTAX);
600} 653}
601 654
@@ -634,8 +687,9 @@ LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o)
634 const BCIns *pc = cframe_Lpc(L); 687 const BCIns *pc = cframe_Lpc(L);
635 if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) { 688 if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) {
636 const char *tname = lj_typename(o); 689 const char *tname = lj_typename(o);
690 if (LJ_FR2) o++;
637 setframe_pc(o, pc); 691 setframe_pc(o, pc);
638 setframe_gc(o, obj2gco(L)); 692 setframe_gc(o, obj2gco(L), LJ_TTHREAD);
639 L->top = L->base = o+1; 693 L->top = L->base = o+1;
640 err_msgv(L, LJ_ERR_BADCALL, tname); 694 err_msgv(L, LJ_ERR_BADCALL, tname);
641 } 695 }
@@ -650,13 +704,10 @@ LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg)
650 if (frame_islua(frame)) { 704 if (frame_islua(frame)) {
651 pframe = frame_prevl(frame); 705 pframe = frame_prevl(frame);
652 } else if (frame_iscont(frame)) { 706 } else if (frame_iscont(frame)) {
653#if LJ_HASFFI 707 if (frame_iscont_fficb(frame)) {
654 if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK) {
655 pframe = frame; 708 pframe = frame;
656 frame = NULL; 709 frame = NULL;
657 } else 710 } else {
658#endif
659 {
660 pframe = frame_prevd(frame); 711 pframe = frame_prevd(frame);
661#if LJ_HASFFI 712#if LJ_HASFFI
662 /* Remove frame for FFI metamethods. */ 713 /* Remove frame for FFI metamethods. */
@@ -679,7 +730,7 @@ LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...)
679 const char *msg; 730 const char *msg;
680 va_list argp; 731 va_list argp;
681 va_start(argp, em); 732 va_start(argp, em);
682 msg = lj_str_pushvf(L, err2msg(em), argp); 733 msg = lj_strfmt_pushvf(L, err2msg(em), argp);
683 va_end(argp); 734 va_end(argp);
684 lj_err_callermsg(L, msg); 735 lj_err_callermsg(L, msg);
685} 736}
@@ -699,9 +750,9 @@ LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg,
699 if (narg < 0 && narg > LUA_REGISTRYINDEX) 750 if (narg < 0 && narg > LUA_REGISTRYINDEX)
700 narg = (int)(L->top - L->base) + narg + 1; 751 narg = (int)(L->top - L->base) + narg + 1;
701 if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */ 752 if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */
702 msg = lj_str_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg); 753 msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg);
703 else 754 else
704 msg = lj_str_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg); 755 msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg);
705 lj_err_callermsg(L, msg); 756 lj_err_callermsg(L, msg);
706} 757}
707 758
@@ -711,7 +762,7 @@ LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...)
711 const char *msg; 762 const char *msg;
712 va_list argp; 763 va_list argp;
713 va_start(argp, em); 764 va_start(argp, em);
714 msg = lj_str_pushvf(L, err2msg(em), argp); 765 msg = lj_strfmt_pushvf(L, err2msg(em), argp);
715 va_end(argp); 766 va_end(argp);
716 err_argmsg(L, narg, msg); 767 err_argmsg(L, narg, msg);
717} 768}
@@ -741,7 +792,7 @@ LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname)
741 TValue *o = narg < 0 ? L->top + narg : L->base + narg-1; 792 TValue *o = narg < 0 ? L->top + narg : L->base + narg-1;
742 tname = o < L->top ? lj_typename(o) : lj_obj_typename[0]; 793 tname = o < L->top ? lj_typename(o) : lj_obj_typename[0];
743 } 794 }
744 msg = lj_str_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname); 795 msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname);
745 err_argmsg(L, narg, msg); 796 err_argmsg(L, narg, msg);
746} 797}
747 798
@@ -791,7 +842,7 @@ LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...)
791 const char *msg; 842 const char *msg;
792 va_list argp; 843 va_list argp;
793 va_start(argp, fmt); 844 va_start(argp, fmt);
794 msg = lj_str_pushvf(L, fmt, argp); 845 msg = lj_strfmt_pushvf(L, fmt, argp);
795 va_end(argp); 846 va_end(argp);
796 lj_err_callermsg(L, msg); 847 lj_err_callermsg(L, msg);
797 return 0; /* unreachable */ 848 return 0; /* unreachable */
diff --git a/src/lj_errmsg.h b/src/lj_errmsg.h
index 35b5edd5..9110dc7e 100644
--- a/src/lj_errmsg.h
+++ b/src/lj_errmsg.h
@@ -96,18 +96,12 @@ ERRDEF(STRPATX, "pattern too complex")
96ERRDEF(STRCAPI, "invalid capture index") 96ERRDEF(STRCAPI, "invalid capture index")
97ERRDEF(STRCAPN, "too many captures") 97ERRDEF(STRCAPN, "too many captures")
98ERRDEF(STRCAPU, "unfinished capture") 98ERRDEF(STRCAPU, "unfinished capture")
99ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format")) 99ERRDEF(STRFMT, "invalid option " LUA_QS " to " LUA_QL("format"))
100ERRDEF(STRFMTR, "invalid format (repeated flags)")
101ERRDEF(STRFMTW, "invalid format (width or precision too long)")
102ERRDEF(STRGSRV, "invalid replacement value (a %s)") 100ERRDEF(STRGSRV, "invalid replacement value (a %s)")
103ERRDEF(BADMODN, "name conflict for module " LUA_QS) 101ERRDEF(BADMODN, "name conflict for module " LUA_QS)
104#if LJ_HASJIT 102#if LJ_HASJIT
105ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?") 103ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?")
106#if LJ_TARGET_X86ORX64
107ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2")
108#else
109ERRDEF(NOJIT, "JIT compiler disabled") 104ERRDEF(NOJIT, "JIT compiler disabled")
110#endif
111#elif defined(LJ_ARCH_NOJIT) 105#elif defined(LJ_ARCH_NOJIT)
112ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)") 106ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
113#else 107#else
@@ -118,7 +112,6 @@ ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
118/* Lexer/parser errors. */ 112/* Lexer/parser errors. */
119ERRDEF(XMODE, "attempt to load chunk with wrong mode") 113ERRDEF(XMODE, "attempt to load chunk with wrong mode")
120ERRDEF(XNEAR, "%s near " LUA_QS) 114ERRDEF(XNEAR, "%s near " LUA_QS)
121ERRDEF(XELEM, "lexical element too long")
122ERRDEF(XLINES, "chunk has too many lines") 115ERRDEF(XLINES, "chunk has too many lines")
123ERRDEF(XLEVELS, "chunk has too many syntax levels") 116ERRDEF(XLEVELS, "chunk has too many syntax levels")
124ERRDEF(XNUMBER, "malformed number") 117ERRDEF(XNUMBER, "malformed number")
diff --git a/src/lj_ffrecord.c b/src/lj_ffrecord.c
index 1d428590..19da15a2 100644
--- a/src/lj_ffrecord.c
+++ b/src/lj_ffrecord.c
@@ -27,6 +27,7 @@
27#include "lj_dispatch.h" 27#include "lj_dispatch.h"
28#include "lj_vm.h" 28#include "lj_vm.h"
29#include "lj_strscan.h" 29#include "lj_strscan.h"
30#include "lj_strfmt.h"
30 31
31/* Some local macros to save typing. Undef'd at the end. */ 32/* Some local macros to save typing. Undef'd at the end. */
32#define IR(ref) (&J->cur.ir[(ref)]) 33#define IR(ref) (&J->cur.ir[(ref)])
@@ -79,10 +80,7 @@ static GCstr *argv2str(jit_State *J, TValue *o)
79 GCstr *s; 80 GCstr *s;
80 if (!tvisnumber(o)) 81 if (!tvisnumber(o))
81 lj_trace_err(J, LJ_TRERR_BADTYPE); 82 lj_trace_err(J, LJ_TRERR_BADTYPE);
82 if (tvisint(o)) 83 s = lj_strfmt_number(J->L, o);
83 s = lj_str_fromint(J->L, intV(o));
84 else
85 s = lj_str_fromnum(J->L, &o->n);
86 setstrV(J->L, o, s); 84 setstrV(J->L, o, s);
87 return s; 85 return s;
88 } 86 }
@@ -98,27 +96,90 @@ static ptrdiff_t results_wanted(jit_State *J)
98 return -1; 96 return -1;
99} 97}
100 98
101/* Throw error for unsupported variant of fast function. */ 99/* Trace stitching: add continuation below frame to start a new trace. */
102LJ_NORET static void recff_nyiu(jit_State *J) 100static void recff_stitch(jit_State *J)
103{ 101{
104 setfuncV(J->L, &J->errinfo, J->fn); 102 ASMFunction cont = lj_cont_stitch;
105 lj_trace_err_info(J, LJ_TRERR_NYIFFU); 103 lua_State *L = J->L;
104 TValue *base = L->base;
105 BCReg nslot = J->maxslot + 1 + LJ_FR2;
106 TValue *nframe = base + 1 + LJ_FR2;
107 const BCIns *pc = frame_pc(base-1);
108 TValue *pframe = frame_prevl(base-1);
109
110 /* Move func + args up in Lua stack and insert continuation. */
111 memmove(&base[1], &base[-1-LJ_FR2], sizeof(TValue)*nslot);
112 setframe_ftsz(nframe, ((char *)nframe - (char *)pframe) + FRAME_CONT);
113 setcont(base-LJ_FR2, cont);
114 setframe_pc(base, pc);
115 setnilV(base-1-LJ_FR2); /* Incorrect, but rec_check_slots() won't run anymore. */
116 L->base += 2 + LJ_FR2;
117 L->top += 2 + LJ_FR2;
118
119 /* Ditto for the IR. */
120 memmove(&J->base[1], &J->base[-1-LJ_FR2], sizeof(TRef)*nslot);
121#if LJ_FR2
122 J->base[2] = TREF_FRAME;
123 J->base[-1] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
124 J->base[0] = lj_ir_k64(J, IR_KNUM, u64ptr(pc)) | TREF_CONT;
125#else
126 J->base[0] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
127#endif
128 J->ktrace = tref_ref((J->base[-1-LJ_FR2] = lj_ir_ktrace(J)));
129 J->base += 2 + LJ_FR2;
130 J->baseslot += 2 + LJ_FR2;
131 J->framedepth++;
132
133 lj_record_stop(J, LJ_TRLINK_STITCH, 0);
134
135 /* Undo Lua stack changes. */
136 memmove(&base[-1-LJ_FR2], &base[1], sizeof(TValue)*nslot);
137 setframe_pc(base-1, pc);
138 L->base -= 2 + LJ_FR2;
139 L->top -= 2 + LJ_FR2;
106} 140}
107 141
108/* Fallback handler for all fast functions that are not recorded (yet). */ 142/* Fallback handler for fast functions that are not recorded (yet). */
109static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd) 143static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd)
110{ 144{
111 setfuncV(J->L, &J->errinfo, J->fn); 145 if (J->cur.nins < (IRRef)J->param[JIT_P_minstitch] + REF_BASE) {
112 lj_trace_err_info(J, LJ_TRERR_NYIFF); 146 lj_trace_err_info(J, LJ_TRERR_TRACEUV);
113 UNUSED(rd); 147 } else {
148 /* Can only stitch from Lua call. */
149 if (J->framedepth && frame_islua(J->L->base-1)) {
150 BCOp op = bc_op(*frame_pc(J->L->base-1));
151 /* Stitched trace cannot start with *M op with variable # of args. */
152 if (!(op == BC_CALLM || op == BC_CALLMT ||
153 op == BC_RETM || op == BC_TSETM)) {
154 switch (J->fn->c.ffid) {
155 case FF_error:
156 case FF_debug_sethook:
157 case FF_jit_flush:
158 break; /* Don't stitch across special builtins. */
159 default:
160 recff_stitch(J); /* Use trace stitching. */
161 rd->nres = -1;
162 return;
163 }
164 }
165 }
166 /* Otherwise stop trace and return to interpreter. */
167 lj_record_stop(J, LJ_TRLINK_RETURN, 0);
168 rd->nres = -1;
169 }
114} 170}
115 171
116/* C functions can have arbitrary side-effects and are not recorded (yet). */ 172/* Fallback handler for unsupported variants of fast functions. */
117static void LJ_FASTCALL recff_c(jit_State *J, RecordFFData *rd) 173#define recff_nyiu recff_nyi
174
175/* Must stop the trace for classic C functions with arbitrary side-effects. */
176#define recff_c recff_nyi
177
178/* Emit BUFHDR for the global temporary buffer. */
179static TRef recff_bufhdr(jit_State *J)
118{ 180{
119 setfuncV(J->L, &J->errinfo, J->fn); 181 return emitir(IRT(IR_BUFHDR, IRT_PGC),
120 lj_trace_err_info(J, LJ_TRERR_NYICF); 182 lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
121 UNUSED(rd);
122} 183}
123 184
124/* -- Base library fast functions ----------------------------------------- */ 185/* -- Base library fast functions ----------------------------------------- */
@@ -135,7 +196,7 @@ static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd)
135 uint32_t t; 196 uint32_t t;
136 if (tvisnumber(&rd->argv[0])) 197 if (tvisnumber(&rd->argv[0]))
137 t = ~LJ_TNUMX; 198 t = ~LJ_TNUMX;
138 else if (LJ_64 && tvislightud(&rd->argv[0])) 199 else if (LJ_64 && !LJ_GC64 && tvislightud(&rd->argv[0]))
139 t = ~LJ_TLIGHTUD; 200 t = ~LJ_TLIGHTUD;
140 else 201 else
141 t = ~itype(&rd->argv[0]); 202 t = ~itype(&rd->argv[0]);
@@ -167,7 +228,7 @@ static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd)
167 ix.tab = tr; 228 ix.tab = tr;
168 copyTV(J->L, &ix.tabv, &rd->argv[0]); 229 copyTV(J->L, &ix.tabv, &rd->argv[0]);
169 lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */ 230 lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */
170 fref = emitir(IRT(IR_FREF, IRT_P32), tr, IRFL_TAB_META); 231 fref = emitir(IRT(IR_FREF, IRT_PGC), tr, IRFL_TAB_META);
171 mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt; 232 mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt;
172 emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref); 233 emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref);
173 if (!tref_isnil(mt)) 234 if (!tref_isnil(mt))
@@ -220,7 +281,7 @@ static void LJ_FASTCALL recff_rawlen(jit_State *J, RecordFFData *rd)
220 if (tref_isstr(tr)) 281 if (tref_isstr(tr))
221 J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN); 282 J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN);
222 else if (tref_istab(tr)) 283 else if (tref_istab(tr))
223 J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, tr); 284 J->base[0] = emitir(IRTI(IR_ALEN), tr, TREF_NIL);
224 /* else: Interpreter will throw. */ 285 /* else: Interpreter will throw. */
225 UNUSED(rd); 286 UNUSED(rd);
226} 287}
@@ -233,7 +294,7 @@ int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv)
233 if (strV(tv)->len == 1) { 294 if (strV(tv)->len == 1) {
234 emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv))); 295 emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv)));
235 } else { 296 } else {
236 TRef trptr = emitir(IRT(IR_STRREF, IRT_P32), tr, lj_ir_kint(J, 0)); 297 TRef trptr = emitir(IRT(IR_STRREF, IRT_PGC), tr, lj_ir_kint(J, 0));
237 TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY); 298 TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY);
238 emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#')); 299 emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#'));
239 } 300 }
@@ -263,7 +324,8 @@ static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd)
263 J->base[i] = J->base[start+i]; 324 J->base[i] = J->base[start+i];
264 } /* else: Interpreter will throw. */ 325 } /* else: Interpreter will throw. */
265 } else { 326 } else {
266 recff_nyiu(J); 327 recff_nyiu(J, rd);
328 return;
267 } 329 }
268 } /* else: Interpreter will throw. */ 330 } /* else: Interpreter will throw. */
269} 331}
@@ -274,14 +336,18 @@ static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd)
274 TRef base = J->base[1]; 336 TRef base = J->base[1];
275 if (tr && !tref_isnil(base)) { 337 if (tr && !tref_isnil(base)) {
276 base = lj_opt_narrow_toint(J, base); 338 base = lj_opt_narrow_toint(J, base);
277 if (!tref_isk(base) || IR(tref_ref(base))->i != 10) 339 if (!tref_isk(base) || IR(tref_ref(base))->i != 10) {
278 recff_nyiu(J); 340 recff_nyiu(J, rd);
341 return;
342 }
279 } 343 }
280 if (tref_isnumber_str(tr)) { 344 if (tref_isnumber_str(tr)) {
281 if (tref_isstr(tr)) { 345 if (tref_isstr(tr)) {
282 TValue tmp; 346 TValue tmp;
283 if (!lj_strscan_num(strV(&rd->argv[0]), &tmp)) 347 if (!lj_strscan_num(strV(&rd->argv[0]), &tmp)) {
284 recff_nyiu(J); /* Would need an inverted STRTO for this case. */ 348 recff_nyiu(J, rd); /* Would need an inverted STRTO for this case. */
349 return;
350 }
285 tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); 351 tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
286 } 352 }
287#if LJ_HASFFI 353#if LJ_HASFFI
@@ -313,10 +379,10 @@ static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm)
313 int errcode; 379 int errcode;
314 TValue argv0; 380 TValue argv0;
315 /* Temporarily insert metamethod below object. */ 381 /* Temporarily insert metamethod below object. */
316 J->base[1] = J->base[0]; 382 J->base[1+LJ_FR2] = J->base[0];
317 J->base[0] = ix.mobj; 383 J->base[0] = ix.mobj;
318 copyTV(J->L, &argv0, &rd->argv[0]); 384 copyTV(J->L, &argv0, &rd->argv[0]);
319 copyTV(J->L, &rd->argv[1], &rd->argv[0]); 385 copyTV(J->L, &rd->argv[1+LJ_FR2], &rd->argv[0]);
320 copyTV(J->L, &rd->argv[0], &ix.mobjv); 386 copyTV(J->L, &rd->argv[0], &ix.mobjv);
321 /* Need to protect lj_record_tailcall because it may throw. */ 387 /* Need to protect lj_record_tailcall because it may throw. */
322 errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp); 388 errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp);
@@ -336,13 +402,15 @@ static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd)
336 if (tref_isstr(tr)) { 402 if (tref_isstr(tr)) {
337 /* Ignore __tostring in the string base metatable. */ 403 /* Ignore __tostring in the string base metatable. */
338 /* Pass on result in J->base[0]. */ 404 /* Pass on result in J->base[0]. */
339 } else if (!recff_metacall(J, rd, MM_tostring)) { 405 } else if (tr && !recff_metacall(J, rd, MM_tostring)) {
340 if (tref_isnumber(tr)) { 406 if (tref_isnumber(tr)) {
341 J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0); 407 J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr,
408 tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT);
342 } else if (tref_ispri(tr)) { 409 } else if (tref_ispri(tr)) {
343 J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[tref_type(tr)])); 410 J->base[0] = lj_ir_kstr(J, lj_strfmt_obj(J->L, &rd->argv[0]));
344 } else { 411 } else {
345 recff_nyiu(J); 412 recff_nyiu(J, rd);
413 return;
346 } 414 }
347 } 415 }
348} 416}
@@ -364,15 +432,15 @@ static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd)
364 } /* else: Interpreter will throw. */ 432 } /* else: Interpreter will throw. */
365} 433}
366 434
367static void LJ_FASTCALL recff_ipairs(jit_State *J, RecordFFData *rd) 435static void LJ_FASTCALL recff_xpairs(jit_State *J, RecordFFData *rd)
368{ 436{
369 TRef tr = J->base[0]; 437 TRef tr = J->base[0];
370 if (!((LJ_52 || (LJ_HASFFI && tref_iscdata(tr))) && 438 if (!((LJ_52 || (LJ_HASFFI && tref_iscdata(tr))) &&
371 recff_metacall(J, rd, MM_ipairs))) { 439 recff_metacall(J, rd, MM_pairs + rd->data))) {
372 if (tref_istab(tr)) { 440 if (tref_istab(tr)) {
373 J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0])); 441 J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0]));
374 J->base[1] = tr; 442 J->base[1] = tr;
375 J->base[2] = lj_ir_kint(J, 0); 443 J->base[2] = rd->data ? lj_ir_kint(J, 0) : TREF_NIL;
376 rd->nres = 3; 444 rd->nres = 3;
377 } /* else: Interpreter will throw. */ 445 } /* else: Interpreter will throw. */
378 } 446 }
@@ -381,6 +449,10 @@ static void LJ_FASTCALL recff_ipairs(jit_State *J, RecordFFData *rd)
381static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd) 449static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd)
382{ 450{
383 if (J->maxslot >= 1) { 451 if (J->maxslot >= 1) {
452#if LJ_FR2
453 /* Shift function arguments up. */
454 memmove(J->base + 1, J->base, sizeof(TRef) * J->maxslot);
455#endif
384 lj_record_call(J, 0, J->maxslot - 1); 456 lj_record_call(J, 0, J->maxslot - 1);
385 rd->nres = -1; /* Pending call. */ 457 rd->nres = -1; /* Pending call. */
386 } /* else: Interpreter will throw. */ 458 } /* else: Interpreter will throw. */
@@ -406,6 +478,10 @@ static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd)
406 copyTV(J->L, &argv1, &rd->argv[1]); 478 copyTV(J->L, &argv1, &rd->argv[1]);
407 copyTV(J->L, &rd->argv[0], &argv1); 479 copyTV(J->L, &rd->argv[0], &argv1);
408 copyTV(J->L, &rd->argv[1], &argv0); 480 copyTV(J->L, &rd->argv[1], &argv0);
481#if LJ_FR2
482 /* Shift function arguments up. */
483 memmove(J->base + 2, J->base + 1, sizeof(TRef) * (J->maxslot-1));
484#endif
409 /* Need to protect lj_record_call because it may throw. */ 485 /* Need to protect lj_record_call because it may throw. */
410 errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp); 486 errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp);
411 /* Always undo Lua stack swap to avoid confusing the interpreter. */ 487 /* Always undo Lua stack swap to avoid confusing the interpreter. */
@@ -417,12 +493,24 @@ static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd)
417 } /* else: Interpreter will throw. */ 493 } /* else: Interpreter will throw. */
418} 494}
419 495
496static void LJ_FASTCALL recff_getfenv(jit_State *J, RecordFFData *rd)
497{
498 TRef tr = J->base[0];
499 /* Only support getfenv(0) for now. */
500 if (tref_isint(tr) && tref_isk(tr) && IR(tref_ref(tr))->i == 0) {
501 TRef trl = emitir(IRT(IR_LREF, IRT_THREAD), 0, 0);
502 J->base[0] = emitir(IRT(IR_FLOAD, IRT_TAB), trl, IRFL_THREAD_ENV);
503 return;
504 }
505 recff_nyiu(J, rd);
506}
507
420/* -- Math library fast functions ----------------------------------------- */ 508/* -- Math library fast functions ----------------------------------------- */
421 509
422static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd) 510static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd)
423{ 511{
424 TRef tr = lj_ir_tonum(J, J->base[0]); 512 TRef tr = lj_ir_tonum(J, J->base[0]);
425 J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_knum_abs(J)); 513 J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_ksimd(J, LJ_KSIMD_ABS));
426 UNUSED(rd); 514 UNUSED(rd);
427} 515}
428 516
@@ -475,7 +563,7 @@ static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd)
475{ 563{
476 TRef tr = lj_ir_tonum(J, J->base[0]); 564 TRef tr = lj_ir_tonum(J, J->base[0]);
477 TRef tr2 = lj_ir_tonum(J, J->base[1]); 565 TRef tr2 = lj_ir_tonum(J, J->base[1]);
478 J->base[0] = emitir(IRTN(IR_ATAN2), tr, tr2); 566 J->base[0] = lj_ir_call(J, IRCALL_atan2, tr, tr2);
479 UNUSED(rd); 567 UNUSED(rd);
480} 568}
481 569
@@ -492,51 +580,12 @@ static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd)
492 UNUSED(rd); 580 UNUSED(rd);
493} 581}
494 582
495/* Record math.asin, math.acos, math.atan. */ 583static void LJ_FASTCALL recff_math_call(jit_State *J, RecordFFData *rd)
496static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd)
497{
498 TRef y = lj_ir_tonum(J, J->base[0]);
499 TRef x = lj_ir_knum_one(J);
500 uint32_t ffid = rd->data;
501 if (ffid != FF_math_atan) {
502 TRef tmp = emitir(IRTN(IR_MUL), y, y);
503 tmp = emitir(IRTN(IR_SUB), x, tmp);
504 tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_SQRT);
505 if (ffid == FF_math_asin) { x = tmp; } else { x = y; y = tmp; }
506 }
507 J->base[0] = emitir(IRTN(IR_ATAN2), y, x);
508}
509
510static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd)
511{ 584{
512 TRef tr = lj_ir_tonum(J, J->base[0]); 585 TRef tr = lj_ir_tonum(J, J->base[0]);
513 J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data); 586 J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data);
514} 587}
515 588
516static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd)
517{
518 TRef tr = J->base[0];
519 if (tref_isinteger(tr)) {
520 J->base[0] = tr;
521 J->base[1] = lj_ir_kint(J, 0);
522 } else {
523 TRef trt;
524 tr = lj_ir_tonum(J, tr);
525 trt = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC);
526 J->base[0] = trt;
527 J->base[1] = emitir(IRTN(IR_SUB), tr, trt);
528 }
529 rd->nres = 2;
530}
531
532static void LJ_FASTCALL recff_math_degrad(jit_State *J, RecordFFData *rd)
533{
534 TRef tr = lj_ir_tonum(J, J->base[0]);
535 TRef trm = lj_ir_knum(J, numV(&J->fn->c.upvalue[0]));
536 J->base[0] = emitir(IRTN(IR_MUL), tr, trm);
537 UNUSED(rd);
538}
539
540static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd) 589static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd)
541{ 590{
542 J->base[0] = lj_opt_narrow_pow(J, J->base[0], J->base[1], 591 J->base[0] = lj_opt_narrow_pow(J, J->base[0], J->base[1],
@@ -567,7 +616,7 @@ static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd)
567 GCudata *ud = udataV(&J->fn->c.upvalue[0]); 616 GCudata *ud = udataV(&J->fn->c.upvalue[0]);
568 TRef tr, one; 617 TRef tr, one;
569 lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */ 618 lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */
570 tr = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud))); 619 tr = lj_ir_call(J, IRCALL_lj_prng_u64d, lj_ir_kptr(J, uddata(ud)));
571 one = lj_ir_knum_one(J); 620 one = lj_ir_knum_one(J);
572 tr = emitir(IRTN(IR_SUB), tr, one); 621 tr = emitir(IRTN(IR_SUB), tr, one);
573 if (J->base[0]) { 622 if (J->base[0]) {
@@ -591,48 +640,105 @@ static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd)
591 640
592/* -- Bit library fast functions ------------------------------------------ */ 641/* -- Bit library fast functions ------------------------------------------ */
593 642
594/* Record unary bit.tobit, bit.bnot, bit.bswap. */ 643/* Record bit.tobit. */
644static void LJ_FASTCALL recff_bit_tobit(jit_State *J, RecordFFData *rd)
645{
646 TRef tr = J->base[0];
647#if LJ_HASFFI
648 if (tref_iscdata(tr)) { recff_bit64_tobit(J, rd); return; }
649#endif
650 J->base[0] = lj_opt_narrow_tobit(J, tr);
651 UNUSED(rd);
652}
653
654/* Record unary bit.bnot, bit.bswap. */
595static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd) 655static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd)
596{ 656{
597 TRef tr = lj_opt_narrow_tobit(J, J->base[0]); 657#if LJ_HASFFI
598 J->base[0] = (rd->data == IR_TOBIT) ? tr : emitir(IRTI(rd->data), tr, 0); 658 if (recff_bit64_unary(J, rd))
659 return;
660#endif
661 J->base[0] = emitir(IRTI(rd->data), lj_opt_narrow_tobit(J, J->base[0]), 0);
599} 662}
600 663
601/* Record N-ary bit.band, bit.bor, bit.bxor. */ 664/* Record N-ary bit.band, bit.bor, bit.bxor. */
602static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd) 665static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd)
603{ 666{
604 TRef tr = lj_opt_narrow_tobit(J, J->base[0]); 667#if LJ_HASFFI
605 uint32_t op = rd->data; 668 if (recff_bit64_nary(J, rd))
606 BCReg i; 669 return;
607 for (i = 1; J->base[i] != 0; i++) 670#endif
608 tr = emitir(IRTI(op), tr, lj_opt_narrow_tobit(J, J->base[i])); 671 {
609 J->base[0] = tr; 672 TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
673 uint32_t ot = IRTI(rd->data);
674 BCReg i;
675 for (i = 1; J->base[i] != 0; i++)
676 tr = emitir(ot, tr, lj_opt_narrow_tobit(J, J->base[i]));
677 J->base[0] = tr;
678 }
610} 679}
611 680
612/* Record bit shifts. */ 681/* Record bit shifts. */
613static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd) 682static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd)
614{ 683{
615 TRef tr = lj_opt_narrow_tobit(J, J->base[0]); 684#if LJ_HASFFI
616 TRef tsh = lj_opt_narrow_tobit(J, J->base[1]); 685 if (recff_bit64_shift(J, rd))
617 IROp op = (IROp)rd->data; 686 return;
618 if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) && 687#endif
619 !tref_isk(tsh)) 688 {
620 tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31)); 689 TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
690 TRef tsh = lj_opt_narrow_tobit(J, J->base[1]);
691 IROp op = (IROp)rd->data;
692 if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
693 !tref_isk(tsh))
694 tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31));
621#ifdef LJ_TARGET_UNIFYROT 695#ifdef LJ_TARGET_UNIFYROT
622 if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) { 696 if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) {
623 op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR; 697 op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR;
624 tsh = emitir(IRTI(IR_NEG), tsh, tsh); 698 tsh = emitir(IRTI(IR_NEG), tsh, tsh);
699 }
700#endif
701 J->base[0] = emitir(IRTI(op), tr, tsh);
625 } 702 }
703}
704
705static void LJ_FASTCALL recff_bit_tohex(jit_State *J, RecordFFData *rd)
706{
707#if LJ_HASFFI
708 TRef hdr = recff_bufhdr(J);
709 TRef tr = recff_bit64_tohex(J, rd, hdr);
710 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
711#else
712 recff_nyiu(J, rd); /* Don't bother working around this NYI. */
626#endif 713#endif
627 J->base[0] = emitir(IRTI(op), tr, tsh);
628} 714}
629 715
630/* -- String library fast functions --------------------------------------- */ 716/* -- String library fast functions --------------------------------------- */
631 717
632static void LJ_FASTCALL recff_string_len(jit_State *J, RecordFFData *rd) 718/* Specialize to relative starting position for string. */
719static TRef recff_string_start(jit_State *J, GCstr *s, int32_t *st, TRef tr,
720 TRef trlen, TRef tr0)
633{ 721{
634 J->base[0] = emitir(IRTI(IR_FLOAD), lj_ir_tostr(J, J->base[0]), IRFL_STR_LEN); 722 int32_t start = *st;
635 UNUSED(rd); 723 if (start < 0) {
724 emitir(IRTGI(IR_LT), tr, tr0);
725 tr = emitir(IRTI(IR_ADD), trlen, tr);
726 start = start + (int32_t)s->len;
727 emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), tr, tr0);
728 if (start < 0) {
729 tr = tr0;
730 start = 0;
731 }
732 } else if (start == 0) {
733 emitir(IRTGI(IR_EQ), tr, tr0);
734 tr = tr0;
735 } else {
736 tr = emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, -1));
737 emitir(IRTGI(IR_GE), tr, tr0);
738 start--;
739 }
740 *st = start;
741 return tr;
636} 742}
637 743
638/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */ 744/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */
@@ -679,39 +785,21 @@ static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
679 } else if ((MSize)end <= str->len) { 785 } else if ((MSize)end <= str->len) {
680 emitir(IRTGI(IR_ULE), trend, trlen); 786 emitir(IRTGI(IR_ULE), trend, trlen);
681 } else { 787 } else {
682 emitir(IRTGI(IR_GT), trend, trlen); 788 emitir(IRTGI(IR_UGT), trend, trlen);
683 end = (int32_t)str->len; 789 end = (int32_t)str->len;
684 trend = trlen; 790 trend = trlen;
685 } 791 }
686 if (start < 0) { 792 trstart = recff_string_start(J, str, &start, trstart, trlen, tr0);
687 emitir(IRTGI(IR_LT), trstart, tr0);
688 trstart = emitir(IRTI(IR_ADD), trlen, trstart);
689 start = start+(int32_t)str->len;
690 emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), trstart, tr0);
691 if (start < 0) {
692 trstart = tr0;
693 start = 0;
694 }
695 } else {
696 if (start == 0) {
697 emitir(IRTGI(IR_EQ), trstart, tr0);
698 trstart = tr0;
699 } else {
700 trstart = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, -1));
701 emitir(IRTGI(IR_GE), trstart, tr0);
702 start--;
703 }
704 }
705 if (rd->data) { /* Return string.sub result. */ 793 if (rd->data) { /* Return string.sub result. */
706 if (end - start >= 0) { 794 if (end - start >= 0) {
707 /* Also handle empty range here, to avoid extra traces. */ 795 /* Also handle empty range here, to avoid extra traces. */
708 TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart); 796 TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart);
709 emitir(IRTGI(IR_GE), trslen, tr0); 797 emitir(IRTGI(IR_GE), trslen, tr0);
710 trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart); 798 trptr = emitir(IRT(IR_STRREF, IRT_PGC), trstr, trstart);
711 J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen); 799 J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen);
712 } else { /* Range underflow: return empty string. */ 800 } else { /* Range underflow: return empty string. */
713 emitir(IRTGI(IR_LT), trend, trstart); 801 emitir(IRTGI(IR_LT), trend, trstart);
714 J->base[0] = lj_ir_kstr(J, lj_str_new(J->L, strdata(str), 0)); 802 J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty);
715 } 803 }
716 } else { /* Return string.byte result(s). */ 804 } else { /* Return string.byte result(s). */
717 ptrdiff_t i, len = end - start; 805 ptrdiff_t i, len = end - start;
@@ -723,7 +811,7 @@ static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
723 rd->nres = len; 811 rd->nres = len;
724 for (i = 0; i < len; i++) { 812 for (i = 0; i < len; i++) {
725 TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i)); 813 TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i));
726 tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp); 814 tmp = emitir(IRT(IR_STRREF, IRT_PGC), trstr, tmp);
727 J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY); 815 J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY);
728 } 816 }
729 } else { /* Empty range or range underflow: return no results. */ 817 } else { /* Empty range or range underflow: return no results. */
@@ -733,48 +821,203 @@ static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
733 } 821 }
734} 822}
735 823
736/* -- Table library fast functions ---------------------------------------- */ 824static void LJ_FASTCALL recff_string_char(jit_State *J, RecordFFData *rd)
737
738static void LJ_FASTCALL recff_table_getn(jit_State *J, RecordFFData *rd)
739{ 825{
740 if (tref_istab(J->base[0])) 826 TRef k255 = lj_ir_kint(J, 255);
741 J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, J->base[0]); 827 BCReg i;
742 /* else: Interpreter will throw. */ 828 for (i = 0; J->base[i] != 0; i++) { /* Convert char values to strings. */
829 TRef tr = lj_opt_narrow_toint(J, J->base[i]);
830 emitir(IRTGI(IR_ULE), tr, k255);
831 J->base[i] = emitir(IRT(IR_TOSTR, IRT_STR), tr, IRTOSTR_CHAR);
832 }
833 if (i > 1) { /* Concatenate the strings, if there's more than one. */
834 TRef hdr = recff_bufhdr(J), tr = hdr;
835 for (i = 0; J->base[i] != 0; i++)
836 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, J->base[i]);
837 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
838 } else if (i == 0) {
839 J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty);
840 }
743 UNUSED(rd); 841 UNUSED(rd);
744} 842}
745 843
746static void LJ_FASTCALL recff_table_remove(jit_State *J, RecordFFData *rd) 844static void LJ_FASTCALL recff_string_rep(jit_State *J, RecordFFData *rd)
747{ 845{
748 TRef tab = J->base[0]; 846 TRef str = lj_ir_tostr(J, J->base[0]);
749 rd->nres = 0; 847 TRef rep = lj_opt_narrow_toint(J, J->base[1]);
750 if (tref_istab(tab)) { 848 TRef hdr, tr, str2 = 0;
751 if (tref_isnil(J->base[1])) { /* Simple pop: t[#t] = nil */ 849 if (!tref_isnil(J->base[2])) {
752 TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, tab); 850 TRef sep = lj_ir_tostr(J, J->base[2]);
753 GCtab *t = tabV(&rd->argv[0]); 851 int32_t vrep = argv2int(J, &rd->argv[1]);
754 MSize len = lj_tab_len(t); 852 emitir(IRTGI(vrep > 1 ? IR_GT : IR_LE), rep, lj_ir_kint(J, 1));
755 emitir(IRTGI(len ? IR_NE : IR_EQ), trlen, lj_ir_kint(J, 0)); 853 if (vrep > 1) {
756 if (len) { 854 TRef hdr2 = recff_bufhdr(J);
757 RecordIndex ix; 855 TRef tr2 = emitir(IRT(IR_BUFPUT, IRT_PGC), hdr2, sep);
758 ix.tab = tab; 856 tr2 = emitir(IRT(IR_BUFPUT, IRT_PGC), tr2, str);
759 ix.key = trlen; 857 str2 = emitir(IRT(IR_BUFSTR, IRT_STR), tr2, hdr2);
760 settabV(J->L, &ix.tabv, t); 858 }
761 setintV(&ix.keyv, len); 859 }
762 ix.idxchain = 0; 860 tr = hdr = recff_bufhdr(J);
763 if (results_wanted(J) != 0) { /* Specialize load only if needed. */ 861 if (str2) {
764 ix.val = 0; 862 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, str);
765 J->base[0] = lj_record_idx(J, &ix); /* Load previous value. */ 863 str = str2;
766 rd->nres = 1; 864 rep = emitir(IRTI(IR_ADD), rep, lj_ir_kint(J, -1));
767 /* Assumes ix.key/ix.tab is not modified for raw lj_record_idx(). */ 865 }
768 } 866 tr = lj_ir_call(J, IRCALL_lj_buf_putstr_rep, tr, str, rep);
769 ix.val = TREF_NIL; 867 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
770 lj_record_idx(J, &ix); /* Remove value. */ 868}
869
870static void LJ_FASTCALL recff_string_op(jit_State *J, RecordFFData *rd)
871{
872 TRef str = lj_ir_tostr(J, J->base[0]);
873 TRef hdr = recff_bufhdr(J);
874 TRef tr = lj_ir_call(J, rd->data, hdr, str);
875 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
876}
877
878static void LJ_FASTCALL recff_string_find(jit_State *J, RecordFFData *rd)
879{
880 TRef trstr = lj_ir_tostr(J, J->base[0]);
881 TRef trpat = lj_ir_tostr(J, J->base[1]);
882 TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN);
883 TRef tr0 = lj_ir_kint(J, 0);
884 TRef trstart;
885 GCstr *str = argv2str(J, &rd->argv[0]);
886 GCstr *pat = argv2str(J, &rd->argv[1]);
887 int32_t start;
888 J->needsnap = 1;
889 if (tref_isnil(J->base[2])) {
890 trstart = lj_ir_kint(J, 1);
891 start = 1;
892 } else {
893 trstart = lj_opt_narrow_toint(J, J->base[2]);
894 start = argv2int(J, &rd->argv[2]);
895 }
896 trstart = recff_string_start(J, str, &start, trstart, trlen, tr0);
897 if ((MSize)start <= str->len) {
898 emitir(IRTGI(IR_ULE), trstart, trlen);
899 } else {
900 emitir(IRTGI(IR_UGT), trstart, trlen);
901#if LJ_52
902 J->base[0] = TREF_NIL;
903 return;
904#else
905 trstart = trlen;
906 start = str->len;
907#endif
908 }
909 /* Fixed arg or no pattern matching chars? (Specialized to pattern string.) */
910 if ((J->base[2] && tref_istruecond(J->base[3])) ||
911 (emitir(IRTG(IR_EQ, IRT_STR), trpat, lj_ir_kstr(J, pat)),
912 !lj_str_haspattern(pat))) { /* Search for fixed string. */
913 TRef trsptr = emitir(IRT(IR_STRREF, IRT_PGC), trstr, trstart);
914 TRef trpptr = emitir(IRT(IR_STRREF, IRT_PGC), trpat, tr0);
915 TRef trslen = emitir(IRTI(IR_SUB), trlen, trstart);
916 TRef trplen = emitir(IRTI(IR_FLOAD), trpat, IRFL_STR_LEN);
917 TRef tr = lj_ir_call(J, IRCALL_lj_str_find, trsptr, trpptr, trslen, trplen);
918 TRef trp0 = lj_ir_kkptr(J, NULL);
919 if (lj_str_find(strdata(str)+(MSize)start, strdata(pat),
920 str->len-(MSize)start, pat->len)) {
921 TRef pos;
922 emitir(IRTG(IR_NE, IRT_PGC), tr, trp0);
923 /* Recompute offset. trsptr may not point into trstr after folding. */
924 pos = emitir(IRTI(IR_ADD), emitir(IRTI(IR_SUB), tr, trsptr), trstart);
925 J->base[0] = emitir(IRTI(IR_ADD), pos, lj_ir_kint(J, 1));
926 J->base[1] = emitir(IRTI(IR_ADD), pos, trplen);
927 rd->nres = 2;
928 } else {
929 emitir(IRTG(IR_EQ, IRT_PGC), tr, trp0);
930 J->base[0] = TREF_NIL;
931 }
932 } else { /* Search for pattern. */
933 recff_nyiu(J, rd);
934 return;
935 }
936}
937
938static void LJ_FASTCALL recff_string_format(jit_State *J, RecordFFData *rd)
939{
940 TRef trfmt = lj_ir_tostr(J, J->base[0]);
941 GCstr *fmt = argv2str(J, &rd->argv[0]);
942 int arg = 1;
943 TRef hdr, tr;
944 FormatState fs;
945 SFormat sf;
946 /* Specialize to the format string. */
947 emitir(IRTG(IR_EQ, IRT_STR), trfmt, lj_ir_kstr(J, fmt));
948 tr = hdr = recff_bufhdr(J);
949 lj_strfmt_init(&fs, strdata(fmt), fmt->len);
950 while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { /* Parse format. */
951 TRef tra = sf == STRFMT_LIT ? 0 : J->base[arg++];
952 TRef trsf = lj_ir_kint(J, (int32_t)sf);
953 IRCallID id;
954 switch (STRFMT_TYPE(sf)) {
955 case STRFMT_LIT:
956 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr,
957 lj_ir_kstr(J, lj_str_new(J->L, fs.str, fs.len)));
958 break;
959 case STRFMT_INT:
960 id = IRCALL_lj_strfmt_putfnum_int;
961 handle_int:
962 if (!tref_isinteger(tra))
963 goto handle_num;
964 if (sf == STRFMT_INT) { /* Shortcut for plain %d. */
965 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr,
966 emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_INT));
967 } else {
968#if LJ_HASFFI
969 tra = emitir(IRT(IR_CONV, IRT_U64), tra,
970 (IRT_INT|(IRT_U64<<5)|IRCONV_SEXT));
971 tr = lj_ir_call(J, IRCALL_lj_strfmt_putfxint, tr, trsf, tra);
972 lj_needsplit(J);
973#else
974 recff_nyiu(J, rd); /* Don't bother working around this NYI. */
975 return;
976#endif
771 } 977 }
772 } else { /* Complex case: remove in the middle. */ 978 break;
773 recff_nyiu(J); 979 case STRFMT_UINT:
980 id = IRCALL_lj_strfmt_putfnum_uint;
981 goto handle_int;
982 case STRFMT_NUM:
983 id = IRCALL_lj_strfmt_putfnum;
984 handle_num:
985 tra = lj_ir_tonum(J, tra);
986 tr = lj_ir_call(J, id, tr, trsf, tra);
987 if (LJ_SOFTFP32) lj_needsplit(J);
988 break;
989 case STRFMT_STR:
990 if (!tref_isstr(tra)) {
991 recff_nyiu(J, rd); /* NYI: __tostring and non-string types for %s. */
992 return;
993 }
994 if (sf == STRFMT_STR) /* Shortcut for plain %s. */
995 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, tra);
996 else if ((sf & STRFMT_T_QUOTED))
997 tr = lj_ir_call(J, IRCALL_lj_strfmt_putquoted, tr, tra);
998 else
999 tr = lj_ir_call(J, IRCALL_lj_strfmt_putfstr, tr, trsf, tra);
1000 break;
1001 case STRFMT_CHAR:
1002 tra = lj_opt_narrow_toint(J, tra);
1003 if (sf == STRFMT_CHAR) /* Shortcut for plain %c. */
1004 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr,
1005 emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_CHAR));
1006 else
1007 tr = lj_ir_call(J, IRCALL_lj_strfmt_putfchar, tr, trsf, tra);
1008 break;
1009 case STRFMT_PTR: /* NYI */
1010 case STRFMT_ERR:
1011 default:
1012 recff_nyiu(J, rd);
1013 return;
774 } 1014 }
775 } /* else: Interpreter will throw. */ 1015 }
1016 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
776} 1017}
777 1018
1019/* -- Table library fast functions ---------------------------------------- */
1020
778static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd) 1021static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
779{ 1022{
780 RecordIndex ix; 1023 RecordIndex ix;
@@ -783,7 +1026,7 @@ static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
783 rd->nres = 0; 1026 rd->nres = 0;
784 if (tref_istab(ix.tab) && ix.val) { 1027 if (tref_istab(ix.tab) && ix.val) {
785 if (!J->base[2]) { /* Simple push: t[#t+1] = v */ 1028 if (!J->base[2]) { /* Simple push: t[#t+1] = v */
786 TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab); 1029 TRef trlen = emitir(IRTI(IR_ALEN), ix.tab, TREF_NIL);
787 GCtab *t = tabV(&rd->argv[0]); 1030 GCtab *t = tabV(&rd->argv[0]);
788 ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1)); 1031 ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
789 settabV(J->L, &ix.tabv, t); 1032 settabV(J->L, &ix.tabv, t);
@@ -791,11 +1034,49 @@ static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
791 ix.idxchain = 0; 1034 ix.idxchain = 0;
792 lj_record_idx(J, &ix); /* Set new value. */ 1035 lj_record_idx(J, &ix); /* Set new value. */
793 } else { /* Complex case: insert in the middle. */ 1036 } else { /* Complex case: insert in the middle. */
794 recff_nyiu(J); 1037 recff_nyiu(J, rd);
1038 return;
795 } 1039 }
796 } /* else: Interpreter will throw. */ 1040 } /* else: Interpreter will throw. */
797} 1041}
798 1042
1043static void LJ_FASTCALL recff_table_concat(jit_State *J, RecordFFData *rd)
1044{
1045 TRef tab = J->base[0];
1046 if (tref_istab(tab)) {
1047 TRef sep = !tref_isnil(J->base[1]) ?
1048 lj_ir_tostr(J, J->base[1]) : lj_ir_knull(J, IRT_STR);
1049 TRef tri = (J->base[1] && !tref_isnil(J->base[2])) ?
1050 lj_opt_narrow_toint(J, J->base[2]) : lj_ir_kint(J, 1);
1051 TRef tre = (J->base[1] && J->base[2] && !tref_isnil(J->base[3])) ?
1052 lj_opt_narrow_toint(J, J->base[3]) :
1053 emitir(IRTI(IR_ALEN), tab, TREF_NIL);
1054 TRef hdr = recff_bufhdr(J);
1055 TRef tr = lj_ir_call(J, IRCALL_lj_buf_puttab, hdr, tab, sep, tri, tre);
1056 emitir(IRTG(IR_NE, IRT_PTR), tr, lj_ir_kptr(J, NULL));
1057 J->base[0] = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
1058 } /* else: Interpreter will throw. */
1059 UNUSED(rd);
1060}
1061
1062static void LJ_FASTCALL recff_table_new(jit_State *J, RecordFFData *rd)
1063{
1064 TRef tra = lj_opt_narrow_toint(J, J->base[0]);
1065 TRef trh = lj_opt_narrow_toint(J, J->base[1]);
1066 J->base[0] = lj_ir_call(J, IRCALL_lj_tab_new_ah, tra, trh);
1067 UNUSED(rd);
1068}
1069
1070static void LJ_FASTCALL recff_table_clear(jit_State *J, RecordFFData *rd)
1071{
1072 TRef tr = J->base[0];
1073 if (tref_istab(tr)) {
1074 rd->nres = 0;
1075 lj_ir_call(J, IRCALL_lj_tab_clear, tr);
1076 J->needsnap = 1;
1077 } /* else: Interpreter will throw. */
1078}
1079
799/* -- I/O library fast functions ------------------------------------------ */ 1080/* -- I/O library fast functions ------------------------------------------ */
800 1081
801/* Get FILE* for I/O function. Any I/O error aborts recording, so there's 1082/* Get FILE* for I/O function. Any I/O error aborts recording, so there's
@@ -805,8 +1086,7 @@ static TRef recff_io_fp(jit_State *J, TRef *udp, int32_t id)
805{ 1086{
806 TRef tr, ud, fp; 1087 TRef tr, ud, fp;
807 if (id) { /* io.func() */ 1088 if (id) { /* io.func() */
808 tr = lj_ir_kptr(J, &J2G(J)->gcroot[id]); 1089 ud = lj_ir_ggfload(J, IRT_UDATA, GG_OFS(g.gcroot[id]));
809 ud = emitir(IRT(IR_XLOAD, IRT_UDATA), tr, 0);
810 } else { /* fp:method() */ 1090 } else { /* fp:method() */
811 ud = J->base[0]; 1091 ud = J->base[0];
812 if (!tref_isudata(ud)) 1092 if (!tref_isudata(ud))
@@ -828,10 +1108,13 @@ static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd)
828 ptrdiff_t i = rd->data == 0 ? 1 : 0; 1108 ptrdiff_t i = rd->data == 0 ? 1 : 0;
829 for (; J->base[i]; i++) { 1109 for (; J->base[i]; i++) {
830 TRef str = lj_ir_tostr(J, J->base[i]); 1110 TRef str = lj_ir_tostr(J, J->base[i]);
831 TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero); 1111 TRef buf = emitir(IRT(IR_STRREF, IRT_PGC), str, zero);
832 TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN); 1112 TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN);
833 if (tref_isk(len) && IR(tref_ref(len))->i == 1) { 1113 if (tref_isk(len) && IR(tref_ref(len))->i == 1) {
834 TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY); 1114 IRIns *irs = IR(tref_ref(str));
1115 TRef tr = (irs->o == IR_TOSTR && irs->op2 == IRTOSTR_CHAR) ?
1116 irs->op1 :
1117 emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY);
835 tr = lj_ir_call(J, IRCALL_fputc, tr, fp); 1118 tr = lj_ir_call(J, IRCALL_fputc, tr, fp);
836 if (results_wanted(J) != 0) /* Check result only if not ignored. */ 1119 if (results_wanted(J) != 0) /* Check result only if not ignored. */
837 emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1)); 1120 emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1));
@@ -853,6 +1136,28 @@ static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd)
853 J->base[0] = TREF_TRUE; 1136 J->base[0] = TREF_TRUE;
854} 1137}
855 1138
1139/* -- Debug library fast functions ---------------------------------------- */
1140
1141static void LJ_FASTCALL recff_debug_getmetatable(jit_State *J, RecordFFData *rd)
1142{
1143 GCtab *mt;
1144 TRef mtref;
1145 TRef tr = J->base[0];
1146 if (tref_istab(tr)) {
1147 mt = tabref(tabV(&rd->argv[0])->metatable);
1148 mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_TAB_META);
1149 } else if (tref_isudata(tr)) {
1150 mt = tabref(udataV(&rd->argv[0])->metatable);
1151 mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_UDATA_META);
1152 } else {
1153 mt = tabref(basemt_obj(J2G(J), &rd->argv[0]));
1154 J->base[0] = mt ? lj_ir_ktab(J, mt) : TREF_NIL;
1155 return;
1156 }
1157 emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
1158 J->base[0] = mt ? mtref : TREF_NIL;
1159}
1160
856/* -- Record calls to fast functions -------------------------------------- */ 1161/* -- Record calls to fast functions -------------------------------------- */
857 1162
858#include "lj_recdef.h" 1163#include "lj_recdef.h"
diff --git a/src/lj_frame.h b/src/lj_frame.h
index e78a28a5..599a2d1c 100644
--- a/src/lj_frame.h
+++ b/src/lj_frame.h
@@ -11,7 +11,16 @@
11 11
12/* -- Lua stack frame ----------------------------------------------------- */ 12/* -- Lua stack frame ----------------------------------------------------- */
13 13
14/* Frame type markers in callee function slot (callee base-1). */ 14/* Frame type markers in LSB of PC (4-byte aligned) or delta (8-byte aligned:
15**
16** PC 00 Lua frame
17** delta 001 C frame
18** delta 010 Continuation frame
19** delta 011 Lua vararg frame
20** delta 101 cpcall() frame
21** delta 110 ff pcall() frame
22** delta 111 ff pcall() frame with active hook
23*/
15enum { 24enum {
16 FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG, 25 FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG,
17 FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH 26 FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH
@@ -21,9 +30,47 @@ enum {
21#define FRAME_TYPEP (FRAME_TYPE|FRAME_P) 30#define FRAME_TYPEP (FRAME_TYPE|FRAME_P)
22 31
23/* Macros to access and modify Lua frames. */ 32/* Macros to access and modify Lua frames. */
33#if LJ_FR2
34/* Two-slot frame info, required for 64 bit PC/GCRef:
35**
36** base-2 base-1 | base base+1 ...
37** [func PC/delta/ft] | [slots ...]
38** ^-- frame | ^-- base ^-- top
39**
40** Continuation frames:
41**
42** base-4 base-3 base-2 base-1 | base base+1 ...
43** [cont PC ] [func PC/delta/ft] | [slots ...]
44** ^-- frame | ^-- base ^-- top
45*/
46#define frame_gc(f) (gcval((f)-1))
47#define frame_ftsz(f) ((ptrdiff_t)(f)->ftsz)
48#define frame_pc(f) ((const BCIns *)frame_ftsz(f))
49#define setframe_gc(f, p, tp) (setgcVraw((f)-1, (p), (tp)))
50#define setframe_ftsz(f, sz) ((f)->ftsz = (sz))
51#define setframe_pc(f, pc) ((f)->ftsz = (int64_t)(intptr_t)(pc))
52#else
53/* One-slot frame info, sufficient for 32 bit PC/GCRef:
54**
55** base-1 | base base+1 ...
56** lo hi |
57** [func | PC/delta/ft] | [slots ...]
58** ^-- frame | ^-- base ^-- top
59**
60** Continuation frames:
61**
62** base-2 base-1 | base base+1 ...
63** lo hi lo hi |
64** [cont | PC] [func | PC/delta/ft] | [slots ...]
65** ^-- frame | ^-- base ^-- top
66*/
24#define frame_gc(f) (gcref((f)->fr.func)) 67#define frame_gc(f) (gcref((f)->fr.func))
25#define frame_func(f) (&frame_gc(f)->fn) 68#define frame_ftsz(f) ((ptrdiff_t)(f)->fr.tp.ftsz)
26#define frame_ftsz(f) ((f)->fr.tp.ftsz) 69#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns))
70#define setframe_gc(f, p, tp) (setgcref((f)->fr.func, (p)), UNUSED(tp))
71#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (int32_t)(sz))
72#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc)))
73#endif
27 74
28#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE) 75#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE)
29#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP) 76#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP)
@@ -33,33 +80,53 @@ enum {
33#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG) 80#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG)
34#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL) 81#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL)
35 82
36#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns)) 83#define frame_func(f) (&frame_gc(f)->fn)
84#define frame_delta(f) (frame_ftsz(f) >> 3)
85#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP)
86
87enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
88
89#if LJ_FR2
90#define frame_contpc(f) (frame_pc((f)-2))
91#define frame_contv(f) (((f)-3)->u64)
92#else
37#define frame_contpc(f) (frame_pc((f)-1)) 93#define frame_contpc(f) (frame_pc((f)-1))
38#if LJ_64 94#define frame_contv(f) (((f)-1)->u32.lo)
95#endif
96#if LJ_FR2
97#define frame_contf(f) ((ASMFunction)(uintptr_t)((f)-3)->u64)
98#elif LJ_64
39#define frame_contf(f) \ 99#define frame_contf(f) \
40 ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \ 100 ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \
41 (intptr_t)(int32_t)((f)-1)->u32.lo)) 101 (intptr_t)(int32_t)((f)-1)->u32.lo))
42#else 102#else
43#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void)) 103#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void))
44#endif 104#endif
45#define frame_delta(f) (frame_ftsz(f) >> 3) 105#define frame_iscont_fficb(f) \
46#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP) 106 (LJ_HASFFI && frame_contv(f) == LJ_CONT_FFI_CALLBACK)
47 107
48#define frame_prevl(f) ((f) - (1+bc_a(frame_pc(f)[-1]))) 108#define frame_prevl(f) ((f) - (1+LJ_FR2+bc_a(frame_pc(f)[-1])))
49#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f))) 109#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f)))
50#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f)) 110#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f))
51/* Note: this macro does not skip over FRAME_VARG. */ 111/* Note: this macro does not skip over FRAME_VARG. */
52 112
53#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc)))
54#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (sz))
55#define setframe_gc(f, p) (setgcref((f)->fr.func, (p)))
56
57/* -- C stack frame ------------------------------------------------------- */ 113/* -- C stack frame ------------------------------------------------------- */
58 114
59/* Macros to access and modify the C stack frame chain. */ 115/* Macros to access and modify the C stack frame chain. */
60 116
61/* These definitions must match with the arch-specific *.dasc files. */ 117/* These definitions must match with the arch-specific *.dasc files. */
62#if LJ_TARGET_X86 118#if LJ_TARGET_X86
119#if LJ_ABI_WIN
120#define CFRAME_OFS_ERRF (19*4)
121#define CFRAME_OFS_NRES (18*4)
122#define CFRAME_OFS_PREV (17*4)
123#define CFRAME_OFS_L (16*4)
124#define CFRAME_OFS_SEH (9*4)
125#define CFRAME_OFS_PC (6*4)
126#define CFRAME_OFS_MULTRES (5*4)
127#define CFRAME_SIZE (16*4)
128#define CFRAME_SHIFT_MULTRES 0
129#else
63#define CFRAME_OFS_ERRF (15*4) 130#define CFRAME_OFS_ERRF (15*4)
64#define CFRAME_OFS_NRES (14*4) 131#define CFRAME_OFS_NRES (14*4)
65#define CFRAME_OFS_PREV (13*4) 132#define CFRAME_OFS_PREV (13*4)
@@ -68,24 +135,41 @@ enum {
68#define CFRAME_OFS_MULTRES (5*4) 135#define CFRAME_OFS_MULTRES (5*4)
69#define CFRAME_SIZE (12*4) 136#define CFRAME_SIZE (12*4)
70#define CFRAME_SHIFT_MULTRES 0 137#define CFRAME_SHIFT_MULTRES 0
138#endif
71#elif LJ_TARGET_X64 139#elif LJ_TARGET_X64
72#if LJ_ABI_WIN 140#if LJ_ABI_WIN
73#define CFRAME_OFS_PREV (13*8) 141#define CFRAME_OFS_PREV (13*8)
142#if LJ_GC64
143#define CFRAME_OFS_PC (12*8)
144#define CFRAME_OFS_L (11*8)
145#define CFRAME_OFS_ERRF (21*4)
146#define CFRAME_OFS_NRES (20*4)
147#define CFRAME_OFS_MULTRES (8*4)
148#else
74#define CFRAME_OFS_PC (25*4) 149#define CFRAME_OFS_PC (25*4)
75#define CFRAME_OFS_L (24*4) 150#define CFRAME_OFS_L (24*4)
76#define CFRAME_OFS_ERRF (23*4) 151#define CFRAME_OFS_ERRF (23*4)
77#define CFRAME_OFS_NRES (22*4) 152#define CFRAME_OFS_NRES (22*4)
78#define CFRAME_OFS_MULTRES (21*4) 153#define CFRAME_OFS_MULTRES (21*4)
154#endif
79#define CFRAME_SIZE (10*8) 155#define CFRAME_SIZE (10*8)
80#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8) 156#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8)
81#define CFRAME_SHIFT_MULTRES 0 157#define CFRAME_SHIFT_MULTRES 0
82#else 158#else
83#define CFRAME_OFS_PREV (4*8) 159#define CFRAME_OFS_PREV (4*8)
160#if LJ_GC64
161#define CFRAME_OFS_PC (3*8)
162#define CFRAME_OFS_L (2*8)
163#define CFRAME_OFS_ERRF (3*4)
164#define CFRAME_OFS_NRES (2*4)
165#define CFRAME_OFS_MULTRES (0*4)
166#else
84#define CFRAME_OFS_PC (7*4) 167#define CFRAME_OFS_PC (7*4)
85#define CFRAME_OFS_L (6*4) 168#define CFRAME_OFS_L (6*4)
86#define CFRAME_OFS_ERRF (5*4) 169#define CFRAME_OFS_ERRF (5*4)
87#define CFRAME_OFS_NRES (4*4) 170#define CFRAME_OFS_NRES (4*4)
88#define CFRAME_OFS_MULTRES (1*4) 171#define CFRAME_OFS_MULTRES (1*4)
172#endif
89#if LJ_NO_UNWIND 173#if LJ_NO_UNWIND
90#define CFRAME_SIZE (12*8) 174#define CFRAME_SIZE (12*8)
91#else 175#else
@@ -107,6 +191,15 @@ enum {
107#define CFRAME_SIZE 64 191#define CFRAME_SIZE 64
108#endif 192#endif
109#define CFRAME_SHIFT_MULTRES 3 193#define CFRAME_SHIFT_MULTRES 3
194#elif LJ_TARGET_ARM64
195#define CFRAME_OFS_ERRF 196
196#define CFRAME_OFS_NRES 200
197#define CFRAME_OFS_PREV 160
198#define CFRAME_OFS_L 176
199#define CFRAME_OFS_PC 168
200#define CFRAME_OFS_MULTRES 192
201#define CFRAME_SIZE 208
202#define CFRAME_SHIFT_MULTRES 3
110#elif LJ_TARGET_PPC 203#elif LJ_TARGET_PPC
111#if LJ_TARGET_XBOX360 204#if LJ_TARGET_XBOX360
112#define CFRAME_OFS_ERRF 424 205#define CFRAME_OFS_ERRF 424
@@ -117,7 +210,7 @@ enum {
117#define CFRAME_OFS_MULTRES 408 210#define CFRAME_OFS_MULTRES 408
118#define CFRAME_SIZE 384 211#define CFRAME_SIZE 384
119#define CFRAME_SHIFT_MULTRES 3 212#define CFRAME_SHIFT_MULTRES 3
120#elif LJ_ARCH_PPC64 213#elif LJ_ARCH_PPC32ON64
121#define CFRAME_OFS_ERRF 472 214#define CFRAME_OFS_ERRF 472
122#define CFRAME_OFS_NRES 468 215#define CFRAME_OFS_NRES 468
123#define CFRAME_OFS_PREV 448 216#define CFRAME_OFS_PREV 448
@@ -133,26 +226,43 @@ enum {
133#define CFRAME_OFS_L 36 226#define CFRAME_OFS_L 36
134#define CFRAME_OFS_PC 32 227#define CFRAME_OFS_PC 32
135#define CFRAME_OFS_MULTRES 28 228#define CFRAME_OFS_MULTRES 28
136#define CFRAME_SIZE 272 229#define CFRAME_SIZE (LJ_ARCH_HASFPU ? 272 : 128)
137#define CFRAME_SHIFT_MULTRES 3 230#define CFRAME_SHIFT_MULTRES 3
138#endif 231#endif
139#elif LJ_TARGET_PPCSPE 232#elif LJ_TARGET_MIPS32
140#define CFRAME_OFS_ERRF 28 233#if LJ_ARCH_HASFPU
141#define CFRAME_OFS_NRES 24
142#define CFRAME_OFS_PREV 20
143#define CFRAME_OFS_L 16
144#define CFRAME_OFS_PC 12
145#define CFRAME_OFS_MULTRES 8
146#define CFRAME_SIZE 184
147#define CFRAME_SHIFT_MULTRES 3
148#elif LJ_TARGET_MIPS
149#define CFRAME_OFS_ERRF 124 234#define CFRAME_OFS_ERRF 124
150#define CFRAME_OFS_NRES 120 235#define CFRAME_OFS_NRES 120
151#define CFRAME_OFS_PREV 116 236#define CFRAME_OFS_PREV 116
152#define CFRAME_OFS_L 112 237#define CFRAME_OFS_L 112
238#define CFRAME_SIZE 112
239#else
240#define CFRAME_OFS_ERRF 76
241#define CFRAME_OFS_NRES 72
242#define CFRAME_OFS_PREV 68
243#define CFRAME_OFS_L 64
244#define CFRAME_SIZE 64
245#endif
153#define CFRAME_OFS_PC 20 246#define CFRAME_OFS_PC 20
154#define CFRAME_OFS_MULTRES 16 247#define CFRAME_OFS_MULTRES 16
155#define CFRAME_SIZE 112 248#define CFRAME_SHIFT_MULTRES 3
249#elif LJ_TARGET_MIPS64
250#if LJ_ARCH_HASFPU
251#define CFRAME_OFS_ERRF 188
252#define CFRAME_OFS_NRES 184
253#define CFRAME_OFS_PREV 176
254#define CFRAME_OFS_L 168
255#define CFRAME_OFS_PC 160
256#define CFRAME_SIZE 192
257#else
258#define CFRAME_OFS_ERRF 124
259#define CFRAME_OFS_NRES 120
260#define CFRAME_OFS_PREV 112
261#define CFRAME_OFS_L 104
262#define CFRAME_OFS_PC 96
263#define CFRAME_SIZE 128
264#endif
265#define CFRAME_OFS_MULTRES 0
156#define CFRAME_SHIFT_MULTRES 3 266#define CFRAME_SHIFT_MULTRES 3
157#else 267#else
158#error "Missing CFRAME_* definitions for this architecture" 268#error "Missing CFRAME_* definitions for this architecture"
diff --git a/src/lj_func.c b/src/lj_func.c
index 9afdb638..fb267885 100644
--- a/src/lj_func.c
+++ b/src/lj_func.c
@@ -24,9 +24,11 @@ void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt)
24 24
25/* -- Upvalues ------------------------------------------------------------ */ 25/* -- Upvalues ------------------------------------------------------------ */
26 26
27static void unlinkuv(GCupval *uv) 27static void unlinkuv(global_State *g, GCupval *uv)
28{ 28{
29 lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); 29 UNUSED(g);
30 lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
31 "broken upvalue chain");
30 setgcrefr(uvnext(uv)->prev, uv->prev); 32 setgcrefr(uvnext(uv)->prev, uv->prev);
31 setgcrefr(uvprev(uv)->next, uv->next); 33 setgcrefr(uvprev(uv)->next, uv->next);
32} 34}
@@ -40,7 +42,7 @@ static GCupval *func_finduv(lua_State *L, TValue *slot)
40 GCupval *uv; 42 GCupval *uv;
41 /* Search the sorted list of open upvalues. */ 43 /* Search the sorted list of open upvalues. */
42 while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { 44 while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) {
43 lua_assert(!p->closed && uvval(p) != &p->tv); 45 lj_assertG(!p->closed && uvval(p) != &p->tv, "closed upvalue in chain");
44 if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ 46 if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */
45 if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ 47 if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */
46 flipwhite(obj2gco(p)); 48 flipwhite(obj2gco(p));
@@ -61,7 +63,8 @@ static GCupval *func_finduv(lua_State *L, TValue *slot)
61 setgcrefr(uv->next, g->uvhead.next); 63 setgcrefr(uv->next, g->uvhead.next);
62 setgcref(uvnext(uv)->prev, obj2gco(uv)); 64 setgcref(uvnext(uv)->prev, obj2gco(uv));
63 setgcref(g->uvhead.next, obj2gco(uv)); 65 setgcref(g->uvhead.next, obj2gco(uv));
64 lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); 66 lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
67 "broken upvalue chain");
65 return uv; 68 return uv;
66} 69}
67 70
@@ -84,12 +87,13 @@ void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
84 while (gcref(L->openupval) != NULL && 87 while (gcref(L->openupval) != NULL &&
85 uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { 88 uvval((uv = gco2uv(gcref(L->openupval)))) >= level) {
86 GCobj *o = obj2gco(uv); 89 GCobj *o = obj2gco(uv);
87 lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv); 90 lj_assertG(!isblack(o), "bad black upvalue");
91 lj_assertG(!uv->closed && uvval(uv) != &uv->tv, "closed upvalue in chain");
88 setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ 92 setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */
89 if (isdead(g, o)) { 93 if (isdead(g, o)) {
90 lj_func_freeuv(g, uv); 94 lj_func_freeuv(g, uv);
91 } else { 95 } else {
92 unlinkuv(uv); 96 unlinkuv(g, uv);
93 lj_gc_closeuv(g, uv); 97 lj_gc_closeuv(g, uv);
94 } 98 }
95 } 99 }
@@ -98,7 +102,7 @@ void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
98void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) 102void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv)
99{ 103{
100 if (!uv->closed) 104 if (!uv->closed)
101 unlinkuv(uv); 105 unlinkuv(g, uv);
102 lj_mem_freet(g, uv); 106 lj_mem_freet(g, uv);
103} 107}
104 108
diff --git a/src/lj_gc.c b/src/lj_gc.c
index 86fcd6eb..cc4232a6 100644
--- a/src/lj_gc.c
+++ b/src/lj_gc.c
@@ -12,6 +12,7 @@
12#include "lj_obj.h" 12#include "lj_obj.h"
13#include "lj_gc.h" 13#include "lj_gc.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_buf.h"
15#include "lj_str.h" 16#include "lj_str.h"
16#include "lj_tab.h" 17#include "lj_tab.h"
17#include "lj_func.h" 18#include "lj_func.h"
@@ -24,6 +25,7 @@
24#include "lj_cdata.h" 25#include "lj_cdata.h"
25#endif 26#endif
26#include "lj_trace.h" 27#include "lj_trace.h"
28#include "lj_dispatch.h"
27#include "lj_vm.h" 29#include "lj_vm.h"
28 30
29#define GCSTEPSIZE 1024u 31#define GCSTEPSIZE 1024u
@@ -40,7 +42,8 @@
40 42
41/* Mark a TValue (if needed). */ 43/* Mark a TValue (if needed). */
42#define gc_marktv(g, tv) \ 44#define gc_marktv(g, tv) \
43 { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \ 45 { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
46 "TValue and GC type mismatch"); \
44 if (tviswhite(tv)) gc_mark(g, gcV(tv)); } 47 if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
45 48
46/* Mark a GCobj (if needed). */ 49/* Mark a GCobj (if needed). */
@@ -54,7 +57,8 @@
54static void gc_mark(global_State *g, GCobj *o) 57static void gc_mark(global_State *g, GCobj *o)
55{ 58{
56 int gct = o->gch.gct; 59 int gct = o->gch.gct;
57 lua_assert(iswhite(o) && !isdead(g, o)); 60 lj_assertG(iswhite(o), "mark of non-white object");
61 lj_assertG(!isdead(g, o), "mark of dead object");
58 white2gray(o); 62 white2gray(o);
59 if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { 63 if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
60 GCtab *mt = tabref(gco2ud(o)->metatable); 64 GCtab *mt = tabref(gco2ud(o)->metatable);
@@ -67,8 +71,9 @@ static void gc_mark(global_State *g, GCobj *o)
67 if (uv->closed) 71 if (uv->closed)
68 gray2black(o); /* Closed upvalues are never gray. */ 72 gray2black(o); /* Closed upvalues are never gray. */
69 } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { 73 } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
70 lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || 74 lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
71 gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO); 75 gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
76 "bad GC type %d", gct);
72 setgcrefr(o->gch.gclist, g->gc.gray); 77 setgcrefr(o->gch.gclist, g->gc.gray);
73 setgcref(g->gc.gray, o); 78 setgcref(g->gc.gray, o);
74 } 79 }
@@ -101,7 +106,8 @@ static void gc_mark_uv(global_State *g)
101{ 106{
102 GCupval *uv; 107 GCupval *uv;
103 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { 108 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
104 lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); 109 lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
110 "broken upvalue chain");
105 if (isgray(obj2gco(uv))) 111 if (isgray(obj2gco(uv)))
106 gc_marktv(g, uvval(uv)); 112 gc_marktv(g, uvval(uv));
107 } 113 }
@@ -196,7 +202,7 @@ static int gc_traverse_tab(global_State *g, GCtab *t)
196 for (i = 0; i <= hmask; i++) { 202 for (i = 0; i <= hmask; i++) {
197 Node *n = &node[i]; 203 Node *n = &node[i];
198 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ 204 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
199 lua_assert(!tvisnil(&n->key)); 205 lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
200 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); 206 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
201 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); 207 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
202 } 208 }
@@ -211,7 +217,8 @@ static void gc_traverse_func(global_State *g, GCfunc *fn)
211 gc_markobj(g, tabref(fn->c.env)); 217 gc_markobj(g, tabref(fn->c.env));
212 if (isluafunc(fn)) { 218 if (isluafunc(fn)) {
213 uint32_t i; 219 uint32_t i;
214 lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv); 220 lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
221 "function upvalues out of range");
215 gc_markobj(g, funcproto(fn)); 222 gc_markobj(g, funcproto(fn));
216 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ 223 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
217 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); 224 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
@@ -227,7 +234,7 @@ static void gc_traverse_func(global_State *g, GCfunc *fn)
227static void gc_marktrace(global_State *g, TraceNo traceno) 234static void gc_marktrace(global_State *g, TraceNo traceno)
228{ 235{
229 GCobj *o = obj2gco(traceref(G2J(g), traceno)); 236 GCobj *o = obj2gco(traceref(G2J(g), traceno));
230 lua_assert(traceno != G2J(g)->cur.traceno); 237 lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
231 if (iswhite(o)) { 238 if (iswhite(o)) {
232 white2gray(o); 239 white2gray(o);
233 setgcrefr(o->gch.gclist, g->gc.gray); 240 setgcrefr(o->gch.gclist, g->gc.gray);
@@ -244,6 +251,8 @@ static void gc_traverse_trace(global_State *g, GCtrace *T)
244 IRIns *ir = &T->ir[ref]; 251 IRIns *ir = &T->ir[ref];
245 if (ir->o == IR_KGC) 252 if (ir->o == IR_KGC)
246 gc_markobj(g, ir_kgc(ir)); 253 gc_markobj(g, ir_kgc(ir));
254 if (irt_is64(ir->t) && ir->o != IR_KNULL)
255 ref++;
247 } 256 }
248 if (T->link) gc_marktrace(g, T->link); 257 if (T->link) gc_marktrace(g, T->link);
249 if (T->nextroot) gc_marktrace(g, T->nextroot); 258 if (T->nextroot) gc_marktrace(g, T->nextroot);
@@ -274,12 +283,12 @@ static MSize gc_traverse_frames(global_State *g, lua_State *th)
274{ 283{
275 TValue *frame, *top = th->top-1, *bot = tvref(th->stack); 284 TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
276 /* Note: extra vararg frame not skipped, marks function twice (harmless). */ 285 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
277 for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) { 286 for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
278 GCfunc *fn = frame_func(frame); 287 GCfunc *fn = frame_func(frame);
279 TValue *ftop = frame; 288 TValue *ftop = frame;
280 if (isluafunc(fn)) ftop += funcproto(fn)->framesize; 289 if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
281 if (ftop > top) top = ftop; 290 if (ftop > top) top = ftop;
282 gc_markobj(g, fn); /* Need to mark hidden function (or L). */ 291 if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */
283 } 292 }
284 top++; /* Correct bias of -1 (frame == base-1). */ 293 top++; /* Correct bias of -1 (frame == base-1). */
285 if (top > tvref(th->maxstack)) top = tvref(th->maxstack); 294 if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
@@ -290,7 +299,7 @@ static MSize gc_traverse_frames(global_State *g, lua_State *th)
290static void gc_traverse_thread(global_State *g, lua_State *th) 299static void gc_traverse_thread(global_State *g, lua_State *th)
291{ 300{
292 TValue *o, *top = th->top; 301 TValue *o, *top = th->top;
293 for (o = tvref(th->stack)+1; o < top; o++) 302 for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
294 gc_marktv(g, o); 303 gc_marktv(g, o);
295 if (g->gc.state == GCSatomic) { 304 if (g->gc.state == GCSatomic) {
296 top = tvref(th->stack) + th->stacksize; 305 top = tvref(th->stack) + th->stacksize;
@@ -306,7 +315,7 @@ static size_t propagatemark(global_State *g)
306{ 315{
307 GCobj *o = gcref(g->gc.gray); 316 GCobj *o = gcref(g->gc.gray);
308 int gct = o->gch.gct; 317 int gct = o->gch.gct;
309 lua_assert(isgray(o)); 318 lj_assertG(isgray(o), "propagation of non-gray object");
310 gray2black(o); 319 gray2black(o);
311 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ 320 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
312 if (LJ_LIKELY(gct == ~LJ_TTAB)) { 321 if (LJ_LIKELY(gct == ~LJ_TTAB)) {
@@ -338,7 +347,7 @@ static size_t propagatemark(global_State *g)
338 return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + 347 return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
339 T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); 348 T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
340#else 349#else
341 lua_assert(0); 350 lj_assertG(0, "bad GC type %d", gct);
342 return 0; 351 return 0;
343#endif 352#endif
344 } 353 }
@@ -355,15 +364,6 @@ static size_t gc_propagate_gray(global_State *g)
355 364
356/* -- Sweep phase --------------------------------------------------------- */ 365/* -- Sweep phase --------------------------------------------------------- */
357 366
358/* Try to shrink some common data structures. */
359static void gc_shrink(global_State *g, lua_State *L)
360{
361 if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
362 lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
363 if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
364 lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
365}
366
367/* Type of GC free functions. */ 367/* Type of GC free functions. */
368typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o); 368typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
369 369
@@ -389,7 +389,7 @@ static const GCFreeFunc gc_freefunc[] = {
389}; 389};
390 390
391/* Full sweep of a GC list. */ 391/* Full sweep of a GC list. */
392#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM) 392#define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0)
393 393
394/* Partial sweep of a GC list. */ 394/* Partial sweep of a GC list. */
395static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) 395static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
@@ -401,11 +401,13 @@ static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
401 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ 401 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
402 gc_fullsweep(g, &gco2th(o)->openupval); 402 gc_fullsweep(g, &gco2th(o)->openupval);
403 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ 403 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
404 lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED)); 404 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
405 "sweep of undead object");
405 makewhite(g, o); /* Value is alive, change to the current white. */ 406 makewhite(g, o); /* Value is alive, change to the current white. */
406 p = &o->gch.nextgc; 407 p = &o->gch.nextgc;
407 } else { /* Otherwise value is dead, free it. */ 408 } else { /* Otherwise value is dead, free it. */
408 lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED); 409 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
410 "sweep of unlive object");
409 setgcrefr(*p, o->gch.nextgc); 411 setgcrefr(*p, o->gch.nextgc);
410 if (o == gcref(g->gc.root)) 412 if (o == gcref(g->gc.root))
411 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */ 413 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
@@ -415,6 +417,32 @@ static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
415 return p; 417 return p;
416} 418}
417 419
420/* Sweep one string interning table chain. Preserves hashalg bit. */
421static void gc_sweepstr(global_State *g, GCRef *chain)
422{
423 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
424 int ow = otherwhite(g);
425 uintptr_t u = gcrefu(*chain);
426 GCRef q;
427 GCRef *p = &q;
428 GCobj *o;
429 setgcrefp(q, (u & ~(uintptr_t)1));
430 while ((o = gcref(*p)) != NULL) {
431 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
432 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
433 "sweep of undead string");
434 makewhite(g, o); /* String is alive, change to the current white. */
435 p = &o->gch.nextgc;
436 } else { /* Otherwise string is dead, free it. */
437 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
438 "sweep of unlive string");
439 setgcrefr(*p, o->gch.nextgc);
440 lj_str_free(g, gco2str(o));
441 }
442 }
443 setgcrefp(*chain, (gcrefu(q) | (u & 1)));
444}
445
418/* Check whether we can clear a key or a value slot from a table. */ 446/* Check whether we can clear a key or a value slot from a table. */
419static int gc_mayclear(cTValue *o, int val) 447static int gc_mayclear(cTValue *o, int val)
420{ 448{
@@ -432,11 +460,12 @@ static int gc_mayclear(cTValue *o, int val)
432} 460}
433 461
434/* Clear collected entries from weak tables. */ 462/* Clear collected entries from weak tables. */
435static void gc_clearweak(GCobj *o) 463static void gc_clearweak(global_State *g, GCobj *o)
436{ 464{
465 UNUSED(g);
437 while (o) { 466 while (o) {
438 GCtab *t = gco2tab(o); 467 GCtab *t = gco2tab(o);
439 lua_assert((t->marked & LJ_GC_WEAK)); 468 lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
440 if ((t->marked & LJ_GC_WEAKVAL)) { 469 if ((t->marked & LJ_GC_WEAKVAL)) {
441 MSize i, asize = t->asize; 470 MSize i, asize = t->asize;
442 for (i = 0; i < asize; i++) { 471 for (i = 0; i < asize; i++) {
@@ -467,18 +496,21 @@ static void gc_call_finalizer(global_State *g, lua_State *L,
467{ 496{
468 /* Save and restore lots of state around the __gc callback. */ 497 /* Save and restore lots of state around the __gc callback. */
469 uint8_t oldh = hook_save(g); 498 uint8_t oldh = hook_save(g);
470 MSize oldt = g->gc.threshold; 499 GCSize oldt = g->gc.threshold;
471 int errcode; 500 int errcode;
472 TValue *top; 501 TValue *top;
473 lj_trace_abort(g); 502 lj_trace_abort(g);
474 top = L->top;
475 L->top = top+2;
476 hook_entergc(g); /* Disable hooks and new traces during __gc. */ 503 hook_entergc(g); /* Disable hooks and new traces during __gc. */
504 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
477 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */ 505 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
478 copyTV(L, top, mo); 506 top = L->top;
479 setgcV(L, top+1, o, ~o->gch.gct); 507 copyTV(L, top++, mo);
480 errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */ 508 if (LJ_FR2) setnilV(top++);
509 setgcV(L, top, o, ~o->gch.gct);
510 L->top = top+1;
511 errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */
481 hook_restore(g, oldh); 512 hook_restore(g, oldh);
513 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
482 g->gc.threshold = oldt; /* Restore GC threshold. */ 514 g->gc.threshold = oldt; /* Restore GC threshold. */
483 if (errcode) 515 if (errcode)
484 lj_err_throw(L, errcode); /* Propagate errors. */ 516 lj_err_throw(L, errcode); /* Propagate errors. */
@@ -490,7 +522,7 @@ static void gc_finalize(lua_State *L)
490 global_State *g = G(L); 522 global_State *g = G(L);
491 GCobj *o = gcnext(gcref(g->gc.mmudata)); 523 GCobj *o = gcnext(gcref(g->gc.mmudata));
492 cTValue *mo; 524 cTValue *mo;
493 lua_assert(gcref(g->jit_L) == NULL); /* Must not be called on trace. */ 525 lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
494 /* Unchain from list of userdata to be finalized. */ 526 /* Unchain from list of userdata to be finalized. */
495 if (o == gcref(g->gc.mmudata)) 527 if (o == gcref(g->gc.mmudata))
496 setgcrefnull(g->gc.mmudata); 528 setgcrefnull(g->gc.mmudata);
@@ -565,9 +597,9 @@ void lj_gc_freeall(global_State *g)
565 /* Free everything, except super-fixed objects (the main thread). */ 597 /* Free everything, except super-fixed objects (the main thread). */
566 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED; 598 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
567 gc_fullsweep(g, &g->gc.root); 599 gc_fullsweep(g, &g->gc.root);
568 strmask = g->strmask; 600 strmask = g->str.mask;
569 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */ 601 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
570 gc_fullsweep(g, &g->strhash[i]); 602 gc_sweepstr(g, &g->str.tab[i]);
571} 603}
572 604
573/* -- Collector ----------------------------------------------------------- */ 605/* -- Collector ----------------------------------------------------------- */
@@ -582,7 +614,7 @@ static void atomic(global_State *g, lua_State *L)
582 614
583 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ 615 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
584 setgcrefnull(g->gc.weak); 616 setgcrefnull(g->gc.weak);
585 lua_assert(!iswhite(obj2gco(mainthread(g)))); 617 lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
586 gc_markobj(g, L); /* Mark running thread. */ 618 gc_markobj(g, L); /* Mark running thread. */
587 gc_traverse_curtrace(g); /* Traverse current trace. */ 619 gc_traverse_curtrace(g); /* Traverse current trace. */
588 gc_mark_gcroot(g); /* Mark GC roots (again). */ 620 gc_mark_gcroot(g); /* Mark GC roots (again). */
@@ -597,13 +629,15 @@ static void atomic(global_State *g, lua_State *L)
597 udsize += gc_propagate_gray(g); /* And propagate the marks. */ 629 udsize += gc_propagate_gray(g); /* And propagate the marks. */
598 630
599 /* All marking done, clear weak tables. */ 631 /* All marking done, clear weak tables. */
600 gc_clearweak(gcref(g->gc.weak)); 632 gc_clearweak(g, gcref(g->gc.weak));
633
634 lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */
601 635
602 /* Prepare for sweep phase. */ 636 /* Prepare for sweep phase. */
603 g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */ 637 g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
604 g->strempty.marked = g->gc.currentwhite; 638 g->strempty.marked = g->gc.currentwhite;
605 setmref(g->gc.sweep, &g->gc.root); 639 setmref(g->gc.sweep, &g->gc.root);
606 g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */ 640 g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */
607} 641}
608 642
609/* GC state machine. Returns a cost estimate for each step performed. */ 643/* GC state machine. Returns a cost estimate for each step performed. */
@@ -620,28 +654,29 @@ static size_t gc_onestep(lua_State *L)
620 g->gc.state = GCSatomic; /* End of mark phase. */ 654 g->gc.state = GCSatomic; /* End of mark phase. */
621 return 0; 655 return 0;
622 case GCSatomic: 656 case GCSatomic:
623 if (gcref(g->jit_L)) /* Don't run atomic phase on trace. */ 657 if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */
624 return LJ_MAX_MEM; 658 return LJ_MAX_MEM;
625 atomic(g, L); 659 atomic(g, L);
626 g->gc.state = GCSsweepstring; /* Start of sweep phase. */ 660 g->gc.state = GCSsweepstring; /* Start of sweep phase. */
627 g->gc.sweepstr = 0; 661 g->gc.sweepstr = 0;
628 return 0; 662 return 0;
629 case GCSsweepstring: { 663 case GCSsweepstring: {
630 MSize old = g->gc.total; 664 GCSize old = g->gc.total;
631 gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ 665 gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]); /* Sweep one chain. */
632 if (g->gc.sweepstr > g->strmask) 666 if (g->gc.sweepstr > g->str.mask)
633 g->gc.state = GCSsweep; /* All string hash chains sweeped. */ 667 g->gc.state = GCSsweep; /* All string hash chains sweeped. */
634 lua_assert(old >= g->gc.total); 668 lj_assertG(old >= g->gc.total, "sweep increased memory");
635 g->gc.estimate -= old - g->gc.total; 669 g->gc.estimate -= old - g->gc.total;
636 return GCSWEEPCOST; 670 return GCSWEEPCOST;
637 } 671 }
638 case GCSsweep: { 672 case GCSsweep: {
639 MSize old = g->gc.total; 673 GCSize old = g->gc.total;
640 setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); 674 setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
641 lua_assert(old >= g->gc.total); 675 lj_assertG(old >= g->gc.total, "sweep increased memory");
642 g->gc.estimate -= old - g->gc.total; 676 g->gc.estimate -= old - g->gc.total;
643 if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { 677 if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
644 gc_shrink(g, L); 678 if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
679 lj_str_resize(L, g->str.mask >> 1); /* Shrink string table. */
645 if (gcref(g->gc.mmudata)) { /* Need any finalizations? */ 680 if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
646 g->gc.state = GCSfinalize; 681 g->gc.state = GCSfinalize;
647#if LJ_HASFFI 682#if LJ_HASFFI
@@ -656,7 +691,7 @@ static size_t gc_onestep(lua_State *L)
656 } 691 }
657 case GCSfinalize: 692 case GCSfinalize:
658 if (gcref(g->gc.mmudata) != NULL) { 693 if (gcref(g->gc.mmudata) != NULL) {
659 if (gcref(g->jit_L)) /* Don't call finalizers on trace. */ 694 if (tvref(g->jit_base)) /* Don't call finalizers on trace. */
660 return LJ_MAX_MEM; 695 return LJ_MAX_MEM;
661 gc_finalize(L); /* Finalize one userdata object. */ 696 gc_finalize(L); /* Finalize one userdata object. */
662 if (g->gc.estimate > GCFINALIZECOST) 697 if (g->gc.estimate > GCFINALIZECOST)
@@ -670,7 +705,7 @@ static size_t gc_onestep(lua_State *L)
670 g->gc.debt = 0; 705 g->gc.debt = 0;
671 return 0; 706 return 0;
672 default: 707 default:
673 lua_assert(0); 708 lj_assertG(0, "bad GC state");
674 return 0; 709 return 0;
675 } 710 }
676} 711}
@@ -679,7 +714,7 @@ static size_t gc_onestep(lua_State *L)
679int LJ_FASTCALL lj_gc_step(lua_State *L) 714int LJ_FASTCALL lj_gc_step(lua_State *L)
680{ 715{
681 global_State *g = G(L); 716 global_State *g = G(L);
682 MSize lim; 717 GCSize lim;
683 int32_t ostate = g->vmstate; 718 int32_t ostate = g->vmstate;
684 setvmstate(g, GC); 719 setvmstate(g, GC);
685 lim = (GCSTEPSIZE/100) * g->gc.stepmul; 720 lim = (GCSTEPSIZE/100) * g->gc.stepmul;
@@ -688,13 +723,13 @@ int LJ_FASTCALL lj_gc_step(lua_State *L)
688 if (g->gc.total > g->gc.threshold) 723 if (g->gc.total > g->gc.threshold)
689 g->gc.debt += g->gc.total - g->gc.threshold; 724 g->gc.debt += g->gc.total - g->gc.threshold;
690 do { 725 do {
691 lim -= (MSize)gc_onestep(L); 726 lim -= (GCSize)gc_onestep(L);
692 if (g->gc.state == GCSpause) { 727 if (g->gc.state == GCSpause) {
693 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; 728 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
694 g->vmstate = ostate; 729 g->vmstate = ostate;
695 return 1; /* Finished a GC cycle. */ 730 return 1; /* Finished a GC cycle. */
696 } 731 }
697 } while ((int32_t)lim > 0); 732 } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
698 if (g->gc.debt < GCSTEPSIZE) { 733 if (g->gc.debt < GCSTEPSIZE) {
699 g->gc.threshold = g->gc.total + GCSTEPSIZE; 734 g->gc.threshold = g->gc.total + GCSTEPSIZE;
700 g->vmstate = ostate; 735 g->vmstate = ostate;
@@ -718,8 +753,8 @@ void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
718/* Perform multiple GC steps. Called from JIT-compiled code. */ 753/* Perform multiple GC steps. Called from JIT-compiled code. */
719int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps) 754int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
720{ 755{
721 lua_State *L = gco2th(gcref(g->jit_L)); 756 lua_State *L = gco2th(gcref(g->cur_L));
722 L->base = mref(G(L)->jit_base, TValue); 757 L->base = tvref(G(L)->jit_base);
723 L->top = curr_topL(L); 758 L->top = curr_topL(L);
724 while (steps-- > 0 && lj_gc_step(L) == 0) 759 while (steps-- > 0 && lj_gc_step(L) == 0)
725 ; 760 ;
@@ -744,7 +779,8 @@ void lj_gc_fullgc(lua_State *L)
744 } 779 }
745 while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) 780 while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
746 gc_onestep(L); /* Finish sweep. */ 781 gc_onestep(L); /* Finish sweep. */
747 lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause); 782 lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
783 "bad GC state");
748 /* Now perform a full GC. */ 784 /* Now perform a full GC. */
749 g->gc.state = GCSpause; 785 g->gc.state = GCSpause;
750 do { gc_onestep(L); } while (g->gc.state != GCSpause); 786 do { gc_onestep(L); } while (g->gc.state != GCSpause);
@@ -757,9 +793,11 @@ void lj_gc_fullgc(lua_State *L)
757/* Move the GC propagation frontier forward. */ 793/* Move the GC propagation frontier forward. */
758void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) 794void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
759{ 795{
760 lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); 796 lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
761 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); 797 "bad object states for forward barrier");
762 lua_assert(o->gch.gct != ~LJ_TTAB); 798 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
799 "bad GC state");
800 lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
763 /* Preserve invariant during propagation. Otherwise it doesn't matter. */ 801 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
764 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) 802 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
765 gc_mark(g, v); /* Move frontier forward. */ 803 gc_mark(g, v); /* Move frontier forward. */
@@ -796,7 +834,8 @@ void lj_gc_closeuv(global_State *g, GCupval *uv)
796 lj_gc_barrierf(g, o, gcV(&uv->tv)); 834 lj_gc_barrierf(g, o, gcV(&uv->tv));
797 } else { 835 } else {
798 makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ 836 makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
799 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); 837 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
838 "bad GC state");
800 } 839 }
801 } 840 }
802} 841}
@@ -813,27 +852,29 @@ void lj_gc_barriertrace(global_State *g, uint32_t traceno)
813/* -- Allocator ----------------------------------------------------------- */ 852/* -- Allocator ----------------------------------------------------------- */
814 853
815/* Call pluggable memory allocator to allocate or resize a fragment. */ 854/* Call pluggable memory allocator to allocate or resize a fragment. */
816void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz) 855void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
817{ 856{
818 global_State *g = G(L); 857 global_State *g = G(L);
819 lua_assert((osz == 0) == (p == NULL)); 858 lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
820 p = g->allocf(g->allocd, p, osz, nsz); 859 p = g->allocf(g->allocd, p, osz, nsz);
821 if (p == NULL && nsz > 0) 860 if (p == NULL && nsz > 0)
822 lj_err_mem(L); 861 lj_err_mem(L);
823 lua_assert((nsz == 0) == (p == NULL)); 862 lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
824 lua_assert(checkptr32(p)); 863 lj_assertG(checkptrGC(p),
864 "allocated memory address %p outside required range", p);
825 g->gc.total = (g->gc.total - osz) + nsz; 865 g->gc.total = (g->gc.total - osz) + nsz;
826 return p; 866 return p;
827} 867}
828 868
829/* Allocate new GC object and link it to the root set. */ 869/* Allocate new GC object and link it to the root set. */
830void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size) 870void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
831{ 871{
832 global_State *g = G(L); 872 global_State *g = G(L);
833 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); 873 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
834 if (o == NULL) 874 if (o == NULL)
835 lj_err_mem(L); 875 lj_err_mem(L);
836 lua_assert(checkptr32(o)); 876 lj_assertG(checkptrGC(o),
877 "allocated memory address %p outside required range", o);
837 g->gc.total += size; 878 g->gc.total += size;
838 setgcrefr(o->gch.nextgc, g->gc.root); 879 setgcrefr(o->gch.nextgc, g->gc.root);
839 setgcref(g->gc.root, o); 880 setgcref(g->gc.root, o);
diff --git a/src/lj_gc.h b/src/lj_gc.h
index e42dbcf0..6fc88cf9 100644
--- a/src/lj_gc.h
+++ b/src/lj_gc.h
@@ -81,8 +81,10 @@ LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
81static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) 81static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
82{ 82{
83 GCobj *o = obj2gco(t); 83 GCobj *o = obj2gco(t);
84 lua_assert(isblack(o) && !isdead(g, o)); 84 lj_assertG(isblack(o) && !isdead(g, o),
85 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); 85 "bad object states for backward barrier");
86 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
87 "bad GC state");
86 black2gray(o); 88 black2gray(o);
87 setgcrefr(t->gclist, g->gc.grayagain); 89 setgcrefr(t->gclist, g->gc.grayagain);
88 setgcref(g->gc.grayagain, o); 90 setgcref(g->gc.grayagain, o);
@@ -107,8 +109,8 @@ static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
107 lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); } 109 lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
108 110
109/* Allocator. */ 111/* Allocator. */
110LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz); 112LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz);
111LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size); 113LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size);
112LJ_FUNC void *lj_mem_grow(lua_State *L, void *p, 114LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
113 MSize *szp, MSize lim, MSize esz); 115 MSize *szp, MSize lim, MSize esz);
114 116
@@ -116,13 +118,13 @@ LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
116 118
117static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize) 119static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
118{ 120{
119 g->gc.total -= (MSize)osize; 121 g->gc.total -= (GCSize)osize;
120 g->allocf(g->allocd, p, osize, 0); 122 g->allocf(g->allocd, p, osize, 0);
121} 123}
122 124
123#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t)))) 125#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (GCSize)((n)*sizeof(t))))
124#define lj_mem_reallocvec(L, p, on, n, t) \ 126#define lj_mem_reallocvec(L, p, on, n, t) \
125 ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t)))) 127 ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (GCSize)((n)*sizeof(t))))
126#define lj_mem_growvec(L, p, n, m, t) \ 128#define lj_mem_growvec(L, p, n, m, t) \
127 ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t))) 129 ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
128#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t)) 130#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
diff --git a/src/lj_gdbjit.c b/src/lj_gdbjit.c
index c2a9e901..5b9fe0ad 100644
--- a/src/lj_gdbjit.c
+++ b/src/lj_gdbjit.c
@@ -14,6 +14,8 @@
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_debug.h" 15#include "lj_debug.h"
16#include "lj_frame.h" 16#include "lj_frame.h"
17#include "lj_buf.h"
18#include "lj_strfmt.h"
17#include "lj_jit.h" 19#include "lj_jit.h"
18#include "lj_dispatch.h" 20#include "lj_dispatch.h"
19 21
@@ -294,6 +296,9 @@ enum {
294#elif LJ_TARGET_ARM 296#elif LJ_TARGET_ARM
295 DW_REG_SP = 13, 297 DW_REG_SP = 13,
296 DW_REG_RA = 14, 298 DW_REG_RA = 14,
299#elif LJ_TARGET_ARM64
300 DW_REG_SP = 31,
301 DW_REG_RA = 30,
297#elif LJ_TARGET_PPC 302#elif LJ_TARGET_PPC
298 DW_REG_SP = 1, 303 DW_REG_SP = 1,
299 DW_REG_RA = 65, 304 DW_REG_RA = 65,
@@ -358,7 +363,7 @@ static const ELFheader elfhdr_template = {
358 .eosabi = 12, 363 .eosabi = 12,
359#elif defined(__DragonFly__) 364#elif defined(__DragonFly__)
360 .eosabi = 0, 365 .eosabi = 0,
361#elif (defined(__sun__) && defined(__svr4__)) 366#elif LJ_TARGET_SOLARIS
362 .eosabi = 6, 367 .eosabi = 6,
363#else 368#else
364 .eosabi = 0, 369 .eosabi = 0,
@@ -372,6 +377,8 @@ static const ELFheader elfhdr_template = {
372 .machine = 62, 377 .machine = 62,
373#elif LJ_TARGET_ARM 378#elif LJ_TARGET_ARM
374 .machine = 40, 379 .machine = 40,
380#elif LJ_TARGET_ARM64
381 .machine = 183,
375#elif LJ_TARGET_PPC 382#elif LJ_TARGET_PPC
376 .machine = 20, 383 .machine = 20,
377#elif LJ_TARGET_MIPS 384#elif LJ_TARGET_MIPS
@@ -428,16 +435,6 @@ static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n)
428 *ctx->p++ = '0' + n; 435 *ctx->p++ = '0' + n;
429} 436}
430 437
431/* Add a ULEB128 value. */
432static void gdbjit_uleb128(GDBJITctx *ctx, uint32_t v)
433{
434 uint8_t *p = ctx->p;
435 for (; v >= 0x80; v >>= 7)
436 *p++ = (uint8_t)((v & 0x7f) | 0x80);
437 *p++ = (uint8_t)v;
438 ctx->p = p;
439}
440
441/* Add a SLEB128 value. */ 438/* Add a SLEB128 value. */
442static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v) 439static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
443{ 440{
@@ -454,7 +451,7 @@ static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
454#define DU16(x) (*(uint16_t *)p = (x), p += 2) 451#define DU16(x) (*(uint16_t *)p = (x), p += 2)
455#define DU32(x) (*(uint32_t *)p = (x), p += 4) 452#define DU32(x) (*(uint32_t *)p = (x), p += 4)
456#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t)) 453#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t))
457#define DUV(x) (ctx->p = p, gdbjit_uleb128(ctx, (x)), p = ctx->p) 454#define DUV(x) (p = (uint8_t *)lj_strfmt_wuleb128((char *)p, (x)))
458#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p) 455#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p)
459#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p) 456#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p)
460#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop 457#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop
@@ -564,13 +561,20 @@ static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx)
564 DB(DW_CFA_offset|DW_REG_15); DUV(4); 561 DB(DW_CFA_offset|DW_REG_15); DUV(4);
565 DB(DW_CFA_offset|DW_REG_14); DUV(5); 562 DB(DW_CFA_offset|DW_REG_14); DUV(5);
566 /* Extra registers saved for JIT-compiled code. */ 563 /* Extra registers saved for JIT-compiled code. */
567 DB(DW_CFA_offset|DW_REG_13); DUV(9); 564 DB(DW_CFA_offset|DW_REG_13); DUV(LJ_GC64 ? 10 : 9);
568 DB(DW_CFA_offset|DW_REG_12); DUV(10); 565 DB(DW_CFA_offset|DW_REG_12); DUV(LJ_GC64 ? 11 : 10);
569#elif LJ_TARGET_ARM 566#elif LJ_TARGET_ARM
570 { 567 {
571 int i; 568 int i;
572 for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); } 569 for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); }
573 } 570 }
571#elif LJ_TARGET_ARM64
572 {
573 int i;
574 DB(DW_CFA_offset|31); DUV(2);
575 for (i = 28; i >= 19; i--) { DB(DW_CFA_offset|i); DUV(3+(28-i)); }
576 for (i = 15; i >= 8; i--) { DB(DW_CFA_offset|32|i); DUV(28-i); }
577 }
574#elif LJ_TARGET_PPC 578#elif LJ_TARGET_PPC
575 { 579 {
576 int i; 580 int i;
@@ -720,13 +724,27 @@ static void gdbjit_buildobj(GDBJITctx *ctx)
720 SECTALIGN(ctx->p, sizeof(uintptr_t)); 724 SECTALIGN(ctx->p, sizeof(uintptr_t));
721 gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); 725 gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe);
722 ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); 726 ctx->objsize = (size_t)((char *)ctx->p - (char *)obj);
723 lua_assert(ctx->objsize < sizeof(GDBJITobj)); 727 lj_assertX(ctx->objsize < sizeof(GDBJITobj), "GDBJITobj overflow");
724} 728}
725 729
726#undef SECTALIGN 730#undef SECTALIGN
727 731
728/* -- Interface to GDB JIT API -------------------------------------------- */ 732/* -- Interface to GDB JIT API -------------------------------------------- */
729 733
734static int gdbjit_lock;
735
736static void gdbjit_lock_acquire()
737{
738 while (__sync_lock_test_and_set(&gdbjit_lock, 1)) {
739 /* Just spin; futexes or pthreads aren't worth the portability cost. */
740 }
741}
742
743static void gdbjit_lock_release()
744{
745 __sync_lock_release(&gdbjit_lock);
746}
747
730/* Add new entry to GDB JIT symbol chain. */ 748/* Add new entry to GDB JIT symbol chain. */
731static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx) 749static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
732{ 750{
@@ -738,6 +756,7 @@ static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
738 ctx->T->gdbjit_entry = (void *)eo; 756 ctx->T->gdbjit_entry = (void *)eo;
739 /* Link new entry to chain and register it. */ 757 /* Link new entry to chain and register it. */
740 eo->entry.prev_entry = NULL; 758 eo->entry.prev_entry = NULL;
759 gdbjit_lock_acquire();
741 eo->entry.next_entry = __jit_debug_descriptor.first_entry; 760 eo->entry.next_entry = __jit_debug_descriptor.first_entry;
742 if (eo->entry.next_entry) 761 if (eo->entry.next_entry)
743 eo->entry.next_entry->prev_entry = &eo->entry; 762 eo->entry.next_entry->prev_entry = &eo->entry;
@@ -747,6 +766,7 @@ static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
747 __jit_debug_descriptor.relevant_entry = &eo->entry; 766 __jit_debug_descriptor.relevant_entry = &eo->entry;
748 __jit_debug_descriptor.action_flag = GDBJIT_REGISTER; 767 __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
749 __jit_debug_register_code(); 768 __jit_debug_register_code();
769 gdbjit_lock_release();
750} 770}
751 771
752/* Add debug info for newly compiled trace and notify GDB. */ 772/* Add debug info for newly compiled trace and notify GDB. */
@@ -762,7 +782,8 @@ void lj_gdbjit_addtrace(jit_State *J, GCtrace *T)
762 ctx.spadjp = CFRAME_SIZE_JIT + 782 ctx.spadjp = CFRAME_SIZE_JIT +
763 (MSize)(parent ? traceref(J, parent)->spadjust : 0); 783 (MSize)(parent ? traceref(J, parent)->spadjust : 0);
764 ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; 784 ctx.spadj = CFRAME_SIZE_JIT + T->spadjust;
765 lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); 785 lj_assertJ(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
786 "start PC out of range");
766 ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); 787 ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
767 ctx.filename = proto_chunknamestr(pt); 788 ctx.filename = proto_chunknamestr(pt);
768 if (*ctx.filename == '@' || *ctx.filename == '=') 789 if (*ctx.filename == '@' || *ctx.filename == '=')
@@ -778,6 +799,7 @@ void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
778{ 799{
779 GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry; 800 GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry;
780 if (eo) { 801 if (eo) {
802 gdbjit_lock_acquire();
781 if (eo->entry.prev_entry) 803 if (eo->entry.prev_entry)
782 eo->entry.prev_entry->next_entry = eo->entry.next_entry; 804 eo->entry.prev_entry->next_entry = eo->entry.next_entry;
783 else 805 else
@@ -787,6 +809,7 @@ void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
787 __jit_debug_descriptor.relevant_entry = &eo->entry; 809 __jit_debug_descriptor.relevant_entry = &eo->entry;
788 __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER; 810 __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER;
789 __jit_debug_register_code(); 811 __jit_debug_register_code();
812 gdbjit_lock_release();
790 lj_mem_free(J2G(J), eo, eo->sz); 813 lj_mem_free(J2G(J), eo, eo->sz);
791 } 814 }
792} 815}
diff --git a/src/lj_ir.c b/src/lj_ir.c
index 38f289cb..b5e94eb8 100644
--- a/src/lj_ir.c
+++ b/src/lj_ir.c
@@ -15,6 +15,7 @@
15#if LJ_HASJIT 15#if LJ_HASJIT
16 16
17#include "lj_gc.h" 17#include "lj_gc.h"
18#include "lj_buf.h"
18#include "lj_str.h" 19#include "lj_str.h"
19#include "lj_tab.h" 20#include "lj_tab.h"
20#include "lj_ir.h" 21#include "lj_ir.h"
@@ -29,14 +30,15 @@
29#endif 30#endif
30#include "lj_vm.h" 31#include "lj_vm.h"
31#include "lj_strscan.h" 32#include "lj_strscan.h"
32#include "lj_lib.h" 33#include "lj_strfmt.h"
34#include "lj_prng.h"
33 35
34/* Some local macros to save typing. Undef'd at the end. */ 36/* Some local macros to save typing. Undef'd at the end. */
35#define IR(ref) (&J->cur.ir[(ref)]) 37#define IR(ref) (&J->cur.ir[(ref)])
36#define fins (&J->fold.ins) 38#define fins (&J->fold.ins)
37 39
38/* Pass IR on to next optimization in chain (FOLD). */ 40/* Pass IR on to next optimization in chain (FOLD). */
39#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) 41#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
40 42
41/* -- IR tables ----------------------------------------------------------- */ 43/* -- IR tables ----------------------------------------------------------- */
42 44
@@ -88,8 +90,9 @@ static void lj_ir_growbot(jit_State *J)
88{ 90{
89 IRIns *baseir = J->irbuf + J->irbotlim; 91 IRIns *baseir = J->irbuf + J->irbotlim;
90 MSize szins = J->irtoplim - J->irbotlim; 92 MSize szins = J->irtoplim - J->irbotlim;
91 lua_assert(szins != 0); 93 lj_assertJ(szins != 0, "zero IR size");
92 lua_assert(J->cur.nk == J->irbotlim); 94 lj_assertJ(J->cur.nk == J->irbotlim || J->cur.nk-1 == J->irbotlim,
95 "unexpected IR growth");
93 if (J->cur.nins + (szins >> 1) < J->irtoplim) { 96 if (J->cur.nins + (szins >> 1) < J->irtoplim) {
94 /* More than half of the buffer is free on top: shift up by a quarter. */ 97 /* More than half of the buffer is free on top: shift up by a quarter. */
95 MSize ofs = szins >> 2; 98 MSize ofs = szins >> 2;
@@ -143,6 +146,17 @@ TRef lj_ir_call(jit_State *J, IRCallID id, ...)
143 return emitir(CCI_OPTYPE(ci), tr, id); 146 return emitir(CCI_OPTYPE(ci), tr, id);
144} 147}
145 148
149/* Load field of type t from GG_State + offset. Must be 32 bit aligned. */
150LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs)
151{
152 lj_assertJ((ofs & 3) == 0, "unaligned GG_State field offset");
153 ofs >>= 2;
154 lj_assertJ(ofs >= IRFL__MAX && ofs <= 0x3ff,
155 "GG_State field offset breaks 10 bit FOLD key limit");
156 lj_ir_set(J, IRT(IR_FLOAD, t), REF_NIL, ofs);
157 return lj_opt_fold(J);
158}
159
146/* -- Interning of constants ---------------------------------------------- */ 160/* -- Interning of constants ---------------------------------------------- */
147 161
148/* 162/*
@@ -163,6 +177,24 @@ static LJ_AINLINE IRRef ir_nextk(jit_State *J)
163 return ref; 177 return ref;
164} 178}
165 179
180/* Get ref of next 64 bit IR constant and optionally grow IR.
181** Note: this may invalidate all IRIns *!
182*/
183static LJ_AINLINE IRRef ir_nextk64(jit_State *J)
184{
185 IRRef ref = J->cur.nk - 2;
186 lj_assertJ(J->state != LJ_TRACE_ASM, "bad JIT state");
187 if (LJ_UNLIKELY(ref < J->irbotlim)) lj_ir_growbot(J);
188 J->cur.nk = ref;
189 return ref;
190}
191
192#if LJ_GC64
193#define ir_nextkgc ir_nextk64
194#else
195#define ir_nextkgc ir_nextk
196#endif
197
166/* Intern int32_t constant. */ 198/* Intern int32_t constant. */
167TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k) 199TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
168{ 200{
@@ -182,79 +214,21 @@ found:
182 return TREF(ref, IRT_INT); 214 return TREF(ref, IRT_INT);
183} 215}
184 216
185/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the 217/* Intern 64 bit constant, given by its 64 bit pattern. */
186** 64 bit constant. The constants themselves are stored in a chained array 218TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64)
187** and shared across traces.
188**
189** Rationale for choosing this data structure:
190** - The address of the constants is embedded in the generated machine code
191** and must never move. A resizable array or hash table wouldn't work.
192** - Most apps need very few non-32 bit integer constants (less than a dozen).
193** - Linear search is hard to beat in terms of speed and low complexity.
194*/
195typedef struct K64Array {
196 MRef next; /* Pointer to next list. */
197 MSize numk; /* Number of used elements in this array. */
198 TValue k[LJ_MIN_K64SZ]; /* Array of constants. */
199} K64Array;
200
201/* Free all chained arrays. */
202void lj_ir_k64_freeall(jit_State *J)
203{
204 K64Array *k;
205 for (k = mref(J->k64, K64Array); k; ) {
206 K64Array *next = mref(k->next, K64Array);
207 lj_mem_free(J2G(J), k, sizeof(K64Array));
208 k = next;
209 }
210}
211
212/* Find 64 bit constant in chained array or add it. */
213cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
214{
215 K64Array *k, *kp = NULL;
216 TValue *ntv;
217 MSize idx;
218 /* Search for the constant in the whole chain of arrays. */
219 for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
220 kp = k; /* Remember previous element in list. */
221 for (idx = 0; idx < k->numk; idx++) { /* Search one array. */
222 TValue *tv = &k->k[idx];
223 if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */
224 return tv;
225 }
226 }
227 /* Constant was not found, need to add it. */
228 if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */
229 K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
230 setmref(kn->next, NULL);
231 kn->numk = 0;
232 if (kp)
233 setmref(kp->next, kn); /* Chain to the end of the list. */
234 else
235 setmref(J->k64, kn); /* Link first array. */
236 kp = kn;
237 }
238 ntv = &kp->k[kp->numk++]; /* Add to current array. */
239 ntv->u64 = u64;
240 return ntv;
241}
242
243/* Intern 64 bit constant, given by its address. */
244TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv)
245{ 219{
246 IRIns *ir, *cir = J->cur.ir; 220 IRIns *ir, *cir = J->cur.ir;
247 IRRef ref; 221 IRRef ref;
248 IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64; 222 IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
249 for (ref = J->chain[op]; ref; ref = cir[ref].prev) 223 for (ref = J->chain[op]; ref; ref = cir[ref].prev)
250 if (ir_k64(&cir[ref]) == tv) 224 if (ir_k64(&cir[ref])->u64 == u64)
251 goto found; 225 goto found;
252 ref = ir_nextk(J); 226 ref = ir_nextk64(J);
253 ir = IR(ref); 227 ir = IR(ref);
254 lua_assert(checkptr32(tv)); 228 ir[1].tv.u64 = u64;
255 setmref(ir->ptr, tv);
256 ir->t.irt = t; 229 ir->t.irt = t;
257 ir->o = op; 230 ir->o = op;
231 ir->op12 = 0;
258 ir->prev = J->chain[op]; 232 ir->prev = J->chain[op];
259 J->chain[op] = (IRRef1)ref; 233 J->chain[op] = (IRRef1)ref;
260found: 234found:
@@ -264,13 +238,13 @@ found:
264/* Intern FP constant, given by its 64 bit pattern. */ 238/* Intern FP constant, given by its 64 bit pattern. */
265TRef lj_ir_knum_u64(jit_State *J, uint64_t u64) 239TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
266{ 240{
267 return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64)); 241 return lj_ir_k64(J, IR_KNUM, u64);
268} 242}
269 243
270/* Intern 64 bit integer constant. */ 244/* Intern 64 bit integer constant. */
271TRef lj_ir_kint64(jit_State *J, uint64_t u64) 245TRef lj_ir_kint64(jit_State *J, uint64_t u64)
272{ 246{
273 return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64)); 247 return lj_ir_k64(J, IR_KINT64, u64);
274} 248}
275 249
276/* Check whether a number is int and return it. -0 is NOT considered an int. */ 250/* Check whether a number is int and return it. -0 is NOT considered an int. */
@@ -305,14 +279,15 @@ TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
305{ 279{
306 IRIns *ir, *cir = J->cur.ir; 280 IRIns *ir, *cir = J->cur.ir;
307 IRRef ref; 281 IRRef ref;
308 lua_assert(!isdead(J2G(J), o)); 282 lj_assertJ(!isdead(J2G(J), o), "interning of dead GC object");
309 for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) 283 for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
310 if (ir_kgc(&cir[ref]) == o) 284 if (ir_kgc(&cir[ref]) == o)
311 goto found; 285 goto found;
312 ref = ir_nextk(J); 286 ref = ir_nextkgc(J);
313 ir = IR(ref); 287 ir = IR(ref);
314 /* NOBARRIER: Current trace is a GC root. */ 288 /* NOBARRIER: Current trace is a GC root. */
315 setgcref(ir->gcr, o); 289 ir->op12 = 0;
290 setgcref(ir[LJ_GC64].gcr, o);
316 ir->t.irt = (uint8_t)t; 291 ir->t.irt = (uint8_t)t;
317 ir->o = IR_KGC; 292 ir->o = IR_KGC;
318 ir->prev = J->chain[IR_KGC]; 293 ir->prev = J->chain[IR_KGC];
@@ -321,24 +296,44 @@ found:
321 return TREF(ref, t); 296 return TREF(ref, t);
322} 297}
323 298
324/* Intern 32 bit pointer constant. */ 299/* Allocate GCtrace constant placeholder (no interning). */
300TRef lj_ir_ktrace(jit_State *J)
301{
302 IRRef ref = ir_nextkgc(J);
303 IRIns *ir = IR(ref);
304 lj_assertJ(irt_toitype_(IRT_P64) == LJ_TTRACE, "mismatched type mapping");
305 ir->t.irt = IRT_P64;
306 ir->o = LJ_GC64 ? IR_KNUM : IR_KNULL; /* Not IR_KGC yet, but same size. */
307 ir->op12 = 0;
308 ir->prev = 0;
309 return TREF(ref, IRT_P64);
310}
311
312/* Intern pointer constant. */
325TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) 313TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
326{ 314{
327 IRIns *ir, *cir = J->cur.ir; 315 IRIns *ir, *cir = J->cur.ir;
328 IRRef ref; 316 IRRef ref;
329 lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr); 317#if LJ_64 && !LJ_GC64
318 lj_assertJ((void *)(uintptr_t)u32ptr(ptr) == ptr, "out-of-range GC pointer");
319#endif
330 for (ref = J->chain[op]; ref; ref = cir[ref].prev) 320 for (ref = J->chain[op]; ref; ref = cir[ref].prev)
331 if (mref(cir[ref].ptr, void) == ptr) 321 if (ir_kptr(&cir[ref]) == ptr)
332 goto found; 322 goto found;
323#if LJ_GC64
324 ref = ir_nextk64(J);
325#else
333 ref = ir_nextk(J); 326 ref = ir_nextk(J);
327#endif
334 ir = IR(ref); 328 ir = IR(ref);
335 setmref(ir->ptr, ptr); 329 ir->op12 = 0;
336 ir->t.irt = IRT_P32; 330 setmref(ir[LJ_GC64].ptr, ptr);
331 ir->t.irt = IRT_PGC;
337 ir->o = op; 332 ir->o = op;
338 ir->prev = J->chain[op]; 333 ir->prev = J->chain[op];
339 J->chain[op] = (IRRef1)ref; 334 J->chain[op] = (IRRef1)ref;
340found: 335found:
341 return TREF(ref, IRT_P32); 336 return TREF(ref, IRT_PGC);
342} 337}
343 338
344/* Intern typed NULL constant. */ 339/* Intern typed NULL constant. */
@@ -367,7 +362,8 @@ TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
367 IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); 362 IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
368 IRRef ref; 363 IRRef ref;
369 /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */ 364 /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
370 lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot); 365 lj_assertJ(tref_isk(key) && slot == (IRRef)(IRRef1)slot,
366 "out-of-range key/slot");
371 for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) 367 for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
372 if (cir[ref].op12 == op12) 368 if (cir[ref].op12 == op12)
373 goto found; 369 goto found;
@@ -388,14 +384,13 @@ found:
388void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) 384void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
389{ 385{
390 UNUSED(L); 386 UNUSED(L);
391 lua_assert(ir->o != IR_KSLOT); /* Common mistake. */ 387 lj_assertL(ir->o != IR_KSLOT, "unexpected KSLOT"); /* Common mistake. */
392 switch (ir->o) { 388 switch (ir->o) {
393 case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break; 389 case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break;
394 case IR_KINT: setintV(tv, ir->i); break; 390 case IR_KINT: setintV(tv, ir->i); break;
395 case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break; 391 case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
396 case IR_KPTR: case IR_KKPTR: case IR_KNULL: 392 case IR_KPTR: case IR_KKPTR: setlightudV(tv, ir_kptr(ir)); break;
397 setlightudV(tv, mref(ir->ptr, void)); 393 case IR_KNULL: setlightudV(tv, NULL); break;
398 break;
399 case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break; 394 case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
400#if LJ_HASFFI 395#if LJ_HASFFI
401 case IR_KINT64: { 396 case IR_KINT64: {
@@ -405,7 +400,7 @@ void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
405 break; 400 break;
406 } 401 }
407#endif 402#endif
408 default: lua_assert(0); break; 403 default: lj_assertL(0, "bad IR constant op %d", ir->o); break;
409 } 404 }
410} 405}
411 406
@@ -443,7 +438,8 @@ TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
443 if (!tref_isstr(tr)) { 438 if (!tref_isstr(tr)) {
444 if (!tref_isnumber(tr)) 439 if (!tref_isnumber(tr))
445 lj_trace_err(J, LJ_TRERR_BADTYPE); 440 lj_trace_err(J, LJ_TRERR_BADTYPE);
446 tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0); 441 tr = emitir(IRT(IR_TOSTR, IRT_STR), tr,
442 tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT);
447 } 443 }
448 return tr; 444 return tr;
449} 445}
@@ -464,7 +460,7 @@ int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
464 case IR_UGE: return !(a < b); 460 case IR_UGE: return !(a < b);
465 case IR_ULE: return !(a > b); 461 case IR_ULE: return !(a > b);
466 case IR_UGT: return !(a <= b); 462 case IR_UGT: return !(a <= b);
467 default: lua_assert(0); return 0; 463 default: lj_assertX(0, "bad IR op %d", op); return 0;
468 } 464 }
469} 465}
470 466
@@ -477,7 +473,7 @@ int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
477 case IR_GE: return (res >= 0); 473 case IR_GE: return (res >= 0);
478 case IR_LE: return (res <= 0); 474 case IR_LE: return (res <= 0);
479 case IR_GT: return (res > 0); 475 case IR_GT: return (res > 0);
480 default: lua_assert(0); return 0; 476 default: lj_assertX(0, "bad IR op %d", op); return 0;
481 } 477 }
482} 478}
483 479
diff --git a/src/lj_ir.h b/src/lj_ir.h
index f91d6d0e..6116f7e5 100644
--- a/src/lj_ir.h
+++ b/src/lj_ir.h
@@ -40,6 +40,7 @@
40 _(USE, S , ref, ___) \ 40 _(USE, S , ref, ___) \
41 _(PHI, S , ref, ref) \ 41 _(PHI, S , ref, ref) \
42 _(RENAME, S , ref, lit) \ 42 _(RENAME, S , ref, lit) \
43 _(PROF, S , ___, ___) \
43 \ 44 \
44 /* Constants. */ \ 45 /* Constants. */ \
45 _(KPRI, N , ___, ___) \ 46 _(KPRI, N , ___, ___) \
@@ -74,7 +75,6 @@
74 _(NEG, N , ref, ref) \ 75 _(NEG, N , ref, ref) \
75 \ 76 \
76 _(ABS, N , ref, ref) \ 77 _(ABS, N , ref, ref) \
77 _(ATAN2, N , ref, ref) \
78 _(LDEXP, N , ref, ref) \ 78 _(LDEXP, N , ref, ref) \
79 _(MIN, C , ref, ref) \ 79 _(MIN, C , ref, ref) \
80 _(MAX, C , ref, ref) \ 80 _(MAX, C , ref, ref) \
@@ -96,6 +96,7 @@
96 _(UREFC, LW, ref, lit) \ 96 _(UREFC, LW, ref, lit) \
97 _(FREF, R , ref, lit) \ 97 _(FREF, R , ref, lit) \
98 _(STRREF, N , ref, ref) \ 98 _(STRREF, N , ref, ref) \
99 _(LREF, L , ___, ___) \
99 \ 100 \
100 /* Loads and Stores. These must be in the same order. */ \ 101 /* Loads and Stores. These must be in the same order. */ \
101 _(ALOAD, L , ref, ___) \ 102 _(ALOAD, L , ref, ___) \
@@ -105,6 +106,7 @@
105 _(XLOAD, L , ref, lit) \ 106 _(XLOAD, L , ref, lit) \
106 _(SLOAD, L , lit, lit) \ 107 _(SLOAD, L , lit, lit) \
107 _(VLOAD, L , ref, ___) \ 108 _(VLOAD, L , ref, ___) \
109 _(ALEN, L , ref, ref) \
108 \ 110 \
109 _(ASTORE, S , ref, ref) \ 111 _(ASTORE, S , ref, ref) \
110 _(HSTORE, S , ref, ref) \ 112 _(HSTORE, S , ref, ref) \
@@ -120,6 +122,11 @@
120 _(CNEW, AW, ref, ref) \ 122 _(CNEW, AW, ref, ref) \
121 _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \ 123 _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
122 \ 124 \
125 /* Buffer operations. */ \
126 _(BUFHDR, L , ref, lit) \
127 _(BUFPUT, L , ref, ref) \
128 _(BUFSTR, A , ref, ref) \
129 \
123 /* Barriers. */ \ 130 /* Barriers. */ \
124 _(TBAR, S , ref, ___) \ 131 _(TBAR, S , ref, ___) \
125 _(OBAR, S , ref, ref) \ 132 _(OBAR, S , ref, ref) \
@@ -128,11 +135,12 @@
128 /* Type conversions. */ \ 135 /* Type conversions. */ \
129 _(CONV, NW, ref, lit) \ 136 _(CONV, NW, ref, lit) \
130 _(TOBIT, N , ref, ref) \ 137 _(TOBIT, N , ref, ref) \
131 _(TOSTR, N , ref, ___) \ 138 _(TOSTR, N , ref, lit) \
132 _(STRTO, N , ref, ___) \ 139 _(STRTO, N , ref, ___) \
133 \ 140 \
134 /* Calls. */ \ 141 /* Calls. */ \
135 _(CALLN, N , ref, lit) \ 142 _(CALLN, N , ref, lit) \
143 _(CALLA, A , ref, lit) \
136 _(CALLL, L , ref, lit) \ 144 _(CALLL, L , ref, lit) \
137 _(CALLS, S , ref, lit) \ 145 _(CALLS, S , ref, lit) \
138 _(CALLXS, S , ref, ref) \ 146 _(CALLXS, S , ref, ref) \
@@ -170,8 +178,7 @@ LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
170/* FPMATH sub-functions. ORDER FPM. */ 178/* FPMATH sub-functions. ORDER FPM. */
171#define IRFPMDEF(_) \ 179#define IRFPMDEF(_) \
172 _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \ 180 _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
173 _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \ 181 _(SQRT) _(LOG) _(LOG2) \
174 _(SIN) _(COS) _(TAN) \
175 _(OTHER) 182 _(OTHER)
176 183
177typedef enum { 184typedef enum {
@@ -186,6 +193,8 @@ IRFPMDEF(FPMENUM)
186 _(STR_LEN, offsetof(GCstr, len)) \ 193 _(STR_LEN, offsetof(GCstr, len)) \
187 _(FUNC_ENV, offsetof(GCfunc, l.env)) \ 194 _(FUNC_ENV, offsetof(GCfunc, l.env)) \
188 _(FUNC_PC, offsetof(GCfunc, l.pc)) \ 195 _(FUNC_PC, offsetof(GCfunc, l.pc)) \
196 _(FUNC_FFID, offsetof(GCfunc, l.ffid)) \
197 _(THREAD_ENV, offsetof(lua_State, env)) \
189 _(TAB_META, offsetof(GCtab, metatable)) \ 198 _(TAB_META, offsetof(GCtab, metatable)) \
190 _(TAB_ARRAY, offsetof(GCtab, array)) \ 199 _(TAB_ARRAY, offsetof(GCtab, array)) \
191 _(TAB_NODE, offsetof(GCtab, node)) \ 200 _(TAB_NODE, offsetof(GCtab, node)) \
@@ -210,7 +219,7 @@ IRFLDEF(FLENUM)
210 219
211/* SLOAD mode bits, stored in op2. */ 220/* SLOAD mode bits, stored in op2. */
212#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */ 221#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
213#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */ 222#define IRSLOAD_FRAME 0x02 /* Load 32 bits of ftsz. */
214#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */ 223#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
215#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */ 224#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
216#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */ 225#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
@@ -221,13 +230,16 @@ IRFLDEF(FLENUM)
221#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */ 230#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
222#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */ 231#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
223 232
233/* BUFHDR mode, stored in op2. */
234#define IRBUFHDR_RESET 0 /* Reset buffer. */
235#define IRBUFHDR_APPEND 1 /* Append to buffer. */
236
224/* CONV mode, stored in op2. */ 237/* CONV mode, stored in op2. */
225#define IRCONV_SRCMASK 0x001f /* Source IRType. */ 238#define IRCONV_SRCMASK 0x001f /* Source IRType. */
226#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */ 239#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
227#define IRCONV_DSH 5 240#define IRCONV_DSH 5
228#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT) 241#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
229#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM) 242#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
230#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
231#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */ 243#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
232#define IRCONV_MODEMASK 0x0fff 244#define IRCONV_MODEMASK 0x0fff
233#define IRCONV_CONVMASK 0xf000 245#define IRCONV_CONVMASK 0xf000
@@ -238,6 +250,11 @@ IRFLDEF(FLENUM)
238#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */ 250#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
239#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */ 251#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
240 252
253/* TOSTR mode, stored in op2. */
254#define IRTOSTR_INT 0 /* Convert integer to string. */
255#define IRTOSTR_NUM 1 /* Convert number to string. */
256#define IRTOSTR_CHAR 2 /* Convert char value to string. */
257
241/* -- IR operands --------------------------------------------------------- */ 258/* -- IR operands --------------------------------------------------------- */
242 259
243/* IR operand mode (2 bit). */ 260/* IR operand mode (2 bit). */
@@ -276,7 +293,9 @@ LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
276 293
277/* -- IR instruction types ------------------------------------------------ */ 294/* -- IR instruction types ------------------------------------------------ */
278 295
279/* Map of itypes to non-negative numbers. ORDER LJ_T. 296#define IRTSIZE_PGC (LJ_GC64 ? 8 : 4)
297
298/* Map of itypes to non-negative numbers and their sizes. ORDER LJ_T.
280** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for 299** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
281** IRT_P32 and IRT_P64, which never escape the IR. 300** IRT_P32 and IRT_P64, which never escape the IR.
282** The various integers are only used in the IR and can only escape to 301** The various integers are only used in the IR and can only escape to
@@ -284,12 +303,13 @@ LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
284** contiguous and next to IRT_NUM (see the typerange macros below). 303** contiguous and next to IRT_NUM (see the typerange macros below).
285*/ 304*/
286#define IRTDEF(_) \ 305#define IRTDEF(_) \
287 _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \ 306 _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) \
288 _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \ 307 _(STR, IRTSIZE_PGC) _(P32, 4) _(THREAD, IRTSIZE_PGC) _(PROTO, IRTSIZE_PGC) \
289 _(TAB, 4) _(UDATA, 4) \ 308 _(FUNC, IRTSIZE_PGC) _(P64, 8) _(CDATA, IRTSIZE_PGC) _(TAB, IRTSIZE_PGC) \
309 _(UDATA, IRTSIZE_PGC) \
290 _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \ 310 _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
291 _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \ 311 _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
292 _(SOFTFP, 4) /* There is room for 9 more types. */ 312 _(SOFTFP, 4) /* There is room for 8 more types. */
293 313
294/* IR result type and flags (8 bit). */ 314/* IR result type and flags (8 bit). */
295typedef enum { 315typedef enum {
@@ -300,6 +320,8 @@ IRTDEF(IRTENUM)
300 320
301 /* Native pointer type and the corresponding integer type. */ 321 /* Native pointer type and the corresponding integer type. */
302 IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32, 322 IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
323 IRT_PGC = LJ_GC64 ? IRT_P64 : IRT_P32,
324 IRT_IGC = LJ_GC64 ? IRT_I64 : IRT_INT,
303 IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT, 325 IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
304 IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32, 326 IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
305 327
@@ -353,7 +375,14 @@ typedef struct IRType1 { uint8_t irt; } IRType1;
353#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA)) 375#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
354#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64)) 376#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
355 377
356#if LJ_64 378#if LJ_GC64
379/* Include IRT_NIL, so IR(ASMREF_L) (aka REF_NIL) is considered 64 bit. */
380#define IRT_IS64 \
381 ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|\
382 (1u<<IRT_LIGHTUD)|(1u<<IRT_STR)|(1u<<IRT_THREAD)|(1u<<IRT_PROTO)|\
383 (1u<<IRT_FUNC)|(1u<<IRT_CDATA)|(1u<<IRT_TAB)|(1u<<IRT_UDATA)|\
384 (1u<<IRT_NIL))
385#elif LJ_64
357#define IRT_IS64 \ 386#define IRT_IS64 \
358 ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD)) 387 ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
359#else 388#else
@@ -374,7 +403,7 @@ static LJ_AINLINE IRType itype2irt(const TValue *tv)
374 return IRT_INT; 403 return IRT_INT;
375 else if (tvisnum(tv)) 404 else if (tvisnum(tv))
376 return IRT_NUM; 405 return IRT_NUM;
377#if LJ_64 406#if LJ_64 && !LJ_GC64
378 else if (tvislightud(tv)) 407 else if (tvislightud(tv))
379 return IRT_LIGHTUD; 408 return IRT_LIGHTUD;
380#endif 409#endif
@@ -384,11 +413,12 @@ static LJ_AINLINE IRType itype2irt(const TValue *tv)
384 413
385static LJ_AINLINE uint32_t irt_toitype_(IRType t) 414static LJ_AINLINE uint32_t irt_toitype_(IRType t)
386{ 415{
387 lua_assert(!LJ_64 || t != IRT_LIGHTUD); 416 lj_assertX(!LJ_64 || LJ_GC64 || t != IRT_LIGHTUD,
417 "no plain type tag for lightuserdata");
388 if (LJ_DUALNUM && t > IRT_NUM) { 418 if (LJ_DUALNUM && t > IRT_NUM) {
389 return LJ_TISNUM; 419 return LJ_TISNUM;
390 } else { 420 } else {
391 lua_assert(t <= IRT_NUM); 421 lj_assertX(t <= IRT_NUM, "no plain type tag for IR type %d", t);
392 return ~(uint32_t)t; 422 return ~(uint32_t)t;
393 } 423 }
394} 424}
@@ -464,6 +494,7 @@ typedef uint32_t TRef;
464#define tref_isnil(tr) (tref_istype((tr), IRT_NIL)) 494#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
465#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE)) 495#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
466#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE)) 496#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
497#define tref_islightud(tr) (tref_istype((tr), IRT_LIGHTUD))
467#define tref_isstr(tr) (tref_istype((tr), IRT_STR)) 498#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
468#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC)) 499#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
469#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA)) 500#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
@@ -496,7 +527,9 @@ typedef uint32_t TRef;
496** +-------+-------+---+---+---+---+ 527** +-------+-------+---+---+---+---+
497** | op1 | op2 | t | o | r | s | 528** | op1 | op2 | t | o | r | s |
498** +-------+-------+---+---+---+---+ 529** +-------+-------+---+---+---+---+
499** | op12/i/gco | ot | prev | (alternative fields in union) 530** | op12/i/gco32 | ot | prev | (alternative fields in union)
531** +-------+-------+---+---+---+---+
532** | TValue/gco64 | (2nd IR slot for 64 bit constants)
500** +---------------+-------+-------+ 533** +---------------+-------+-------+
501** 32 16 16 534** 32 16 16
502** 535**
@@ -524,21 +557,27 @@ typedef union IRIns {
524 ) 557 )
525 }; 558 };
526 int32_t i; /* 32 bit signed integer literal (overlaps op12). */ 559 int32_t i; /* 32 bit signed integer literal (overlaps op12). */
527 GCRef gcr; /* GCobj constant (overlaps op12). */ 560 GCRef gcr; /* GCobj constant (overlaps op12 or entire slot). */
528 MRef ptr; /* Pointer constant (overlaps op12). */ 561 MRef ptr; /* Pointer constant (overlaps op12 or entire slot). */
562 TValue tv; /* TValue constant (overlaps entire slot). */
529} IRIns; 563} IRIns;
530 564
531#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr)) 565#define ir_isk64(ir) \
566 ((ir)->o == IR_KNUM || (ir)->o == IR_KINT64 || \
567 (LJ_GC64 && \
568 ((ir)->o == IR_KGC || (ir)->o == IR_KPTR || (ir)->o == IR_KKPTR)))
569
570#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)[LJ_GC64].gcr))
532#define ir_kstr(ir) (gco2str(ir_kgc((ir)))) 571#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
533#define ir_ktab(ir) (gco2tab(ir_kgc((ir)))) 572#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
534#define ir_kfunc(ir) (gco2func(ir_kgc((ir)))) 573#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
535#define ir_kcdata(ir) (gco2cd(ir_kgc((ir)))) 574#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
536#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue)) 575#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, &(ir)[1].tv)
537#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue)) 576#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, &(ir)[1].tv)
538#define ir_k64(ir) \ 577#define ir_k64(ir) check_exp(ir_isk64(ir), &(ir)[1].tv)
539 check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
540#define ir_kptr(ir) \ 578#define ir_kptr(ir) \
541 check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void)) 579 check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, \
580 mref((ir)[LJ_GC64].ptr, void))
542 581
543/* A store or any other op with a non-weak guard has a side-effect. */ 582/* A store or any other op with a non-weak guard has a side-effect. */
544static LJ_AINLINE int ir_sideeff(IRIns *ir) 583static LJ_AINLINE int ir_sideeff(IRIns *ir)
diff --git a/src/lj_ircall.h b/src/lj_ircall.h
index aae9adbb..a45dde34 100644
--- a/src/lj_ircall.h
+++ b/src/lj_ircall.h
@@ -16,15 +16,17 @@ typedef struct CCallInfo {
16 uint32_t flags; /* Number of arguments and flags. */ 16 uint32_t flags; /* Number of arguments and flags. */
17} CCallInfo; 17} CCallInfo;
18 18
19#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */ 19#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* # of args. */
20#define CCI_NARGS_MAX 32 /* Max. # of args. */ 20#define CCI_NARGS_MAX 32 /* Max. # of args. */
21 21
22#define CCI_OTSHIFT 16 22#define CCI_OTSHIFT 16
23#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */ 23#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
24#define CCI_TYPE(ci) (((ci)->flags>>CCI_OTSHIFT) & IRT_TYPE)
24#define CCI_OPSHIFT 24 25#define CCI_OPSHIFT 24
25#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */ 26#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
26 27
27#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT) 28#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
29#define CCI_CALL_A (IR_CALLA << CCI_OPSHIFT)
28#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT) 30#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
29#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT) 31#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
30#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL) 32#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
@@ -45,6 +47,17 @@ typedef struct CCallInfo {
45#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */ 47#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
46#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */ 48#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
47 49
50/* Extra args for SOFTFP, SPLIT 64 bit. */
51#define CCI_XARGS_SHIFT 14
52#define CCI_XARGS(ci) (((ci)->flags >> CCI_XARGS_SHIFT) & 3)
53#define CCI_XA (1u << CCI_XARGS_SHIFT)
54
55#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
56#define CCI_XNARGS(ci) (CCI_NARGS((ci)) + CCI_XARGS((ci)))
57#else
58#define CCI_XNARGS(ci) CCI_NARGS((ci))
59#endif
60
48/* Helpers for conditional function definitions. */ 61/* Helpers for conditional function definitions. */
49#define IRCALLCOND_ANY(x) x 62#define IRCALLCOND_ANY(x) x
50 63
@@ -66,6 +79,18 @@ typedef struct CCallInfo {
66#define IRCALLCOND_SOFTFP_FFI(x) NULL 79#define IRCALLCOND_SOFTFP_FFI(x) NULL
67#endif 80#endif
68 81
82#if LJ_SOFTFP && LJ_TARGET_MIPS
83#define IRCALLCOND_SOFTFP_MIPS(x) x
84#else
85#define IRCALLCOND_SOFTFP_MIPS(x) NULL
86#endif
87
88#if LJ_SOFTFP && LJ_TARGET_MIPS64
89#define IRCALLCOND_SOFTFP_MIPS64(x) x
90#else
91#define IRCALLCOND_SOFTFP_MIPS64(x) NULL
92#endif
93
69#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS) 94#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
70 95
71#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64) 96#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
@@ -87,92 +112,138 @@ typedef struct CCallInfo {
87#endif 112#endif
88 113
89#if LJ_SOFTFP 114#if LJ_SOFTFP
90#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */ 115#define XA_FP CCI_XA
116#define XA2_FP (CCI_XA+CCI_XA)
91#else 117#else
92#define ARG1_FP 1 118#define XA_FP 0
119#define XA2_FP 0
120#endif
121
122#if LJ_SOFTFP32
123#define XA_FP32 CCI_XA
124#define XA2_FP32 (CCI_XA+CCI_XA)
125#else
126#define XA_FP32 0
127#define XA2_FP32 0
93#endif 128#endif
94 129
95#if LJ_32 130#if LJ_32
96#define ARG2_64 4 /* Treat as 4 32 bit arguments. */ 131#define XA_64 CCI_XA
132#define XA2_64 (CCI_XA+CCI_XA)
97#else 133#else
98#define ARG2_64 2 134#define XA_64 0
135#define XA2_64 0
99#endif 136#endif
100 137
101/* Function definitions for CALL* instructions. */ 138/* Function definitions for CALL* instructions. */
102#define IRCALLDEF(_) \ 139#define IRCALLDEF(_) \
103 _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \ 140 _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
141 _(ANY, lj_str_find, 4, N, PGC, 0) \
104 _(ANY, lj_str_new, 3, S, STR, CCI_L) \ 142 _(ANY, lj_str_new, 3, S, STR, CCI_L) \
105 _(ANY, lj_strscan_num, 2, FN, INT, 0) \ 143 _(ANY, lj_strscan_num, 2, FN, INT, 0) \
106 _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \ 144 _(ANY, lj_strfmt_int, 2, FN, STR, CCI_L) \
107 _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \ 145 _(ANY, lj_strfmt_num, 2, FN, STR, CCI_L) \
146 _(ANY, lj_strfmt_char, 2, FN, STR, CCI_L) \
147 _(ANY, lj_strfmt_putint, 2, FL, PGC, 0) \
148 _(ANY, lj_strfmt_putnum, 2, FL, PGC, 0) \
149 _(ANY, lj_strfmt_putquoted, 2, FL, PGC, 0) \
150 _(ANY, lj_strfmt_putfxint, 3, L, PGC, XA_64) \
151 _(ANY, lj_strfmt_putfnum_int, 3, L, PGC, XA_FP) \
152 _(ANY, lj_strfmt_putfnum_uint, 3, L, PGC, XA_FP) \
153 _(ANY, lj_strfmt_putfnum, 3, L, PGC, XA_FP) \
154 _(ANY, lj_strfmt_putfstr, 3, L, PGC, 0) \
155 _(ANY, lj_strfmt_putfchar, 3, L, PGC, 0) \
156 _(ANY, lj_buf_putmem, 3, S, PGC, 0) \
157 _(ANY, lj_buf_putstr, 2, FL, PGC, 0) \
158 _(ANY, lj_buf_putchar, 2, FL, PGC, 0) \
159 _(ANY, lj_buf_putstr_reverse, 2, FL, PGC, 0) \
160 _(ANY, lj_buf_putstr_lower, 2, FL, PGC, 0) \
161 _(ANY, lj_buf_putstr_upper, 2, FL, PGC, 0) \
162 _(ANY, lj_buf_putstr_rep, 3, L, PGC, 0) \
163 _(ANY, lj_buf_puttab, 5, L, PGC, 0) \
164 _(ANY, lj_buf_tostr, 1, FL, STR, 0) \
165 _(ANY, lj_tab_new_ah, 3, A, TAB, CCI_L) \
108 _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \ 166 _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
109 _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \ 167 _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
110 _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \ 168 _(ANY, lj_tab_clear, 1, FS, NIL, 0) \
169 _(ANY, lj_tab_newkey, 3, S, PGC, CCI_L) \
111 _(ANY, lj_tab_len, 1, FL, INT, 0) \ 170 _(ANY, lj_tab_len, 1, FL, INT, 0) \
171 _(ANY, lj_tab_len_hint, 2, FL, INT, 0) \
112 _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \ 172 _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
113 _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \ 173 _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
114 _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \ 174 _(ANY, lj_mem_newgco, 2, FS, PGC, CCI_L) \
115 _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64) \ 175 _(ANY, lj_prng_u64d, 1, FS, NUM, CCI_CASTU64) \
116 _(ANY, lj_vm_modi, 2, FN, INT, 0) \ 176 _(ANY, lj_vm_modi, 2, FN, INT, 0) \
117 _(ANY, sinh, ARG1_FP, N, NUM, 0) \ 177 _(ANY, log10, 1, N, NUM, XA_FP) \
118 _(ANY, cosh, ARG1_FP, N, NUM, 0) \ 178 _(ANY, exp, 1, N, NUM, XA_FP) \
119 _(ANY, tanh, ARG1_FP, N, NUM, 0) \ 179 _(ANY, sin, 1, N, NUM, XA_FP) \
120 _(ANY, fputc, 2, S, INT, 0) \ 180 _(ANY, cos, 1, N, NUM, XA_FP) \
121 _(ANY, fwrite, 4, S, INT, 0) \ 181 _(ANY, tan, 1, N, NUM, XA_FP) \
122 _(ANY, fflush, 1, S, INT, 0) \ 182 _(ANY, asin, 1, N, NUM, XA_FP) \
183 _(ANY, acos, 1, N, NUM, XA_FP) \
184 _(ANY, atan, 1, N, NUM, XA_FP) \
185 _(ANY, sinh, 1, N, NUM, XA_FP) \
186 _(ANY, cosh, 1, N, NUM, XA_FP) \
187 _(ANY, tanh, 1, N, NUM, XA_FP) \
188 _(ANY, fputc, 2, S, INT, 0) \
189 _(ANY, fwrite, 4, S, INT, 0) \
190 _(ANY, fflush, 1, S, INT, 0) \
123 /* ORDER FPM */ \ 191 /* ORDER FPM */ \
124 _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \ 192 _(FPMATH, lj_vm_floor, 1, N, NUM, XA_FP) \
125 _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \ 193 _(FPMATH, lj_vm_ceil, 1, N, NUM, XA_FP) \
126 _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \ 194 _(FPMATH, lj_vm_trunc, 1, N, NUM, XA_FP) \
127 _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \ 195 _(FPMATH, sqrt, 1, N, NUM, XA_FP) \
128 _(FPMATH, exp, ARG1_FP, N, NUM, 0) \ 196 _(ANY, log, 1, N, NUM, XA_FP) \
129 _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \ 197 _(ANY, lj_vm_log2, 1, N, NUM, XA_FP) \
130 _(FPMATH, log, ARG1_FP, N, NUM, 0) \ 198 _(ANY, lj_vm_powi, 2, N, NUM, XA_FP) \
131 _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \ 199 _(ANY, pow, 2, N, NUM, XA2_FP) \
132 _(FPMATH, log10, ARG1_FP, N, NUM, 0) \ 200 _(ANY, atan2, 2, N, NUM, XA2_FP) \
133 _(FPMATH, sin, ARG1_FP, N, NUM, 0) \ 201 _(ANY, ldexp, 2, N, NUM, XA_FP) \
134 _(FPMATH, cos, ARG1_FP, N, NUM, 0) \ 202 _(SOFTFP, lj_vm_tobit, 1, N, INT, XA_FP32) \
135 _(FPMATH, tan, ARG1_FP, N, NUM, 0) \ 203 _(SOFTFP, softfp_add, 2, N, NUM, XA2_FP32) \
136 _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \ 204 _(SOFTFP, softfp_sub, 2, N, NUM, XA2_FP32) \
137 _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \ 205 _(SOFTFP, softfp_mul, 2, N, NUM, XA2_FP32) \
138 _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \ 206 _(SOFTFP, softfp_div, 2, N, NUM, XA2_FP32) \
139 _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \ 207 _(SOFTFP, softfp_cmp, 2, N, NIL, XA2_FP32) \
140 _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
141 _(SOFTFP, softfp_add, 4, N, NUM, 0) \
142 _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
143 _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
144 _(SOFTFP, softfp_div, 4, N, NUM, 0) \
145 _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
146 _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \ 208 _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
147 _(SOFTFP, softfp_d2i, 2, N, INT, 0) \ 209 _(SOFTFP, softfp_d2i, 1, N, INT, XA_FP32) \
210 _(SOFTFP_MIPS, lj_vm_sfmin, 2, N, NUM, XA2_FP32) \
211 _(SOFTFP_MIPS, lj_vm_sfmax, 2, N, NUM, XA2_FP32) \
212 _(SOFTFP_MIPS64, lj_vm_tointg, 1, N, INT, 0) \
148 _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \ 213 _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
149 _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \ 214 _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
150 _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \ 215 _(SOFTFP_FFI, softfp_d2ui, 1, N, INT, XA_FP32) \
151 _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \ 216 _(SOFTFP_FFI, softfp_d2f, 1, N, FLOAT, XA_FP32) \
152 _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \ 217 _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
153 _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \ 218 _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
154 _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \ 219 _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
155 _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \ 220 _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
156 _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \ 221 _(FP64_FFI, fp64_l2d, 1, N, NUM, XA_64) \
157 _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \ 222 _(FP64_FFI, fp64_ul2d, 1, N, NUM, XA_64) \
158 _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \ 223 _(FP64_FFI, fp64_l2f, 1, N, FLOAT, XA_64) \
159 _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \ 224 _(FP64_FFI, fp64_ul2f, 1, N, FLOAT, XA_64) \
160 _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \ 225 _(FP64_FFI, fp64_d2l, 1, N, I64, XA_FP) \
161 _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \ 226 _(FP64_FFI, fp64_d2ul, 1, N, U64, XA_FP) \
162 _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \ 227 _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
163 _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \ 228 _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
164 _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ 229 _(FFI, lj_carith_divi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
165 _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ 230 _(FFI, lj_carith_divu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
166 _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ 231 _(FFI, lj_carith_modi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
167 _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ 232 _(FFI, lj_carith_modu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
168 _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \ 233 _(FFI, lj_carith_powi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
169 _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \ 234 _(FFI, lj_carith_powu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
170 _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \ 235 _(FFI, lj_cdata_newv, 4, S, CDATA, CCI_L) \
171 _(FFI, strlen, 1, L, INTP, 0) \ 236 _(FFI, lj_cdata_setfin, 4, S, NIL, CCI_L) \
172 _(FFI, memcpy, 3, S, PTR, 0) \ 237 _(FFI, strlen, 1, L, INTP, 0) \
173 _(FFI, memset, 3, S, PTR, 0) \ 238 _(FFI, memcpy, 3, S, PTR, 0) \
174 _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \ 239 _(FFI, memset, 3, S, PTR, 0) \
175 _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) 240 _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
241 _(FFI32, lj_carith_mul64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
242 _(FFI32, lj_carith_shl64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
243 _(FFI32, lj_carith_shr64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
244 _(FFI32, lj_carith_sar64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
245 _(FFI32, lj_carith_rol64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
246 _(FFI32, lj_carith_ror64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
176 \ 247 \
177 /* End of list. */ 248 /* End of list. */
178 249
@@ -220,6 +291,22 @@ LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
220#define fp64_f2l __aeabi_f2lz 291#define fp64_f2l __aeabi_f2lz
221#define fp64_f2ul __aeabi_f2ulz 292#define fp64_f2ul __aeabi_f2ulz
222#endif 293#endif
294#elif LJ_TARGET_MIPS || LJ_TARGET_PPC
295#define softfp_add __adddf3
296#define softfp_sub __subdf3
297#define softfp_mul __muldf3
298#define softfp_div __divdf3
299#define softfp_cmp __ledf2
300#define softfp_i2d __floatsidf
301#define softfp_d2i __fixdfsi
302#define softfp_ui2d __floatunsidf
303#define softfp_f2d __extendsfdf2
304#define softfp_d2ui __fixunsdfsi
305#define softfp_d2f __truncdfsf2
306#define softfp_i2f __floatsisf
307#define softfp_ui2f __floatunsisf
308#define softfp_f2i __fixsfsi
309#define softfp_f2ui __fixunssfsi
223#else 310#else
224#error "Missing soft-float definitions for target architecture" 311#error "Missing soft-float definitions for target architecture"
225#endif 312#endif
@@ -240,10 +327,14 @@ extern float softfp_ui2f(uint32_t a);
240extern int32_t softfp_f2i(float a); 327extern int32_t softfp_f2i(float a);
241extern uint32_t softfp_f2ui(float a); 328extern uint32_t softfp_f2ui(float a);
242#endif 329#endif
330#if LJ_TARGET_MIPS
331extern double lj_vm_sfmin(double a, double b);
332extern double lj_vm_sfmax(double a, double b);
333#endif
243#endif 334#endif
244 335
245#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP) 336#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
246#ifdef __GNUC__ 337#if defined(__GNUC__) || defined(__clang__)
247#define fp64_l2d __floatdidf 338#define fp64_l2d __floatdidf
248#define fp64_ul2d __floatundidf 339#define fp64_ul2d __floatundidf
249#define fp64_l2f __floatdisf 340#define fp64_l2f __floatdisf
diff --git a/src/lj_iropt.h b/src/lj_iropt.h
index cf5b4d1f..8333483f 100644
--- a/src/lj_iropt.h
+++ b/src/lj_iropt.h
@@ -36,11 +36,11 @@ static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
36 return ref; 36 return ref;
37} 37}
38 38
39LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs);
40
39/* Interning of constants. */ 41/* Interning of constants. */
40LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k); 42LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
41LJ_FUNC void lj_ir_k64_freeall(jit_State *J); 43LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64);
42LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
43LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
44LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64); 44LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
45LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n); 45LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
46LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64); 46LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
@@ -48,6 +48,7 @@ LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
48LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr); 48LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
49LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t); 49LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
50LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot); 50LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
51LJ_FUNC TRef lj_ir_ktrace(jit_State *J);
51 52
52#if LJ_64 53#if LJ_64
53#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k)) 54#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
@@ -74,8 +75,8 @@ static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
74#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000)) 75#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
75 76
76/* Special 128 bit SIMD constants. */ 77/* Special 128 bit SIMD constants. */
77#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS)) 78#define lj_ir_ksimd(J, idx) \
78#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG)) 79 lj_ir_ggfload(J, IRT_NUM, (uintptr_t)LJ_KSIMD(J, idx) - (uintptr_t)J2GG(J))
79 80
80/* Access to constants. */ 81/* Access to constants. */
81LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir); 82LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
@@ -119,7 +120,7 @@ LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
119LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J); 120LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
120LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J); 121LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
121LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J); 122LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
122LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J); 123LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J);
123LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J); 124LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
124LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J); 125LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
125LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim); 126LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
@@ -149,7 +150,7 @@ LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
149/* Optimization passes. */ 150/* Optimization passes. */
150LJ_FUNC void lj_opt_dce(jit_State *J); 151LJ_FUNC void lj_opt_dce(jit_State *J);
151LJ_FUNC int lj_opt_loop(jit_State *J); 152LJ_FUNC int lj_opt_loop(jit_State *J);
152#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) 153#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
153LJ_FUNC void lj_opt_split(jit_State *J); 154LJ_FUNC void lj_opt_split(jit_State *J);
154#else 155#else
155#define lj_opt_split(J) UNUSED(J) 156#define lj_opt_split(J) UNUSED(J)
diff --git a/src/lj_jit.h b/src/lj_jit.h
index 4a4b0b1b..655b84c3 100644
--- a/src/lj_jit.h
+++ b/src/lj_jit.h
@@ -9,71 +9,85 @@
9#include "lj_obj.h" 9#include "lj_obj.h"
10#include "lj_ir.h" 10#include "lj_ir.h"
11 11
12/* JIT engine flags. */ 12/* -- JIT engine flags ---------------------------------------------------- */
13
14/* General JIT engine flags. 4 bits. */
13#define JIT_F_ON 0x00000001 15#define JIT_F_ON 0x00000001
14 16
15/* CPU-specific JIT engine flags. */ 17/* CPU-specific JIT engine flags. 12 bits. Flags and strings must match. */
18#define JIT_F_CPU 0x00000010
19
16#if LJ_TARGET_X86ORX64 20#if LJ_TARGET_X86ORX64
17#define JIT_F_CMOV 0x00000010 21
18#define JIT_F_SSE2 0x00000020 22#define JIT_F_SSE3 (JIT_F_CPU << 0)
19#define JIT_F_SSE3 0x00000040 23#define JIT_F_SSE4_1 (JIT_F_CPU << 1)
20#define JIT_F_SSE4_1 0x00000080 24#define JIT_F_BMI2 (JIT_F_CPU << 2)
21#define JIT_F_P4 0x00000100 25
22#define JIT_F_PREFER_IMUL 0x00000200 26
23#define JIT_F_SPLIT_XMM 0x00000400 27#define JIT_F_CPUSTRING "\4SSE3\6SSE4.1\4BMI2"
24#define JIT_F_LEA_AGU 0x00000800 28
25
26/* Names for the CPU-specific flags. Must match the order above. */
27#define JIT_F_CPU_FIRST JIT_F_CMOV
28#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
29#elif LJ_TARGET_ARM 29#elif LJ_TARGET_ARM
30#define JIT_F_ARMV6_ 0x00000010 30
31#define JIT_F_ARMV6T2_ 0x00000020 31#define JIT_F_ARMV6_ (JIT_F_CPU << 0)
32#define JIT_F_ARMV7 0x00000040 32#define JIT_F_ARMV6T2_ (JIT_F_CPU << 1)
33#define JIT_F_VFPV2 0x00000080 33#define JIT_F_ARMV7 (JIT_F_CPU << 2)
34#define JIT_F_VFPV3 0x00000100 34#define JIT_F_ARMV8 (JIT_F_CPU << 3)
35 35#define JIT_F_VFPV2 (JIT_F_CPU << 4)
36#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7) 36#define JIT_F_VFPV3 (JIT_F_CPU << 5)
37#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7) 37
38#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
39#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
38#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3) 40#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
39 41
40/* Names for the CPU-specific flags. Must match the order above. */ 42#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5ARMv8\5VFPv2\5VFPv3"
41#define JIT_F_CPU_FIRST JIT_F_ARMV6_ 43
42#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
43#elif LJ_TARGET_PPC 44#elif LJ_TARGET_PPC
44#define JIT_F_SQRT 0x00000010
45#define JIT_F_ROUND 0x00000020
46 45
47/* Names for the CPU-specific flags. Must match the order above. */ 46#define JIT_F_SQRT (JIT_F_CPU << 0)
48#define JIT_F_CPU_FIRST JIT_F_SQRT 47#define JIT_F_ROUND (JIT_F_CPU << 1)
48
49#define JIT_F_CPUSTRING "\4SQRT\5ROUND" 49#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
50
50#elif LJ_TARGET_MIPS 51#elif LJ_TARGET_MIPS
51#define JIT_F_MIPS32R2 0x00000010
52 52
53/* Names for the CPU-specific flags. Must match the order above. */ 53#define JIT_F_MIPSXXR2 (JIT_F_CPU << 0)
54#define JIT_F_CPU_FIRST JIT_F_MIPS32R2 54
55#if LJ_TARGET_MIPS32
56#if LJ_TARGET_MIPSR6
57#define JIT_F_CPUSTRING "\010MIPS32R6"
58#else
55#define JIT_F_CPUSTRING "\010MIPS32R2" 59#define JIT_F_CPUSTRING "\010MIPS32R2"
60#endif
61#else
62#if LJ_TARGET_MIPSR6
63#define JIT_F_CPUSTRING "\010MIPS64R6"
56#else 64#else
57#define JIT_F_CPU_FIRST 0 65#define JIT_F_CPUSTRING "\010MIPS64R2"
66#endif
67#endif
68
69#else
70
58#define JIT_F_CPUSTRING "" 71#define JIT_F_CPUSTRING ""
72
59#endif 73#endif
60 74
61/* Optimization flags. */ 75/* Optimization flags. 12 bits. */
76#define JIT_F_OPT 0x00010000
62#define JIT_F_OPT_MASK 0x0fff0000 77#define JIT_F_OPT_MASK 0x0fff0000
63 78
64#define JIT_F_OPT_FOLD 0x00010000 79#define JIT_F_OPT_FOLD (JIT_F_OPT << 0)
65#define JIT_F_OPT_CSE 0x00020000 80#define JIT_F_OPT_CSE (JIT_F_OPT << 1)
66#define JIT_F_OPT_DCE 0x00040000 81#define JIT_F_OPT_DCE (JIT_F_OPT << 2)
67#define JIT_F_OPT_FWD 0x00080000 82#define JIT_F_OPT_FWD (JIT_F_OPT << 3)
68#define JIT_F_OPT_DSE 0x00100000 83#define JIT_F_OPT_DSE (JIT_F_OPT << 4)
69#define JIT_F_OPT_NARROW 0x00200000 84#define JIT_F_OPT_NARROW (JIT_F_OPT << 5)
70#define JIT_F_OPT_LOOP 0x00400000 85#define JIT_F_OPT_LOOP (JIT_F_OPT << 6)
71#define JIT_F_OPT_ABC 0x00800000 86#define JIT_F_OPT_ABC (JIT_F_OPT << 7)
72#define JIT_F_OPT_SINK 0x01000000 87#define JIT_F_OPT_SINK (JIT_F_OPT << 8)
73#define JIT_F_OPT_FUSE 0x02000000 88#define JIT_F_OPT_FUSE (JIT_F_OPT << 9)
74 89
75/* Optimizations names for -O. Must match the order above. */ 90/* Optimizations names for -O. Must match the order above. */
76#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
77#define JIT_F_OPTSTRING \ 91#define JIT_F_OPTSTRING \
78 "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse" 92 "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
79 93
@@ -85,6 +99,8 @@
85 JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE) 99 JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
86#define JIT_F_OPT_DEFAULT JIT_F_OPT_3 100#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
87 101
102/* -- JIT engine parameters ----------------------------------------------- */
103
88#if LJ_TARGET_WINDOWS || LJ_64 104#if LJ_TARGET_WINDOWS || LJ_64
89/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */ 105/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
90#define JIT_P_sizemcode_DEFAULT 64 106#define JIT_P_sizemcode_DEFAULT 64
@@ -100,6 +116,7 @@
100 _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \ 116 _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
101 _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \ 117 _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
102 _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \ 118 _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
119 _(\011, minstitch, 0) /* Min. # of IR ins for a stitched trace. */ \
103 \ 120 \
104 _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \ 121 _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
105 _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \ 122 _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
@@ -126,6 +143,8 @@ JIT_PARAMDEF(JIT_PARAMENUM)
126#define JIT_PARAMSTR(len, name, value) #len #name 143#define JIT_PARAMSTR(len, name, value) #len #name
127#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR) 144#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
128 145
146/* -- JIT engine data structures ------------------------------------------ */
147
129/* Trace compiler state. */ 148/* Trace compiler state. */
130typedef enum { 149typedef enum {
131 LJ_TRACE_IDLE, /* Trace compiler idle. */ 150 LJ_TRACE_IDLE, /* Trace compiler idle. */
@@ -186,14 +205,26 @@ LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
186#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref)) 205#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
187#define SNAP_TR(slot, tr) \ 206#define SNAP_TR(slot, tr) \
188 (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK))) 207 (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
208#if !LJ_FR2
189#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc)) 209#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
210#endif
190#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz)) 211#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
191#define snap_ref(sn) ((sn) & 0xffff) 212#define snap_ref(sn) ((sn) & 0xffff)
192#define snap_slot(sn) ((BCReg)((sn) >> 24)) 213#define snap_slot(sn) ((BCReg)((sn) >> 24))
193#define snap_isframe(sn) ((sn) & SNAP_FRAME) 214#define snap_isframe(sn) ((sn) & SNAP_FRAME)
194#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
195#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref)) 215#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
196 216
217static LJ_AINLINE const BCIns *snap_pc(SnapEntry *sn)
218{
219#if LJ_FR2
220 uint64_t pcbase;
221 memcpy(&pcbase, sn, sizeof(uint64_t));
222 return (const BCIns *)(pcbase >> 8);
223#else
224 return (const BCIns *)(uintptr_t)*sn;
225#endif
226}
227
197/* Snapshot and exit numbers. */ 228/* Snapshot and exit numbers. */
198typedef uint32_t SnapNo; 229typedef uint32_t SnapNo;
199typedef uint32_t ExitNo; 230typedef uint32_t ExitNo;
@@ -211,7 +242,8 @@ typedef enum {
211 LJ_TRLINK_UPREC, /* Up-recursion. */ 242 LJ_TRLINK_UPREC, /* Up-recursion. */
212 LJ_TRLINK_DOWNREC, /* Down-recursion. */ 243 LJ_TRLINK_DOWNREC, /* Down-recursion. */
213 LJ_TRLINK_INTERP, /* Fallback to interpreter. */ 244 LJ_TRLINK_INTERP, /* Fallback to interpreter. */
214 LJ_TRLINK_RETURN /* Return to interpreter. */ 245 LJ_TRLINK_RETURN, /* Return to interpreter. */
246 LJ_TRLINK_STITCH /* Trace stitching. */
215} TraceLink; 247} TraceLink;
216 248
217/* Trace object. */ 249/* Trace object. */
@@ -219,6 +251,9 @@ typedef struct GCtrace {
219 GCHeader; 251 GCHeader;
220 uint16_t nsnap; /* Number of snapshots. */ 252 uint16_t nsnap; /* Number of snapshots. */
221 IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */ 253 IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
254#if LJ_GC64
255 uint32_t unused_gc64;
256#endif
222 GCRef gclist; 257 GCRef gclist;
223 IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */ 258 IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
224 IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */ 259 IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
@@ -294,6 +329,16 @@ typedef struct ScEvEntry {
294 uint8_t dir; /* Direction. 1: +, 0: -. */ 329 uint8_t dir; /* Direction. 1: +, 0: -. */
295} ScEvEntry; 330} ScEvEntry;
296 331
332/* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */
333typedef struct RBCHashEntry {
334 MRef pc; /* Bytecode PC. */
335 GCRef pt; /* Prototype. */
336 IRRef ref; /* IR reference. */
337} RBCHashEntry;
338
339/* Number of slots in the reverse bytecode hash table. Must be a power of 2. */
340#define RBCHASH_SLOTS 8
341
297/* 128 bit SIMD constants. */ 342/* 128 bit SIMD constants. */
298enum { 343enum {
299 LJ_KSIMD_ABS, 344 LJ_KSIMD_ABS,
@@ -301,12 +346,51 @@ enum {
301 LJ_KSIMD__MAX 346 LJ_KSIMD__MAX
302}; 347};
303 348
349enum {
350#if LJ_TARGET_X86ORX64
351 LJ_K64_TOBIT, /* 2^52 + 2^51 */
352 LJ_K64_2P64, /* 2^64 */
353 LJ_K64_M2P64, /* -2^64 */
354#if LJ_32
355 LJ_K64_M2P64_31, /* -2^64 or -2^31 */
356#else
357 LJ_K64_M2P64_31 = LJ_K64_M2P64,
358#endif
359#endif
360#if LJ_TARGET_MIPS
361 LJ_K64_2P31, /* 2^31 */
362#if LJ_64
363 LJ_K64_2P63, /* 2^63 */
364 LJ_K64_M2P64, /* -2^64 */
365#endif
366#endif
367 LJ_K64__MAX,
368};
369
370enum {
371#if LJ_TARGET_X86ORX64
372 LJ_K32_M2P64_31, /* -2^64 or -2^31 */
373#endif
374#if LJ_TARGET_PPC
375 LJ_K32_2P52_2P31, /* 2^52 + 2^31 */
376 LJ_K32_2P52, /* 2^52 */
377#endif
378#if LJ_TARGET_PPC || LJ_TARGET_MIPS
379 LJ_K32_2P31, /* 2^31 */
380#endif
381#if LJ_TARGET_MIPS64
382 LJ_K32_2P63, /* 2^63 */
383 LJ_K32_M2P64, /* -2^64 */
384#endif
385 LJ_K32__MAX
386};
387
304/* Get 16 byte aligned pointer to SIMD constant. */ 388/* Get 16 byte aligned pointer to SIMD constant. */
305#define LJ_KSIMD(J, n) \ 389#define LJ_KSIMD(J, n) \
306 ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15)) 390 ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
307 391
308/* Set/reset flag to activate the SPLIT pass for the current trace. */ 392/* Set/reset flag to activate the SPLIT pass for the current trace. */
309#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) 393#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
310#define lj_needsplit(J) (J->needsplit = 1) 394#define lj_needsplit(J) (J->needsplit = 1)
311#define lj_resetsplit(J) (J->needsplit = 0) 395#define lj_resetsplit(J) (J->needsplit = 0)
312#else 396#else
@@ -317,13 +401,14 @@ enum {
317/* Fold state is used to fold instructions on-the-fly. */ 401/* Fold state is used to fold instructions on-the-fly. */
318typedef struct FoldState { 402typedef struct FoldState {
319 IRIns ins; /* Currently emitted instruction. */ 403 IRIns ins; /* Currently emitted instruction. */
320 IRIns left; /* Instruction referenced by left operand. */ 404 IRIns left[2]; /* Instruction referenced by left operand. */
321 IRIns right; /* Instruction referenced by right operand. */ 405 IRIns right[2]; /* Instruction referenced by right operand. */
322} FoldState; 406} FoldState;
323 407
324/* JIT compiler state. */ 408/* JIT compiler state. */
325typedef struct jit_State { 409typedef struct jit_State {
326 GCtrace cur; /* Current trace. */ 410 GCtrace cur; /* Current trace. */
411 GCtrace *curfinal; /* Final address of current trace (set during asm). */
327 412
328 lua_State *L; /* Current Lua state. */ 413 lua_State *L; /* Current Lua state. */
329 const BCIns *pc; /* Current PC. */ 414 const BCIns *pc; /* Current PC. */
@@ -353,8 +438,9 @@ typedef struct jit_State {
353 int32_t framedepth; /* Current frame depth. */ 438 int32_t framedepth; /* Current frame depth. */
354 int32_t retdepth; /* Return frame depth (count of RETF). */ 439 int32_t retdepth; /* Return frame depth (count of RETF). */
355 440
356 MRef k64; /* Pointer to chained array of 64 bit constants. */ 441 uint32_t k32[LJ_K32__MAX]; /* Common 4 byte constants used by backends. */
357 TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */ 442 TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
443 TValue k64[LJ_K64__MAX]; /* Common 8 byte constants. */
358 444
359 IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */ 445 IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
360 IRRef irtoplim; /* Upper limit of instuction buffer (biased). */ 446 IRRef irtoplim; /* Upper limit of instuction buffer (biased). */
@@ -367,13 +453,15 @@ typedef struct jit_State {
367 MSize sizesnapmap; /* Size of temp. snapshot map buffer. */ 453 MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
368 454
369 PostProc postproc; /* Required post-processing after execution. */ 455 PostProc postproc; /* Required post-processing after execution. */
370#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI) 456#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
371 int needsplit; /* Need SPLIT pass. */ 457 uint8_t needsplit; /* Need SPLIT pass. */
372#endif 458#endif
459 uint8_t retryrec; /* Retry recording. */
373 460
374 GCRef *trace; /* Array of traces. */ 461 GCRef *trace; /* Array of traces. */
375 TraceNo freetrace; /* Start of scan for next free trace. */ 462 TraceNo freetrace; /* Start of scan for next free trace. */
376 MSize sizetrace; /* Size of trace array. */ 463 MSize sizetrace; /* Size of trace array. */
464 IRRef1 ktrace; /* Reference to KGC with GCtrace. */
377 465
378 IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */ 466 IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
379 TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */ 467 TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
@@ -384,7 +472,10 @@ typedef struct jit_State {
384 472
385 HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */ 473 HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
386 uint32_t penaltyslot; /* Round-robin index into penalty slots. */ 474 uint32_t penaltyslot; /* Round-robin index into penalty slots. */
387 uint32_t prngstate; /* PRNG state. */ 475
476#ifdef LUAJIT_ENABLE_TABLE_BUMP
477 RBCHashEntry rbchash[RBCHASH_SLOTS]; /* Reverse bytecode map. */
478#endif
388 479
389 BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */ 480 BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
390 uint32_t bpropslot; /* Round-robin index into bpropcache slots. */ 481 uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
@@ -406,14 +497,18 @@ typedef struct jit_State {
406 size_t szallmcarea; /* Total size of all allocated mcode areas. */ 497 size_t szallmcarea; /* Total size of all allocated mcode areas. */
407 498
408 TValue errinfo; /* Additional info element for trace errors. */ 499 TValue errinfo; /* Additional info element for trace errors. */
500
501#if LJ_HASPROFILE
502 GCproto *prev_pt; /* Previous prototype. */
503 BCLine prev_line; /* Previous line. */
504 int prof_mode; /* Profiling mode: 0, 'f', 'l'. */
505#endif
409} jit_State; 506} jit_State;
410 507
411/* Trivial PRNG e.g. used for penalty randomization. */ 508#ifdef LUA_USE_ASSERT
412static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) 509#define lj_assertJ(c, ...) lj_assertG_(J2G(J), (c), __VA_ARGS__)
413{ 510#else
414 /* Yes, this LCG is very weak, but that doesn't matter for our use case. */ 511#define lj_assertJ(c, ...) ((void)J)
415 J->prngstate = J->prngstate * 1103515245 + 12345; 512#endif
416 return J->prngstate >> (32-bits);
417}
418 513
419#endif 514#endif
diff --git a/src/lj_lex.c b/src/lj_lex.c
index ca942583..61c7ff43 100644
--- a/src/lj_lex.c
+++ b/src/lj_lex.c
@@ -12,6 +12,7 @@
12#include "lj_obj.h" 12#include "lj_obj.h"
13#include "lj_gc.h" 13#include "lj_gc.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_buf.h"
15#include "lj_str.h" 16#include "lj_str.h"
16#if LJ_HASFFI 17#if LJ_HASFFI
17#include "lj_tab.h" 18#include "lj_tab.h"
@@ -24,6 +25,7 @@
24#include "lj_parse.h" 25#include "lj_parse.h"
25#include "lj_char.h" 26#include "lj_char.h"
26#include "lj_strscan.h" 27#include "lj_strscan.h"
28#include "lj_strfmt.h"
27 29
28/* Lua lexer token names. */ 30/* Lua lexer token names. */
29static const char *const tokennames[] = { 31static const char *const tokennames[] = {
@@ -37,54 +39,54 @@ TKDEF(TKSTR1, TKSTR2)
37 39
38/* -- Buffer handling ----------------------------------------------------- */ 40/* -- Buffer handling ----------------------------------------------------- */
39 41
40#define char2int(c) ((int)(uint8_t)(c)) 42#define LEX_EOF (-1)
41#define next(ls) \ 43#define lex_iseol(ls) (ls->c == '\n' || ls->c == '\r')
42 (ls->current = (ls->n--) > 0 ? char2int(*ls->p++) : fillbuf(ls))
43#define save_and_next(ls) (save(ls, ls->current), next(ls))
44#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
45#define END_OF_STREAM (-1)
46 44
47static int fillbuf(LexState *ls) 45/* Get more input from reader. */
46static LJ_NOINLINE LexChar lex_more(LexState *ls)
48{ 47{
49 size_t sz; 48 size_t sz;
50 const char *buf = ls->rfunc(ls->L, ls->rdata, &sz); 49 const char *p = ls->rfunc(ls->L, ls->rdata, &sz);
51 if (buf == NULL || sz == 0) return END_OF_STREAM; 50 if (p == NULL || sz == 0) return LEX_EOF;
52 if (sz >= LJ_MAX_MEM) { 51 if (sz >= LJ_MAX_BUF) {
53 if (sz != ~(size_t)0) lj_err_mem(ls->L); 52 if (sz != ~(size_t)0) lj_err_mem(ls->L);
53 sz = ~(uintptr_t)0 - (uintptr_t)p;
54 if (sz >= LJ_MAX_BUF) sz = LJ_MAX_BUF-1;
54 ls->endmark = 1; 55 ls->endmark = 1;
55 } 56 }
56 ls->n = (MSize)sz - 1; 57 ls->pe = p + sz;
57 ls->p = buf; 58 ls->p = p + 1;
58 return char2int(*(ls->p++)); 59 return (LexChar)(uint8_t)p[0];
59} 60}
60 61
61static LJ_NOINLINE void save_grow(LexState *ls, int c) 62/* Get next character. */
63static LJ_AINLINE LexChar lex_next(LexState *ls)
62{ 64{
63 MSize newsize; 65 return (ls->c = ls->p < ls->pe ? (LexChar)(uint8_t)*ls->p++ : lex_more(ls));
64 if (ls->sb.sz >= LJ_MAX_STR/2)
65 lj_lex_error(ls, 0, LJ_ERR_XELEM);
66 newsize = ls->sb.sz * 2;
67 lj_str_resizebuf(ls->L, &ls->sb, newsize);
68 ls->sb.buf[ls->sb.n++] = (char)c;
69} 66}
70 67
71static LJ_AINLINE void save(LexState *ls, int c) 68/* Save character. */
69static LJ_AINLINE void lex_save(LexState *ls, LexChar c)
72{ 70{
73 if (LJ_UNLIKELY(ls->sb.n + 1 > ls->sb.sz)) 71 lj_buf_putb(&ls->sb, c);
74 save_grow(ls, c);
75 else
76 ls->sb.buf[ls->sb.n++] = (char)c;
77} 72}
78 73
79static void inclinenumber(LexState *ls) 74/* Save previous character and get next character. */
75static LJ_AINLINE LexChar lex_savenext(LexState *ls)
80{ 76{
81 int old = ls->current; 77 lex_save(ls, ls->c);
82 lua_assert(currIsNewline(ls)); 78 return lex_next(ls);
83 next(ls); /* skip `\n' or `\r' */ 79}
84 if (currIsNewline(ls) && ls->current != old) 80
85 next(ls); /* skip `\n\r' or `\r\n' */ 81/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */
82static void lex_newline(LexState *ls)
83{
84 LexChar old = ls->c;
85 lj_assertLS(lex_iseol(ls), "bad usage");
86 lex_next(ls); /* Skip "\n" or "\r". */
87 if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */
86 if (++ls->linenumber >= LJ_MAX_LINE) 88 if (++ls->linenumber >= LJ_MAX_LINE)
87 lj_lex_error(ls, ls->token, LJ_ERR_XLINES); 89 lj_lex_error(ls, ls->tok, LJ_ERR_XLINES);
88} 90}
89 91
90/* -- Scanner for terminals ----------------------------------------------- */ 92/* -- Scanner for terminals ----------------------------------------------- */
@@ -93,19 +95,17 @@ static void inclinenumber(LexState *ls)
93static void lex_number(LexState *ls, TValue *tv) 95static void lex_number(LexState *ls, TValue *tv)
94{ 96{
95 StrScanFmt fmt; 97 StrScanFmt fmt;
96 int c, xp = 'e'; 98 LexChar c, xp = 'e';
97 lua_assert(lj_char_isdigit(ls->current)); 99 lj_assertLS(lj_char_isdigit(ls->c), "bad usage");
98 if ((c = ls->current) == '0') { 100 if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x')
99 save_and_next(ls); 101 xp = 'p';
100 if ((ls->current | 0x20) == 'x') xp = 'p'; 102 while (lj_char_isident(ls->c) || ls->c == '.' ||
101 } 103 ((ls->c == '-' || ls->c == '+') && (c | 0x20) == xp)) {
102 while (lj_char_isident(ls->current) || ls->current == '.' || 104 c = ls->c;
103 ((ls->current == '-' || ls->current == '+') && (c | 0x20) == xp)) { 105 lex_savenext(ls);
104 c = ls->current;
105 save_and_next(ls);
106 } 106 }
107 save(ls, '\0'); 107 lex_save(ls, '\0');
108 fmt = lj_strscan_scan((const uint8_t *)ls->sb.buf, tv, 108 fmt = lj_strscan_scan((const uint8_t *)sbufB(&ls->sb), sbuflen(&ls->sb)-1, tv,
109 (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) | 109 (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) |
110 (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0)); 110 (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0));
111 if (LJ_DUALNUM && fmt == STRSCAN_INT) { 111 if (LJ_DUALNUM && fmt == STRSCAN_INT) {
@@ -116,7 +116,8 @@ static void lex_number(LexState *ls, TValue *tv)
116 } else if (fmt != STRSCAN_ERROR) { 116 } else if (fmt != STRSCAN_ERROR) {
117 lua_State *L = ls->L; 117 lua_State *L = ls->L;
118 GCcdata *cd; 118 GCcdata *cd;
119 lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG); 119 lj_assertLS(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG,
120 "unexpected number format %d", fmt);
120 if (!ctype_ctsG(G(L))) { 121 if (!ctype_ctsG(G(L))) {
121 ptrdiff_t oldtop = savestack(L, L->top); 122 ptrdiff_t oldtop = savestack(L, L->top);
122 luaopen_ffi(L); /* Load FFI library on-demand. */ 123 luaopen_ffi(L); /* Load FFI library on-demand. */
@@ -133,65 +134,66 @@ static void lex_number(LexState *ls, TValue *tv)
133 lj_parse_keepcdata(ls, tv, cd); 134 lj_parse_keepcdata(ls, tv, cd);
134#endif 135#endif
135 } else { 136 } else {
136 lua_assert(fmt == STRSCAN_ERROR); 137 lj_assertLS(fmt == STRSCAN_ERROR,
138 "unexpected number format %d", fmt);
137 lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); 139 lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
138 } 140 }
139} 141}
140 142
141static int skip_sep(LexState *ls) 143/* Skip equal signs for "[=...=[" and "]=...=]" and return their count. */
144static int lex_skipeq(LexState *ls)
142{ 145{
143 int count = 0; 146 int count = 0;
144 int s = ls->current; 147 LexChar s = ls->c;
145 lua_assert(s == '[' || s == ']'); 148 lj_assertLS(s == '[' || s == ']', "bad usage");
146 save_and_next(ls); 149 while (lex_savenext(ls) == '=' && count < 0x20000000)
147 while (ls->current == '=' && count < 0x20000000) {
148 save_and_next(ls);
149 count++; 150 count++;
150 } 151 return (ls->c == s) ? count : (-count) - 1;
151 return (ls->current == s) ? count : (-count) - 1;
152} 152}
153 153
154static void read_long_string(LexState *ls, TValue *tv, int sep) 154/* Parse a long string or long comment (tv set to NULL). */
155static void lex_longstring(LexState *ls, TValue *tv, int sep)
155{ 156{
156 save_and_next(ls); /* skip 2nd `[' */ 157 lex_savenext(ls); /* Skip second '['. */
157 if (currIsNewline(ls)) /* string starts with a newline? */ 158 if (lex_iseol(ls)) /* Skip initial newline. */
158 inclinenumber(ls); /* skip it */ 159 lex_newline(ls);
159 for (;;) { 160 for (;;) {
160 switch (ls->current) { 161 switch (ls->c) {
161 case END_OF_STREAM: 162 case LEX_EOF:
162 lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM); 163 lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
163 break; 164 break;
164 case ']': 165 case ']':
165 if (skip_sep(ls) == sep) { 166 if (lex_skipeq(ls) == sep) {
166 save_and_next(ls); /* skip 2nd `]' */ 167 lex_savenext(ls); /* Skip second ']'. */
167 goto endloop; 168 goto endloop;
168 } 169 }
169 break; 170 break;
170 case '\n': 171 case '\n':
171 case '\r': 172 case '\r':
172 save(ls, '\n'); 173 lex_save(ls, '\n');
173 inclinenumber(ls); 174 lex_newline(ls);
174 if (!tv) lj_str_resetbuf(&ls->sb); /* avoid wasting space */ 175 if (!tv) lj_buf_reset(&ls->sb); /* Don't waste space for comments. */
175 break; 176 break;
176 default: 177 default:
177 if (tv) save_and_next(ls); 178 lex_savenext(ls);
178 else next(ls);
179 break; 179 break;
180 } 180 }
181 } endloop: 181 } endloop:
182 if (tv) { 182 if (tv) {
183 GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep), 183 GCstr *str = lj_parse_keepstr(ls, sbufB(&ls->sb) + (2 + (MSize)sep),
184 ls->sb.n - 2*(2 + (MSize)sep)); 184 sbuflen(&ls->sb) - 2*(2 + (MSize)sep));
185 setstrV(ls->L, tv, str); 185 setstrV(ls->L, tv, str);
186 } 186 }
187} 187}
188 188
189static void read_string(LexState *ls, int delim, TValue *tv) 189/* Parse a string. */
190static void lex_string(LexState *ls, TValue *tv)
190{ 191{
191 save_and_next(ls); 192 LexChar delim = ls->c; /* Delimiter is '\'' or '"'. */
192 while (ls->current != delim) { 193 lex_savenext(ls);
193 switch (ls->current) { 194 while (ls->c != delim) {
194 case END_OF_STREAM: 195 switch (ls->c) {
196 case LEX_EOF:
195 lj_lex_error(ls, TK_eof, LJ_ERR_XSTR); 197 lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
196 continue; 198 continue;
197 case '\n': 199 case '\n':
@@ -199,7 +201,7 @@ static void read_string(LexState *ls, int delim, TValue *tv)
199 lj_lex_error(ls, TK_string, LJ_ERR_XSTR); 201 lj_lex_error(ls, TK_string, LJ_ERR_XSTR);
200 continue; 202 continue;
201 case '\\': { 203 case '\\': {
202 int c = next(ls); /* Skip the '\\'. */ 204 LexChar c = lex_next(ls); /* Skip the '\\'. */
203 switch (c) { 205 switch (c) {
204 case 'a': c = '\a'; break; 206 case 'a': c = '\a'; break;
205 case 'b': c = '\b'; break; 207 case 'b': c = '\b'; break;
@@ -209,111 +211,139 @@ static void read_string(LexState *ls, int delim, TValue *tv)
209 case 't': c = '\t'; break; 211 case 't': c = '\t'; break;
210 case 'v': c = '\v'; break; 212 case 'v': c = '\v'; break;
211 case 'x': /* Hexadecimal escape '\xXX'. */ 213 case 'x': /* Hexadecimal escape '\xXX'. */
212 c = (next(ls) & 15u) << 4; 214 c = (lex_next(ls) & 15u) << 4;
213 if (!lj_char_isdigit(ls->current)) { 215 if (!lj_char_isdigit(ls->c)) {
214 if (!lj_char_isxdigit(ls->current)) goto err_xesc; 216 if (!lj_char_isxdigit(ls->c)) goto err_xesc;
215 c += 9 << 4; 217 c += 9 << 4;
216 } 218 }
217 c += (next(ls) & 15u); 219 c += (lex_next(ls) & 15u);
218 if (!lj_char_isdigit(ls->current)) { 220 if (!lj_char_isdigit(ls->c)) {
219 if (!lj_char_isxdigit(ls->current)) goto err_xesc; 221 if (!lj_char_isxdigit(ls->c)) goto err_xesc;
220 c += 9; 222 c += 9;
221 } 223 }
222 break; 224 break;
225 case 'u': /* Unicode escape '\u{XX...}'. */
226 if (lex_next(ls) != '{') goto err_xesc;
227 lex_next(ls);
228 c = 0;
229 do {
230 c = (c << 4) | (ls->c & 15u);
231 if (!lj_char_isdigit(ls->c)) {
232 if (!lj_char_isxdigit(ls->c)) goto err_xesc;
233 c += 9;
234 }
235 if (c >= 0x110000) goto err_xesc; /* Out of Unicode range. */
236 } while (lex_next(ls) != '}');
237 if (c < 0x800) {
238 if (c < 0x80) break;
239 lex_save(ls, 0xc0 | (c >> 6));
240 } else {
241 if (c >= 0x10000) {
242 lex_save(ls, 0xf0 | (c >> 18));
243 lex_save(ls, 0x80 | ((c >> 12) & 0x3f));
244 } else {
245 if (c >= 0xd800 && c < 0xe000) goto err_xesc; /* No surrogates. */
246 lex_save(ls, 0xe0 | (c >> 12));
247 }
248 lex_save(ls, 0x80 | ((c >> 6) & 0x3f));
249 }
250 c = 0x80 | (c & 0x3f);
251 break;
223 case 'z': /* Skip whitespace. */ 252 case 'z': /* Skip whitespace. */
224 next(ls); 253 lex_next(ls);
225 while (lj_char_isspace(ls->current)) 254 while (lj_char_isspace(ls->c))
226 if (currIsNewline(ls)) inclinenumber(ls); else next(ls); 255 if (lex_iseol(ls)) lex_newline(ls); else lex_next(ls);
227 continue; 256 continue;
228 case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue; 257 case '\n': case '\r': lex_save(ls, '\n'); lex_newline(ls); continue;
229 case '\\': case '\"': case '\'': break; 258 case '\\': case '\"': case '\'': break;
230 case END_OF_STREAM: continue; 259 case LEX_EOF: continue;
231 default: 260 default:
232 if (!lj_char_isdigit(c)) 261 if (!lj_char_isdigit(c))
233 goto err_xesc; 262 goto err_xesc;
234 c -= '0'; /* Decimal escape '\ddd'. */ 263 c -= '0'; /* Decimal escape '\ddd'. */
235 if (lj_char_isdigit(next(ls))) { 264 if (lj_char_isdigit(lex_next(ls))) {
236 c = c*10 + (ls->current - '0'); 265 c = c*10 + (ls->c - '0');
237 if (lj_char_isdigit(next(ls))) { 266 if (lj_char_isdigit(lex_next(ls))) {
238 c = c*10 + (ls->current - '0'); 267 c = c*10 + (ls->c - '0');
239 if (c > 255) { 268 if (c > 255) {
240 err_xesc: 269 err_xesc:
241 lj_lex_error(ls, TK_string, LJ_ERR_XESC); 270 lj_lex_error(ls, TK_string, LJ_ERR_XESC);
242 } 271 }
243 next(ls); 272 lex_next(ls);
244 } 273 }
245 } 274 }
246 save(ls, c); 275 lex_save(ls, c);
247 continue; 276 continue;
248 } 277 }
249 save(ls, c); 278 lex_save(ls, c);
250 next(ls); 279 lex_next(ls);
251 continue; 280 continue;
252 } 281 }
253 default: 282 default:
254 save_and_next(ls); 283 lex_savenext(ls);
255 break; 284 break;
256 } 285 }
257 } 286 }
258 save_and_next(ls); /* skip delimiter */ 287 lex_savenext(ls); /* Skip trailing delimiter. */
259 setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2)); 288 setstrV(ls->L, tv,
289 lj_parse_keepstr(ls, sbufB(&ls->sb)+1, sbuflen(&ls->sb)-2));
260} 290}
261 291
262/* -- Main lexical scanner ------------------------------------------------ */ 292/* -- Main lexical scanner ------------------------------------------------ */
263 293
264static int llex(LexState *ls, TValue *tv) 294/* Get next lexical token. */
295static LexToken lex_scan(LexState *ls, TValue *tv)
265{ 296{
266 lj_str_resetbuf(&ls->sb); 297 lj_buf_reset(&ls->sb);
267 for (;;) { 298 for (;;) {
268 if (lj_char_isident(ls->current)) { 299 if (lj_char_isident(ls->c)) {
269 GCstr *s; 300 GCstr *s;
270 if (lj_char_isdigit(ls->current)) { /* Numeric literal. */ 301 if (lj_char_isdigit(ls->c)) { /* Numeric literal. */
271 lex_number(ls, tv); 302 lex_number(ls, tv);
272 return TK_number; 303 return TK_number;
273 } 304 }
274 /* Identifier or reserved word. */ 305 /* Identifier or reserved word. */
275 do { 306 do {
276 save_and_next(ls); 307 lex_savenext(ls);
277 } while (lj_char_isident(ls->current)); 308 } while (lj_char_isident(ls->c));
278 s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n); 309 s = lj_parse_keepstr(ls, sbufB(&ls->sb), sbuflen(&ls->sb));
279 setstrV(ls->L, tv, s); 310 setstrV(ls->L, tv, s);
280 if (s->reserved > 0) /* Reserved word? */ 311 if (s->reserved > 0) /* Reserved word? */
281 return TK_OFS + s->reserved; 312 return TK_OFS + s->reserved;
282 return TK_name; 313 return TK_name;
283 } 314 }
284 switch (ls->current) { 315 switch (ls->c) {
285 case '\n': 316 case '\n':
286 case '\r': 317 case '\r':
287 inclinenumber(ls); 318 lex_newline(ls);
288 continue; 319 continue;
289 case ' ': 320 case ' ':
290 case '\t': 321 case '\t':
291 case '\v': 322 case '\v':
292 case '\f': 323 case '\f':
293 next(ls); 324 lex_next(ls);
294 continue; 325 continue;
295 case '-': 326 case '-':
296 next(ls); 327 lex_next(ls);
297 if (ls->current != '-') return '-'; 328 if (ls->c != '-') return '-';
298 /* else is a comment */ 329 lex_next(ls);
299 next(ls); 330 if (ls->c == '[') { /* Long comment "--[=*[...]=*]". */
300 if (ls->current == '[') { 331 int sep = lex_skipeq(ls);
301 int sep = skip_sep(ls); 332 lj_buf_reset(&ls->sb); /* `lex_skipeq' may dirty the buffer */
302 lj_str_resetbuf(&ls->sb); /* `skip_sep' may dirty the buffer */
303 if (sep >= 0) { 333 if (sep >= 0) {
304 read_long_string(ls, NULL, sep); /* long comment */ 334 lex_longstring(ls, NULL, sep);
305 lj_str_resetbuf(&ls->sb); 335 lj_buf_reset(&ls->sb);
306 continue; 336 continue;
307 } 337 }
308 } 338 }
309 /* else short comment */ 339 /* Short comment "--.*\n". */
310 while (!currIsNewline(ls) && ls->current != END_OF_STREAM) 340 while (!lex_iseol(ls) && ls->c != LEX_EOF)
311 next(ls); 341 lex_next(ls);
312 continue; 342 continue;
313 case '[': { 343 case '[': {
314 int sep = skip_sep(ls); 344 int sep = lex_skipeq(ls);
315 if (sep >= 0) { 345 if (sep >= 0) {
316 read_long_string(ls, tv, sep); 346 lex_longstring(ls, tv, sep);
317 return TK_string; 347 return TK_string;
318 } else if (sep == -1) { 348 } else if (sep == -1) {
319 return '['; 349 return '[';
@@ -323,44 +353,43 @@ static int llex(LexState *ls, TValue *tv)
323 } 353 }
324 } 354 }
325 case '=': 355 case '=':
326 next(ls); 356 lex_next(ls);
327 if (ls->current != '=') return '='; else { next(ls); return TK_eq; } 357 if (ls->c != '=') return '='; else { lex_next(ls); return TK_eq; }
328 case '<': 358 case '<':
329 next(ls); 359 lex_next(ls);
330 if (ls->current != '=') return '<'; else { next(ls); return TK_le; } 360 if (ls->c != '=') return '<'; else { lex_next(ls); return TK_le; }
331 case '>': 361 case '>':
332 next(ls); 362 lex_next(ls);
333 if (ls->current != '=') return '>'; else { next(ls); return TK_ge; } 363 if (ls->c != '=') return '>'; else { lex_next(ls); return TK_ge; }
334 case '~': 364 case '~':
335 next(ls); 365 lex_next(ls);
336 if (ls->current != '=') return '~'; else { next(ls); return TK_ne; } 366 if (ls->c != '=') return '~'; else { lex_next(ls); return TK_ne; }
337 case ':': 367 case ':':
338 next(ls); 368 lex_next(ls);
339 if (ls->current != ':') return ':'; else { next(ls); return TK_label; } 369 if (ls->c != ':') return ':'; else { lex_next(ls); return TK_label; }
340 case '"': 370 case '"':
341 case '\'': 371 case '\'':
342 read_string(ls, ls->current, tv); 372 lex_string(ls, tv);
343 return TK_string; 373 return TK_string;
344 case '.': 374 case '.':
345 save_and_next(ls); 375 if (lex_savenext(ls) == '.') {
346 if (ls->current == '.') { 376 lex_next(ls);
347 next(ls); 377 if (ls->c == '.') {
348 if (ls->current == '.') { 378 lex_next(ls);
349 next(ls);
350 return TK_dots; /* ... */ 379 return TK_dots; /* ... */
351 } 380 }
352 return TK_concat; /* .. */ 381 return TK_concat; /* .. */
353 } else if (!lj_char_isdigit(ls->current)) { 382 } else if (!lj_char_isdigit(ls->c)) {
354 return '.'; 383 return '.';
355 } else { 384 } else {
356 lex_number(ls, tv); 385 lex_number(ls, tv);
357 return TK_number; 386 return TK_number;
358 } 387 }
359 case END_OF_STREAM: 388 case LEX_EOF:
360 return TK_eof; 389 return TK_eof;
361 default: { 390 default: {
362 int c = ls->current; 391 LexChar c = ls->c;
363 next(ls); 392 lex_next(ls);
364 return c; /* Single-char tokens (+ - / ...). */ 393 return c; /* Single-char tokens (+ - / ...). */
365 } 394 }
366 } 395 }
@@ -375,36 +404,33 @@ int lj_lex_setup(lua_State *L, LexState *ls)
375 int header = 0; 404 int header = 0;
376 ls->L = L; 405 ls->L = L;
377 ls->fs = NULL; 406 ls->fs = NULL;
378 ls->n = 0; 407 ls->pe = ls->p = NULL;
379 ls->p = NULL;
380 ls->vstack = NULL; 408 ls->vstack = NULL;
381 ls->sizevstack = 0; 409 ls->sizevstack = 0;
382 ls->vtop = 0; 410 ls->vtop = 0;
383 ls->bcstack = NULL; 411 ls->bcstack = NULL;
384 ls->sizebcstack = 0; 412 ls->sizebcstack = 0;
385 ls->token = 0; 413 ls->tok = 0;
386 ls->lookahead = TK_eof; /* No look-ahead token. */ 414 ls->lookahead = TK_eof; /* No look-ahead token. */
387 ls->linenumber = 1; 415 ls->linenumber = 1;
388 ls->lastline = 1; 416 ls->lastline = 1;
389 ls->endmark = 0; 417 ls->endmark = 0;
390 lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF); 418 lex_next(ls); /* Read-ahead first char. */
391 next(ls); /* Read-ahead first char. */ 419 if (ls->c == 0xef && ls->p + 2 <= ls->pe && (uint8_t)ls->p[0] == 0xbb &&
392 if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb && 420 (uint8_t)ls->p[1] == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
393 char2int(ls->p[1]) == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
394 ls->n -= 2;
395 ls->p += 2; 421 ls->p += 2;
396 next(ls); 422 lex_next(ls);
397 header = 1; 423 header = 1;
398 } 424 }
399 if (ls->current == '#') { /* Skip POSIX #! header line. */ 425 if (ls->c == '#') { /* Skip POSIX #! header line. */
400 do { 426 do {
401 next(ls); 427 lex_next(ls);
402 if (ls->current == END_OF_STREAM) return 0; 428 if (ls->c == LEX_EOF) return 0;
403 } while (!currIsNewline(ls)); 429 } while (!lex_iseol(ls));
404 inclinenumber(ls); 430 lex_newline(ls);
405 header = 1; 431 header = 1;
406 } 432 }
407 if (ls->current == LUA_SIGNATURE[0]) { /* Bytecode dump. */ 433 if (ls->c == LUA_SIGNATURE[0]) { /* Bytecode dump. */
408 if (header) { 434 if (header) {
409 /* 435 /*
410 ** Loading bytecode with an extra header is disabled for security 436 ** Loading bytecode with an extra header is disabled for security
@@ -426,55 +452,60 @@ void lj_lex_cleanup(lua_State *L, LexState *ls)
426 global_State *g = G(L); 452 global_State *g = G(L);
427 lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine); 453 lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine);
428 lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo); 454 lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo);
429 lj_str_freebuf(g, &ls->sb); 455 lj_buf_free(g, &ls->sb);
430} 456}
431 457
458/* Return next lexical token. */
432void lj_lex_next(LexState *ls) 459void lj_lex_next(LexState *ls)
433{ 460{
434 ls->lastline = ls->linenumber; 461 ls->lastline = ls->linenumber;
435 if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */ 462 if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */
436 ls->token = llex(ls, &ls->tokenval); /* Get next token. */ 463 ls->tok = lex_scan(ls, &ls->tokval); /* Get next token. */
437 } else { /* Otherwise return lookahead token. */ 464 } else { /* Otherwise return lookahead token. */
438 ls->token = ls->lookahead; 465 ls->tok = ls->lookahead;
439 ls->lookahead = TK_eof; 466 ls->lookahead = TK_eof;
440 ls->tokenval = ls->lookaheadval; 467 ls->tokval = ls->lookaheadval;
441 } 468 }
442} 469}
443 470
471/* Look ahead for the next token. */
444LexToken lj_lex_lookahead(LexState *ls) 472LexToken lj_lex_lookahead(LexState *ls)
445{ 473{
446 lua_assert(ls->lookahead == TK_eof); 474 lj_assertLS(ls->lookahead == TK_eof, "double lookahead");
447 ls->lookahead = llex(ls, &ls->lookaheadval); 475 ls->lookahead = lex_scan(ls, &ls->lookaheadval);
448 return ls->lookahead; 476 return ls->lookahead;
449} 477}
450 478
451const char *lj_lex_token2str(LexState *ls, LexToken token) 479/* Convert token to string. */
480const char *lj_lex_token2str(LexState *ls, LexToken tok)
452{ 481{
453 if (token > TK_OFS) 482 if (tok > TK_OFS)
454 return tokennames[token-TK_OFS-1]; 483 return tokennames[tok-TK_OFS-1];
455 else if (!lj_char_iscntrl(token)) 484 else if (!lj_char_iscntrl(tok))
456 return lj_str_pushf(ls->L, "%c", token); 485 return lj_strfmt_pushf(ls->L, "%c", tok);
457 else 486 else
458 return lj_str_pushf(ls->L, "char(%d)", token); 487 return lj_strfmt_pushf(ls->L, "char(%d)", tok);
459} 488}
460 489
461void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...) 490/* Lexer error. */
491void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...)
462{ 492{
463 const char *tok; 493 const char *tokstr;
464 va_list argp; 494 va_list argp;
465 if (token == 0) { 495 if (tok == 0) {
466 tok = NULL; 496 tokstr = NULL;
467 } else if (token == TK_name || token == TK_string || token == TK_number) { 497 } else if (tok == TK_name || tok == TK_string || tok == TK_number) {
468 save(ls, '\0'); 498 lex_save(ls, '\0');
469 tok = ls->sb.buf; 499 tokstr = sbufB(&ls->sb);
470 } else { 500 } else {
471 tok = lj_lex_token2str(ls, token); 501 tokstr = lj_lex_token2str(ls, tok);
472 } 502 }
473 va_start(argp, em); 503 va_start(argp, em);
474 lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp); 504 lj_err_lex(ls->L, ls->chunkname, tokstr, ls->linenumber, em, argp);
475 va_end(argp); 505 va_end(argp);
476} 506}
477 507
508/* Initialize strings for reserved words. */
478void lj_lex_init(lua_State *L) 509void lj_lex_init(lua_State *L)
479{ 510{
480 uint32_t i; 511 uint32_t i;
diff --git a/src/lj_lex.h b/src/lj_lex.h
index b1bc4876..e1b5610b 100644
--- a/src/lj_lex.h
+++ b/src/lj_lex.h
@@ -30,7 +30,8 @@ TKDEF(TKENUM1, TKENUM2)
30 TK_RESERVED = TK_while - TK_OFS 30 TK_RESERVED = TK_while - TK_OFS
31}; 31};
32 32
33typedef int LexToken; 33typedef int LexChar; /* Lexical character. Unsigned ext. from char. */
34typedef int LexToken; /* Lexical token. */
34 35
35/* Combined bytecode ins/line. Only used during bytecode generation. */ 36/* Combined bytecode ins/line. Only used during bytecode generation. */
36typedef struct BCInsLine { 37typedef struct BCInsLine {
@@ -51,13 +52,13 @@ typedef struct VarInfo {
51typedef struct LexState { 52typedef struct LexState {
52 struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */ 53 struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
53 struct lua_State *L; /* Lua state. */ 54 struct lua_State *L; /* Lua state. */
54 TValue tokenval; /* Current token value. */ 55 TValue tokval; /* Current token value. */
55 TValue lookaheadval; /* Lookahead token value. */ 56 TValue lookaheadval; /* Lookahead token value. */
56 int current; /* Current character (charint). */
57 LexToken token; /* Current token. */
58 LexToken lookahead; /* Lookahead token. */
59 MSize n; /* Bytes left in input buffer. */
60 const char *p; /* Current position in input buffer. */ 57 const char *p; /* Current position in input buffer. */
58 const char *pe; /* End of input buffer. */
59 LexChar c; /* Current character. */
60 LexToken tok; /* Current token. */
61 LexToken lookahead; /* Lookahead token. */
61 SBuf sb; /* String buffer for tokens. */ 62 SBuf sb; /* String buffer for tokens. */
62 lua_Reader rfunc; /* Reader callback. */ 63 lua_Reader rfunc; /* Reader callback. */
63 void *rdata; /* Reader callback data. */ 64 void *rdata; /* Reader callback data. */
@@ -79,8 +80,14 @@ LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
79LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls); 80LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
80LJ_FUNC void lj_lex_next(LexState *ls); 81LJ_FUNC void lj_lex_next(LexState *ls);
81LJ_FUNC LexToken lj_lex_lookahead(LexState *ls); 82LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
82LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token); 83LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken tok);
83LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...); 84LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...);
84LJ_FUNC void lj_lex_init(lua_State *L); 85LJ_FUNC void lj_lex_init(lua_State *L);
85 86
87#ifdef LUA_USE_ASSERT
88#define lj_assertLS(c, ...) (lj_assertG_(G(ls->L), (c), __VA_ARGS__))
89#else
90#define lj_assertLS(c, ...) ((void)ls)
91#endif
92
86#endif 93#endif
diff --git a/src/lj_lib.c b/src/lj_lib.c
index 0c91a1c8..56fb6555 100644
--- a/src/lj_lib.c
+++ b/src/lj_lib.c
@@ -18,6 +18,9 @@
18#include "lj_dispatch.h" 18#include "lj_dispatch.h"
19#include "lj_vm.h" 19#include "lj_vm.h"
20#include "lj_strscan.h" 20#include "lj_strscan.h"
21#include "lj_strfmt.h"
22#include "lj_lex.h"
23#include "lj_bcdump.h"
21#include "lj_lib.h" 24#include "lj_lib.h"
22 25
23/* -- Library initialization ---------------------------------------------- */ 26/* -- Library initialization ---------------------------------------------- */
@@ -43,6 +46,28 @@ static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
43 return tabV(L->top-1); 46 return tabV(L->top-1);
44} 47}
45 48
49static const uint8_t *lib_read_lfunc(lua_State *L, const uint8_t *p, GCtab *tab)
50{
51 int len = *p++;
52 GCstr *name = lj_str_new(L, (const char *)p, len);
53 LexState ls;
54 GCproto *pt;
55 GCfunc *fn;
56 memset(&ls, 0, sizeof(ls));
57 ls.L = L;
58 ls.p = (const char *)(p+len);
59 ls.pe = (const char *)~(uintptr_t)0;
60 ls.c = -1;
61 ls.level = (BCDUMP_F_STRIP|(LJ_BE*BCDUMP_F_BE));
62 ls.chunkname = name;
63 pt = lj_bcread_proto(&ls);
64 pt->firstline = ~(BCLine)0;
65 fn = lj_func_newL_empty(L, pt, tabref(L->env));
66 /* NOBARRIER: See below for common barrier. */
67 setfuncV(L, lj_tab_setstr(L, tab, name), fn);
68 return (const uint8_t *)ls.p;
69}
70
46void lj_lib_register(lua_State *L, const char *libname, 71void lj_lib_register(lua_State *L, const char *libname,
47 const uint8_t *p, const lua_CFunction *cf) 72 const uint8_t *p, const lua_CFunction *cf)
48{ 73{
@@ -87,6 +112,9 @@ void lj_lib_register(lua_State *L, const char *libname,
87 ofn = fn; 112 ofn = fn;
88 } else { 113 } else {
89 switch (tag | len) { 114 switch (tag | len) {
115 case LIBINIT_LUA:
116 p = lib_read_lfunc(L, p, tab);
117 break;
90 case LIBINIT_SET: 118 case LIBINIT_SET:
91 L->top -= 2; 119 L->top -= 2;
92 if (tvisstr(L->top+1) && strV(L->top+1)->len == 0) 120 if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
@@ -120,6 +148,37 @@ void lj_lib_register(lua_State *L, const char *libname,
120 } 148 }
121} 149}
122 150
151/* Push internal function on the stack. */
152GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n)
153{
154 GCfunc *fn;
155 lua_pushcclosure(L, f, n);
156 fn = funcV(L->top-1);
157 fn->c.ffid = (uint8_t)id;
158 setmref(fn->c.pc, &G(L)->bc_cfunc_int);
159 return fn;
160}
161
162void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, GCtab *env)
163{
164 luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
165 lua_pushcfunction(L, f);
166 /* NOBARRIER: The function is new (marked white). */
167 setgcref(funcV(L->top-1)->c.env, obj2gco(env));
168 lua_setfield(L, -2, name);
169 L->top--;
170}
171
172int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, const char *name)
173{
174 GCfunc *fn = lj_lib_pushcf(L, cf, id);
175 GCtab *t = tabref(curr_func(L)->c.env); /* Reference to parent table. */
176 setfuncV(L, lj_tab_setstr(L, t, lj_str_newz(L, name)), fn);
177 lj_gc_anybarriert(L, t);
178 setfuncV(L, L->top++, fn);
179 return 1;
180}
181
123/* -- Type checks --------------------------------------------------------- */ 182/* -- Type checks --------------------------------------------------------- */
124 183
125TValue *lj_lib_checkany(lua_State *L, int narg) 184TValue *lj_lib_checkany(lua_State *L, int narg)
@@ -137,7 +196,7 @@ GCstr *lj_lib_checkstr(lua_State *L, int narg)
137 if (LJ_LIKELY(tvisstr(o))) { 196 if (LJ_LIKELY(tvisstr(o))) {
138 return strV(o); 197 return strV(o);
139 } else if (tvisnumber(o)) { 198 } else if (tvisnumber(o)) {
140 GCstr *s = lj_str_fromnumber(L, o); 199 GCstr *s = lj_strfmt_number(L, o);
141 setstrV(L, o, s); 200 setstrV(L, o, s);
142 return s; 201 return s;
143 } 202 }
@@ -196,20 +255,6 @@ int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
196 return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def; 255 return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def;
197} 256}
198 257
199int32_t lj_lib_checkbit(lua_State *L, int narg)
200{
201 TValue *o = L->base + narg-1;
202 if (!(o < L->top && lj_strscan_numberobj(o)))
203 lj_err_argt(L, narg, LUA_TNUMBER);
204 if (LJ_LIKELY(tvisint(o))) {
205 return intV(o);
206 } else {
207 int32_t i = lj_num2bit(numV(o));
208 if (LJ_DUALNUM) setintV(o, i);
209 return i;
210 }
211}
212
213GCfunc *lj_lib_checkfunc(lua_State *L, int narg) 258GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
214{ 259{
215 TValue *o = L->base + narg-1; 260 TValue *o = L->base + narg-1;
diff --git a/src/lj_lib.h b/src/lj_lib.h
index 754e7444..496bdb2a 100644
--- a/src/lj_lib.h
+++ b/src/lj_lib.h
@@ -41,15 +41,22 @@ LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
41LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg); 41LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
42LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg); 42LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
43LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def); 43LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
44LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
45LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg); 44LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
46LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg); 45LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
47LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg); 46LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
48LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst); 47LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
49 48
50/* Avoid including lj_frame.h. */ 49/* Avoid including lj_frame.h. */
50#if LJ_GC64
51#define lj_lib_upvalue(L, n) \
52 (&gcval(L->base-2)->fn.c.upvalue[(n)-1])
53#elif LJ_FR2
54#define lj_lib_upvalue(L, n) \
55 (&gcref((L->base-2)->gcr)->fn.c.upvalue[(n)-1])
56#else
51#define lj_lib_upvalue(L, n) \ 57#define lj_lib_upvalue(L, n) \
52 (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1]) 58 (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
59#endif
53 60
54#if LJ_TARGET_WINDOWS 61#if LJ_TARGET_WINDOWS
55#define lj_lib_checkfpu(L) \ 62#define lj_lib_checkfpu(L) \
@@ -60,23 +67,14 @@ LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
60#define lj_lib_checkfpu(L) UNUSED(L) 67#define lj_lib_checkfpu(L) UNUSED(L)
61#endif 68#endif
62 69
63/* Push internal function on the stack. */ 70LJ_FUNC GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n);
64static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
65 int id, int n)
66{
67 GCfunc *fn;
68 lua_pushcclosure(L, f, n);
69 fn = funcV(L->top-1);
70 fn->c.ffid = (uint8_t)id;
71 setmref(fn->c.pc, &G(L)->bc_cfunc_int);
72}
73
74#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0)) 71#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
75 72
76/* Library function declarations. Scanned by buildvm. */ 73/* Library function declarations. Scanned by buildvm. */
77#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L) 74#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
78#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L) 75#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
79#define LJLIB_ASM_(name) 76#define LJLIB_ASM_(name)
77#define LJLIB_LUA(name)
80#define LJLIB_SET(name) 78#define LJLIB_SET(name)
81#define LJLIB_PUSH(arg) 79#define LJLIB_PUSH(arg)
82#define LJLIB_REC(handler) 80#define LJLIB_REC(handler)
@@ -88,6 +86,10 @@ static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
88 86
89LJ_FUNC void lj_lib_register(lua_State *L, const char *libname, 87LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
90 const uint8_t *init, const lua_CFunction *cf); 88 const uint8_t *init, const lua_CFunction *cf);
89LJ_FUNC void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f,
90 GCtab *env);
91LJ_FUNC int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id,
92 const char *name);
91 93
92/* Library init data tags. */ 94/* Library init data tags. */
93#define LIBINIT_LENMASK 0x3f 95#define LIBINIT_LENMASK 0x3f
@@ -96,7 +98,8 @@ LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
96#define LIBINIT_ASM 0x40 98#define LIBINIT_ASM 0x40
97#define LIBINIT_ASM_ 0x80 99#define LIBINIT_ASM_ 0x80
98#define LIBINIT_STRING 0xc0 100#define LIBINIT_STRING 0xc0
99#define LIBINIT_MAXSTR 0x39 101#define LIBINIT_MAXSTR 0x38
102#define LIBINIT_LUA 0xf9
100#define LIBINIT_SET 0xfa 103#define LIBINIT_SET 0xfa
101#define LIBINIT_NUMBER 0xfb 104#define LIBINIT_NUMBER 0xfb
102#define LIBINIT_COPY 0xfc 105#define LIBINIT_COPY 0xfc
@@ -104,9 +107,4 @@ LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
104#define LIBINIT_FFID 0xfe 107#define LIBINIT_FFID 0xfe
105#define LIBINIT_END 0xff 108#define LIBINIT_END 0xff
106 109
107/* Exported library functions. */
108
109typedef struct RandomState RandomState;
110LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
111
112#endif 110#endif
diff --git a/src/lj_load.c b/src/lj_load.c
index ec6f0aba..e5918c04 100644
--- a/src/lj_load.c
+++ b/src/lj_load.c
@@ -15,7 +15,7 @@
15#include "lj_obj.h" 15#include "lj_obj.h"
16#include "lj_gc.h" 16#include "lj_gc.h"
17#include "lj_err.h" 17#include "lj_err.h"
18#include "lj_str.h" 18#include "lj_buf.h"
19#include "lj_func.h" 19#include "lj_func.h"
20#include "lj_frame.h" 20#include "lj_frame.h"
21#include "lj_vm.h" 21#include "lj_vm.h"
@@ -54,7 +54,7 @@ LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data,
54 ls.rdata = data; 54 ls.rdata = data;
55 ls.chunkarg = chunkname ? chunkname : "?"; 55 ls.chunkarg = chunkname ? chunkname : "?";
56 ls.mode = mode; 56 ls.mode = mode;
57 lj_str_initbuf(&ls.sb); 57 lj_buf_init(L, &ls.sb);
58 status = lj_vm_cpcall(L, NULL, &ls, cpparser); 58 status = lj_vm_cpcall(L, NULL, &ls, cpparser);
59 lj_lex_cleanup(L, &ls); 59 lj_lex_cleanup(L, &ls);
60 lj_gc_check(L); 60 lj_gc_check(L);
@@ -159,7 +159,7 @@ LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
159LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) 159LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
160{ 160{
161 cTValue *o = L->top-1; 161 cTValue *o = L->top-1;
162 api_check(L, L->top > L->base); 162 lj_checkapi(L->top > L->base, "top slot empty");
163 if (tvisfunc(o) && isluafunc(funcV(o))) 163 if (tvisfunc(o) && isluafunc(funcV(o)))
164 return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); 164 return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
165 else 165 else
diff --git a/src/lj_mcode.c b/src/lj_mcode.c
index 02ade1d4..a5153b25 100644
--- a/src/lj_mcode.c
+++ b/src/lj_mcode.c
@@ -14,6 +14,7 @@
14#include "lj_mcode.h" 14#include "lj_mcode.h"
15#include "lj_trace.h" 15#include "lj_trace.h"
16#include "lj_dispatch.h" 16#include "lj_dispatch.h"
17#include "lj_prng.h"
17#endif 18#endif
18#if LJ_HASJIT || LJ_HASFFI 19#if LJ_HASJIT || LJ_HASFFI
19#include "lj_vm.h" 20#include "lj_vm.h"
@@ -44,7 +45,7 @@ void lj_mcode_sync(void *start, void *end)
44 sys_icache_invalidate(start, (char *)end-(char *)start); 45 sys_icache_invalidate(start, (char *)end-(char *)start);
45#elif LJ_TARGET_PPC 46#elif LJ_TARGET_PPC
46 lj_vm_cachesync(start, end); 47 lj_vm_cachesync(start, end);
47#elif defined(__GNUC__) 48#elif defined(__GNUC__) || defined(__clang__)
48 __clear_cache(start, end); 49 __clear_cache(start, end);
49#else 50#else
50#error "Missing builtin to flush instruction cache" 51#error "Missing builtin to flush instruction cache"
@@ -66,8 +67,8 @@ void lj_mcode_sync(void *start, void *end)
66 67
67static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot) 68static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
68{ 69{
69 void *p = VirtualAlloc((void *)hint, sz, 70 void *p = LJ_WIN_VALLOC((void *)hint, sz,
70 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot); 71 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
71 if (!p && !hint) 72 if (!p && !hint)
72 lj_trace_err(J, LJ_TRERR_MCODEAL); 73 lj_trace_err(J, LJ_TRERR_MCODEAL);
73 return p; 74 return p;
@@ -82,7 +83,7 @@ static void mcode_free(jit_State *J, void *p, size_t sz)
82static int mcode_setprot(void *p, size_t sz, DWORD prot) 83static int mcode_setprot(void *p, size_t sz, DWORD prot)
83{ 84{
84 DWORD oprot; 85 DWORD oprot;
85 return !VirtualProtect(p, sz, prot, &oprot); 86 return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
86} 87}
87 88
88#elif LJ_TARGET_POSIX 89#elif LJ_TARGET_POSIX
@@ -118,52 +119,34 @@ static int mcode_setprot(void *p, size_t sz, int prot)
118 return mprotect(p, sz, prot); 119 return mprotect(p, sz, prot);
119} 120}
120 121
121#elif LJ_64
122
123#error "Missing OS support for explicit placement of executable memory"
124
125#else 122#else
126 123
127/* Fallback allocator. This will fail if memory is not executable by default. */ 124#error "Missing OS support for explicit placement of executable memory"
128#define LUAJIT_UNPROTECT_MCODE
129#define MCPROT_RW 0
130#define MCPROT_RX 0
131#define MCPROT_RWX 0
132
133static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
134{
135 UNUSED(hint); UNUSED(prot);
136 return lj_mem_new(J->L, sz);
137}
138
139static void mcode_free(jit_State *J, void *p, size_t sz)
140{
141 lj_mem_free(J2G(J), p, sz);
142}
143 125
144#endif 126#endif
145 127
146/* -- MCode area protection ----------------------------------------------- */ 128/* -- MCode area protection ----------------------------------------------- */
147 129
148/* Define this ONLY if page protection twiddling becomes a bottleneck. */ 130#if LUAJIT_SECURITY_MCODE == 0
149#ifdef LUAJIT_UNPROTECT_MCODE
150 131
151/* It's generally considered to be a potential security risk to have 132/* Define this ONLY if page protection twiddling becomes a bottleneck.
133**
134** It's generally considered to be a potential security risk to have
152** pages with simultaneous write *and* execute access in a process. 135** pages with simultaneous write *and* execute access in a process.
153** 136**
154** Do not even think about using this mode for server processes or 137** Do not even think about using this mode for server processes or
155** apps handling untrusted external data (such as a browser). 138** apps handling untrusted external data.
156** 139**
157** The security risk is not in LuaJIT itself -- but if an adversary finds 140** The security risk is not in LuaJIT itself -- but if an adversary finds
158** any *other* flaw in your C application logic, then any RWX memory page 141** any *other* flaw in your C application logic, then any RWX memory pages
159** simplifies writing an exploit considerably. 142** simplify writing an exploit considerably.
160*/ 143*/
161#define MCPROT_GEN MCPROT_RWX 144#define MCPROT_GEN MCPROT_RWX
162#define MCPROT_RUN MCPROT_RWX 145#define MCPROT_RUN MCPROT_RWX
163 146
164static void mcode_protect(jit_State *J, int prot) 147static void mcode_protect(jit_State *J, int prot)
165{ 148{
166 UNUSED(J); UNUSED(prot); 149 UNUSED(J); UNUSED(prot); UNUSED(mcode_setprot);
167} 150}
168 151
169#else 152#else
@@ -221,8 +204,8 @@ static void *mcode_alloc(jit_State *J, size_t sz)
221 */ 204 */
222#if LJ_TARGET_MIPS 205#if LJ_TARGET_MIPS
223 /* Use the middle of the 256MB-aligned region. */ 206 /* Use the middle of the 256MB-aligned region. */
224 uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) + 207 uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
225 0x08000000u; 208 ~(uintptr_t)0x0fffffffu) + 0x08000000u;
226#else 209#else
227 uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff; 210 uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
228#endif 211#endif
@@ -242,7 +225,7 @@ static void *mcode_alloc(jit_State *J, size_t sz)
242 } 225 }
243 /* Next try probing 64K-aligned pseudo-random addresses. */ 226 /* Next try probing 64K-aligned pseudo-random addresses. */
244 do { 227 do {
245 hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16; 228 hint = lj_prng_u64(&J2G(J)->prng) & ((1u<<LJ_TARGET_JUMPRANGE)-0x10000);
246 } while (!(hint + sz < range+range)); 229 } while (!(hint + sz < range+range));
247 hint = target + hint - range; 230 hint = target + hint - range;
248 } 231 }
@@ -255,7 +238,7 @@ static void *mcode_alloc(jit_State *J, size_t sz)
255/* All memory addresses are reachable by relative jumps. */ 238/* All memory addresses are reachable by relative jumps. */
256static void *mcode_alloc(jit_State *J, size_t sz) 239static void *mcode_alloc(jit_State *J, size_t sz)
257{ 240{
258#ifdef __OpenBSD__ 241#if defined(__OpenBSD__) || LJ_TARGET_UWP
259 /* Allow better executable memory allocation for OpenBSD W^X mode. */ 242 /* Allow better executable memory allocation for OpenBSD W^X mode. */
260 void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN); 243 void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
261 if (p && mcode_setprot(p, sz, MCPROT_GEN)) { 244 if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
@@ -331,7 +314,7 @@ void lj_mcode_abort(jit_State *J)
331/* Set/reset protection to allow patching of MCode areas. */ 314/* Set/reset protection to allow patching of MCode areas. */
332MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish) 315MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
333{ 316{
334#ifdef LUAJIT_UNPROTECT_MCODE 317#if LUAJIT_SECURITY_MCODE == 0
335 UNUSED(J); UNUSED(ptr); UNUSED(finish); 318 UNUSED(J); UNUSED(ptr); UNUSED(finish);
336 return NULL; 319 return NULL;
337#else 320#else
@@ -351,7 +334,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
351 /* Otherwise search through the list of MCode areas. */ 334 /* Otherwise search through the list of MCode areas. */
352 for (;;) { 335 for (;;) {
353 mc = ((MCLink *)mc)->next; 336 mc = ((MCLink *)mc)->next;
354 lua_assert(mc != NULL); 337 lj_assertJ(mc != NULL, "broken MCode area chain");
355 if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { 338 if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
356 if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN))) 339 if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
357 mcode_protfail(J); 340 mcode_protfail(J);
diff --git a/src/lj_meta.c b/src/lj_meta.c
index 6affc18b..2cdb6a0f 100644
--- a/src/lj_meta.c
+++ b/src/lj_meta.c
@@ -12,6 +12,7 @@
12#include "lj_obj.h" 12#include "lj_obj.h"
13#include "lj_gc.h" 13#include "lj_gc.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_buf.h"
15#include "lj_str.h" 16#include "lj_str.h"
16#include "lj_tab.h" 17#include "lj_tab.h"
17#include "lj_meta.h" 18#include "lj_meta.h"
@@ -19,6 +20,8 @@
19#include "lj_bc.h" 20#include "lj_bc.h"
20#include "lj_vm.h" 21#include "lj_vm.h"
21#include "lj_strscan.h" 22#include "lj_strscan.h"
23#include "lj_strfmt.h"
24#include "lj_lib.h"
22 25
23/* -- Metamethod handling ------------------------------------------------- */ 26/* -- Metamethod handling ------------------------------------------------- */
24 27
@@ -44,7 +47,7 @@ void lj_meta_init(lua_State *L)
44cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) 47cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
45{ 48{
46 cTValue *mo = lj_tab_getstr(mt, name); 49 cTValue *mo = lj_tab_getstr(mt, name);
47 lua_assert(mm <= MM_FAST); 50 lj_assertX(mm <= MM_FAST, "bad metamethod %d", mm);
48 if (!mo || tvisnil(mo)) { /* No metamethod? */ 51 if (!mo || tvisnil(mo)) { /* No metamethod? */
49 mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */ 52 mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */
50 return NULL; 53 return NULL;
@@ -77,12 +80,16 @@ int lj_meta_tailcall(lua_State *L, cTValue *tv)
77 TValue *base = L->base; 80 TValue *base = L->base;
78 TValue *top = L->top; 81 TValue *top = L->top;
79 const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */ 82 const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */
80 copyTV(L, base-1, tv); /* Replace frame with new object. */ 83 copyTV(L, base-1-LJ_FR2, tv); /* Replace frame with new object. */
81 top->u32.lo = LJ_CONT_TAILCALL; 84 if (LJ_FR2)
82 setframe_pc(top, pc); 85 (top++)->u64 = LJ_CONT_TAILCALL;
83 setframe_gc(top+1, obj2gco(L)); /* Dummy frame object. */ 86 else
84 setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT); 87 top->u32.lo = LJ_CONT_TAILCALL;
85 L->base = L->top = top+2; 88 setframe_pc(top++, pc);
89 if (LJ_FR2) top++;
90 setframe_gc(top, obj2gco(L), LJ_TTHREAD); /* Dummy frame object. */
91 setframe_ftsz(top, ((char *)(top+1) - (char *)base) + FRAME_CONT);
92 L->base = L->top = top+1;
86 /* 93 /*
87 ** before: [old_mo|PC] [... ...] 94 ** before: [old_mo|PC] [... ...]
88 ** ^base ^top 95 ** ^base ^top
@@ -113,11 +120,13 @@ static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
113 */ 120 */
114 TValue *top = L->top; 121 TValue *top = L->top;
115 if (curr_funcisL(L)) top = curr_topL(L); 122 if (curr_funcisL(L)) top = curr_topL(L);
116 setcont(top, cont); /* Assembler VM stores PC in upper word. */ 123 setcont(top++, cont); /* Assembler VM stores PC in upper word or FR2. */
117 copyTV(L, top+1, mo); /* Store metamethod and two arguments. */ 124 if (LJ_FR2) setnilV(top++);
118 copyTV(L, top+2, a); 125 copyTV(L, top++, mo); /* Store metamethod and two arguments. */
119 copyTV(L, top+3, b); 126 if (LJ_FR2) setnilV(top++);
120 return top+2; /* Return new base. */ 127 copyTV(L, top, a);
128 copyTV(L, top+1, b);
129 return top; /* Return new base. */
121} 130}
122 131
123/* -- C helpers for some instructions, called from assembler VM ----------- */ 132/* -- C helpers for some instructions, called from assembler VM ----------- */
@@ -225,27 +234,14 @@ TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
225 } 234 }
226} 235}
227 236
228/* In-place coercion of a number to a string. */
229static LJ_AINLINE int tostring(lua_State *L, TValue *o)
230{
231 if (tvisstr(o)) {
232 return 1;
233 } else if (tvisnumber(o)) {
234 setstrV(L, o, lj_str_fromnumber(L, o));
235 return 1;
236 } else {
237 return 0;
238 }
239}
240
241/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */ 237/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
242TValue *lj_meta_cat(lua_State *L, TValue *top, int left) 238TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
243{ 239{
244 int fromc = 0; 240 int fromc = 0;
245 if (left < 0) { left = -left; fromc = 1; } 241 if (left < 0) { left = -left; fromc = 1; }
246 do { 242 do {
247 int n = 1; 243 if (!(tvisstr(top) || tvisnumber(top)) ||
248 if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) { 244 !(tvisstr(top-1) || tvisnumber(top-1))) {
249 cTValue *mo = lj_meta_lookup(L, top-1, MM_concat); 245 cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
250 if (tvisnil(mo)) { 246 if (tvisnil(mo)) {
251 mo = lj_meta_lookup(L, top, MM_concat); 247 mo = lj_meta_lookup(L, top, MM_concat);
@@ -266,13 +262,12 @@ TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
266 ** after mm: [...][CAT stack ...] <--push-- [result] 262 ** after mm: [...][CAT stack ...] <--push-- [result]
267 ** next step: [...][CAT stack .............] 263 ** next step: [...][CAT stack .............]
268 */ 264 */
269 copyTV(L, top+2, top); /* Careful with the order of stack copies! */ 265 copyTV(L, top+2*LJ_FR2+2, top); /* Carefully ordered stack copies! */
270 copyTV(L, top+1, top-1); 266 copyTV(L, top+2*LJ_FR2+1, top-1);
271 copyTV(L, top, mo); 267 copyTV(L, top+LJ_FR2, mo);
272 setcont(top-1, lj_cont_cat); 268 setcont(top-1, lj_cont_cat);
269 if (LJ_FR2) { setnilV(top); setnilV(top+2); top += 2; }
273 return top+1; /* Trigger metamethod call. */ 270 return top+1; /* Trigger metamethod call. */
274 } else if (strV(top)->len == 0) { /* Shortcut. */
275 (void)tostring(L, top-1);
276 } else { 271 } else {
277 /* Pick as many strings as possible from the top and concatenate them: 272 /* Pick as many strings as possible from the top and concatenate them:
278 ** 273 **
@@ -281,27 +276,28 @@ TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
281 ** concat: [...][CAT stack ...] [result] 276 ** concat: [...][CAT stack ...] [result]
282 ** next step: [...][CAT stack ............] 277 ** next step: [...][CAT stack ............]
283 */ 278 */
284 MSize tlen = strV(top)->len; 279 TValue *e, *o = top;
285 char *buffer; 280 uint64_t tlen = tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM;
286 int i; 281 SBuf *sb;
287 for (n = 1; n <= left && tostring(L, top-n); n++) { 282 do {
288 MSize len = strV(top-n)->len; 283 o--; tlen += tvisstr(o) ? strV(o)->len : STRFMT_MAXBUF_NUM;
289 if (len >= LJ_MAX_STR - tlen) 284 } while (--left > 0 && (tvisstr(o-1) || tvisnumber(o-1)));
290 lj_err_msg(L, LJ_ERR_STROV); 285 if (tlen >= LJ_MAX_STR) lj_err_msg(L, LJ_ERR_STROV);
291 tlen += len; 286 sb = lj_buf_tmp_(L);
292 } 287 lj_buf_more(sb, (MSize)tlen);
293 buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen); 288 for (e = top, top = o; o <= e; o++) {
294 n--; 289 if (tvisstr(o)) {
295 tlen = 0; 290 GCstr *s = strV(o);
296 for (i = n; i >= 0; i--) { 291 MSize len = s->len;
297 MSize len = strV(top-i)->len; 292 lj_buf_putmem(sb, strdata(s), len);
298 memcpy(buffer + tlen, strVdata(top-i), len); 293 } else if (tvisint(o)) {
299 tlen += len; 294 lj_strfmt_putint(sb, intV(o));
295 } else {
296 lj_strfmt_putfnum(sb, STRFMT_G14, numV(o));
297 }
300 } 298 }
301 setstrV(L, top-n, lj_str_new(L, buffer, tlen)); 299 setstrV(L, top, lj_buf_str(L, sb));
302 } 300 }
303 left -= n;
304 top -= n;
305 } while (left >= 1); 301 } while (left >= 1);
306 if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) { 302 if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
307 if (!fromc) L->top = curr_topL(L); 303 if (!fromc) L->top = curr_topL(L);
@@ -338,12 +334,14 @@ TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
338 return (TValue *)(intptr_t)ne; 334 return (TValue *)(intptr_t)ne;
339 } 335 }
340 top = curr_top(L); 336 top = curr_top(L);
341 setcont(top, ne ? lj_cont_condf : lj_cont_condt); 337 setcont(top++, ne ? lj_cont_condf : lj_cont_condt);
342 copyTV(L, top+1, mo); 338 if (LJ_FR2) setnilV(top++);
339 copyTV(L, top++, mo);
340 if (LJ_FR2) setnilV(top++);
343 it = ~(uint32_t)o1->gch.gct; 341 it = ~(uint32_t)o1->gch.gct;
344 setgcV(L, top+2, o1, it); 342 setgcV(L, top, o1, it);
345 setgcV(L, top+3, o2, it); 343 setgcV(L, top+1, o2, it);
346 return top+2; /* Trigger metamethod call. */ 344 return top; /* Trigger metamethod call. */
347 } 345 }
348 return (TValue *)(intptr_t)ne; 346 return (TValue *)(intptr_t)ne;
349} 347}
@@ -365,8 +363,8 @@ TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
365 } else if (op == BC_ISEQN) { 363 } else if (op == BC_ISEQN) {
366 o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; 364 o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
367 } else { 365 } else {
368 lua_assert(op == BC_ISEQP); 366 lj_assertL(op == BC_ISEQP, "bad bytecode op %d", op);
369 setitype(&tv, ~bc_d(ins)); 367 setpriV(&tv, ~bc_d(ins));
370 o2 = &tv; 368 o2 = &tv;
371 } 369 }
372 mo = lj_meta_lookup(L, o1mm, MM_eq); 370 mo = lj_meta_lookup(L, o1mm, MM_eq);
@@ -423,6 +421,18 @@ TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
423 } 421 }
424} 422}
425 423
424/* Helper for ISTYPE and ISNUM. Implicit coercion or error. */
425void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp)
426{
427 L->top = curr_topL(L);
428 ra++; tp--;
429 lj_assertL(LJ_DUALNUM || tp != ~LJ_TNUMX, "bad type for ISTYPE");
430 if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra);
431 else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra);
432 else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra);
433 else lj_err_argtype(L, ra, lj_obj_itypename[tp]);
434}
435
426/* Helper for calls. __call metamethod. */ 436/* Helper for calls. __call metamethod. */
427void lj_meta_call(lua_State *L, TValue *func, TValue *top) 437void lj_meta_call(lua_State *L, TValue *func, TValue *top)
428{ 438{
@@ -430,7 +440,8 @@ void lj_meta_call(lua_State *L, TValue *func, TValue *top)
430 TValue *p; 440 TValue *p;
431 if (!tvisfunc(mo)) 441 if (!tvisfunc(mo))
432 lj_err_optype_call(L, func); 442 lj_err_optype_call(L, func);
433 for (p = top; p > func; p--) copyTV(L, p, p-1); 443 for (p = top; p > func+2*LJ_FR2; p--) copyTV(L, p, p-1);
444 if (LJ_FR2) copyTV(L, func+2, func);
434 copyTV(L, func, mo); 445 copyTV(L, func, mo);
435} 446}
436 447
diff --git a/src/lj_meta.h b/src/lj_meta.h
index bd911e94..d6d31924 100644
--- a/src/lj_meta.h
+++ b/src/lj_meta.h
@@ -31,6 +31,7 @@ LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
31LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne); 31LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
32LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins); 32LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
33LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op); 33LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
34LJ_FUNCA void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp);
34LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top); 35LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
35LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o); 36LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
36 37
diff --git a/src/lj_obj.c b/src/lj_obj.c
index 9cdce625..5d16e0e5 100644
--- a/src/lj_obj.c
+++ b/src/lj_obj.c
@@ -20,7 +20,7 @@ LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */
20}; 20};
21 21
22/* Compare two objects without calling metamethods. */ 22/* Compare two objects without calling metamethods. */
23int lj_obj_equal(cTValue *o1, cTValue *o2) 23int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2)
24{ 24{
25 if (itype(o1) == itype(o2)) { 25 if (itype(o1) == itype(o2)) {
26 if (tvispri(o1)) 26 if (tvispri(o1))
@@ -33,3 +33,18 @@ int lj_obj_equal(cTValue *o1, cTValue *o2)
33 return numberVnum(o1) == numberVnum(o2); 33 return numberVnum(o1) == numberVnum(o2);
34} 34}
35 35
36/* Return pointer to object or its object data. */
37const void * LJ_FASTCALL lj_obj_ptr(cTValue *o)
38{
39 if (tvisudata(o))
40 return uddata(udataV(o));
41 else if (tvislightud(o))
42 return lightudV(o);
43 else if (LJ_HASFFI && tviscdata(o))
44 return cdataptr(cdataV(o));
45 else if (tvisgcv(o))
46 return gcV(o);
47 else
48 return NULL;
49}
50
diff --git a/src/lj_obj.h b/src/lj_obj.h
index 5c3c88fc..9d4bec08 100644
--- a/src/lj_obj.h
+++ b/src/lj_obj.h
@@ -13,44 +13,77 @@
13#include "lj_def.h" 13#include "lj_def.h"
14#include "lj_arch.h" 14#include "lj_arch.h"
15 15
16/* -- Memory references (32 bit address space) ---------------------------- */ 16/* -- Memory references --------------------------------------------------- */
17 17
18/* Memory size. */ 18/* Memory and GC object sizes. */
19typedef uint32_t MSize; 19typedef uint32_t MSize;
20#if LJ_GC64
21typedef uint64_t GCSize;
22#else
23typedef uint32_t GCSize;
24#endif
20 25
21/* Memory reference */ 26/* Memory reference */
22typedef struct MRef { 27typedef struct MRef {
28#if LJ_GC64
29 uint64_t ptr64; /* True 64 bit pointer. */
30#else
23 uint32_t ptr32; /* Pseudo 32 bit pointer. */ 31 uint32_t ptr32; /* Pseudo 32 bit pointer. */
32#endif
24} MRef; 33} MRef;
25 34
35#if LJ_GC64
36#define mref(r, t) ((t *)(void *)(r).ptr64)
37
38#define setmref(r, p) ((r).ptr64 = (uint64_t)(void *)(p))
39#define setmrefr(r, v) ((r).ptr64 = (v).ptr64)
40#else
26#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32) 41#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
27 42
28#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p)) 43#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
29#define setmrefr(r, v) ((r).ptr32 = (v).ptr32) 44#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
45#endif
30 46
31/* -- GC object references (32 bit address space) ------------------------- */ 47/* -- GC object references ------------------------------------------------ */
32 48
33/* GCobj reference */ 49/* GCobj reference */
34typedef struct GCRef { 50typedef struct GCRef {
51#if LJ_GC64
52 uint64_t gcptr64; /* True 64 bit pointer. */
53#else
35 uint32_t gcptr32; /* Pseudo 32 bit pointer. */ 54 uint32_t gcptr32; /* Pseudo 32 bit pointer. */
55#endif
36} GCRef; 56} GCRef;
37 57
38/* Common GC header for all collectable objects. */ 58/* Common GC header for all collectable objects. */
39#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct 59#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
40/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */ 60/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
41 61
62#if LJ_GC64
63#define gcref(r) ((GCobj *)(r).gcptr64)
64#define gcrefp(r, t) ((t *)(void *)(r).gcptr64)
65#define gcrefu(r) ((r).gcptr64)
66#define gcrefeq(r1, r2) ((r1).gcptr64 == (r2).gcptr64)
67
68#define setgcref(r, gc) ((r).gcptr64 = (uint64_t)&(gc)->gch)
69#define setgcreft(r, gc, it) \
70 (r).gcptr64 = (uint64_t)&(gc)->gch | (((uint64_t)(it)) << 47)
71#define setgcrefp(r, p) ((r).gcptr64 = (uint64_t)(p))
72#define setgcrefnull(r) ((r).gcptr64 = 0)
73#define setgcrefr(r, v) ((r).gcptr64 = (v).gcptr64)
74#else
42#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32) 75#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
43#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32) 76#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
44#define gcrefu(r) ((r).gcptr32) 77#define gcrefu(r) ((r).gcptr32)
45#define gcrefi(r) ((int32_t)(r).gcptr32)
46#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32) 78#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
47#define gcnext(gc) (gcref((gc)->gch.nextgc))
48 79
49#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch) 80#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
50#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i))
51#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p)) 81#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
52#define setgcrefnull(r) ((r).gcptr32 = 0) 82#define setgcrefnull(r) ((r).gcptr32 = 0)
53#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32) 83#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
84#endif
85
86#define gcnext(gc) (gcref((gc)->gch.nextgc))
54 87
55/* IMPORTANT NOTE: 88/* IMPORTANT NOTE:
56** 89**
@@ -119,11 +152,12 @@ typedef int32_t BCLine; /* Bytecode line number. */
119/* Internal assembler functions. Never call these directly from C. */ 152/* Internal assembler functions. Never call these directly from C. */
120typedef void (*ASMFunction)(void); 153typedef void (*ASMFunction)(void);
121 154
122/* Resizable string buffer. Need this here, details in lj_str.h. */ 155/* Resizable string buffer. Need this here, details in lj_buf.h. */
123typedef struct SBuf { 156typedef struct SBuf {
124 char *buf; /* String buffer base. */ 157 MRef p; /* String buffer pointer. */
125 MSize n; /* String buffer length. */ 158 MRef e; /* String buffer end pointer. */
126 MSize sz; /* String buffer size. */ 159 MRef b; /* String buffer base. */
160 MRef L; /* lua_State, used for buffer resizing. */
127} SBuf; 161} SBuf;
128 162
129/* -- Tags and values ----------------------------------------------------- */ 163/* -- Tags and values ----------------------------------------------------- */
@@ -131,13 +165,23 @@ typedef struct SBuf {
131/* Frame link. */ 165/* Frame link. */
132typedef union { 166typedef union {
133 int32_t ftsz; /* Frame type and size of previous frame. */ 167 int32_t ftsz; /* Frame type and size of previous frame. */
134 MRef pcr; /* Overlaps PC for Lua frames. */ 168 MRef pcr; /* Or PC for Lua frames. */
135} FrameLink; 169} FrameLink;
136 170
137/* Tagged value. */ 171/* Tagged value. */
138typedef LJ_ALIGN(8) union TValue { 172typedef LJ_ALIGN(8) union TValue {
139 uint64_t u64; /* 64 bit pattern overlaps number. */ 173 uint64_t u64; /* 64 bit pattern overlaps number. */
140 lua_Number n; /* Number object overlaps split tag/value object. */ 174 lua_Number n; /* Number object overlaps split tag/value object. */
175#if LJ_GC64
176 GCRef gcr; /* GCobj reference with tag. */
177 int64_t it64;
178 struct {
179 LJ_ENDIAN_LOHI(
180 int32_t i; /* Integer value. */
181 , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
182 )
183 };
184#else
141 struct { 185 struct {
142 LJ_ENDIAN_LOHI( 186 LJ_ENDIAN_LOHI(
143 union { 187 union {
@@ -147,12 +191,17 @@ typedef LJ_ALIGN(8) union TValue {
147 , uint32_t it; /* Internal object tag. Must overlap MSW of number. */ 191 , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
148 ) 192 )
149 }; 193 };
194#endif
195#if LJ_FR2
196 int64_t ftsz; /* Frame type and size of previous frame, or PC. */
197#else
150 struct { 198 struct {
151 LJ_ENDIAN_LOHI( 199 LJ_ENDIAN_LOHI(
152 GCRef func; /* Function for next frame (or dummy L). */ 200 GCRef func; /* Function for next frame (or dummy L). */
153 , FrameLink tp; /* Link to previous frame. */ 201 , FrameLink tp; /* Link to previous frame. */
154 ) 202 )
155 } fr; 203 } fr;
204#endif
156 struct { 205 struct {
157 LJ_ENDIAN_LOHI( 206 LJ_ENDIAN_LOHI(
158 uint32_t lo; /* Lower 32 bits of number. */ 207 uint32_t lo; /* Lower 32 bits of number. */
@@ -172,6 +221,8 @@ typedef const TValue cTValue;
172 221
173/* Internal object tags. 222/* Internal object tags.
174** 223**
224** Format for 32 bit GC references (!LJ_GC64):
225**
175** Internal tags overlap the MSW of a number object (must be a double). 226** Internal tags overlap the MSW of a number object (must be a double).
176** Interpreted as a double these are special NaNs. The FPU only generates 227** Interpreted as a double these are special NaNs. The FPU only generates
177** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available 228** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
@@ -186,6 +237,18 @@ typedef const TValue cTValue;
186** int (LJ_DUALNUM)| itype | int | 237** int (LJ_DUALNUM)| itype | int |
187** number -------double------ 238** number -------double------
188** 239**
240** Format for 64 bit GC references (LJ_GC64):
241**
242** The upper 13 bits must be 1 (0xfff8...) for a special NaN. The next
243** 4 bits hold the internal tag. The lowest 47 bits either hold a pointer,
244** a zero-extended 32 bit integer or all bits set to 1 for primitive types.
245**
246** ------MSW------.------LSW------
247** primitive types |1..1|itype|1..................1|
248** GC objects/lightud |1..1|itype|-------GCRef--------|
249** int (LJ_DUALNUM) |1..1|itype|0..0|-----int-------|
250** number ------------double-------------
251**
189** ORDER LJ_T 252** ORDER LJ_T
190** Primitive types nil/false/true must be first, lightuserdata next. 253** Primitive types nil/false/true must be first, lightuserdata next.
191** GC objects are at the end, table/userdata must be lowest. 254** GC objects are at the end, table/userdata must be lowest.
@@ -208,7 +271,7 @@ typedef const TValue cTValue;
208#define LJ_TNUMX (~13u) 271#define LJ_TNUMX (~13u)
209 272
210/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */ 273/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */
211#if LJ_64 274#if LJ_64 && !LJ_GC64
212#define LJ_TISNUM 0xfffeffffu 275#define LJ_TISNUM 0xfffeffffu
213#else 276#else
214#define LJ_TISNUM LJ_TNUMX 277#define LJ_TISNUM LJ_TNUMX
@@ -218,14 +281,22 @@ typedef const TValue cTValue;
218#define LJ_TISGCV (LJ_TSTR+1) 281#define LJ_TISGCV (LJ_TSTR+1)
219#define LJ_TISTABUD LJ_TTAB 282#define LJ_TISTABUD LJ_TTAB
220 283
284#if LJ_GC64
285#define LJ_GCVMASK (((uint64_t)1 << 47) - 1)
286#endif
287
221/* -- String object ------------------------------------------------------- */ 288/* -- String object ------------------------------------------------------- */
222 289
290typedef uint32_t StrHash; /* String hash value. */
291typedef uint32_t StrID; /* String ID. */
292
223/* String object header. String payload follows. */ 293/* String object header. String payload follows. */
224typedef struct GCstr { 294typedef struct GCstr {
225 GCHeader; 295 GCHeader;
226 uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */ 296 uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
227 uint8_t unused; 297 uint8_t hashalg; /* Hash algorithm. */
228 MSize hash; /* Hash of string. */ 298 StrID sid; /* Interned string ID. */
299 StrHash hash; /* Hash of string. */
229 MSize len; /* Size of string. */ 300 MSize len; /* Size of string. */
230} GCstr; 301} GCstr;
231 302
@@ -233,7 +304,6 @@ typedef struct GCstr {
233#define strdata(s) ((const char *)((s)+1)) 304#define strdata(s) ((const char *)((s)+1))
234#define strdatawr(s) ((char *)((s)+1)) 305#define strdatawr(s) ((char *)((s)+1))
235#define strVdata(o) strdata(strV(o)) 306#define strVdata(o) strdata(strV(o))
236#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1)
237 307
238/* -- Userdata object ----------------------------------------------------- */ 308/* -- Userdata object ----------------------------------------------------- */
239 309
@@ -291,6 +361,9 @@ typedef struct GCproto {
291 uint8_t numparams; /* Number of parameters. */ 361 uint8_t numparams; /* Number of parameters. */
292 uint8_t framesize; /* Fixed frame size. */ 362 uint8_t framesize; /* Fixed frame size. */
293 MSize sizebc; /* Number of bytecode instructions. */ 363 MSize sizebc; /* Number of bytecode instructions. */
364#if LJ_GC64
365 uint32_t unused_gc64;
366#endif
294 GCRef gclist; 367 GCRef gclist;
295 MRef k; /* Split constant array (points to the middle). */ 368 MRef k; /* Split constant array (points to the middle). */
296 MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */ 369 MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
@@ -402,7 +475,9 @@ typedef struct Node {
402 TValue val; /* Value object. Must be first field. */ 475 TValue val; /* Value object. Must be first field. */
403 TValue key; /* Key object. */ 476 TValue key; /* Key object. */
404 MRef next; /* Hash chain. */ 477 MRef next; /* Hash chain. */
478#if !LJ_GC64
405 MRef freetop; /* Top of free elements (stored in t->node[0]). */ 479 MRef freetop; /* Top of free elements (stored in t->node[0]). */
480#endif
406} Node; 481} Node;
407 482
408LJ_STATIC_ASSERT(offsetof(Node, val) == 0); 483LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
@@ -417,12 +492,22 @@ typedef struct GCtab {
417 MRef node; /* Hash part. */ 492 MRef node; /* Hash part. */
418 uint32_t asize; /* Size of array part (keys [0, asize-1]). */ 493 uint32_t asize; /* Size of array part (keys [0, asize-1]). */
419 uint32_t hmask; /* Hash part mask (size of hash part - 1). */ 494 uint32_t hmask; /* Hash part mask (size of hash part - 1). */
495#if LJ_GC64
496 MRef freetop; /* Top of free elements. */
497#endif
420} GCtab; 498} GCtab;
421 499
422#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab)) 500#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
423#define tabref(r) (&gcref((r))->tab) 501#define tabref(r) (&gcref((r))->tab)
424#define noderef(r) (mref((r), Node)) 502#define noderef(r) (mref((r), Node))
425#define nextnode(n) (mref((n)->next, Node)) 503#define nextnode(n) (mref((n)->next, Node))
504#if LJ_GC64
505#define getfreetop(t, n) (noderef((t)->freetop))
506#define setfreetop(t, n, v) (setmref((t)->freetop, (v)))
507#else
508#define getfreetop(t, n) (noderef((n)->freetop))
509#define setfreetop(t, n, v) (setmref((n)->freetop, (v)))
510#endif
426 511
427/* -- State objects ------------------------------------------------------- */ 512/* -- State objects ------------------------------------------------------- */
428 513
@@ -488,9 +573,10 @@ typedef enum {
488#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)]) 573#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
489#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)])) 574#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
490 575
576/* Garbage collector state. */
491typedef struct GCState { 577typedef struct GCState {
492 MSize total; /* Memory currently allocated. */ 578 GCSize total; /* Memory currently allocated. */
493 MSize threshold; /* Memory threshold. */ 579 GCSize threshold; /* Memory threshold. */
494 uint8_t currentwhite; /* Current white color. */ 580 uint8_t currentwhite; /* Current white color. */
495 uint8_t state; /* GC state. */ 581 uint8_t state; /* GC state. */
496 uint8_t nocdatafin; /* No cdata finalizer called. */ 582 uint8_t nocdatafin; /* No cdata finalizer called. */
@@ -502,42 +588,54 @@ typedef struct GCState {
502 GCRef grayagain; /* List of objects for atomic traversal. */ 588 GCRef grayagain; /* List of objects for atomic traversal. */
503 GCRef weak; /* List of weak tables (to be cleared). */ 589 GCRef weak; /* List of weak tables (to be cleared). */
504 GCRef mmudata; /* List of userdata (to be finalized). */ 590 GCRef mmudata; /* List of userdata (to be finalized). */
591 GCSize debt; /* Debt (how much GC is behind schedule). */
592 GCSize estimate; /* Estimate of memory actually in use. */
505 MSize stepmul; /* Incremental GC step granularity. */ 593 MSize stepmul; /* Incremental GC step granularity. */
506 MSize debt; /* Debt (how much GC is behind schedule). */
507 MSize estimate; /* Estimate of memory actually in use. */
508 MSize pause; /* Pause between successive GC cycles. */ 594 MSize pause; /* Pause between successive GC cycles. */
509} GCState; 595} GCState;
510 596
597/* String interning state. */
598typedef struct StrInternState {
599 GCRef *tab; /* String hash table anchors. */
600 MSize mask; /* String hash mask (size of hash table - 1). */
601 MSize num; /* Number of strings in hash table. */
602 StrID id; /* Next string ID. */
603 uint8_t idreseed; /* String ID reseed counter. */
604 uint8_t second; /* String interning table uses secondary hashing. */
605 uint8_t unused1;
606 uint8_t unused2;
607 LJ_ALIGN(8) uint64_t seed; /* Random string seed. */
608} StrInternState;
609
511/* Global state, shared by all threads of a Lua universe. */ 610/* Global state, shared by all threads of a Lua universe. */
512typedef struct global_State { 611typedef struct global_State {
513 GCRef *strhash; /* String hash table (hash chain anchors). */
514 MSize strmask; /* String hash mask (size of hash table - 1). */
515 MSize strnum; /* Number of strings in hash table. */
516 lua_Alloc allocf; /* Memory allocator. */ 612 lua_Alloc allocf; /* Memory allocator. */
517 void *allocd; /* Memory allocator data. */ 613 void *allocd; /* Memory allocator data. */
518 GCState gc; /* Garbage collector. */ 614 GCState gc; /* Garbage collector. */
519 SBuf tmpbuf; /* Temporary buffer for string concatenation. */
520 Node nilnode; /* Fallback 1-element hash part (nil key and value). */
521 GCstr strempty; /* Empty string. */ 615 GCstr strempty; /* Empty string. */
522 uint8_t stremptyz; /* Zero terminator of empty string. */ 616 uint8_t stremptyz; /* Zero terminator of empty string. */
523 uint8_t hookmask; /* Hook mask. */ 617 uint8_t hookmask; /* Hook mask. */
524 uint8_t dispatchmode; /* Dispatch mode. */ 618 uint8_t dispatchmode; /* Dispatch mode. */
525 uint8_t vmevmask; /* VM event mask. */ 619 uint8_t vmevmask; /* VM event mask. */
620 StrInternState str; /* String interning. */
621 volatile int32_t vmstate; /* VM state or current JIT code trace number. */
526 GCRef mainthref; /* Link to main thread. */ 622 GCRef mainthref; /* Link to main thread. */
527 TValue registrytv; /* Anchor for registry. */ 623 SBuf tmpbuf; /* Temporary string buffer. */
528 TValue tmptv, tmptv2; /* Temporary TValues. */ 624 TValue tmptv, tmptv2; /* Temporary TValues. */
625 Node nilnode; /* Fallback 1-element hash part (nil key and value). */
626 TValue registrytv; /* Anchor for registry. */
529 GCupval uvhead; /* Head of double-linked list of all open upvalues. */ 627 GCupval uvhead; /* Head of double-linked list of all open upvalues. */
530 int32_t hookcount; /* Instruction hook countdown. */ 628 int32_t hookcount; /* Instruction hook countdown. */
531 int32_t hookcstart; /* Start count for instruction hook counter. */ 629 int32_t hookcstart; /* Start count for instruction hook counter. */
532 lua_Hook hookf; /* Hook function. */ 630 lua_Hook hookf; /* Hook function. */
533 lua_CFunction wrapf; /* Wrapper for C function calls. */ 631 lua_CFunction wrapf; /* Wrapper for C function calls. */
534 lua_CFunction panic; /* Called as a last resort for errors. */ 632 lua_CFunction panic; /* Called as a last resort for errors. */
535 volatile int32_t vmstate; /* VM state or current JIT code trace number. */
536 BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */ 633 BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
537 BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */ 634 BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
538 GCRef jit_L; /* Current JIT code lua_State or NULL. */ 635 GCRef cur_L; /* Currently executing lua_State. */
539 MRef jit_base; /* Current JIT code L->base. */ 636 MRef jit_base; /* Current JIT code L->base or NULL. */
540 MRef ctype_state; /* Pointer to C type state. */ 637 MRef ctype_state; /* Pointer to C type state. */
638 PRNGState prng; /* Global PRNG state. */
541 GCRef gcroot[GCROOT_MAX]; /* GC roots. */ 639 GCRef gcroot[GCROOT_MAX]; /* GC roots. */
542} global_State; 640} global_State;
543 641
@@ -553,9 +651,11 @@ typedef struct global_State {
553#define HOOK_ACTIVE_SHIFT 4 651#define HOOK_ACTIVE_SHIFT 4
554#define HOOK_VMEVENT 0x20 652#define HOOK_VMEVENT 0x20
555#define HOOK_GC 0x40 653#define HOOK_GC 0x40
654#define HOOK_PROFILE 0x80
556#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE) 655#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
557#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE) 656#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
558#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC)) 657#define hook_entergc(g) \
658 ((g)->hookmask = ((g)->hookmask | (HOOK_ACTIVE|HOOK_GC)) & ~HOOK_PROFILE)
559#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT)) 659#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
560#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE) 660#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
561#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK) 661#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
@@ -583,12 +683,23 @@ struct lua_State {
583#define registry(L) (&G(L)->registrytv) 683#define registry(L) (&G(L)->registrytv)
584 684
585/* Macros to access the currently executing (Lua) function. */ 685/* Macros to access the currently executing (Lua) function. */
686#if LJ_GC64
687#define curr_func(L) (&gcval(L->base-2)->fn)
688#elif LJ_FR2
689#define curr_func(L) (&gcref((L->base-2)->gcr)->fn)
690#else
586#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn) 691#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
692#endif
587#define curr_funcisL(L) (isluafunc(curr_func(L))) 693#define curr_funcisL(L) (isluafunc(curr_func(L)))
588#define curr_proto(L) (funcproto(curr_func(L))) 694#define curr_proto(L) (funcproto(curr_func(L)))
589#define curr_topL(L) (L->base + curr_proto(L)->framesize) 695#define curr_topL(L) (L->base + curr_proto(L)->framesize)
590#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) 696#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
591 697
698#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
699LJ_FUNC_NORET void lj_assert_fail(global_State *g, const char *file, int line,
700 const char *func, const char *fmt, ...);
701#endif
702
592/* -- GC object definition and conversions -------------------------------- */ 703/* -- GC object definition and conversions -------------------------------- */
593 704
594/* GC header for generic access to common fields of GC objects. */ 705/* GC header for generic access to common fields of GC objects. */
@@ -642,17 +753,18 @@ typedef union GCobj {
642 753
643/* -- TValue getters/setters ---------------------------------------------- */ 754/* -- TValue getters/setters ---------------------------------------------- */
644 755
645#ifdef LUA_USE_ASSERT
646#include "lj_gc.h"
647#endif
648
649/* Macros to test types. */ 756/* Macros to test types. */
757#if LJ_GC64
758#define itype(o) ((uint32_t)((o)->it64 >> 47))
759#define tvisnil(o) ((o)->it64 == -1)
760#else
650#define itype(o) ((o)->it) 761#define itype(o) ((o)->it)
651#define tvisnil(o) (itype(o) == LJ_TNIL) 762#define tvisnil(o) (itype(o) == LJ_TNIL)
763#endif
652#define tvisfalse(o) (itype(o) == LJ_TFALSE) 764#define tvisfalse(o) (itype(o) == LJ_TFALSE)
653#define tvistrue(o) (itype(o) == LJ_TTRUE) 765#define tvistrue(o) (itype(o) == LJ_TTRUE)
654#define tvisbool(o) (tvisfalse(o) || tvistrue(o)) 766#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
655#if LJ_64 767#if LJ_64 && !LJ_GC64
656#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2) 768#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
657#else 769#else
658#define tvislightud(o) (itype(o) == LJ_TLIGHTUD) 770#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
@@ -686,7 +798,7 @@ typedef union GCobj {
686#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64) 798#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
687 799
688/* Macros to convert type ids. */ 800/* Macros to convert type ids. */
689#if LJ_64 801#if LJ_64 && !LJ_GC64
690#define itypemap(o) \ 802#define itypemap(o) \
691 (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o)) 803 (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
692#else 804#else
@@ -694,8 +806,12 @@ typedef union GCobj {
694#endif 806#endif
695 807
696/* Macros to get tagged values. */ 808/* Macros to get tagged values. */
809#if LJ_GC64
810#define gcval(o) ((GCobj *)(gcrefu((o)->gcr) & LJ_GCVMASK))
811#else
697#define gcval(o) (gcref((o)->gcr)) 812#define gcval(o) (gcref((o)->gcr))
698#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it)) 813#endif
814#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - itype(o)))
699#if LJ_64 815#if LJ_64
700#define lightudV(o) \ 816#define lightudV(o) \
701 check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff))) 817 check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
@@ -714,13 +830,23 @@ typedef union GCobj {
714#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i) 830#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
715 831
716/* Macros to set tagged values. */ 832/* Macros to set tagged values. */
833#if LJ_GC64
834#define setitype(o, i) ((o)->it = ((i) << 15))
835#define setnilV(o) ((o)->it64 = -1)
836#define setpriV(o, x) ((o)->it64 = (int64_t)~((uint64_t)~(x)<<47))
837#define setboolV(o, x) ((o)->it64 = (int64_t)~((uint64_t)((x)+1)<<47))
838#else
717#define setitype(o, i) ((o)->it = (i)) 839#define setitype(o, i) ((o)->it = (i))
718#define setnilV(o) ((o)->it = LJ_TNIL) 840#define setnilV(o) ((o)->it = LJ_TNIL)
719#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x)) 841#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
842#define setpriV(o, i) (setitype((o), (i)))
843#endif
720 844
721static LJ_AINLINE void setlightudV(TValue *o, void *p) 845static LJ_AINLINE void setlightudV(TValue *o, void *p)
722{ 846{
723#if LJ_64 847#if LJ_GC64
848 o->u64 = (uint64_t)p | (((uint64_t)LJ_TLIGHTUD) << 47);
849#elif LJ_64
724 o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48); 850 o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
725#else 851#else
726 setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD); 852 setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
@@ -730,20 +856,50 @@ static LJ_AINLINE void setlightudV(TValue *o, void *p)
730#if LJ_64 856#if LJ_64
731#define checklightudptr(L, p) \ 857#define checklightudptr(L, p) \
732 (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p)) 858 (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
859#else
860#define checklightudptr(L, p) (p)
861#endif
862
863#if LJ_FR2
864#define contptr(f) ((void *)(f))
865#define setcont(o, f) ((o)->u64 = (uint64_t)(uintptr_t)contptr(f))
866#elif LJ_64
867#define contptr(f) \
868 ((void *)(uintptr_t)(uint32_t)((intptr_t)(f) - (intptr_t)lj_vm_asm_begin))
733#define setcont(o, f) \ 869#define setcont(o, f) \
734 ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin) 870 ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
735#else 871#else
736#define checklightudptr(L, p) (p) 872#define contptr(f) ((void *)(f))
737#define setcont(o, f) setlightudV((o), (void *)(f)) 873#define setcont(o, f) setlightudV((o), contptr(f))
738#endif 874#endif
739 875
740#define tvchecklive(L, o) \ 876static LJ_AINLINE void checklivetv(lua_State *L, TValue *o, const char *msg)
741 UNUSED(L), lua_assert(!tvisgcv(o) || \ 877{
742 ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o)))) 878 UNUSED(L); UNUSED(o); UNUSED(msg);
879#if LUA_USE_ASSERT
880 if (tvisgcv(o)) {
881 lj_assertL(~itype(o) == gcval(o)->gch.gct,
882 "mismatch of TValue type %d vs GC type %d",
883 ~itype(o), gcval(o)->gch.gct);
884 /* Copy of isdead check from lj_gc.h to avoid circular include. */
885 lj_assertL(!(gcval(o)->gch.marked & (G(L)->gc.currentwhite ^ 3) & 3), msg);
886 }
887#endif
888}
743 889
744static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype) 890static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype)
745{ 891{
746 setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o); 892#if LJ_GC64
893 setgcreft(o->gcr, v, itype);
894#else
895 setgcref(o->gcr, v); setitype(o, itype);
896#endif
897}
898
899static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it)
900{
901 setgcVraw(o, v, it);
902 checklivetv(L, o, "store to dead GC object");
747} 903}
748 904
749#define define_setV(name, type, tag) \ 905#define define_setV(name, type, tag) \
@@ -790,13 +946,17 @@ static LJ_AINLINE void setint64V(TValue *o, int64_t i)
790/* Copy tagged values. */ 946/* Copy tagged values. */
791static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) 947static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
792{ 948{
793 *o1 = *o2; tvchecklive(L, o1); 949 *o1 = *o2;
950 checklivetv(L, o1, "copy of dead GC object");
794} 951}
795 952
796/* -- Number to integer conversion ---------------------------------------- */ 953/* -- Number to integer conversion ---------------------------------------- */
797 954
798#if LJ_SOFTFP 955#if LJ_SOFTFP
799LJ_ASMF int32_t lj_vm_tobit(double x); 956LJ_ASMF int32_t lj_vm_tobit(double x);
957#if LJ_TARGET_MIPS64
958LJ_ASMF int32_t lj_vm_tointg(double x);
959#endif
800#endif 960#endif
801 961
802static LJ_AINLINE int32_t lj_num2bit(lua_Number n) 962static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
@@ -810,11 +970,7 @@ static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
810#endif 970#endif
811} 971}
812 972
813#if LJ_TARGET_X86 && !defined(__SSE2__)
814#define lj_num2int(n) lj_num2bit((n))
815#else
816#define lj_num2int(n) ((int32_t)(n)) 973#define lj_num2int(n) ((int32_t)(n))
817#endif
818 974
819/* 975/*
820** This must match the JIT backend behavior. In particular for archs 976** This must match the JIT backend behavior. In particular for archs
@@ -859,6 +1015,7 @@ LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
859#define lj_typename(o) (lj_obj_itypename[itypemap(o)]) 1015#define lj_typename(o) (lj_obj_itypename[itypemap(o)])
860 1016
861/* Compare two objects without calling metamethods. */ 1017/* Compare two objects without calling metamethods. */
862LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2); 1018LJ_FUNC int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2);
1019LJ_FUNC const void * LJ_FASTCALL lj_obj_ptr(cTValue *o);
863 1020
864#endif 1021#endif
diff --git a/src/lj_opt_fold.c b/src/lj_opt_fold.c
index 928d3852..96f272b8 100644
--- a/src/lj_opt_fold.c
+++ b/src/lj_opt_fold.c
@@ -14,18 +14,21 @@
14 14
15#if LJ_HASJIT 15#if LJ_HASJIT
16 16
17#include "lj_buf.h"
17#include "lj_str.h" 18#include "lj_str.h"
18#include "lj_tab.h" 19#include "lj_tab.h"
19#include "lj_ir.h" 20#include "lj_ir.h"
20#include "lj_jit.h" 21#include "lj_jit.h"
22#include "lj_ircall.h"
21#include "lj_iropt.h" 23#include "lj_iropt.h"
22#include "lj_trace.h" 24#include "lj_trace.h"
23#if LJ_HASFFI 25#if LJ_HASFFI
24#include "lj_ctype.h" 26#include "lj_ctype.h"
25#endif
26#include "lj_carith.h" 27#include "lj_carith.h"
28#endif
27#include "lj_vm.h" 29#include "lj_vm.h"
28#include "lj_strscan.h" 30#include "lj_strscan.h"
31#include "lj_strfmt.h"
29 32
30/* Here's a short description how the FOLD engine processes instructions: 33/* Here's a short description how the FOLD engine processes instructions:
31** 34**
@@ -133,8 +136,8 @@
133/* Some local macros to save typing. Undef'd at the end. */ 136/* Some local macros to save typing. Undef'd at the end. */
134#define IR(ref) (&J->cur.ir[(ref)]) 137#define IR(ref) (&J->cur.ir[(ref)])
135#define fins (&J->fold.ins) 138#define fins (&J->fold.ins)
136#define fleft (&J->fold.left) 139#define fleft (J->fold.left)
137#define fright (&J->fold.right) 140#define fright (J->fold.right)
138#define knumleft (ir_knum(fleft)->n) 141#define knumleft (ir_knum(fleft)->n)
139#define knumright (ir_knum(fright)->n) 142#define knumright (ir_knum(fright)->n)
140 143
@@ -155,13 +158,14 @@ typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
155 158
156/* Barrier to prevent folding across a GC step. 159/* Barrier to prevent folding across a GC step.
157** GC steps can only happen at the head of a trace and at LOOP. 160** GC steps can only happen at the head of a trace and at LOOP.
158** And the GC is only driven forward if there is at least one allocation. 161** And the GC is only driven forward if there's at least one allocation.
159*/ 162*/
160#define gcstep_barrier(J, ref) \ 163#define gcstep_barrier(J, ref) \
161 ((ref) < J->chain[IR_LOOP] && \ 164 ((ref) < J->chain[IR_LOOP] && \
162 (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \ 165 (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
163 J->chain[IR_TNEW] || J->chain[IR_TDUP] || \ 166 J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
164 J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR])) 167 J->chain[IR_CNEW] || J->chain[IR_CNEWI] || \
168 J->chain[IR_BUFSTR] || J->chain[IR_TOSTR] || J->chain[IR_CALLA]))
165 169
166/* -- Constant folding for FP numbers ------------------------------------- */ 170/* -- Constant folding for FP numbers ------------------------------------- */
167 171
@@ -169,9 +173,6 @@ LJFOLD(ADD KNUM KNUM)
169LJFOLD(SUB KNUM KNUM) 173LJFOLD(SUB KNUM KNUM)
170LJFOLD(MUL KNUM KNUM) 174LJFOLD(MUL KNUM KNUM)
171LJFOLD(DIV KNUM KNUM) 175LJFOLD(DIV KNUM KNUM)
172LJFOLD(NEG KNUM KNUM)
173LJFOLD(ABS KNUM KNUM)
174LJFOLD(ATAN2 KNUM KNUM)
175LJFOLD(LDEXP KNUM KNUM) 176LJFOLD(LDEXP KNUM KNUM)
176LJFOLD(MIN KNUM KNUM) 177LJFOLD(MIN KNUM KNUM)
177LJFOLD(MAX KNUM KNUM) 178LJFOLD(MAX KNUM KNUM)
@@ -183,6 +184,15 @@ LJFOLDF(kfold_numarith)
183 return lj_ir_knum(J, y); 184 return lj_ir_knum(J, y);
184} 185}
185 186
187LJFOLD(NEG KNUM FLOAD)
188LJFOLD(ABS KNUM FLOAD)
189LJFOLDF(kfold_numabsneg)
190{
191 lua_Number a = knumleft;
192 lua_Number y = lj_vm_foldarith(a, a, fins->o - IR_ADD);
193 return lj_ir_knum(J, y);
194}
195
186LJFOLD(LDEXP KNUM KINT) 196LJFOLD(LDEXP KNUM KINT)
187LJFOLDF(kfold_ldexp) 197LJFOLDF(kfold_ldexp)
188{ 198{
@@ -202,11 +212,36 @@ LJFOLDF(kfold_fpmath)
202 return lj_ir_knum(J, y); 212 return lj_ir_knum(J, y);
203} 213}
204 214
215LJFOLD(CALLN KNUM any)
216LJFOLDF(kfold_fpcall1)
217{
218 const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
219 if (CCI_TYPE(ci) == IRT_NUM) {
220 double y = ((double (*)(double))ci->func)(knumleft);
221 return lj_ir_knum(J, y);
222 }
223 return NEXTFOLD;
224}
225
226LJFOLD(CALLN CARG IRCALL_atan2)
227LJFOLDF(kfold_fpcall2)
228{
229 if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
230 const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
231 double a = ir_knum(IR(fleft->op1))->n;
232 double b = ir_knum(IR(fleft->op2))->n;
233 double y = ((double (*)(double, double))ci->func)(a, b);
234 return lj_ir_knum(J, y);
235 }
236 return NEXTFOLD;
237}
238
205LJFOLD(POW KNUM KINT) 239LJFOLD(POW KNUM KINT)
240LJFOLD(POW KNUM KNUM)
206LJFOLDF(kfold_numpow) 241LJFOLDF(kfold_numpow)
207{ 242{
208 lua_Number a = knumleft; 243 lua_Number a = knumleft;
209 lua_Number b = (lua_Number)fright->i; 244 lua_Number b = fright->o == IR_KINT ? (lua_Number)fright->i : knumright;
210 lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD); 245 lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD);
211 return lj_ir_knum(J, y); 246 return lj_ir_knum(J, y);
212} 247}
@@ -247,7 +282,7 @@ static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op)
247 case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; 282 case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break;
248 case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; 283 case IR_MIN: k1 = k1 < k2 ? k1 : k2; break;
249 case IR_MAX: k1 = k1 > k2 ? k1 : k2; break; 284 case IR_MAX: k1 = k1 > k2 ? k1 : k2; break;
250 default: lua_assert(0); break; 285 default: lj_assertX(0, "bad IR op %d", op); break;
251 } 286 }
252 return k1; 287 return k1;
253} 288}
@@ -319,7 +354,7 @@ LJFOLDF(kfold_intcomp)
319 case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); 354 case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b);
320 case IR_ABC: 355 case IR_ABC:
321 case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); 356 case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b);
322 default: lua_assert(0); return FAILFOLD; 357 default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD;
323 } 358 }
324} 359}
325 360
@@ -333,21 +368,29 @@ LJFOLDF(kfold_intcomp0)
333 368
334/* -- Constant folding for 64 bit integers -------------------------------- */ 369/* -- Constant folding for 64 bit integers -------------------------------- */
335 370
336static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) 371static uint64_t kfold_int64arith(jit_State *J, uint64_t k1, uint64_t k2,
372 IROp op)
337{ 373{
374 UNUSED(J);
375#if LJ_HASFFI
338 switch (op) { 376 switch (op) {
339#if LJ_64 || LJ_HASFFI
340 case IR_ADD: k1 += k2; break; 377 case IR_ADD: k1 += k2; break;
341 case IR_SUB: k1 -= k2; break; 378 case IR_SUB: k1 -= k2; break;
342#endif
343#if LJ_HASFFI
344 case IR_MUL: k1 *= k2; break; 379 case IR_MUL: k1 *= k2; break;
345 case IR_BAND: k1 &= k2; break; 380 case IR_BAND: k1 &= k2; break;
346 case IR_BOR: k1 |= k2; break; 381 case IR_BOR: k1 |= k2; break;
347 case IR_BXOR: k1 ^= k2; break; 382 case IR_BXOR: k1 ^= k2; break;
348#endif 383 case IR_BSHL: k1 <<= (k2 & 63); break;
349 default: UNUSED(k2); lua_assert(0); break; 384 case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 63)); break;
385 case IR_BSAR: k1 >>= (k2 & 63); break;
386 case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 63)); break;
387 case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 63)); break;
388 default: lj_assertJ(0, "bad IR op %d", op); break;
350 } 389 }
390#else
391 UNUSED(k2); UNUSED(op);
392 lj_assertJ(0, "FFI IR op without FFI");
393#endif
351 return k1; 394 return k1;
352} 395}
353 396
@@ -359,7 +402,7 @@ LJFOLD(BOR KINT64 KINT64)
359LJFOLD(BXOR KINT64 KINT64) 402LJFOLD(BXOR KINT64 KINT64)
360LJFOLDF(kfold_int64arith) 403LJFOLDF(kfold_int64arith)
361{ 404{
362 return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64, 405 return INT64FOLD(kfold_int64arith(J, ir_k64(fleft)->u64,
363 ir_k64(fright)->u64, (IROp)fins->o)); 406 ir_k64(fright)->u64, (IROp)fins->o));
364} 407}
365 408
@@ -381,7 +424,7 @@ LJFOLDF(kfold_int64arith2)
381 } 424 }
382 return INT64FOLD(k1); 425 return INT64FOLD(k1);
383#else 426#else
384 UNUSED(J); lua_assert(0); return FAILFOLD; 427 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
385#endif 428#endif
386} 429}
387 430
@@ -392,22 +435,12 @@ LJFOLD(BROL KINT64 KINT)
392LJFOLD(BROR KINT64 KINT) 435LJFOLD(BROR KINT64 KINT)
393LJFOLDF(kfold_int64shift) 436LJFOLDF(kfold_int64shift)
394{ 437{
395#if LJ_HASFFI || LJ_64 438#if LJ_HASFFI
396 uint64_t k = ir_k64(fleft)->u64; 439 uint64_t k = ir_k64(fleft)->u64;
397 int32_t sh = (fright->i & 63); 440 int32_t sh = (fright->i & 63);
398 switch ((IROp)fins->o) { 441 return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL));
399 case IR_BSHL: k <<= sh; break;
400#if LJ_HASFFI
401 case IR_BSHR: k >>= sh; break;
402 case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break;
403 case IR_BROL: k = lj_rol(k, sh); break;
404 case IR_BROR: k = lj_ror(k, sh); break;
405#endif
406 default: lua_assert(0); break;
407 }
408 return INT64FOLD(k);
409#else 442#else
410 UNUSED(J); lua_assert(0); return FAILFOLD; 443 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
411#endif 444#endif
412} 445}
413 446
@@ -417,7 +450,7 @@ LJFOLDF(kfold_bnot64)
417#if LJ_HASFFI 450#if LJ_HASFFI
418 return INT64FOLD(~ir_k64(fleft)->u64); 451 return INT64FOLD(~ir_k64(fleft)->u64);
419#else 452#else
420 UNUSED(J); lua_assert(0); return FAILFOLD; 453 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
421#endif 454#endif
422} 455}
423 456
@@ -427,7 +460,7 @@ LJFOLDF(kfold_bswap64)
427#if LJ_HASFFI 460#if LJ_HASFFI
428 return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); 461 return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64));
429#else 462#else
430 UNUSED(J); lua_assert(0); return FAILFOLD; 463 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
431#endif 464#endif
432} 465}
433 466
@@ -452,10 +485,10 @@ LJFOLDF(kfold_int64comp)
452 case IR_UGE: return CONDFOLD(a >= b); 485 case IR_UGE: return CONDFOLD(a >= b);
453 case IR_ULE: return CONDFOLD(a <= b); 486 case IR_ULE: return CONDFOLD(a <= b);
454 case IR_UGT: return CONDFOLD(a > b); 487 case IR_UGT: return CONDFOLD(a > b);
455 default: lua_assert(0); return FAILFOLD; 488 default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD;
456 } 489 }
457#else 490#else
458 UNUSED(J); lua_assert(0); return FAILFOLD; 491 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
459#endif 492#endif
460} 493}
461 494
@@ -467,7 +500,7 @@ LJFOLDF(kfold_int64comp0)
467 return DROPFOLD; 500 return DROPFOLD;
468 return NEXTFOLD; 501 return NEXTFOLD;
469#else 502#else
470 UNUSED(J); lua_assert(0); return FAILFOLD; 503 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
471#endif 504#endif
472} 505}
473 506
@@ -492,7 +525,7 @@ LJFOLD(STRREF KGC KINT)
492LJFOLDF(kfold_strref) 525LJFOLDF(kfold_strref)
493{ 526{
494 GCstr *str = ir_kstr(fleft); 527 GCstr *str = ir_kstr(fleft);
495 lua_assert((MSize)fright->i <= str->len); 528 lj_assertJ((MSize)fright->i <= str->len, "bad string ref");
496 return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); 529 return lj_ir_kkptr(J, (char *)strdata(str) + fright->i);
497} 530}
498 531
@@ -510,7 +543,7 @@ LJFOLDF(kfold_strref_snew)
510 PHIBARRIER(ir); 543 PHIBARRIER(ir);
511 fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */ 544 fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */
512 fins->op1 = str; 545 fins->op1 = str;
513 fins->ot = IRT(IR_STRREF, IRT_P32); 546 fins->ot = IRT(IR_STRREF, IRT_PGC);
514 return RETRYFOLD; 547 return RETRYFOLD;
515 } 548 }
516 } 549 }
@@ -528,6 +561,182 @@ LJFOLDF(kfold_strcmp)
528 return NEXTFOLD; 561 return NEXTFOLD;
529} 562}
530 563
564/* -- Constant folding and forwarding for buffers ------------------------- */
565
566/*
567** Buffer ops perform stores, but their effect is limited to the buffer
568** itself. Also, buffer ops are chained: a use of an op implies a use of
569** all other ops up the chain. Conversely, if an op is unused, all ops
570** up the chain can go unsed. This largely eliminates the need to treat
571** them as stores.
572**
573** Alas, treating them as normal (IRM_N) ops doesn't work, because they
574** cannot be CSEd in isolation. CSE for IRM_N is implicitly done in LOOP
575** or if FOLD is disabled.
576**
577** The compromise is to declare them as loads, emit them like stores and
578** CSE whole chains manually when the BUFSTR is to be emitted. Any chain
579** fragments left over from CSE are eliminated by DCE.
580*/
581
582/* BUFHDR is emitted like a store, see below. */
583
584LJFOLD(BUFPUT BUFHDR BUFSTR)
585LJFOLDF(bufput_append)
586{
587 /* New buffer, no other buffer op inbetween and same buffer? */
588 if ((J->flags & JIT_F_OPT_FWD) &&
589 !(fleft->op2 & IRBUFHDR_APPEND) &&
590 fleft->prev == fright->op2 &&
591 fleft->op1 == IR(fright->op2)->op1) {
592 IRRef ref = fins->op1;
593 IR(ref)->op2 = (fleft->op2 | IRBUFHDR_APPEND); /* Modify BUFHDR. */
594 IR(ref)->op1 = fright->op1;
595 return ref;
596 }
597 return EMITFOLD; /* Always emit, CSE later. */
598}
599
600LJFOLD(BUFPUT any any)
601LJFOLDF(bufput_kgc)
602{
603 if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fright->o == IR_KGC) {
604 GCstr *s2 = ir_kstr(fright);
605 if (s2->len == 0) { /* Empty string? */
606 return LEFTFOLD;
607 } else {
608 if (fleft->o == IR_BUFPUT && irref_isk(fleft->op2) &&
609 !irt_isphi(fleft->t)) { /* Join two constant string puts in a row. */
610 GCstr *s1 = ir_kstr(IR(fleft->op2));
611 IRRef kref = lj_ir_kstr(J, lj_buf_cat2str(J->L, s1, s2));
612 /* lj_ir_kstr() may realloc the IR and invalidates any IRIns *. */
613 IR(fins->op1)->op2 = kref; /* Modify previous BUFPUT. */
614 return fins->op1;
615 }
616 }
617 }
618 return EMITFOLD; /* Always emit, CSE later. */
619}
620
621LJFOLD(BUFSTR any any)
622LJFOLDF(bufstr_kfold_cse)
623{
624 lj_assertJ(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT ||
625 fleft->o == IR_CALLL,
626 "bad buffer constructor IR op %d", fleft->o);
627 if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
628 if (fleft->o == IR_BUFHDR) { /* No put operations? */
629 if (!(fleft->op2 & IRBUFHDR_APPEND)) /* Empty buffer? */
630 return lj_ir_kstr(J, &J2G(J)->strempty);
631 fins->op1 = fleft->op1;
632 fins->op2 = fleft->prev; /* Relies on checks in bufput_append. */
633 return CSEFOLD;
634 } else if (fleft->o == IR_BUFPUT) {
635 IRIns *irb = IR(fleft->op1);
636 if (irb->o == IR_BUFHDR && !(irb->op2 & IRBUFHDR_APPEND))
637 return fleft->op2; /* Shortcut for a single put operation. */
638 }
639 }
640 /* Try to CSE the whole chain. */
641 if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
642 IRRef ref = J->chain[IR_BUFSTR];
643 while (ref) {
644 IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1);
645 while (ira->o == irb->o && ira->op2 == irb->op2) {
646 lj_assertJ(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT ||
647 ira->o == IR_CALLL || ira->o == IR_CARG,
648 "bad buffer constructor IR op %d", ira->o);
649 if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND))
650 return ref; /* CSE succeeded. */
651 if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab)
652 break;
653 ira = IR(ira->op1);
654 irb = IR(irb->op1);
655 }
656 ref = irs->prev;
657 }
658 }
659 return EMITFOLD; /* No CSE possible. */
660}
661
662LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_reverse)
663LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_upper)
664LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_lower)
665LJFOLD(CALLL CARG IRCALL_lj_strfmt_putquoted)
666LJFOLDF(bufput_kfold_op)
667{
668 if (irref_isk(fleft->op2)) {
669 const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
670 SBuf *sb = lj_buf_tmp_(J->L);
671 sb = ((SBuf * (LJ_FASTCALL *)(SBuf *, GCstr *))ci->func)(sb,
672 ir_kstr(IR(fleft->op2)));
673 fins->o = IR_BUFPUT;
674 fins->op1 = fleft->op1;
675 fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
676 return RETRYFOLD;
677 }
678 return EMITFOLD; /* Always emit, CSE later. */
679}
680
681LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_rep)
682LJFOLDF(bufput_kfold_rep)
683{
684 if (irref_isk(fleft->op2)) {
685 IRIns *irc = IR(fleft->op1);
686 if (irref_isk(irc->op2)) {
687 SBuf *sb = lj_buf_tmp_(J->L);
688 sb = lj_buf_putstr_rep(sb, ir_kstr(IR(irc->op2)), IR(fleft->op2)->i);
689 fins->o = IR_BUFPUT;
690 fins->op1 = irc->op1;
691 fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
692 return RETRYFOLD;
693 }
694 }
695 return EMITFOLD; /* Always emit, CSE later. */
696}
697
698LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfxint)
699LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_int)
700LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_uint)
701LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum)
702LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfstr)
703LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar)
704LJFOLDF(bufput_kfold_fmt)
705{
706 IRIns *irc = IR(fleft->op1);
707 lj_assertJ(irref_isk(irc->op2), "SFormat must be const");
708 if (irref_isk(fleft->op2)) {
709 SFormat sf = (SFormat)IR(irc->op2)->i;
710 IRIns *ira = IR(fleft->op2);
711 SBuf *sb = lj_buf_tmp_(J->L);
712 switch (fins->op2) {
713 case IRCALL_lj_strfmt_putfxint:
714 sb = lj_strfmt_putfxint(sb, sf, ir_k64(ira)->u64);
715 break;
716 case IRCALL_lj_strfmt_putfstr:
717 sb = lj_strfmt_putfstr(sb, sf, ir_kstr(ira));
718 break;
719 case IRCALL_lj_strfmt_putfchar:
720 sb = lj_strfmt_putfchar(sb, sf, ira->i);
721 break;
722 case IRCALL_lj_strfmt_putfnum_int:
723 case IRCALL_lj_strfmt_putfnum_uint:
724 case IRCALL_lj_strfmt_putfnum:
725 default: {
726 const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
727 sb = ((SBuf * (*)(SBuf *, SFormat, lua_Number))ci->func)(sb, sf,
728 ir_knum(ira)->n);
729 break;
730 }
731 }
732 fins->o = IR_BUFPUT;
733 fins->op1 = irc->op1;
734 fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
735 return RETRYFOLD;
736 }
737 return EMITFOLD; /* Always emit, CSE later. */
738}
739
531/* -- Constant folding of pointer arithmetic ------------------------------ */ 740/* -- Constant folding of pointer arithmetic ------------------------------ */
532 741
533LJFOLD(ADD KGC KINT) 742LJFOLD(ADD KGC KINT)
@@ -648,27 +857,22 @@ LJFOLD(CONV KNUM IRCONV_INT_NUM)
648LJFOLDF(kfold_conv_knum_int_num) 857LJFOLDF(kfold_conv_knum_int_num)
649{ 858{
650 lua_Number n = knumleft; 859 lua_Number n = knumleft;
651 if (!(fins->op2 & IRCONV_TRUNC)) { 860 int32_t k = lj_num2int(n);
652 int32_t k = lj_num2int(n); 861 if (irt_isguard(fins->t) && n != (lua_Number)k) {
653 if (irt_isguard(fins->t) && n != (lua_Number)k) { 862 /* We're about to create a guard which always fails, like CONV +1.5.
654 /* We're about to create a guard which always fails, like CONV +1.5. 863 ** Some pathological loops cause this during LICM, e.g.:
655 ** Some pathological loops cause this during LICM, e.g.: 864 ** local x,k,t = 0,1.5,{1,[1.5]=2}
656 ** local x,k,t = 0,1.5,{1,[1.5]=2} 865 ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
657 ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end 866 ** assert(x == 300)
658 ** assert(x == 300) 867 */
659 */ 868 return FAILFOLD;
660 return FAILFOLD;
661 }
662 return INTFOLD(k);
663 } else {
664 return INTFOLD((int32_t)n);
665 } 869 }
870 return INTFOLD(k);
666} 871}
667 872
668LJFOLD(CONV KNUM IRCONV_U32_NUM) 873LJFOLD(CONV KNUM IRCONV_U32_NUM)
669LJFOLDF(kfold_conv_knum_u32_num) 874LJFOLDF(kfold_conv_knum_u32_num)
670{ 875{
671 lua_assert((fins->op2 & IRCONV_TRUNC));
672#ifdef _MSC_VER 876#ifdef _MSC_VER
673 { /* Workaround for MSVC bug. */ 877 { /* Workaround for MSVC bug. */
674 volatile uint32_t u = (uint32_t)knumleft; 878 volatile uint32_t u = (uint32_t)knumleft;
@@ -682,27 +886,27 @@ LJFOLDF(kfold_conv_knum_u32_num)
682LJFOLD(CONV KNUM IRCONV_I64_NUM) 886LJFOLD(CONV KNUM IRCONV_I64_NUM)
683LJFOLDF(kfold_conv_knum_i64_num) 887LJFOLDF(kfold_conv_knum_i64_num)
684{ 888{
685 lua_assert((fins->op2 & IRCONV_TRUNC));
686 return INT64FOLD((uint64_t)(int64_t)knumleft); 889 return INT64FOLD((uint64_t)(int64_t)knumleft);
687} 890}
688 891
689LJFOLD(CONV KNUM IRCONV_U64_NUM) 892LJFOLD(CONV KNUM IRCONV_U64_NUM)
690LJFOLDF(kfold_conv_knum_u64_num) 893LJFOLDF(kfold_conv_knum_u64_num)
691{ 894{
692 lua_assert((fins->op2 & IRCONV_TRUNC));
693 return INT64FOLD(lj_num2u64(knumleft)); 895 return INT64FOLD(lj_num2u64(knumleft));
694} 896}
695 897
696LJFOLD(TOSTR KNUM) 898LJFOLD(TOSTR KNUM any)
697LJFOLDF(kfold_tostr_knum) 899LJFOLDF(kfold_tostr_knum)
698{ 900{
699 return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft)); 901 return lj_ir_kstr(J, lj_strfmt_num(J->L, ir_knum(fleft)));
700} 902}
701 903
702LJFOLD(TOSTR KINT) 904LJFOLD(TOSTR KINT any)
703LJFOLDF(kfold_tostr_kint) 905LJFOLDF(kfold_tostr_kint)
704{ 906{
705 return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i)); 907 return lj_ir_kstr(J, fins->op2 == IRTOSTR_INT ?
908 lj_strfmt_int(J->L, fleft->i) :
909 lj_strfmt_char(J->L, fleft->i));
706} 910}
707 911
708LJFOLD(STRTO KGC) 912LJFOLD(STRTO KGC)
@@ -750,13 +954,13 @@ LJFOLDF(shortcut_round)
750 return NEXTFOLD; 954 return NEXTFOLD;
751} 955}
752 956
753LJFOLD(ABS ABS KNUM) 957LJFOLD(ABS ABS FLOAD)
754LJFOLDF(shortcut_left) 958LJFOLDF(shortcut_left)
755{ 959{
756 return LEFTFOLD; /* f(g(x)) ==> g(x) */ 960 return LEFTFOLD; /* f(g(x)) ==> g(x) */
757} 961}
758 962
759LJFOLD(ABS NEG KNUM) 963LJFOLD(ABS NEG FLOAD)
760LJFOLDF(shortcut_dropleft) 964LJFOLDF(shortcut_dropleft)
761{ 965{
762 PHIBARRIER(fleft); 966 PHIBARRIER(fleft);
@@ -837,8 +1041,10 @@ LJFOLDF(simplify_nummuldiv_k)
837 if (n == 1.0) { /* x o 1 ==> x */ 1041 if (n == 1.0) { /* x o 1 ==> x */
838 return LEFTFOLD; 1042 return LEFTFOLD;
839 } else if (n == -1.0) { /* x o -1 ==> -x */ 1043 } else if (n == -1.0) { /* x o -1 ==> -x */
1044 IRRef op1 = fins->op1;
1045 fins->op2 = (IRRef1)lj_ir_ksimd(J, LJ_KSIMD_NEG); /* Modifies fins. */
1046 fins->op1 = op1;
840 fins->o = IR_NEG; 1047 fins->o = IR_NEG;
841 fins->op2 = (IRRef1)lj_ir_knum_neg(J);
842 return RETRYFOLD; 1048 return RETRYFOLD;
843 } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */ 1049 } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */
844 fins->o = IR_ADD; 1050 fins->o = IR_ADD;
@@ -879,7 +1085,7 @@ LJFOLDF(simplify_nummuldiv_negneg)
879} 1085}
880 1086
881LJFOLD(POW any KINT) 1087LJFOLD(POW any KINT)
882LJFOLDF(simplify_numpow_xk) 1088LJFOLDF(simplify_numpow_xkint)
883{ 1089{
884 int32_t k = fright->i; 1090 int32_t k = fright->i;
885 TRef ref = fins->op1; 1091 TRef ref = fins->op1;
@@ -908,13 +1114,22 @@ LJFOLDF(simplify_numpow_xk)
908 return ref; 1114 return ref;
909} 1115}
910 1116
1117LJFOLD(POW any KNUM)
1118LJFOLDF(simplify_numpow_xknum)
1119{
1120 if (knumright == 0.5) /* x ^ 0.5 ==> sqrt(x) */
1121 return emitir(IRTN(IR_FPMATH), fins->op1, IRFPM_SQRT);
1122 return NEXTFOLD;
1123}
1124
911LJFOLD(POW KNUM any) 1125LJFOLD(POW KNUM any)
912LJFOLDF(simplify_numpow_kx) 1126LJFOLDF(simplify_numpow_kx)
913{ 1127{
914 lua_Number n = knumleft; 1128 lua_Number n = knumleft;
915 if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */ 1129 if (n == 2.0 && irt_isint(fright->t)) { /* 2.0 ^ i ==> ldexp(1.0, i) */
916 fins->o = IR_CONV;
917#if LJ_TARGET_X86ORX64 1130#if LJ_TARGET_X86ORX64
1131 /* Different IR_LDEXP calling convention on x86/x64 requires conversion. */
1132 fins->o = IR_CONV;
918 fins->op1 = fins->op2; 1133 fins->op1 = fins->op2;
919 fins->op2 = IRCONV_NUM_INT; 1134 fins->op2 = IRCONV_NUM_INT;
920 fins->op2 = (IRRef1)lj_opt_fold(J); 1135 fins->op2 = (IRRef1)lj_opt_fold(J);
@@ -1008,10 +1223,10 @@ LJFOLDF(simplify_tobit_conv)
1008{ 1223{
1009 /* Fold even across PHI to avoid expensive num->int conversions in loop. */ 1224 /* Fold even across PHI to avoid expensive num->int conversions in loop. */
1010 if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { 1225 if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
1011 lua_assert(irt_isnum(fleft->t)); 1226 lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg");
1012 return fleft->op1; 1227 return fleft->op1;
1013 } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { 1228 } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
1014 lua_assert(irt_isnum(fleft->t)); 1229 lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg");
1015 fins->o = IR_CONV; 1230 fins->o = IR_CONV;
1016 fins->op1 = fleft->op1; 1231 fins->op1 = fleft->op1;
1017 fins->op2 = (IRT_INT<<5)|IRT_U32; 1232 fins->op2 = (IRT_INT<<5)|IRT_U32;
@@ -1051,7 +1266,7 @@ LJFOLDF(simplify_conv_sext)
1051 /* Use scalar evolution analysis results to strength-reduce sign-extension. */ 1266 /* Use scalar evolution analysis results to strength-reduce sign-extension. */
1052 if (ref == J->scev.idx) { 1267 if (ref == J->scev.idx) {
1053 IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; 1268 IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
1054 lua_assert(irt_isint(J->scev.t)); 1269 lj_assertJ(irt_isint(J->scev.t), "only int SCEV supported");
1055 if (lo && IR(lo)->o == IR_KINT && IR(lo)->i + ofs >= 0) { 1270 if (lo && IR(lo)->o == IR_KINT && IR(lo)->i + ofs >= 0) {
1056 ok_reduce: 1271 ok_reduce:
1057#if LJ_TARGET_X64 1272#if LJ_TARGET_X64
@@ -1127,7 +1342,8 @@ LJFOLDF(narrow_convert)
1127 /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */ 1342 /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */
1128 if (J->chain[IR_LOOP]) 1343 if (J->chain[IR_LOOP])
1129 return NEXTFOLD; 1344 return NEXTFOLD;
1130 lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT); 1345 lj_assertJ(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT,
1346 "unexpected CONV TOBIT");
1131 return lj_opt_narrow_convert(J); 1347 return lj_opt_narrow_convert(J);
1132} 1348}
1133 1349
@@ -1205,7 +1421,9 @@ static TRef simplify_intmul_k(jit_State *J, int32_t k)
1205 ** But this is mainly intended for simple address arithmetic. 1421 ** But this is mainly intended for simple address arithmetic.
1206 ** Also it's easier for the backend to optimize the original multiplies. 1422 ** Also it's easier for the backend to optimize the original multiplies.
1207 */ 1423 */
1208 if (k == 1) { /* i * 1 ==> i */ 1424 if (k == 0) { /* i * 0 ==> 0 */
1425 return RIGHTFOLD;
1426 } else if (k == 1) { /* i * 1 ==> i */
1209 return LEFTFOLD; 1427 return LEFTFOLD;
1210 } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */ 1428 } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
1211 fins->o = IR_BSHL; 1429 fins->o = IR_BSHL;
@@ -1218,9 +1436,7 @@ static TRef simplify_intmul_k(jit_State *J, int32_t k)
1218LJFOLD(MUL any KINT) 1436LJFOLD(MUL any KINT)
1219LJFOLDF(simplify_intmul_k32) 1437LJFOLDF(simplify_intmul_k32)
1220{ 1438{
1221 if (fright->i == 0) /* i * 0 ==> 0 */ 1439 if (fright->i >= 0)
1222 return INTFOLD(0);
1223 else if (fright->i > 0)
1224 return simplify_intmul_k(J, fright->i); 1440 return simplify_intmul_k(J, fright->i);
1225 return NEXTFOLD; 1441 return NEXTFOLD;
1226} 1442}
@@ -1228,21 +1444,20 @@ LJFOLDF(simplify_intmul_k32)
1228LJFOLD(MUL any KINT64) 1444LJFOLD(MUL any KINT64)
1229LJFOLDF(simplify_intmul_k64) 1445LJFOLDF(simplify_intmul_k64)
1230{ 1446{
1231 if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */ 1447#if LJ_HASFFI
1232 return INT64FOLD(0); 1448 if (ir_kint64(fright)->u64 < 0x80000000u)
1233#if LJ_64
1234 /* NYI: SPLIT for BSHL and 32 bit backend support. */
1235 else if (ir_kint64(fright)->u64 < 0x80000000u)
1236 return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); 1449 return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
1237#endif
1238 return NEXTFOLD; 1450 return NEXTFOLD;
1451#else
1452 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
1453#endif
1239} 1454}
1240 1455
1241LJFOLD(MOD any KINT) 1456LJFOLD(MOD any KINT)
1242LJFOLDF(simplify_intmod_k) 1457LJFOLDF(simplify_intmod_k)
1243{ 1458{
1244 int32_t k = fright->i; 1459 int32_t k = fright->i;
1245 lua_assert(k != 0); 1460 lj_assertJ(k != 0, "integer mod 0");
1246 if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ 1461 if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */
1247 fins->o = IR_BAND; 1462 fins->o = IR_BAND;
1248 fins->op2 = lj_ir_kint(J, k-1); 1463 fins->op2 = lj_ir_kint(J, k-1);
@@ -1491,6 +1706,15 @@ LJFOLDF(simplify_shiftk_andk)
1491 fins->op2 = (IRRef1)lj_ir_kint(J, k); 1706 fins->op2 = (IRRef1)lj_ir_kint(J, k);
1492 fins->ot = IRTI(IR_BAND); 1707 fins->ot = IRTI(IR_BAND);
1493 return RETRYFOLD; 1708 return RETRYFOLD;
1709 } else if (irk->o == IR_KINT64) {
1710 uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, fright->i,
1711 (IROp)fins->o);
1712 IROpT ot = fleft->ot;
1713 fins->op1 = fleft->op1;
1714 fins->op1 = (IRRef1)lj_opt_fold(J);
1715 fins->op2 = (IRRef1)lj_ir_kint64(J, k);
1716 fins->ot = ot;
1717 return RETRYFOLD;
1494 } 1718 }
1495 return NEXTFOLD; 1719 return NEXTFOLD;
1496} 1720}
@@ -1506,6 +1730,47 @@ LJFOLDF(simplify_andk_shiftk)
1506 return NEXTFOLD; 1730 return NEXTFOLD;
1507} 1731}
1508 1732
1733LJFOLD(BAND BOR KINT)
1734LJFOLD(BOR BAND KINT)
1735LJFOLDF(simplify_andor_k)
1736{
1737 IRIns *irk = IR(fleft->op2);
1738 PHIBARRIER(fleft);
1739 if (irk->o == IR_KINT) {
1740 int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
1741 /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */
1742 /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */
1743 if (k == (fins->o == IR_BAND ? 0 : -1)) {
1744 fins->op1 = fleft->op1;
1745 return RETRYFOLD;
1746 }
1747 }
1748 return NEXTFOLD;
1749}
1750
1751LJFOLD(BAND BOR KINT64)
1752LJFOLD(BOR BAND KINT64)
1753LJFOLDF(simplify_andor_k64)
1754{
1755#if LJ_HASFFI
1756 IRIns *irk = IR(fleft->op2);
1757 PHIBARRIER(fleft);
1758 if (irk->o == IR_KINT64) {
1759 uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64,
1760 (IROp)fins->o);
1761 /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */
1762 /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */
1763 if (k == (fins->o == IR_BAND ? (uint64_t)0 : ~(uint64_t)0)) {
1764 fins->op1 = fleft->op1;
1765 return RETRYFOLD;
1766 }
1767 }
1768 return NEXTFOLD;
1769#else
1770 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
1771#endif
1772}
1773
1509/* -- Reassociation ------------------------------------------------------- */ 1774/* -- Reassociation ------------------------------------------------------- */
1510 1775
1511LJFOLD(ADD ADD KINT) 1776LJFOLD(ADD ADD KINT)
@@ -1535,11 +1800,11 @@ LJFOLD(BOR BOR KINT64)
1535LJFOLD(BXOR BXOR KINT64) 1800LJFOLD(BXOR BXOR KINT64)
1536LJFOLDF(reassoc_intarith_k64) 1801LJFOLDF(reassoc_intarith_k64)
1537{ 1802{
1538#if LJ_HASFFI || LJ_64 1803#if LJ_HASFFI
1539 IRIns *irk = IR(fleft->op2); 1804 IRIns *irk = IR(fleft->op2);
1540 if (irk->o == IR_KINT64) { 1805 if (irk->o == IR_KINT64) {
1541 uint64_t k = kfold_int64arith(ir_k64(irk)->u64, 1806 uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64,
1542 ir_k64(fright)->u64, (IROp)fins->o); 1807 (IROp)fins->o);
1543 PHIBARRIER(fleft); 1808 PHIBARRIER(fleft);
1544 fins->op1 = fleft->op1; 1809 fins->op1 = fleft->op1;
1545 fins->op2 = (IRRef1)lj_ir_kint64(J, k); 1810 fins->op2 = (IRRef1)lj_ir_kint64(J, k);
@@ -1547,12 +1812,10 @@ LJFOLDF(reassoc_intarith_k64)
1547 } 1812 }
1548 return NEXTFOLD; 1813 return NEXTFOLD;
1549#else 1814#else
1550 UNUSED(J); lua_assert(0); return FAILFOLD; 1815 UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
1551#endif 1816#endif
1552} 1817}
1553 1818
1554LJFOLD(MIN MIN any)
1555LJFOLD(MAX MAX any)
1556LJFOLD(BAND BAND any) 1819LJFOLD(BAND BAND any)
1557LJFOLD(BOR BOR any) 1820LJFOLD(BOR BOR any)
1558LJFOLDF(reassoc_dup) 1821LJFOLDF(reassoc_dup)
@@ -1562,6 +1825,15 @@ LJFOLDF(reassoc_dup)
1562 return NEXTFOLD; 1825 return NEXTFOLD;
1563} 1826}
1564 1827
1828LJFOLD(MIN MIN any)
1829LJFOLD(MAX MAX any)
1830LJFOLDF(reassoc_dup_minmax)
1831{
1832 if (fins->op2 == fleft->op2)
1833 return LEFTFOLD; /* (a o b) o b ==> a o b */
1834 return NEXTFOLD;
1835}
1836
1565LJFOLD(BXOR BXOR any) 1837LJFOLD(BXOR BXOR any)
1566LJFOLDF(reassoc_bxor) 1838LJFOLDF(reassoc_bxor)
1567{ 1839{
@@ -1600,23 +1872,12 @@ LJFOLDF(reassoc_shift)
1600 return NEXTFOLD; 1872 return NEXTFOLD;
1601} 1873}
1602 1874
1603LJFOLD(MIN MIN KNUM)
1604LJFOLD(MAX MAX KNUM)
1605LJFOLD(MIN MIN KINT) 1875LJFOLD(MIN MIN KINT)
1606LJFOLD(MAX MAX KINT) 1876LJFOLD(MAX MAX KINT)
1607LJFOLDF(reassoc_minmax_k) 1877LJFOLDF(reassoc_minmax_k)
1608{ 1878{
1609 IRIns *irk = IR(fleft->op2); 1879 IRIns *irk = IR(fleft->op2);
1610 if (irk->o == IR_KNUM) { 1880 if (irk->o == IR_KINT) {
1611 lua_Number a = ir_knum(irk)->n;
1612 lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD);
1613 if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
1614 return LEFTFOLD;
1615 PHIBARRIER(fleft);
1616 fins->op1 = fleft->op1;
1617 fins->op2 = (IRRef1)lj_ir_knum(J, y);
1618 return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
1619 } else if (irk->o == IR_KINT) {
1620 int32_t a = irk->i; 1881 int32_t a = irk->i;
1621 int32_t y = kfold_intop(a, fright->i, fins->o); 1882 int32_t y = kfold_intop(a, fright->i, fins->o);
1622 if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */ 1883 if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
@@ -1629,24 +1890,6 @@ LJFOLDF(reassoc_minmax_k)
1629 return NEXTFOLD; 1890 return NEXTFOLD;
1630} 1891}
1631 1892
1632LJFOLD(MIN MAX any)
1633LJFOLD(MAX MIN any)
1634LJFOLDF(reassoc_minmax_left)
1635{
1636 if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
1637 return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */
1638 return NEXTFOLD;
1639}
1640
1641LJFOLD(MIN any MAX)
1642LJFOLD(MAX any MIN)
1643LJFOLDF(reassoc_minmax_right)
1644{
1645 if (fins->op1 == fright->op1 || fins->op1 == fright->op2)
1646 return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */
1647 return NEXTFOLD;
1648}
1649
1650/* -- Array bounds check elimination -------------------------------------- */ 1893/* -- Array bounds check elimination -------------------------------------- */
1651 1894
1652/* Eliminate ABC across PHIs to handle t[i-1] forwarding case. 1895/* Eliminate ABC across PHIs to handle t[i-1] forwarding case.
@@ -1772,8 +2015,6 @@ LJFOLDF(comm_comp)
1772 2015
1773LJFOLD(BAND any any) 2016LJFOLD(BAND any any)
1774LJFOLD(BOR any any) 2017LJFOLD(BOR any any)
1775LJFOLD(MIN any any)
1776LJFOLD(MAX any any)
1777LJFOLDF(comm_dup) 2018LJFOLDF(comm_dup)
1778{ 2019{
1779 if (fins->op1 == fins->op2) /* x o x ==> x */ 2020 if (fins->op1 == fins->op2) /* x o x ==> x */
@@ -1781,6 +2022,15 @@ LJFOLDF(comm_dup)
1781 return fold_comm_swap(J); 2022 return fold_comm_swap(J);
1782} 2023}
1783 2024
2025LJFOLD(MIN any any)
2026LJFOLD(MAX any any)
2027LJFOLDF(comm_dup_minmax)
2028{
2029 if (fins->op1 == fins->op2) /* x o x ==> x */
2030 return LEFTFOLD;
2031 return NEXTFOLD;
2032}
2033
1784LJFOLD(BXOR any any) 2034LJFOLD(BXOR any any)
1785LJFOLDF(comm_bxor) 2035LJFOLDF(comm_bxor)
1786{ 2036{
@@ -1817,7 +2067,7 @@ LJFOLDF(merge_eqne_snew_kgc)
1817{ 2067{
1818 GCstr *kstr = ir_kstr(fright); 2068 GCstr *kstr = ir_kstr(fright);
1819 int32_t len = (int32_t)kstr->len; 2069 int32_t len = (int32_t)kstr->len;
1820 lua_assert(irt_isstr(fins->t)); 2070 lj_assertJ(irt_isstr(fins->t), "bad equality IR type");
1821 2071
1822#if LJ_TARGET_UNALIGNED 2072#if LJ_TARGET_UNALIGNED
1823#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ 2073#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */
@@ -1881,7 +2131,7 @@ LJFOLD(HLOAD KKPTR)
1881LJFOLDF(kfold_hload_kkptr) 2131LJFOLDF(kfold_hload_kkptr)
1882{ 2132{
1883 UNUSED(J); 2133 UNUSED(J);
1884 lua_assert(ir_kptr(fleft) == niltvg(J2G(J))); 2134 lj_assertJ(ir_kptr(fleft) == niltvg(J2G(J)), "expected niltv");
1885 return TREF_NIL; 2135 return TREF_NIL;
1886} 2136}
1887 2137
@@ -1891,8 +2141,8 @@ LJFOLDX(lj_opt_fwd_hload)
1891LJFOLD(ULOAD any) 2141LJFOLD(ULOAD any)
1892LJFOLDX(lj_opt_fwd_uload) 2142LJFOLDX(lj_opt_fwd_uload)
1893 2143
1894LJFOLD(CALLL any IRCALL_lj_tab_len) 2144LJFOLD(ALEN any any)
1895LJFOLDX(lj_opt_fwd_tab_len) 2145LJFOLDX(lj_opt_fwd_alen)
1896 2146
1897/* Upvalue refs are really loads, but there are no corresponding stores. 2147/* Upvalue refs are really loads, but there are no corresponding stores.
1898** So CSE is ok for them, except for UREFO across a GC step (see below). 2148** So CSE is ok for them, except for UREFO across a GC step (see below).
@@ -1953,6 +2203,7 @@ LJFOLDF(fwd_href_tdup)
1953** an aliased table, as it may invalidate all of the pointers and fields. 2203** an aliased table, as it may invalidate all of the pointers and fields.
1954** Only HREF needs the NEWREF check -- AREF and HREFK already depend on 2204** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
1955** FLOADs. And NEWREF itself is treated like a store (see below). 2205** FLOADs. And NEWREF itself is treated like a store (see below).
2206** LREF is constant (per trace) since coroutine switches are not inlined.
1956*/ 2207*/
1957LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) 2208LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
1958LJFOLDF(fload_tab_tnew_asize) 2209LJFOLDF(fload_tab_tnew_asize)
@@ -2016,6 +2267,14 @@ LJFOLDF(fload_str_len_snew)
2016 return NEXTFOLD; 2267 return NEXTFOLD;
2017} 2268}
2018 2269
2270LJFOLD(FLOAD TOSTR IRFL_STR_LEN)
2271LJFOLDF(fload_str_len_tostr)
2272{
2273 if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fleft->op2 == IRTOSTR_CHAR)
2274 return INTFOLD(1);
2275 return NEXTFOLD;
2276}
2277
2019/* The C type ID of cdata objects is immutable. */ 2278/* The C type ID of cdata objects is immutable. */
2020LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID) 2279LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID)
2021LJFOLDF(fload_cdata_typeid_kgc) 2280LJFOLDF(fload_cdata_typeid_kgc)
@@ -2062,6 +2321,8 @@ LJFOLDF(fload_cdata_ptr_int64_cnew)
2062} 2321}
2063 2322
2064LJFOLD(FLOAD any IRFL_STR_LEN) 2323LJFOLD(FLOAD any IRFL_STR_LEN)
2324LJFOLD(FLOAD any IRFL_FUNC_ENV)
2325LJFOLD(FLOAD any IRFL_THREAD_ENV)
2065LJFOLD(FLOAD any IRFL_CDATA_CTYPEID) 2326LJFOLD(FLOAD any IRFL_CDATA_CTYPEID)
2066LJFOLD(FLOAD any IRFL_CDATA_PTR) 2327LJFOLD(FLOAD any IRFL_CDATA_PTR)
2067LJFOLD(FLOAD any IRFL_CDATA_INT) 2328LJFOLD(FLOAD any IRFL_CDATA_INT)
@@ -2081,7 +2342,7 @@ LJFOLDF(fwd_sload)
2081 TRef tr = lj_opt_cse(J); 2342 TRef tr = lj_opt_cse(J);
2082 return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; 2343 return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr;
2083 } else { 2344 } else {
2084 lua_assert(J->slot[fins->op1] != 0); 2345 lj_assertJ(J->slot[fins->op1] != 0, "uninitialized slot accessed");
2085 return J->slot[fins->op1]; 2346 return J->slot[fins->op1];
2086 } 2347 }
2087} 2348}
@@ -2127,6 +2388,17 @@ LJFOLDF(barrier_tnew_tdup)
2127 return DROPFOLD; 2388 return DROPFOLD;
2128} 2389}
2129 2390
2391/* -- Profiling ----------------------------------------------------------- */
2392
2393LJFOLD(PROF any any)
2394LJFOLDF(prof)
2395{
2396 IRRef ref = J->chain[IR_PROF];
2397 if (ref+1 == J->cur.nins) /* Drop neighbouring IR_PROF. */
2398 return ref;
2399 return EMITFOLD;
2400}
2401
2130/* -- Stores and allocations ---------------------------------------------- */ 2402/* -- Stores and allocations ---------------------------------------------- */
2131 2403
2132/* Stores and allocations cannot be folded or passed on to CSE in general. 2404/* Stores and allocations cannot be folded or passed on to CSE in general.
@@ -2149,8 +2421,9 @@ LJFOLD(XSTORE any any)
2149LJFOLDX(lj_opt_dse_xstore) 2421LJFOLDX(lj_opt_dse_xstore)
2150 2422
2151LJFOLD(NEWREF any any) /* Treated like a store. */ 2423LJFOLD(NEWREF any any) /* Treated like a store. */
2152LJFOLD(CALLS any any) 2424LJFOLD(CALLA any any)
2153LJFOLD(CALLL any any) /* Safeguard fallback. */ 2425LJFOLD(CALLL any any) /* Safeguard fallback. */
2426LJFOLD(CALLS any any)
2154LJFOLD(CALLXS any any) 2427LJFOLD(CALLXS any any)
2155LJFOLD(XBAR) 2428LJFOLD(XBAR)
2156LJFOLD(RETF any any) /* Modifies BASE. */ 2429LJFOLD(RETF any any) /* Modifies BASE. */
@@ -2158,6 +2431,7 @@ LJFOLD(TNEW any any)
2158LJFOLD(TDUP any) 2431LJFOLD(TDUP any)
2159LJFOLD(CNEW any any) 2432LJFOLD(CNEW any any)
2160LJFOLD(XSNEW any any) 2433LJFOLD(XSNEW any any)
2434LJFOLD(BUFHDR any any)
2161LJFOLDX(lj_ir_emit) 2435LJFOLDX(lj_ir_emit)
2162 2436
2163/* ------------------------------------------------------------------------ */ 2437/* ------------------------------------------------------------------------ */
@@ -2183,8 +2457,9 @@ TRef LJ_FASTCALL lj_opt_fold(jit_State *J)
2183 IRRef ref; 2457 IRRef ref;
2184 2458
2185 if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { 2459 if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) {
2186 lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | 2460 lj_assertJ(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) |
2187 JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT); 2461 JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT,
2462 "bad JIT_F_OPT_DEFAULT");
2188 /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ 2463 /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */
2189 if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) 2464 if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N)
2190 return lj_opt_cse(J); 2465 return lj_opt_cse(J);
@@ -2209,10 +2484,14 @@ retry:
2209 if (fins->op1 >= J->cur.nk) { 2484 if (fins->op1 >= J->cur.nk) {
2210 key += (uint32_t)IR(fins->op1)->o << 10; 2485 key += (uint32_t)IR(fins->op1)->o << 10;
2211 *fleft = *IR(fins->op1); 2486 *fleft = *IR(fins->op1);
2487 if (fins->op1 < REF_TRUE)
2488 fleft[1] = IR(fins->op1)[1];
2212 } 2489 }
2213 if (fins->op2 >= J->cur.nk) { 2490 if (fins->op2 >= J->cur.nk) {
2214 key += (uint32_t)IR(fins->op2)->o; 2491 key += (uint32_t)IR(fins->op2)->o;
2215 *fright = *IR(fins->op2); 2492 *fright = *IR(fins->op2);
2493 if (fins->op2 < REF_TRUE)
2494 fright[1] = IR(fins->op2)[1];
2216 } else { 2495 } else {
2217 key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */ 2496 key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */
2218 } 2497 }
@@ -2242,7 +2521,7 @@ retry:
2242 return lj_ir_kint(J, fins->i); 2521 return lj_ir_kint(J, fins->i);
2243 if (ref == FAILFOLD) 2522 if (ref == FAILFOLD)
2244 lj_trace_err(J, LJ_TRERR_GFAIL); 2523 lj_trace_err(J, LJ_TRERR_GFAIL);
2245 lua_assert(ref == DROPFOLD); 2524 lj_assertJ(ref == DROPFOLD, "bad fold result");
2246 return REF_DROP; 2525 return REF_DROP;
2247} 2526}
2248 2527
diff --git a/src/lj_opt_loop.c b/src/lj_opt_loop.c
index d5e1eb13..0e5189cd 100644
--- a/src/lj_opt_loop.c
+++ b/src/lj_opt_loop.c
@@ -11,7 +11,7 @@
11#if LJ_HASJIT 11#if LJ_HASJIT
12 12
13#include "lj_err.h" 13#include "lj_err.h"
14#include "lj_str.h" 14#include "lj_buf.h"
15#include "lj_ir.h" 15#include "lj_ir.h"
16#include "lj_jit.h" 16#include "lj_jit.h"
17#include "lj_iropt.h" 17#include "lj_iropt.h"
@@ -254,9 +254,16 @@ static void loop_subst_snap(jit_State *J, SnapShot *osnap,
254 J->cur.nsnapmap = (uint32_t)(nmap - J->cur.snapmap); 254 J->cur.nsnapmap = (uint32_t)(nmap - J->cur.snapmap);
255} 255}
256 256
257typedef struct LoopState {
258 jit_State *J;
259 IRRef1 *subst;
260 MSize sizesubst;
261} LoopState;
262
257/* Unroll loop. */ 263/* Unroll loop. */
258static void loop_unroll(jit_State *J) 264static void loop_unroll(LoopState *lps)
259{ 265{
266 jit_State *J = lps->J;
260 IRRef1 phi[LJ_MAX_PHI]; 267 IRRef1 phi[LJ_MAX_PHI];
261 uint32_t nphi = 0; 268 uint32_t nphi = 0;
262 IRRef1 *subst; 269 IRRef1 *subst;
@@ -265,13 +272,13 @@ static void loop_unroll(jit_State *J)
265 SnapEntry *loopmap, *psentinel; 272 SnapEntry *loopmap, *psentinel;
266 IRRef ins, invar; 273 IRRef ins, invar;
267 274
268 /* Use temp buffer for substitution table. 275 /* Allocate substitution table.
269 ** Only non-constant refs in [REF_BIAS,invar) are valid indexes. 276 ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
270 ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
271 */ 277 */
272 invar = J->cur.nins; 278 invar = J->cur.nins;
273 subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, 279 lps->sizesubst = invar - REF_BIAS;
274 (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS; 280 lps->subst = lj_mem_newvec(J->L, lps->sizesubst, IRRef1);
281 subst = lps->subst - REF_BIAS;
275 subst[REF_BASE] = REF_BASE; 282 subst[REF_BASE] = REF_BASE;
276 283
277 /* LOOP separates the pre-roll from the loop body. */ 284 /* LOOP separates the pre-roll from the loop body. */
@@ -292,7 +299,8 @@ static void loop_unroll(jit_State *J)
292 loopmap = &J->cur.snapmap[loopsnap->mapofs]; 299 loopmap = &J->cur.snapmap[loopsnap->mapofs];
293 /* The PC of snapshot #0 and the loop snapshot must match. */ 300 /* The PC of snapshot #0 and the loop snapshot must match. */
294 psentinel = &loopmap[loopsnap->nent]; 301 psentinel = &loopmap[loopsnap->nent];
295 lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]); 302 lj_assertJ(*psentinel == J->cur.snapmap[J->cur.snap[0].nent],
303 "mismatched PC for loop snapshot");
296 *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ 304 *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */
297 305
298 /* Start substitution with snapshot #1 (#0 is empty for root traces). */ 306 /* Start substitution with snapshot #1 (#0 is empty for root traces). */
@@ -345,10 +353,12 @@ static void loop_unroll(jit_State *J)
345 irr = IR(ref); 353 irr = IR(ref);
346 goto phiconv; 354 goto phiconv;
347 } 355 }
348 } else if (ref != REF_DROP && irr->o == IR_CONV && 356 } else if (ref != REF_DROP && ref > invar &&
349 ref > invar && irr->op1 < invar) { 357 ((irr->o == IR_CONV && irr->op1 < invar) ||
350 /* May need an extra PHI for a CONV. */ 358 (irr->o == IR_ALEN && irr->op2 < invar &&
351 ref = irr->op1; 359 irr->op2 != REF_NIL))) {
360 /* May need an extra PHI for a CONV or ALEN hint. */
361 ref = irr->o == IR_CONV ? irr->op1 : irr->op2;
352 irr = IR(ref); 362 irr = IR(ref);
353 phiconv: 363 phiconv:
354 if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) { 364 if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
@@ -363,7 +373,7 @@ static void loop_unroll(jit_State *J)
363 } 373 }
364 if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ 374 if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */
365 J->cur.nsnapmap = (uint32_t)J->cur.snap[--J->cur.nsnap].mapofs; 375 J->cur.nsnapmap = (uint32_t)J->cur.snap[--J->cur.nsnap].mapofs;
366 lua_assert(J->cur.nsnapmap <= J->sizesnapmap); 376 lj_assertJ(J->cur.nsnapmap <= J->sizesnapmap, "bad snapshot map index");
367 *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ 377 *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */
368 378
369 loop_emit_phi(J, subst, phi, nphi, onsnap); 379 loop_emit_phi(J, subst, phi, nphi, onsnap);
@@ -396,7 +406,7 @@ static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
396static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud) 406static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
397{ 407{
398 UNUSED(L); UNUSED(dummy); 408 UNUSED(L); UNUSED(dummy);
399 loop_unroll((jit_State *)ud); 409 loop_unroll((LoopState *)ud);
400 return NULL; 410 return NULL;
401} 411}
402 412
@@ -406,7 +416,13 @@ int lj_opt_loop(jit_State *J)
406 IRRef nins = J->cur.nins; 416 IRRef nins = J->cur.nins;
407 SnapNo nsnap = J->cur.nsnap; 417 SnapNo nsnap = J->cur.nsnap;
408 MSize nsnapmap = J->cur.nsnapmap; 418 MSize nsnapmap = J->cur.nsnapmap;
409 int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt); 419 LoopState lps;
420 int errcode;
421 lps.J = J;
422 lps.subst = NULL;
423 lps.sizesubst = 0;
424 errcode = lj_vm_cpcall(J->L, NULL, &lps, cploop_opt);
425 lj_mem_freevec(J2G(J), lps.subst, lps.sizesubst, IRRef1);
410 if (LJ_UNLIKELY(errcode)) { 426 if (LJ_UNLIKELY(errcode)) {
411 lua_State *L = J->L; 427 lua_State *L = J->L;
412 if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */ 428 if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */
diff --git a/src/lj_opt_mem.c b/src/lj_opt_mem.c
index 281f29ad..80517f16 100644
--- a/src/lj_opt_mem.c
+++ b/src/lj_opt_mem.c
@@ -17,12 +17,14 @@
17#include "lj_ir.h" 17#include "lj_ir.h"
18#include "lj_jit.h" 18#include "lj_jit.h"
19#include "lj_iropt.h" 19#include "lj_iropt.h"
20#include "lj_ircall.h"
21#include "lj_dispatch.h"
20 22
21/* Some local macros to save typing. Undef'd at the end. */ 23/* Some local macros to save typing. Undef'd at the end. */
22#define IR(ref) (&J->cur.ir[(ref)]) 24#define IR(ref) (&J->cur.ir[(ref)])
23#define fins (&J->fold.ins) 25#define fins (&J->fold.ins)
24#define fleft (&J->fold.left) 26#define fleft (J->fold.left)
25#define fright (&J->fold.right) 27#define fright (J->fold.right)
26 28
27/* 29/*
28** Caveat #1: return value is not always a TRef -- only use with tref_ref(). 30** Caveat #1: return value is not always a TRef -- only use with tref_ref().
@@ -55,8 +57,8 @@ static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
55{ 57{
56 IRIns *taba = IR(ta), *tabb = IR(tb); 58 IRIns *taba = IR(ta), *tabb = IR(tb);
57 int newa, newb; 59 int newa, newb;
58 lua_assert(ta != tb); 60 lj_assertJ(ta != tb, "bad usage");
59 lua_assert(irt_istab(taba->t) && irt_istab(tabb->t)); 61 lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
60 /* Disambiguate new allocations. */ 62 /* Disambiguate new allocations. */
61 newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); 63 newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
62 newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); 64 newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
@@ -98,7 +100,7 @@ static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
98 /* Disambiguate array references based on index arithmetic. */ 100 /* Disambiguate array references based on index arithmetic. */
99 int32_t ofsa = 0, ofsb = 0; 101 int32_t ofsa = 0, ofsb = 0;
100 IRRef basea = ka, baseb = kb; 102 IRRef basea = ka, baseb = kb;
101 lua_assert(refb->o == IR_AREF); 103 lj_assertJ(refb->o == IR_AREF, "expected AREF");
102 /* Gather base and offset from t[base] or t[base+-ofs]. */ 104 /* Gather base and offset from t[base] or t[base+-ofs]. */
103 if (keya->o == IR_ADD && irref_isk(keya->op2)) { 105 if (keya->o == IR_ADD && irref_isk(keya->op2)) {
104 basea = keya->op1; 106 basea = keya->op1;
@@ -116,8 +118,9 @@ static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
116 return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ 118 return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
117 } else { 119 } else {
118 /* Disambiguate hash references based on the type of their keys. */ 120 /* Disambiguate hash references based on the type of their keys. */
119 lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && 121 lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
120 (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF)); 122 (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
123 "bad xREF IR op %d or %d", refa->o, refb->o);
121 if (!irt_sametype(keya->t, keyb->t)) 124 if (!irt_sametype(keya->t, keyb->t))
122 return ALIAS_NO; /* Different key types. */ 125 return ALIAS_NO; /* Different key types. */
123 } 126 }
@@ -191,7 +194,8 @@ static TRef fwd_ahload(jit_State *J, IRRef xref)
191 if (key->o == IR_KSLOT) key = IR(key->op1); 194 if (key->o == IR_KSLOT) key = IR(key->op1);
192 lj_ir_kvalue(J->L, &keyv, key); 195 lj_ir_kvalue(J->L, &keyv, key);
193 tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); 196 tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
194 lua_assert(itype2irt(tv) == irt_type(fins->t)); 197 lj_assertJ(itype2irt(tv) == irt_type(fins->t),
198 "mismatched type in constant table");
195 if (irt_isnum(fins->t)) 199 if (irt_isnum(fins->t))
196 return lj_ir_knum_u64(J, tv->u64); 200 return lj_ir_knum_u64(J, tv->u64);
197 else if (LJ_DUALNUM && irt_isint(fins->t)) 201 else if (LJ_DUALNUM && irt_isint(fins->t))
@@ -309,7 +313,21 @@ int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
309 return 1; /* No conflict. Can fold to niltv. */ 313 return 1; /* No conflict. Can fold to niltv. */
310} 314}
311 315
312/* Check whether there's no aliasing NEWREF for the left operand. */ 316/* Check whether there's no aliasing table.clear. */
317static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
318{
319 IRRef ref = J->chain[IR_CALLS];
320 while (ref > lim) {
321 IRIns *calls = IR(ref);
322 if (calls->op2 == IRCALL_lj_tab_clear &&
323 (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
324 return 0; /* Conflict. */
325 ref = calls->prev;
326 }
327 return 1; /* No conflict. Can safely FOLD/CSE. */
328}
329
330/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
313int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim) 331int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
314{ 332{
315 IRRef ta = fins->op1; 333 IRRef ta = fins->op1;
@@ -320,7 +338,7 @@ int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
320 return 0; /* Conflict. */ 338 return 0; /* Conflict. */
321 ref = newref->prev; 339 ref = newref->prev;
322 } 340 }
323 return 1; /* No conflict. Can safely FOLD/CSE. */ 341 return fwd_aa_tab_clear(J, lim, ta);
324} 342}
325 343
326/* ASTORE/HSTORE elimination. */ 344/* ASTORE/HSTORE elimination. */
@@ -348,7 +366,7 @@ TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
348 IRIns *ir; 366 IRIns *ir;
349 /* Check for any intervening guards (includes conflicting loads). */ 367 /* Check for any intervening guards (includes conflicting loads). */
350 for (ir = IR(J->cur.nins-1); ir > store; ir--) 368 for (ir = IR(J->cur.nins-1); ir > store; ir--)
351 if (irt_isguard(ir->t) || ir->o == IR_CALLL) 369 if (irt_isguard(ir->t) || ir->o == IR_ALEN)
352 goto doemit; /* No elimination possible. */ 370 goto doemit; /* No elimination possible. */
353 /* Remove redundant store from chain and replace with NOP. */ 371 /* Remove redundant store from chain and replace with NOP. */
354 *refp = store->prev; 372 *refp = store->prev;
@@ -366,6 +384,67 @@ doemit:
366 return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ 384 return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
367} 385}
368 386
387/* ALEN forwarding. */
388TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J)
389{
390 IRRef tab = fins->op1; /* Table reference. */
391 IRRef lim = tab; /* Search limit. */
392 IRRef ref;
393
394 /* Search for conflicting HSTORE with numeric key. */
395 ref = J->chain[IR_HSTORE];
396 while (ref > lim) {
397 IRIns *store = IR(ref);
398 IRIns *href = IR(store->op1);
399 IRIns *key = IR(href->op2);
400 if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
401 lim = ref; /* Conflicting store found, limits search for ALEN. */
402 break;
403 }
404 ref = store->prev;
405 }
406
407 /* Try to find a matching ALEN. */
408 ref = J->chain[IR_ALEN];
409 while (ref > lim) {
410 /* CSE for ALEN only depends on the table, not the hint. */
411 if (IR(ref)->op1 == tab) {
412 IRRef sref;
413
414 /* Search for aliasing table.clear. */
415 if (!fwd_aa_tab_clear(J, ref, tab))
416 break;
417
418 /* Search for hint-forwarding or conflicting store. */
419 sref = J->chain[IR_ASTORE];
420 while (sref > ref) {
421 IRIns *store = IR(sref);
422 IRIns *aref = IR(store->op1);
423 IRIns *fref = IR(aref->op1);
424 if (tab == fref->op1) { /* ASTORE to the same table. */
425 /* Detect t[#t+1] = x idiom for push. */
426 IRIns *idx = IR(aref->op2);
427 if (!irt_isnil(store->t) &&
428 idx->o == IR_ADD && idx->op1 == ref &&
429 IR(idx->op2)->o == IR_KINT && IR(idx->op2)->i == 1) {
430 /* Note: this requires an extra PHI check in loop unroll. */
431 fins->op2 = aref->op2; /* Set ALEN hint. */
432 }
433 goto doemit; /* Conflicting store, possibly giving a hint. */
434 } else if (aa_table(J, tab, fref->op1) == ALIAS_NO) {
435 goto doemit; /* Conflicting store. */
436 }
437 sref = store->prev;
438 }
439
440 return ref; /* Plain ALEN forwarding. */
441 }
442 ref = IR(ref)->prev;
443 }
444doemit:
445 return EMITFOLD;
446}
447
369/* -- ULOAD forwarding ---------------------------------------------------- */ 448/* -- ULOAD forwarding ---------------------------------------------------- */
370 449
371/* The current alias analysis for upvalues is very simplistic. It only 450/* The current alias analysis for upvalues is very simplistic. It only
@@ -415,7 +494,6 @@ TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
415 494
416cselim: 495cselim:
417 /* Try to find a matching load. Below the conflicting store, if any. */ 496 /* Try to find a matching load. Below the conflicting store, if any. */
418
419 ref = J->chain[IR_ULOAD]; 497 ref = J->chain[IR_ULOAD];
420 while (ref > lim) { 498 while (ref > lim) {
421 IRIns *ir = IR(ref); 499 IRIns *ir = IR(ref);
@@ -830,35 +908,6 @@ doemit:
830 return EMITFOLD; /* Otherwise we have a conflict or simply no match. */ 908 return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
831} 909}
832 910
833/* -- Forwarding of lj_tab_len -------------------------------------------- */
834
835/* This is rather simplistic right now, but better than nothing. */
836TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
837{
838 IRRef tab = fins->op1; /* Table reference. */
839 IRRef lim = tab; /* Search limit. */
840 IRRef ref;
841
842 /* Any ASTORE is a conflict and limits the search. */
843 if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
844
845 /* Search for conflicting HSTORE with numeric key. */
846 ref = J->chain[IR_HSTORE];
847 while (ref > lim) {
848 IRIns *store = IR(ref);
849 IRIns *href = IR(store->op1);
850 IRIns *key = IR(href->op2);
851 if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
852 lim = ref; /* Conflicting store found, limits search for TLEN. */
853 break;
854 }
855 ref = store->prev;
856 }
857
858 /* Try to find a matching load. Below the conflicting store, if any. */
859 return lj_opt_cselim(J, lim);
860}
861
862/* -- ASTORE/HSTORE previous type analysis -------------------------------- */ 911/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
863 912
864/* Check whether the previous value for a table store is non-nil. 913/* Check whether the previous value for a table store is non-nil.
diff --git a/src/lj_opt_narrow.c b/src/lj_opt_narrow.c
index 28d3c255..a381d8d8 100644
--- a/src/lj_opt_narrow.c
+++ b/src/lj_opt_narrow.c
@@ -372,17 +372,17 @@ static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
372 } else if (op == NARROW_CONV) { 372 } else if (op == NARROW_CONV) {
373 *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */ 373 *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */
374 } else if (op == NARROW_SEXT) { 374 } else if (op == NARROW_SEXT) {
375 lua_assert(sp >= nc->stack+1); 375 lj_assertJ(sp >= nc->stack+1, "stack underflow");
376 sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], 376 sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
377 (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); 377 (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
378 } else if (op == NARROW_INT) { 378 } else if (op == NARROW_INT) {
379 lua_assert(next < last); 379 lj_assertJ(next < last, "missing arg to NARROW_INT");
380 *sp++ = nc->t == IRT_I64 ? 380 *sp++ = nc->t == IRT_I64 ?
381 lj_ir_kint64(J, (int64_t)(int32_t)*next++) : 381 lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
382 lj_ir_kint(J, *next++); 382 lj_ir_kint(J, *next++);
383 } else { /* Regular IROpT. Pops two operands and pushes one result. */ 383 } else { /* Regular IROpT. Pops two operands and pushes one result. */
384 IRRef mode = nc->mode; 384 IRRef mode = nc->mode;
385 lua_assert(sp >= nc->stack+2); 385 lj_assertJ(sp >= nc->stack+2, "stack underflow");
386 sp--; 386 sp--;
387 /* Omit some overflow checks for array indexing. See comments above. */ 387 /* Omit some overflow checks for array indexing. See comments above. */
388 if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { 388 if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
@@ -398,7 +398,7 @@ static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
398 narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); 398 narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
399 } 399 }
400 } 400 }
401 lua_assert(sp == nc->stack+1); 401 lj_assertJ(sp == nc->stack+1, "stack misalignment");
402 return nc->stack[0]; 402 return nc->stack[0];
403} 403}
404 404
@@ -452,7 +452,7 @@ static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
452TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) 452TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
453{ 453{
454 IRIns *ir; 454 IRIns *ir;
455 lua_assert(tref_isnumber(tr)); 455 lj_assertJ(tref_isnumber(tr), "expected number type");
456 if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ 456 if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
457 return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); 457 return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
458 /* Omit some overflow checks for array indexing. See comments above. */ 458 /* Omit some overflow checks for array indexing. See comments above. */
@@ -499,7 +499,7 @@ TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
499/* Narrow C array index (overflow undefined). */ 499/* Narrow C array index (overflow undefined). */
500TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) 500TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
501{ 501{
502 lua_assert(tref_isnumber(tr)); 502 lj_assertJ(tref_isnumber(tr), "expected number type");
503 if (tref_isnum(tr)) 503 if (tref_isnum(tr))
504 return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY); 504 return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY);
505 /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ 505 /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
@@ -551,11 +551,16 @@ TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
551{ 551{
552 rc = conv_str_tonum(J, rc, vc); 552 rc = conv_str_tonum(J, rc, vc);
553 if (tref_isinteger(rc)) { 553 if (tref_isinteger(rc)) {
554 if ((uint32_t)numberVint(vc) != 0x80000000u) 554 uint32_t k = (uint32_t)numberVint(vc);
555 return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc); 555 if ((LJ_DUALNUM || k != 0) && k != 0x80000000u) {
556 TRef zero = lj_ir_kint(J, 0);
557 if (!LJ_DUALNUM)
558 emitir(IRTGI(IR_NE), rc, zero);
559 return emitir(IRTGI(IR_SUBOV), zero, rc);
560 }
556 rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT); 561 rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
557 } 562 }
558 return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J)); 563 return emitir(IRTN(IR_NEG), rc, lj_ir_ksimd(J, LJ_KSIMD_NEG));
559} 564}
560 565
561/* Narrowing of modulo operator. */ 566/* Narrowing of modulo operator. */
@@ -588,10 +593,10 @@ TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
588 /* Narrowing must be unconditional to preserve (-x)^i semantics. */ 593 /* Narrowing must be unconditional to preserve (-x)^i semantics. */
589 if (tvisint(vc) || numisint(numV(vc))) { 594 if (tvisint(vc) || numisint(numV(vc))) {
590 int checkrange = 0; 595 int checkrange = 0;
591 /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */ 596 /* pow() is faster for bigger exponents. But do this only for (+k)^i. */
592 if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) { 597 if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) {
593 int32_t k = numberVint(vc); 598 int32_t k = numberVint(vc);
594 if (!(k >= -65536 && k <= 65536)) goto split_pow; 599 if (!(k >= -65536 && k <= 65536)) goto force_pow_num;
595 checkrange = 1; 600 checkrange = 1;
596 } 601 }
597 if (!tref_isinteger(rc)) { 602 if (!tref_isinteger(rc)) {
@@ -602,19 +607,11 @@ TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
602 TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536)); 607 TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
603 emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536)); 608 emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
604 } 609 }
605 return emitir(IRTN(IR_POW), rb, rc); 610 } else {
611force_pow_num:
612 rc = lj_ir_tonum(J, rc); /* Want POW(num, num), not POW(num, int). */
606 } 613 }
607split_pow: 614 return emitir(IRTN(IR_POW), rb, rc);
608 /* FOLD covers most cases, but some are easier to do here. */
609 if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
610 return rb; /* 1 ^ x ==> 1 */
611 rc = lj_ir_tonum(J, rc);
612 if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
613 return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT); /* x ^ 0.5 ==> sqrt(x) */
614 /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
615 rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
616 rc = emitir(IRTN(IR_MUL), rb, rc);
617 return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
618} 615}
619 616
620/* -- Predictive narrowing of induction variables ------------------------- */ 617/* -- Predictive narrowing of induction variables ------------------------- */
@@ -630,9 +627,10 @@ static int narrow_forl(jit_State *J, cTValue *o)
630/* Narrow the FORL index type by looking at the runtime values. */ 627/* Narrow the FORL index type by looking at the runtime values. */
631IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) 628IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
632{ 629{
633 lua_assert(tvisnumber(&tv[FORL_IDX]) && 630 lj_assertJ(tvisnumber(&tv[FORL_IDX]) &&
634 tvisnumber(&tv[FORL_STOP]) && 631 tvisnumber(&tv[FORL_STOP]) &&
635 tvisnumber(&tv[FORL_STEP])); 632 tvisnumber(&tv[FORL_STEP]),
633 "expected number types");
636 /* Narrow only if the runtime values of start/stop/step are all integers. */ 634 /* Narrow only if the runtime values of start/stop/step are all integers. */
637 if (narrow_forl(J, &tv[FORL_IDX]) && 635 if (narrow_forl(J, &tv[FORL_IDX]) &&
638 narrow_forl(J, &tv[FORL_STOP]) && 636 narrow_forl(J, &tv[FORL_STOP]) &&
diff --git a/src/lj_opt_sink.c b/src/lj_opt_sink.c
index df7f58af..11101702 100644
--- a/src/lj_opt_sink.c
+++ b/src/lj_opt_sink.c
@@ -78,8 +78,7 @@ static void sink_mark_ins(jit_State *J)
78 switch (ir->o) { 78 switch (ir->o) {
79 case IR_BASE: 79 case IR_BASE:
80 return; /* Finished. */ 80 return; /* Finished. */
81 case IR_CALLL: /* IRCALL_lj_tab_len */ 81 case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR: case IR_ALEN:
82 case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR:
83 irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */ 82 irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */
84 break; 83 break;
85 case IR_FLOAD: 84 case IR_FLOAD:
@@ -165,8 +164,8 @@ static void sink_remark_phi(jit_State *J)
165/* Sweep instructions and tag sunken allocations and stores. */ 164/* Sweep instructions and tag sunken allocations and stores. */
166static void sink_sweep_ins(jit_State *J) 165static void sink_sweep_ins(jit_State *J)
167{ 166{
168 IRIns *ir, *irfirst = IR(J->cur.nk); 167 IRIns *ir, *irbase = IR(REF_BASE);
169 for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) { 168 for (ir = IR(J->cur.nins-1) ; ir >= irbase; ir--) {
170 switch (ir->o) { 169 switch (ir->o) {
171 case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: { 170 case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
172 IRIns *ira = sink_checkalloc(J, ir); 171 IRIns *ira = sink_checkalloc(J, ir);
@@ -216,6 +215,13 @@ static void sink_sweep_ins(jit_State *J)
216 break; 215 break;
217 } 216 }
218 } 217 }
218 for (ir = IR(J->cur.nk); ir < irbase; ir++) {
219 irt_clearmark(ir->t);
220 ir->prev = REGSP_INIT;
221 /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
222 if (irt_is64(ir->t) && ir->o != IR_KNULL)
223 ir++;
224 }
219} 225}
220 226
221/* Allocation sinking and store sinking. 227/* Allocation sinking and store sinking.
diff --git a/src/lj_opt_split.c b/src/lj_opt_split.c
index a517fa8a..798a02cc 100644
--- a/src/lj_opt_split.c
+++ b/src/lj_opt_split.c
@@ -8,14 +8,15 @@
8 8
9#include "lj_obj.h" 9#include "lj_obj.h"
10 10
11#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) 11#if LJ_HASJIT && (LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI))
12 12
13#include "lj_err.h" 13#include "lj_err.h"
14#include "lj_str.h" 14#include "lj_buf.h"
15#include "lj_ir.h" 15#include "lj_ir.h"
16#include "lj_jit.h" 16#include "lj_jit.h"
17#include "lj_ircall.h" 17#include "lj_ircall.h"
18#include "lj_iropt.h" 18#include "lj_iropt.h"
19#include "lj_dispatch.h"
19#include "lj_vm.h" 20#include "lj_vm.h"
20 21
21/* SPLIT pass: 22/* SPLIT pass:
@@ -139,6 +140,7 @@ static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
139 ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); 140 ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
140 return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); 141 return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
141} 142}
143#endif
142 144
143/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */ 145/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */
144static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir, 146static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
@@ -155,7 +157,6 @@ static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
155 ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id); 157 ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
156 return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp); 158 return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
157} 159}
158#endif
159 160
160/* Emit a CALLN with two split 64 bit arguments. */ 161/* Emit a CALLN with two split 64 bit arguments. */
161static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir, 162static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
@@ -192,9 +193,121 @@ static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
192 nref = ir->op1; 193 nref = ir->op1;
193 if (ofs == 0) return nref; 194 if (ofs == 0) return nref;
194 } 195 }
195 return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs)); 196 return split_emit(J, IRT(IR_ADD, IRT_PTR), nref, lj_ir_kint(J, ofs));
196} 197}
197 198
199#if LJ_HASFFI
200static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst,
201 IRIns *oir, IRIns *nir, IRIns *ir)
202{
203 IROp op = ir->o;
204 IRRef kref = nir->op2;
205 if (irref_isk(kref)) { /* Optimize constant shifts. */
206 int32_t k = (IR(kref)->i & 63);
207 IRRef lo = nir->op1, hi = hisubst[ir->op1];
208 if (op == IR_BROL || op == IR_BROR) {
209 if (op == IR_BROR) k = (-k & 63);
210 if (k >= 32) { IRRef t = lo; lo = hi; hi = t; k -= 32; }
211 if (k == 0) {
212 passthrough:
213 J->cur.nins--;
214 ir->prev = lo;
215 return hi;
216 } else {
217 TRef k1, k2;
218 IRRef t1, t2, t3, t4;
219 J->cur.nins--;
220 k1 = lj_ir_kint(J, k);
221 k2 = lj_ir_kint(J, (-k & 31));
222 t1 = split_emit(J, IRTI(IR_BSHL), lo, k1);
223 t2 = split_emit(J, IRTI(IR_BSHL), hi, k1);
224 t3 = split_emit(J, IRTI(IR_BSHR), lo, k2);
225 t4 = split_emit(J, IRTI(IR_BSHR), hi, k2);
226 ir->prev = split_emit(J, IRTI(IR_BOR), t1, t4);
227 return split_emit(J, IRTI(IR_BOR), t2, t3);
228 }
229 } else if (k == 0) {
230 goto passthrough;
231 } else if (k < 32) {
232 if (op == IR_BSHL) {
233 IRRef t1 = split_emit(J, IRTI(IR_BSHL), hi, kref);
234 IRRef t2 = split_emit(J, IRTI(IR_BSHR), lo, lj_ir_kint(J, (-k&31)));
235 return split_emit(J, IRTI(IR_BOR), t1, t2);
236 } else {
237 IRRef t1 = ir->prev, t2;
238 lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage");
239 nir->o = IR_BSHR;
240 t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31)));
241 ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2);
242 return split_emit(J, IRTI(op), hi, kref);
243 }
244 } else {
245 if (op == IR_BSHL) {
246 if (k == 32)
247 J->cur.nins--;
248 else
249 lo = ir->prev;
250 ir->prev = lj_ir_kint(J, 0);
251 return lo;
252 } else {
253 lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage");
254 if (k == 32) {
255 J->cur.nins--;
256 ir->prev = hi;
257 } else {
258 nir->op1 = hi;
259 }
260 if (op == IR_BSHR)
261 return lj_ir_kint(J, 0);
262 else
263 return split_emit(J, IRTI(IR_BSAR), hi, lj_ir_kint(J, 31));
264 }
265 }
266 }
267 return split_call_li(J, hisubst, oir, ir,
268 op - IR_BSHL + IRCALL_lj_carith_shl64);
269}
270
271static IRRef split_bitop(jit_State *J, IRRef1 *hisubst,
272 IRIns *nir, IRIns *ir)
273{
274 IROp op = ir->o;
275 IRRef hi, kref = nir->op2;
276 if (irref_isk(kref)) { /* Optimize bit operations with lo constant. */
277 int32_t k = IR(kref)->i;
278 if (k == 0 || k == -1) {
279 if (op == IR_BAND) k = ~k;
280 if (k == 0) {
281 J->cur.nins--;
282 ir->prev = nir->op1;
283 } else if (op == IR_BXOR) {
284 nir->o = IR_BNOT;
285 nir->op2 = 0;
286 } else {
287 J->cur.nins--;
288 ir->prev = kref;
289 }
290 }
291 }
292 hi = hisubst[ir->op1];
293 kref = hisubst[ir->op2];
294 if (irref_isk(kref)) { /* Optimize bit operations with hi constant. */
295 int32_t k = IR(kref)->i;
296 if (k == 0 || k == -1) {
297 if (op == IR_BAND) k = ~k;
298 if (k == 0) {
299 return hi;
300 } else if (op == IR_BXOR) {
301 return split_emit(J, IRTI(IR_BNOT), hi, 0);
302 } else {
303 return kref;
304 }
305 }
306 }
307 return split_emit(J, IRTI(op), hi, kref);
308}
309#endif
310
198/* Substitute references of a snapshot. */ 311/* Substitute references of a snapshot. */
199static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir) 312static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir)
200{ 313{
@@ -214,7 +327,7 @@ static void split_ir(jit_State *J)
214 IRRef nins = J->cur.nins, nk = J->cur.nk; 327 IRRef nins = J->cur.nins, nk = J->cur.nk;
215 MSize irlen = nins - nk; 328 MSize irlen = nins - nk;
216 MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1)); 329 MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
217 IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need); 330 IRIns *oir = (IRIns *)lj_buf_tmp(J->L, need);
218 IRRef1 *hisubst; 331 IRRef1 *hisubst;
219 IRRef ref, snref; 332 IRRef ref, snref;
220 SnapShot *snap; 333 SnapShot *snap;
@@ -241,6 +354,8 @@ static void split_ir(jit_State *J)
241 ir->prev = ref; /* Identity substitution for loword. */ 354 ir->prev = ref; /* Identity substitution for loword. */
242 hisubst[ref] = 0; 355 hisubst[ref] = 0;
243 } 356 }
357 if (irt_is64(ir->t) && ir->o != IR_KNULL)
358 ref++;
244 } 359 }
245 360
246 /* Process old IR instructions. */ 361 /* Process old IR instructions. */
@@ -288,32 +403,8 @@ static void split_ir(jit_State *J)
288 hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi); 403 hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
289 break; 404 break;
290 case IR_FPMATH: 405 case IR_FPMATH:
291 /* Try to rejoin pow from EXP2, MUL and LOG2. */
292 if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) {
293 IRIns *irp = IR(nir->op1);
294 if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) {
295 IRIns *irm4 = IR(irp->op1);
296 IRIns *irm3 = IR(irm4->op1);
297 IRIns *irm12 = IR(irm3->op1);
298 IRIns *irl1 = IR(irm12->op1);
299 if (irm12->op1 > J->loopref && irl1->o == IR_CALLN &&
300 irl1->op2 == IRCALL_lj_vm_log2) {
301 IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */
302 IRRef arg3 = irm3->op2, arg4 = irm4->op2;
303 J->cur.nins--;
304 tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3);
305 tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4);
306 ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow);
307 hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
308 break;
309 }
310 }
311 }
312 hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2); 406 hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
313 break; 407 break;
314 case IR_ATAN2:
315 hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2);
316 break;
317 case IR_LDEXP: 408 case IR_LDEXP:
318 hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp); 409 hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
319 break; 410 break;
@@ -321,7 +412,8 @@ static void split_ir(jit_State *J)
321 nir->o = IR_CONV; /* Pass through loword. */ 412 nir->o = IR_CONV; /* Pass through loword. */
322 nir->op2 = (IRT_INT << 5) | IRT_INT; 413 nir->op2 = (IRT_INT << 5) | IRT_INT;
323 hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP), 414 hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP),
324 hisubst[ir->op1], hisubst[ir->op2]); 415 hisubst[ir->op1],
416 lj_ir_kint(J, (int32_t)(0x7fffffffu + (ir->o == IR_NEG))));
325 break; 417 break;
326 case IR_SLOAD: 418 case IR_SLOAD:
327 if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */ 419 if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */
@@ -336,15 +428,24 @@ static void split_ir(jit_State *J)
336 case IR_STRTO: 428 case IR_STRTO:
337 hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); 429 hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
338 break; 430 break;
431 case IR_FLOAD:
432 lj_assertJ(ir->op1 == REF_NIL, "expected FLOAD from GG_State");
433 hi = lj_ir_kint(J, *(int32_t*)((char*)J2GG(J) + ir->op2 + LJ_LE*4));
434 nir->op2 += LJ_BE*4;
435 break;
339 case IR_XLOAD: { 436 case IR_XLOAD: {
340 IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */ 437 IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */
341 J->cur.nins--; 438 J->cur.nins--;
342 hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */ 439 hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */
440#if LJ_BE
441 hi = split_emit(J, IRT(IR_XLOAD, IRT_INT), hi, ir->op2);
442 inslo.t.irt = IRT_SOFTFP | (inslo.t.irt & IRT_GUARD);
443#endif
343 nref = lj_ir_nextins(J); 444 nref = lj_ir_nextins(J);
344 nir = IR(nref); 445 nir = IR(nref);
345 *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */ 446 *nir = inslo; /* Re-emit lo XLOAD. */
346 hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
347#if LJ_LE 447#if LJ_LE
448 hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
348 ir->prev = nref; 449 ir->prev = nref;
349#else 450#else
350 ir->prev = hi; hi = nref; 451 ir->prev = hi; hi = nref;
@@ -364,8 +465,9 @@ static void split_ir(jit_State *J)
364 break; 465 break;
365 } 466 }
366#endif 467#endif
367 lua_assert(st == IRT_INT || 468 lj_assertJ(st == IRT_INT ||
368 (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT))); 469 (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)),
470 "bad source type for CONV");
369 nir->o = IR_CALLN; 471 nir->o = IR_CALLN;
370#if LJ_32 && LJ_HASFFI 472#if LJ_32 && LJ_HASFFI
371 nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : 473 nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
@@ -395,7 +497,8 @@ static void split_ir(jit_State *J)
395 hi = nir->op2; 497 hi = nir->op2;
396 break; 498 break;
397 default: 499 default:
398 lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX); 500 lj_assertJ(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX,
501 "bad IR op %d", ir->o);
399 hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), 502 hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
400 hisubst[ir->op1], hisubst[ir->op2]); 503 hisubst[ir->op1], hisubst[ir->op2]);
401 break; 504 break;
@@ -438,8 +541,21 @@ static void split_ir(jit_State *J)
438 irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : 541 irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
439 IRCALL_lj_carith_powu64); 542 IRCALL_lj_carith_powu64);
440 break; 543 break;
544 case IR_BNOT:
545 hi = split_emit(J, IRTI(IR_BNOT), hiref, 0);
546 break;
547 case IR_BSWAP:
548 ir->prev = split_emit(J, IRTI(IR_BSWAP), hiref, 0);
549 hi = nref;
550 break;
551 case IR_BAND: case IR_BOR: case IR_BXOR:
552 hi = split_bitop(J, hisubst, nir, ir);
553 break;
554 case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
555 hi = split_bitshift(J, hisubst, oir, nir, ir);
556 break;
441 case IR_FLOAD: 557 case IR_FLOAD:
442 lua_assert(ir->op2 == IRFL_CDATA_INT64); 558 lj_assertJ(ir->op2 == IRFL_CDATA_INT64, "only INT64 supported");
443 hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4); 559 hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
444#if LJ_BE 560#if LJ_BE
445 ir->prev = hi; hi = nref; 561 ir->prev = hi; hi = nref;
@@ -505,7 +621,7 @@ static void split_ir(jit_State *J)
505 hi = nir->op2; 621 hi = nir->op2;
506 break; 622 break;
507 default: 623 default:
508 lua_assert(ir->o <= IR_NE); /* Comparisons. */ 624 lj_assertJ(ir->o <= IR_NE, "bad IR op %d", ir->o); /* Comparisons. */
509 split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); 625 split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
510 break; 626 break;
511 } 627 }
@@ -583,7 +699,7 @@ static void split_ir(jit_State *J)
583#if LJ_SOFTFP 699#if LJ_SOFTFP
584 if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { 700 if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
585 if (irt_isguard(ir->t)) { 701 if (irt_isguard(ir->t)) {
586 lua_assert(st == IRT_NUM && irt_isint(ir->t)); 702 lj_assertJ(st == IRT_NUM && irt_isint(ir->t), "bad CONV types");
587 J->cur.nins--; 703 J->cur.nins--;
588 ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); 704 ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
589 } else { 705 } else {
@@ -714,7 +830,7 @@ void lj_opt_split(jit_State *J)
714 if (!J->needsplit) 830 if (!J->needsplit)
715 J->needsplit = split_needsplit(J); 831 J->needsplit = split_needsplit(J);
716#else 832#else
717 lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */ 833 lj_assertJ(J->needsplit >= split_needsplit(J), "bad SPLIT state");
718#endif 834#endif
719 if (J->needsplit) { 835 if (J->needsplit) {
720 int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); 836 int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit);
diff --git a/src/lj_parse.c b/src/lj_parse.c
index e18f4bfb..3ae05446 100644
--- a/src/lj_parse.c
+++ b/src/lj_parse.c
@@ -13,6 +13,7 @@
13#include "lj_gc.h" 13#include "lj_gc.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_debug.h" 15#include "lj_debug.h"
16#include "lj_buf.h"
16#include "lj_str.h" 17#include "lj_str.h"
17#include "lj_tab.h" 18#include "lj_tab.h"
18#include "lj_func.h" 19#include "lj_func.h"
@@ -21,6 +22,7 @@
21#if LJ_HASFFI 22#if LJ_HASFFI
22#include "lj_ctype.h" 23#include "lj_ctype.h"
23#endif 24#endif
25#include "lj_strfmt.h"
24#include "lj_lex.h" 26#include "lj_lex.h"
25#include "lj_parse.h" 27#include "lj_parse.h"
26#include "lj_vm.h" 28#include "lj_vm.h"
@@ -161,16 +163,22 @@ LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD);
161LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); 163LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD);
162LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); 164LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD);
163 165
166#ifdef LUA_USE_ASSERT
167#define lj_assertFS(c, ...) (lj_assertG_(G(fs->L), (c), __VA_ARGS__))
168#else
169#define lj_assertFS(c, ...) ((void)fs)
170#endif
171
164/* -- Error handling ------------------------------------------------------ */ 172/* -- Error handling ------------------------------------------------------ */
165 173
166LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) 174LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em)
167{ 175{
168 lj_lex_error(ls, ls->token, em); 176 lj_lex_error(ls, ls->tok, em);
169} 177}
170 178
171LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken token) 179LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken tok)
172{ 180{
173 lj_lex_error(ls, ls->token, LJ_ERR_XTOKEN, lj_lex_token2str(ls, token)); 181 lj_lex_error(ls, ls->tok, LJ_ERR_XTOKEN, lj_lex_token2str(ls, tok));
174} 182}
175 183
176LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what) 184LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what)
@@ -198,7 +206,7 @@ static BCReg const_num(FuncState *fs, ExpDesc *e)
198{ 206{
199 lua_State *L = fs->L; 207 lua_State *L = fs->L;
200 TValue *o; 208 TValue *o;
201 lua_assert(expr_isnumk(e)); 209 lj_assertFS(expr_isnumk(e), "bad usage");
202 o = lj_tab_set(L, fs->kt, &e->u.nval); 210 o = lj_tab_set(L, fs->kt, &e->u.nval);
203 if (tvhaskslot(o)) 211 if (tvhaskslot(o))
204 return tvkslot(o); 212 return tvkslot(o);
@@ -223,7 +231,7 @@ static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
223/* Add a string constant. */ 231/* Add a string constant. */
224static BCReg const_str(FuncState *fs, ExpDesc *e) 232static BCReg const_str(FuncState *fs, ExpDesc *e)
225{ 233{
226 lua_assert(expr_isstrk(e) || e->k == VGLOBAL); 234 lj_assertFS(expr_isstrk(e) || e->k == VGLOBAL, "bad usage");
227 return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); 235 return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
228} 236}
229 237
@@ -311,7 +319,7 @@ static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
311{ 319{
312 BCIns *jmp = &fs->bcbase[pc].ins; 320 BCIns *jmp = &fs->bcbase[pc].ins;
313 BCPos offset = dest-(pc+1)+BCBIAS_J; 321 BCPos offset = dest-(pc+1)+BCBIAS_J;
314 lua_assert(dest != NO_JMP); 322 lj_assertFS(dest != NO_JMP, "uninitialized jump target");
315 if (offset > BCMAX_D) 323 if (offset > BCMAX_D)
316 err_syntax(fs->ls, LJ_ERR_XJUMP); 324 err_syntax(fs->ls, LJ_ERR_XJUMP);
317 setbc_d(jmp, offset); 325 setbc_d(jmp, offset);
@@ -360,7 +368,7 @@ static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
360 if (target == fs->pc) { 368 if (target == fs->pc) {
361 jmp_tohere(fs, list); 369 jmp_tohere(fs, list);
362 } else { 370 } else {
363 lua_assert(target < fs->pc); 371 lj_assertFS(target < fs->pc, "bad jump target");
364 jmp_patchval(fs, list, target, NO_REG, target); 372 jmp_patchval(fs, list, target, NO_REG, target);
365 } 373 }
366} 374}
@@ -390,7 +398,7 @@ static void bcreg_free(FuncState *fs, BCReg reg)
390{ 398{
391 if (reg >= fs->nactvar) { 399 if (reg >= fs->nactvar) {
392 fs->freereg--; 400 fs->freereg--;
393 lua_assert(reg == fs->freereg); 401 lj_assertFS(reg == fs->freereg, "bad regfree");
394 } 402 }
395} 403}
396 404
@@ -540,7 +548,7 @@ static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
540 } else if (e->k <= VKTRUE) { 548 } else if (e->k <= VKTRUE) {
541 ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); 549 ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
542 } else { 550 } else {
543 lua_assert(e->k == VVOID || e->k == VJMP); 551 lj_assertFS(e->k == VVOID || e->k == VJMP, "bad expr type %d", e->k);
544 return; 552 return;
545 } 553 }
546 bcemit_INS(fs, ins); 554 bcemit_INS(fs, ins);
@@ -635,7 +643,7 @@ static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
635 ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); 643 ins = BCINS_AD(BC_GSET, ra, const_str(fs, var));
636 } else { 644 } else {
637 BCReg ra, rc; 645 BCReg ra, rc;
638 lua_assert(var->k == VINDEXED); 646 lj_assertFS(var->k == VINDEXED, "bad expr type %d", var->k);
639 ra = expr_toanyreg(fs, e); 647 ra = expr_toanyreg(fs, e);
640 rc = var->u.s.aux; 648 rc = var->u.s.aux;
641 if ((int32_t)rc < 0) { 649 if ((int32_t)rc < 0) {
@@ -643,10 +651,12 @@ static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
643 } else if (rc > BCMAX_C) { 651 } else if (rc > BCMAX_C) {
644 ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); 652 ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1));
645 } else { 653 } else {
654#ifdef LUA_USE_ASSERT
646 /* Free late alloced key reg to avoid assert on free of value reg. */ 655 /* Free late alloced key reg to avoid assert on free of value reg. */
647 /* This can only happen when called from expr_table(). */ 656 /* This can only happen when called from expr_table(). */
648 lua_assert(e->k != VNONRELOC || ra < fs->nactvar || 657 if (e->k == VNONRELOC && ra >= fs->nactvar && rc >= ra)
649 rc < ra || (bcreg_free(fs, rc),1)); 658 bcreg_free(fs, rc);
659#endif
650 ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); 660 ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc);
651 } 661 }
652 } 662 }
@@ -660,16 +670,16 @@ static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key)
660 BCReg idx, func, obj = expr_toanyreg(fs, e); 670 BCReg idx, func, obj = expr_toanyreg(fs, e);
661 expr_free(fs, e); 671 expr_free(fs, e);
662 func = fs->freereg; 672 func = fs->freereg;
663 bcemit_AD(fs, BC_MOV, func+1, obj); /* Copy object to first argument. */ 673 bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */
664 lua_assert(expr_isstrk(key)); 674 lj_assertFS(expr_isstrk(key), "bad usage");
665 idx = const_str(fs, key); 675 idx = const_str(fs, key);
666 if (idx <= BCMAX_C) { 676 if (idx <= BCMAX_C) {
667 bcreg_reserve(fs, 2); 677 bcreg_reserve(fs, 2+LJ_FR2);
668 bcemit_ABC(fs, BC_TGETS, func, obj, idx); 678 bcemit_ABC(fs, BC_TGETS, func, obj, idx);
669 } else { 679 } else {
670 bcreg_reserve(fs, 3); 680 bcreg_reserve(fs, 3+LJ_FR2);
671 bcemit_AD(fs, BC_KSTR, func+2, idx); 681 bcemit_AD(fs, BC_KSTR, func+2+LJ_FR2, idx);
672 bcemit_ABC(fs, BC_TGETV, func, obj, func+2); 682 bcemit_ABC(fs, BC_TGETV, func, obj, func+2+LJ_FR2);
673 fs->freereg--; 683 fs->freereg--;
674 } 684 }
675 e->u.s.info = func; 685 e->u.s.info = func;
@@ -801,7 +811,8 @@ static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
801 else 811 else
802 rc = expr_toanyreg(fs, e2); 812 rc = expr_toanyreg(fs, e2);
803 /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ 813 /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */
804 lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); 814 lj_assertFS(expr_isnumk(e1) || e1->k == VNONRELOC,
815 "bad expr type %d", e1->k);
805 expr_toval(fs, e1); 816 expr_toval(fs, e1);
806 /* Avoid two consts to satisfy bytecode constraints. */ 817 /* Avoid two consts to satisfy bytecode constraints. */
807 if (expr_isnumk(e1) && !expr_isnumk(e2) && 818 if (expr_isnumk(e1) && !expr_isnumk(e2) &&
@@ -889,19 +900,20 @@ static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
889 if (op <= OPR_POW) { 900 if (op <= OPR_POW) {
890 bcemit_arith(fs, op, e1, e2); 901 bcemit_arith(fs, op, e1, e2);
891 } else if (op == OPR_AND) { 902 } else if (op == OPR_AND) {
892 lua_assert(e1->t == NO_JMP); /* List must be closed. */ 903 lj_assertFS(e1->t == NO_JMP, "jump list not closed");
893 expr_discharge(fs, e2); 904 expr_discharge(fs, e2);
894 jmp_append(fs, &e2->f, e1->f); 905 jmp_append(fs, &e2->f, e1->f);
895 *e1 = *e2; 906 *e1 = *e2;
896 } else if (op == OPR_OR) { 907 } else if (op == OPR_OR) {
897 lua_assert(e1->f == NO_JMP); /* List must be closed. */ 908 lj_assertFS(e1->f == NO_JMP, "jump list not closed");
898 expr_discharge(fs, e2); 909 expr_discharge(fs, e2);
899 jmp_append(fs, &e2->t, e1->t); 910 jmp_append(fs, &e2->t, e1->t);
900 *e1 = *e2; 911 *e1 = *e2;
901 } else if (op == OPR_CONCAT) { 912 } else if (op == OPR_CONCAT) {
902 expr_toval(fs, e2); 913 expr_toval(fs, e2);
903 if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { 914 if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) {
904 lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); 915 lj_assertFS(e1->u.s.info == bc_b(*bcptr(fs, e2))-1,
916 "bad CAT stack layout");
905 expr_free(fs, e1); 917 expr_free(fs, e1);
906 setbc_b(bcptr(fs, e2), e1->u.s.info); 918 setbc_b(bcptr(fs, e2), e1->u.s.info);
907 e1->u.s.info = e2->u.s.info; 919 e1->u.s.info = e2->u.s.info;
@@ -913,8 +925,9 @@ static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
913 } 925 }
914 e1->k = VRELOCABLE; 926 e1->k = VRELOCABLE;
915 } else { 927 } else {
916 lua_assert(op == OPR_NE || op == OPR_EQ || 928 lj_assertFS(op == OPR_NE || op == OPR_EQ ||
917 op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); 929 op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT,
930 "bad binop %d", op);
918 bcemit_comp(fs, op, e1, e2); 931 bcemit_comp(fs, op, e1, e2);
919 } 932 }
920} 933}
@@ -943,10 +956,10 @@ static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
943 e->u.s.info = fs->freereg-1; 956 e->u.s.info = fs->freereg-1;
944 e->k = VNONRELOC; 957 e->k = VNONRELOC;
945 } else { 958 } else {
946 lua_assert(e->k == VNONRELOC); 959 lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k);
947 } 960 }
948 } else { 961 } else {
949 lua_assert(op == BC_UNM || op == BC_LEN); 962 lj_assertFS(op == BC_UNM || op == BC_LEN, "bad unop %d", op);
950 if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ 963 if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */
951#if LJ_HASFFI 964#if LJ_HASFFI
952 if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */ 965 if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */
@@ -986,7 +999,7 @@ static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
986/* Check and consume optional token. */ 999/* Check and consume optional token. */
987static int lex_opt(LexState *ls, LexToken tok) 1000static int lex_opt(LexState *ls, LexToken tok)
988{ 1001{
989 if (ls->token == tok) { 1002 if (ls->tok == tok) {
990 lj_lex_next(ls); 1003 lj_lex_next(ls);
991 return 1; 1004 return 1;
992 } 1005 }
@@ -996,7 +1009,7 @@ static int lex_opt(LexState *ls, LexToken tok)
996/* Check and consume token. */ 1009/* Check and consume token. */
997static void lex_check(LexState *ls, LexToken tok) 1010static void lex_check(LexState *ls, LexToken tok)
998{ 1011{
999 if (ls->token != tok) 1012 if (ls->tok != tok)
1000 err_token(ls, tok); 1013 err_token(ls, tok);
1001 lj_lex_next(ls); 1014 lj_lex_next(ls);
1002} 1015}
@@ -1010,7 +1023,7 @@ static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
1010 } else { 1023 } else {
1011 const char *swhat = lj_lex_token2str(ls, what); 1024 const char *swhat = lj_lex_token2str(ls, what);
1012 const char *swho = lj_lex_token2str(ls, who); 1025 const char *swho = lj_lex_token2str(ls, who);
1013 lj_lex_error(ls, ls->token, LJ_ERR_XMATCH, swhat, swho, line); 1026 lj_lex_error(ls, ls->tok, LJ_ERR_XMATCH, swhat, swho, line);
1014 } 1027 }
1015 } 1028 }
1016} 1029}
@@ -1019,9 +1032,9 @@ static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
1019static GCstr *lex_str(LexState *ls) 1032static GCstr *lex_str(LexState *ls)
1020{ 1033{
1021 GCstr *s; 1034 GCstr *s;
1022 if (ls->token != TK_name && (LJ_52 || ls->token != TK_goto)) 1035 if (ls->tok != TK_name && (LJ_52 || ls->tok != TK_goto))
1023 err_token(ls, TK_name); 1036 err_token(ls, TK_name);
1024 s = strV(&ls->tokenval); 1037 s = strV(&ls->tokval);
1025 lj_lex_next(ls); 1038 lj_lex_next(ls);
1026 return s; 1039 return s;
1027} 1040}
@@ -1041,8 +1054,9 @@ static void var_new(LexState *ls, BCReg n, GCstr *name)
1041 lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); 1054 lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
1042 lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); 1055 lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
1043 } 1056 }
1044 lua_assert((uintptr_t)name < VARNAME__MAX || 1057 lj_assertFS((uintptr_t)name < VARNAME__MAX ||
1045 lj_tab_getstr(fs->kt, name) != NULL); 1058 lj_tab_getstr(fs->kt, name) != NULL,
1059 "unanchored variable name");
1046 /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ 1060 /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
1047 setgcref(ls->vstack[vtop].name, obj2gco(name)); 1061 setgcref(ls->vstack[vtop].name, obj2gco(name));
1048 fs->varmap[fs->nactvar+n] = (uint16_t)vtop; 1062 fs->varmap[fs->nactvar+n] = (uint16_t)vtop;
@@ -1097,7 +1111,7 @@ static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e)
1097 return i; /* Already exists. */ 1111 return i; /* Already exists. */
1098 /* Otherwise create a new one. */ 1112 /* Otherwise create a new one. */
1099 checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); 1113 checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues");
1100 lua_assert(e->k == VLOCAL || e->k == VUPVAL); 1114 lj_assertFS(e->k == VLOCAL || e->k == VUPVAL, "bad expr type %d", e->k);
1101 fs->uvmap[n] = (uint16_t)vidx; 1115 fs->uvmap[n] = (uint16_t)vidx;
1102 fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info); 1116 fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info);
1103 fs->nuv = n+1; 1117 fs->nuv = n+1;
@@ -1148,7 +1162,8 @@ static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc)
1148 lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); 1162 lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
1149 lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); 1163 lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
1150 } 1164 }
1151 lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL); 1165 lj_assertFS(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL,
1166 "unanchored label name");
1152 /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ 1167 /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
1153 setgcref(ls->vstack[vtop].name, obj2gco(name)); 1168 setgcref(ls->vstack[vtop].name, obj2gco(name));
1154 ls->vstack[vtop].startpc = pc; 1169 ls->vstack[vtop].startpc = pc;
@@ -1178,8 +1193,9 @@ static void gola_close(LexState *ls, VarInfo *vg)
1178 FuncState *fs = ls->fs; 1193 FuncState *fs = ls->fs;
1179 BCPos pc = vg->startpc; 1194 BCPos pc = vg->startpc;
1180 BCIns *ip = &fs->bcbase[pc].ins; 1195 BCIns *ip = &fs->bcbase[pc].ins;
1181 lua_assert(gola_isgoto(vg)); 1196 lj_assertFS(gola_isgoto(vg), "expected goto");
1182 lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO); 1197 lj_assertFS(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO,
1198 "bad bytecode op %d", bc_op(*ip));
1183 setbc_a(ip, vg->slot); 1199 setbc_a(ip, vg->slot);
1184 if (bc_op(*ip) == BC_JMP) { 1200 if (bc_op(*ip) == BC_JMP) {
1185 BCPos next = jmp_next(fs, pc); 1201 BCPos next = jmp_next(fs, pc);
@@ -1198,9 +1214,9 @@ static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx)
1198 if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) { 1214 if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) {
1199 if (vg->slot < vl->slot) { 1215 if (vg->slot < vl->slot) {
1200 GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name); 1216 GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name);
1201 lua_assert((uintptr_t)name >= VARNAME__MAX); 1217 lj_assertLS((uintptr_t)name >= VARNAME__MAX, "expected goto name");
1202 ls->linenumber = ls->fs->bcbase[vg->startpc].line; 1218 ls->linenumber = ls->fs->bcbase[vg->startpc].line;
1203 lua_assert(strref(vg->name) != NAME_BREAK); 1219 lj_assertLS(strref(vg->name) != NAME_BREAK, "unexpected break");
1204 lj_lex_error(ls, 0, LJ_ERR_XGSCOPE, 1220 lj_lex_error(ls, 0, LJ_ERR_XGSCOPE,
1205 strdata(strref(vg->name)), strdata(name)); 1221 strdata(strref(vg->name)), strdata(name));
1206 } 1222 }
@@ -1264,7 +1280,7 @@ static void fscope_begin(FuncState *fs, FuncScope *bl, int flags)
1264 bl->vstart = fs->ls->vtop; 1280 bl->vstart = fs->ls->vtop;
1265 bl->prev = fs->bl; 1281 bl->prev = fs->bl;
1266 fs->bl = bl; 1282 fs->bl = bl;
1267 lua_assert(fs->freereg == fs->nactvar); 1283 lj_assertFS(fs->freereg == fs->nactvar, "bad regalloc");
1268} 1284}
1269 1285
1270/* End a scope. */ 1286/* End a scope. */
@@ -1275,7 +1291,7 @@ static void fscope_end(FuncState *fs)
1275 fs->bl = bl->prev; 1291 fs->bl = bl->prev;
1276 var_remove(ls, bl->nactvar); 1292 var_remove(ls, bl->nactvar);
1277 fs->freereg = fs->nactvar; 1293 fs->freereg = fs->nactvar;
1278 lua_assert(bl->nactvar == fs->nactvar); 1294 lj_assertFS(bl->nactvar == fs->nactvar, "bad regalloc");
1279 if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL) 1295 if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL)
1280 bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); 1296 bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
1281 if ((bl->flags & FSCOPE_BREAK)) { 1297 if ((bl->flags & FSCOPE_BREAK)) {
@@ -1362,13 +1378,13 @@ static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
1362 Node *n = &node[i]; 1378 Node *n = &node[i];
1363 if (tvhaskslot(&n->val)) { 1379 if (tvhaskslot(&n->val)) {
1364 ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); 1380 ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
1365 lua_assert(!tvisint(&n->key)); 1381 lj_assertFS(!tvisint(&n->key), "unexpected integer key");
1366 if (tvisnum(&n->key)) { 1382 if (tvisnum(&n->key)) {
1367 TValue *tv = &((TValue *)kptr)[kidx]; 1383 TValue *tv = &((TValue *)kptr)[kidx];
1368 if (LJ_DUALNUM) { 1384 if (LJ_DUALNUM) {
1369 lua_Number nn = numV(&n->key); 1385 lua_Number nn = numV(&n->key);
1370 int32_t k = lj_num2int(nn); 1386 int32_t k = lj_num2int(nn);
1371 lua_assert(!tvismzero(&n->key)); 1387 lj_assertFS(!tvismzero(&n->key), "unexpected -0 key");
1372 if ((lua_Number)k == nn) 1388 if ((lua_Number)k == nn)
1373 setintV(tv, k); 1389 setintV(tv, k);
1374 else 1390 else
@@ -1416,98 +1432,66 @@ static void fs_fixup_line(FuncState *fs, GCproto *pt,
1416 uint8_t *li = (uint8_t *)lineinfo; 1432 uint8_t *li = (uint8_t *)lineinfo;
1417 do { 1433 do {
1418 BCLine delta = base[i].line - first; 1434 BCLine delta = base[i].line - first;
1419 lua_assert(delta >= 0 && delta < 256); 1435 lj_assertFS(delta >= 0 && delta < 256, "bad line delta");
1420 li[i] = (uint8_t)delta; 1436 li[i] = (uint8_t)delta;
1421 } while (++i < n); 1437 } while (++i < n);
1422 } else if (LJ_LIKELY(numline < 65536)) { 1438 } else if (LJ_LIKELY(numline < 65536)) {
1423 uint16_t *li = (uint16_t *)lineinfo; 1439 uint16_t *li = (uint16_t *)lineinfo;
1424 do { 1440 do {
1425 BCLine delta = base[i].line - first; 1441 BCLine delta = base[i].line - first;
1426 lua_assert(delta >= 0 && delta < 65536); 1442 lj_assertFS(delta >= 0 && delta < 65536, "bad line delta");
1427 li[i] = (uint16_t)delta; 1443 li[i] = (uint16_t)delta;
1428 } while (++i < n); 1444 } while (++i < n);
1429 } else { 1445 } else {
1430 uint32_t *li = (uint32_t *)lineinfo; 1446 uint32_t *li = (uint32_t *)lineinfo;
1431 do { 1447 do {
1432 BCLine delta = base[i].line - first; 1448 BCLine delta = base[i].line - first;
1433 lua_assert(delta >= 0); 1449 lj_assertFS(delta >= 0, "bad line delta");
1434 li[i] = (uint32_t)delta; 1450 li[i] = (uint32_t)delta;
1435 } while (++i < n); 1451 } while (++i < n);
1436 } 1452 }
1437} 1453}
1438 1454
1439/* Resize buffer if needed. */
1440static LJ_NOINLINE void fs_buf_resize(LexState *ls, MSize len)
1441{
1442 MSize sz = ls->sb.sz * 2;
1443 while (ls->sb.n + len > sz) sz = sz * 2;
1444 lj_str_resizebuf(ls->L, &ls->sb, sz);
1445}
1446
1447static LJ_AINLINE void fs_buf_need(LexState *ls, MSize len)
1448{
1449 if (LJ_UNLIKELY(ls->sb.n + len > ls->sb.sz))
1450 fs_buf_resize(ls, len);
1451}
1452
1453/* Add string to buffer. */
1454static void fs_buf_str(LexState *ls, const char *str, MSize len)
1455{
1456 char *p = ls->sb.buf + ls->sb.n;
1457 MSize i;
1458 ls->sb.n += len;
1459 for (i = 0; i < len; i++) p[i] = str[i];
1460}
1461
1462/* Add ULEB128 value to buffer. */
1463static void fs_buf_uleb128(LexState *ls, uint32_t v)
1464{
1465 MSize n = ls->sb.n;
1466 uint8_t *p = (uint8_t *)ls->sb.buf;
1467 for (; v >= 0x80; v >>= 7)
1468 p[n++] = (uint8_t)((v & 0x7f) | 0x80);
1469 p[n++] = (uint8_t)v;
1470 ls->sb.n = n;
1471}
1472
1473/* Prepare variable info for prototype. */ 1455/* Prepare variable info for prototype. */
1474static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar) 1456static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
1475{ 1457{
1476 VarInfo *vs =ls->vstack, *ve; 1458 VarInfo *vs =ls->vstack, *ve;
1477 MSize i, n; 1459 MSize i, n;
1478 BCPos lastpc; 1460 BCPos lastpc;
1479 lj_str_resetbuf(&ls->sb); /* Copy to temp. string buffer. */ 1461 lj_buf_reset(&ls->sb); /* Copy to temp. string buffer. */
1480 /* Store upvalue names. */ 1462 /* Store upvalue names. */
1481 for (i = 0, n = fs->nuv; i < n; i++) { 1463 for (i = 0, n = fs->nuv; i < n; i++) {
1482 GCstr *s = strref(vs[fs->uvmap[i]].name); 1464 GCstr *s = strref(vs[fs->uvmap[i]].name);
1483 MSize len = s->len+1; 1465 MSize len = s->len+1;
1484 fs_buf_need(ls, len); 1466 char *p = lj_buf_more(&ls->sb, len);
1485 fs_buf_str(ls, strdata(s), len); 1467 p = lj_buf_wmem(p, strdata(s), len);
1468 setsbufP(&ls->sb, p);
1486 } 1469 }
1487 *ofsvar = ls->sb.n; 1470 *ofsvar = sbuflen(&ls->sb);
1488 lastpc = 0; 1471 lastpc = 0;
1489 /* Store local variable names and compressed ranges. */ 1472 /* Store local variable names and compressed ranges. */
1490 for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) { 1473 for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) {
1491 if (!gola_isgotolabel(vs)) { 1474 if (!gola_isgotolabel(vs)) {
1492 GCstr *s = strref(vs->name); 1475 GCstr *s = strref(vs->name);
1493 BCPos startpc; 1476 BCPos startpc;
1477 char *p;
1494 if ((uintptr_t)s < VARNAME__MAX) { 1478 if ((uintptr_t)s < VARNAME__MAX) {
1495 fs_buf_need(ls, 1 + 2*5); 1479 p = lj_buf_more(&ls->sb, 1 + 2*5);
1496 ls->sb.buf[ls->sb.n++] = (uint8_t)(uintptr_t)s; 1480 *p++ = (char)(uintptr_t)s;
1497 } else { 1481 } else {
1498 MSize len = s->len+1; 1482 MSize len = s->len+1;
1499 fs_buf_need(ls, len + 2*5); 1483 p = lj_buf_more(&ls->sb, len + 2*5);
1500 fs_buf_str(ls, strdata(s), len); 1484 p = lj_buf_wmem(p, strdata(s), len);
1501 } 1485 }
1502 startpc = vs->startpc; 1486 startpc = vs->startpc;
1503 fs_buf_uleb128(ls, startpc-lastpc); 1487 p = lj_strfmt_wuleb128(p, startpc-lastpc);
1504 fs_buf_uleb128(ls, vs->endpc-startpc); 1488 p = lj_strfmt_wuleb128(p, vs->endpc-startpc);
1489 setsbufP(&ls->sb, p);
1505 lastpc = startpc; 1490 lastpc = startpc;
1506 } 1491 }
1507 } 1492 }
1508 fs_buf_need(ls, 1); 1493 lj_buf_putb(&ls->sb, '\0'); /* Terminator for varinfo. */
1509 ls->sb.buf[ls->sb.n++] = '\0'; /* Terminator for varinfo. */ 1494 return sbuflen(&ls->sb);
1510 return ls->sb.n;
1511} 1495}
1512 1496
1513/* Fixup variable info for prototype. */ 1497/* Fixup variable info for prototype. */
@@ -1515,7 +1499,7 @@ static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
1515{ 1499{
1516 setmref(pt->uvinfo, p); 1500 setmref(pt->uvinfo, p);
1517 setmref(pt->varinfo, (char *)p + ofsvar); 1501 setmref(pt->varinfo, (char *)p + ofsvar);
1518 memcpy(p, ls->sb.buf, ls->sb.n); /* Copy from temp. string buffer. */ 1502 memcpy(p, sbufB(&ls->sb), sbuflen(&ls->sb)); /* Copy from temp. buffer. */
1519} 1503}
1520#else 1504#else
1521 1505
@@ -1552,7 +1536,7 @@ static void fs_fixup_ret(FuncState *fs)
1552 } 1536 }
1553 fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */ 1537 fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */
1554 fscope_end(fs); 1538 fscope_end(fs);
1555 lua_assert(fs->bl == NULL); 1539 lj_assertFS(fs->bl == NULL, "bad scope nesting");
1556 /* May need to fixup returns encoded before first function was created. */ 1540 /* May need to fixup returns encoded before first function was created. */
1557 if (fs->flags & PROTO_FIXUP_RETURN) { 1541 if (fs->flags & PROTO_FIXUP_RETURN) {
1558 BCPos pc; 1542 BCPos pc;
@@ -1624,7 +1608,7 @@ static GCproto *fs_finish(LexState *ls, BCLine line)
1624 L->top--; /* Pop table of constants. */ 1608 L->top--; /* Pop table of constants. */
1625 ls->vtop = fs->vbase; /* Reset variable stack. */ 1609 ls->vtop = fs->vbase; /* Reset variable stack. */
1626 ls->fs = fs->prev; 1610 ls->fs = fs->prev;
1627 lua_assert(ls->fs != NULL || ls->token == TK_eof); 1611 lj_assertL(ls->fs != NULL || ls->tok == TK_eof, "bad parser state");
1628 return pt; 1612 return pt;
1629} 1613}
1630 1614
@@ -1718,15 +1702,15 @@ static void expr_bracket(LexState *ls, ExpDesc *v)
1718} 1702}
1719 1703
1720/* Get value of constant expression. */ 1704/* Get value of constant expression. */
1721static void expr_kvalue(TValue *v, ExpDesc *e) 1705static void expr_kvalue(FuncState *fs, TValue *v, ExpDesc *e)
1722{ 1706{
1707 UNUSED(fs);
1723 if (e->k <= VKTRUE) { 1708 if (e->k <= VKTRUE) {
1724 setitype(v, ~(uint32_t)e->k); 1709 setpriV(v, ~(uint32_t)e->k);
1725 } else if (e->k == VKSTR) { 1710 } else if (e->k == VKSTR) {
1726 setgcref(v->gcr, obj2gco(e->u.sval)); 1711 setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR);
1727 setitype(v, LJ_TSTR);
1728 } else { 1712 } else {
1729 lua_assert(tvisnumber(expr_numtv(e))); 1713 lj_assertFS(tvisnumber(expr_numtv(e)), "bad number constant");
1730 *v = *expr_numtv(e); 1714 *v = *expr_numtv(e);
1731 } 1715 }
1732} 1716}
@@ -1746,15 +1730,15 @@ static void expr_table(LexState *ls, ExpDesc *e)
1746 bcreg_reserve(fs, 1); 1730 bcreg_reserve(fs, 1);
1747 freg++; 1731 freg++;
1748 lex_check(ls, '{'); 1732 lex_check(ls, '{');
1749 while (ls->token != '}') { 1733 while (ls->tok != '}') {
1750 ExpDesc key, val; 1734 ExpDesc key, val;
1751 vcall = 0; 1735 vcall = 0;
1752 if (ls->token == '[') { 1736 if (ls->tok == '[') {
1753 expr_bracket(ls, &key); /* Already calls expr_toval. */ 1737 expr_bracket(ls, &key); /* Already calls expr_toval. */
1754 if (!expr_isk(&key)) expr_index(fs, e, &key); 1738 if (!expr_isk(&key)) expr_index(fs, e, &key);
1755 if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++; 1739 if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
1756 lex_check(ls, '='); 1740 lex_check(ls, '=');
1757 } else if ((ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) && 1741 } else if ((ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) &&
1758 lj_lex_lookahead(ls) == '=') { 1742 lj_lex_lookahead(ls) == '=') {
1759 expr_str(ls, &key); 1743 expr_str(ls, &key);
1760 lex_check(ls, '='); 1744 lex_check(ls, '=');
@@ -1776,11 +1760,11 @@ static void expr_table(LexState *ls, ExpDesc *e)
1776 fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); 1760 fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
1777 } 1761 }
1778 vcall = 0; 1762 vcall = 0;
1779 expr_kvalue(&k, &key); 1763 expr_kvalue(fs, &k, &key);
1780 v = lj_tab_set(fs->L, t, &k); 1764 v = lj_tab_set(fs->L, t, &k);
1781 lj_gc_anybarriert(fs->L, t); 1765 lj_gc_anybarriert(fs->L, t);
1782 if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */ 1766 if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */
1783 expr_kvalue(v, &val); 1767 expr_kvalue(fs, v, &val);
1784 } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */ 1768 } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */
1785 settabV(fs->L, v, t); /* Preserve key with table itself as value. */ 1769 settabV(fs->L, v, t); /* Preserve key with table itself as value. */
1786 fixt = 1; /* Fix this later, after all resizes. */ 1770 fixt = 1; /* Fix this later, after all resizes. */
@@ -1799,8 +1783,9 @@ static void expr_table(LexState *ls, ExpDesc *e)
1799 if (vcall) { 1783 if (vcall) {
1800 BCInsLine *ilp = &fs->bcbase[fs->pc-1]; 1784 BCInsLine *ilp = &fs->bcbase[fs->pc-1];
1801 ExpDesc en; 1785 ExpDesc en;
1802 lua_assert(bc_a(ilp->ins) == freg && 1786 lj_assertFS(bc_a(ilp->ins) == freg &&
1803 bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB)); 1787 bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB),
1788 "bad CALL code generation");
1804 expr_init(&en, VKNUM, 0); 1789 expr_init(&en, VKNUM, 0);
1805 en.u.nval.u32.lo = narr-1; 1790 en.u.nval.u32.lo = narr-1;
1806 en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ 1791 en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */
@@ -1830,7 +1815,7 @@ static void expr_table(LexState *ls, ExpDesc *e)
1830 for (i = 0; i <= hmask; i++) { 1815 for (i = 0; i <= hmask; i++) {
1831 Node *n = &node[i]; 1816 Node *n = &node[i];
1832 if (tvistab(&n->val)) { 1817 if (tvistab(&n->val)) {
1833 lua_assert(tabV(&n->val) == t); 1818 lj_assertFS(tabV(&n->val) == t, "bad dummy key in template table");
1834 setnilV(&n->val); /* Turn value into nil. */ 1819 setnilV(&n->val); /* Turn value into nil. */
1835 } 1820 }
1836 } 1821 }
@@ -1847,11 +1832,11 @@ static BCReg parse_params(LexState *ls, int needself)
1847 lex_check(ls, '('); 1832 lex_check(ls, '(');
1848 if (needself) 1833 if (needself)
1849 var_new_lit(ls, nparams++, "self"); 1834 var_new_lit(ls, nparams++, "self");
1850 if (ls->token != ')') { 1835 if (ls->tok != ')') {
1851 do { 1836 do {
1852 if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) { 1837 if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
1853 var_new(ls, nparams++, lex_str(ls)); 1838 var_new(ls, nparams++, lex_str(ls));
1854 } else if (ls->token == TK_dots) { 1839 } else if (ls->tok == TK_dots) {
1855 lj_lex_next(ls); 1840 lj_lex_next(ls);
1856 fs->flags |= PROTO_VARARG; 1841 fs->flags |= PROTO_VARARG;
1857 break; 1842 break;
@@ -1861,7 +1846,7 @@ static BCReg parse_params(LexState *ls, int needself)
1861 } while (lex_opt(ls, ',')); 1846 } while (lex_opt(ls, ','));
1862 } 1847 }
1863 var_add(ls, nparams); 1848 var_add(ls, nparams);
1864 lua_assert(fs->nactvar == nparams); 1849 lj_assertFS(fs->nactvar == nparams, "bad regalloc");
1865 bcreg_reserve(fs, nparams); 1850 bcreg_reserve(fs, nparams);
1866 lex_check(ls, ')'); 1851 lex_check(ls, ')');
1867 return nparams; 1852 return nparams;
@@ -1885,7 +1870,7 @@ static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
1885 fs.bclim = pfs->bclim - pfs->pc; 1870 fs.bclim = pfs->bclim - pfs->pc;
1886 bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */ 1871 bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */
1887 parse_chunk(ls); 1872 parse_chunk(ls);
1888 if (ls->token != TK_end) lex_match(ls, TK_end, TK_function, line); 1873 if (ls->tok != TK_end) lex_match(ls, TK_end, TK_function, line);
1889 pt = fs_finish(ls, (ls->lastline = ls->linenumber)); 1874 pt = fs_finish(ls, (ls->lastline = ls->linenumber));
1890 pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */ 1875 pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */
1891 pfs->bclim = (BCPos)(ls->sizebcstack - oldbase); 1876 pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
@@ -1924,13 +1909,13 @@ static void parse_args(LexState *ls, ExpDesc *e)
1924 BCIns ins; 1909 BCIns ins;
1925 BCReg base; 1910 BCReg base;
1926 BCLine line = ls->linenumber; 1911 BCLine line = ls->linenumber;
1927 if (ls->token == '(') { 1912 if (ls->tok == '(') {
1928#if !LJ_52 1913#if !LJ_52
1929 if (line != ls->lastline) 1914 if (line != ls->lastline)
1930 err_syntax(ls, LJ_ERR_XAMBIG); 1915 err_syntax(ls, LJ_ERR_XAMBIG);
1931#endif 1916#endif
1932 lj_lex_next(ls); 1917 lj_lex_next(ls);
1933 if (ls->token == ')') { /* f(). */ 1918 if (ls->tok == ')') { /* f(). */
1934 args.k = VVOID; 1919 args.k = VVOID;
1935 } else { 1920 } else {
1936 expr_list(ls, &args); 1921 expr_list(ls, &args);
@@ -1938,24 +1923,24 @@ static void parse_args(LexState *ls, ExpDesc *e)
1938 setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */ 1923 setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */
1939 } 1924 }
1940 lex_match(ls, ')', '(', line); 1925 lex_match(ls, ')', '(', line);
1941 } else if (ls->token == '{') { 1926 } else if (ls->tok == '{') {
1942 expr_table(ls, &args); 1927 expr_table(ls, &args);
1943 } else if (ls->token == TK_string) { 1928 } else if (ls->tok == TK_string) {
1944 expr_init(&args, VKSTR, 0); 1929 expr_init(&args, VKSTR, 0);
1945 args.u.sval = strV(&ls->tokenval); 1930 args.u.sval = strV(&ls->tokval);
1946 lj_lex_next(ls); 1931 lj_lex_next(ls);
1947 } else { 1932 } else {
1948 err_syntax(ls, LJ_ERR_XFUNARG); 1933 err_syntax(ls, LJ_ERR_XFUNARG);
1949 return; /* Silence compiler. */ 1934 return; /* Silence compiler. */
1950 } 1935 }
1951 lua_assert(e->k == VNONRELOC); 1936 lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k);
1952 base = e->u.s.info; /* Base register for call. */ 1937 base = e->u.s.info; /* Base register for call. */
1953 if (args.k == VCALL) { 1938 if (args.k == VCALL) {
1954 ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1); 1939 ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2);
1955 } else { 1940 } else {
1956 if (args.k != VVOID) 1941 if (args.k != VVOID)
1957 expr_tonextreg(fs, &args); 1942 expr_tonextreg(fs, &args);
1958 ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base); 1943 ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base - LJ_FR2);
1959 } 1944 }
1960 expr_init(e, VCALL, bcemit_INS(fs, ins)); 1945 expr_init(e, VCALL, bcemit_INS(fs, ins));
1961 e->u.s.aux = base; 1946 e->u.s.aux = base;
@@ -1968,33 +1953,34 @@ static void expr_primary(LexState *ls, ExpDesc *v)
1968{ 1953{
1969 FuncState *fs = ls->fs; 1954 FuncState *fs = ls->fs;
1970 /* Parse prefix expression. */ 1955 /* Parse prefix expression. */
1971 if (ls->token == '(') { 1956 if (ls->tok == '(') {
1972 BCLine line = ls->linenumber; 1957 BCLine line = ls->linenumber;
1973 lj_lex_next(ls); 1958 lj_lex_next(ls);
1974 expr(ls, v); 1959 expr(ls, v);
1975 lex_match(ls, ')', '(', line); 1960 lex_match(ls, ')', '(', line);
1976 expr_discharge(ls->fs, v); 1961 expr_discharge(ls->fs, v);
1977 } else if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) { 1962 } else if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
1978 var_lookup(ls, v); 1963 var_lookup(ls, v);
1979 } else { 1964 } else {
1980 err_syntax(ls, LJ_ERR_XSYMBOL); 1965 err_syntax(ls, LJ_ERR_XSYMBOL);
1981 } 1966 }
1982 for (;;) { /* Parse multiple expression suffixes. */ 1967 for (;;) { /* Parse multiple expression suffixes. */
1983 if (ls->token == '.') { 1968 if (ls->tok == '.') {
1984 expr_field(ls, v); 1969 expr_field(ls, v);
1985 } else if (ls->token == '[') { 1970 } else if (ls->tok == '[') {
1986 ExpDesc key; 1971 ExpDesc key;
1987 expr_toanyreg(fs, v); 1972 expr_toanyreg(fs, v);
1988 expr_bracket(ls, &key); 1973 expr_bracket(ls, &key);
1989 expr_index(fs, v, &key); 1974 expr_index(fs, v, &key);
1990 } else if (ls->token == ':') { 1975 } else if (ls->tok == ':') {
1991 ExpDesc key; 1976 ExpDesc key;
1992 lj_lex_next(ls); 1977 lj_lex_next(ls);
1993 expr_str(ls, &key); 1978 expr_str(ls, &key);
1994 bcemit_method(fs, v, &key); 1979 bcemit_method(fs, v, &key);
1995 parse_args(ls, v); 1980 parse_args(ls, v);
1996 } else if (ls->token == '(' || ls->token == TK_string || ls->token == '{') { 1981 } else if (ls->tok == '(' || ls->tok == TK_string || ls->tok == '{') {
1997 expr_tonextreg(fs, v); 1982 expr_tonextreg(fs, v);
1983 if (LJ_FR2) bcreg_reserve(fs, 1);
1998 parse_args(ls, v); 1984 parse_args(ls, v);
1999 } else { 1985 } else {
2000 break; 1986 break;
@@ -2005,14 +1991,14 @@ static void expr_primary(LexState *ls, ExpDesc *v)
2005/* Parse simple expression. */ 1991/* Parse simple expression. */
2006static void expr_simple(LexState *ls, ExpDesc *v) 1992static void expr_simple(LexState *ls, ExpDesc *v)
2007{ 1993{
2008 switch (ls->token) { 1994 switch (ls->tok) {
2009 case TK_number: 1995 case TK_number:
2010 expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokenval)) ? VKCDATA : VKNUM, 0); 1996 expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokval)) ? VKCDATA : VKNUM, 0);
2011 copyTV(ls->L, &v->u.nval, &ls->tokenval); 1997 copyTV(ls->L, &v->u.nval, &ls->tokval);
2012 break; 1998 break;
2013 case TK_string: 1999 case TK_string:
2014 expr_init(v, VKSTR, 0); 2000 expr_init(v, VKSTR, 0);
2015 v->u.sval = strV(&ls->tokenval); 2001 v->u.sval = strV(&ls->tokval);
2016 break; 2002 break;
2017 case TK_nil: 2003 case TK_nil:
2018 expr_init(v, VKNIL, 0); 2004 expr_init(v, VKNIL, 0);
@@ -2100,11 +2086,11 @@ static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);
2100static void expr_unop(LexState *ls, ExpDesc *v) 2086static void expr_unop(LexState *ls, ExpDesc *v)
2101{ 2087{
2102 BCOp op; 2088 BCOp op;
2103 if (ls->token == TK_not) { 2089 if (ls->tok == TK_not) {
2104 op = BC_NOT; 2090 op = BC_NOT;
2105 } else if (ls->token == '-') { 2091 } else if (ls->tok == '-') {
2106 op = BC_UNM; 2092 op = BC_UNM;
2107 } else if (ls->token == '#') { 2093 } else if (ls->tok == '#') {
2108 op = BC_LEN; 2094 op = BC_LEN;
2109 } else { 2095 } else {
2110 expr_simple(ls, v); 2096 expr_simple(ls, v);
@@ -2121,7 +2107,7 @@ static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
2121 BinOpr op; 2107 BinOpr op;
2122 synlevel_begin(ls); 2108 synlevel_begin(ls);
2123 expr_unop(ls, v); 2109 expr_unop(ls, v);
2124 op = token2binop(ls->token); 2110 op = token2binop(ls->tok);
2125 while (op != OPR_NOBINOPR && priority[op].left > limit) { 2111 while (op != OPR_NOBINOPR && priority[op].left > limit) {
2126 ExpDesc v2; 2112 ExpDesc v2;
2127 BinOpr nextop; 2113 BinOpr nextop;
@@ -2310,9 +2296,9 @@ static void parse_func(LexState *ls, BCLine line)
2310 lj_lex_next(ls); /* Skip 'function'. */ 2296 lj_lex_next(ls); /* Skip 'function'. */
2311 /* Parse function name. */ 2297 /* Parse function name. */
2312 var_lookup(ls, &v); 2298 var_lookup(ls, &v);
2313 while (ls->token == '.') /* Multiple dot-separated fields. */ 2299 while (ls->tok == '.') /* Multiple dot-separated fields. */
2314 expr_field(ls, &v); 2300 expr_field(ls, &v);
2315 if (ls->token == ':') { /* Optional colon to signify method call. */ 2301 if (ls->tok == ':') { /* Optional colon to signify method call. */
2316 needself = 1; 2302 needself = 1;
2317 expr_field(ls, &v); 2303 expr_field(ls, &v);
2318 } 2304 }
@@ -2325,9 +2311,9 @@ static void parse_func(LexState *ls, BCLine line)
2325/* -- Control transfer statements ----------------------------------------- */ 2311/* -- Control transfer statements ----------------------------------------- */
2326 2312
2327/* Check for end of block. */ 2313/* Check for end of block. */
2328static int endofblock(LexToken token) 2314static int parse_isend(LexToken tok)
2329{ 2315{
2330 switch (token) { 2316 switch (tok) {
2331 case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof: 2317 case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
2332 return 1; 2318 return 1;
2333 default: 2319 default:
@@ -2342,7 +2328,7 @@ static void parse_return(LexState *ls)
2342 FuncState *fs = ls->fs; 2328 FuncState *fs = ls->fs;
2343 lj_lex_next(ls); /* Skip 'return'. */ 2329 lj_lex_next(ls); /* Skip 'return'. */
2344 fs->flags |= PROTO_HAS_RETURN; 2330 fs->flags |= PROTO_HAS_RETURN;
2345 if (endofblock(ls->token) || ls->token == ';') { /* Bare return. */ 2331 if (parse_isend(ls->tok) || ls->tok == ';') { /* Bare return. */
2346 ins = BCINS_AD(BC_RET0, 0, 1); 2332 ins = BCINS_AD(BC_RET0, 0, 1);
2347 } else { /* Return with one or more values. */ 2333 } else { /* Return with one or more values. */
2348 ExpDesc e; /* Receives the _last_ expression in the list. */ 2334 ExpDesc e; /* Receives the _last_ expression in the list. */
@@ -2408,18 +2394,18 @@ static void parse_label(LexState *ls)
2408 lex_check(ls, TK_label); 2394 lex_check(ls, TK_label);
2409 /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */ 2395 /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */
2410 for (;;) { 2396 for (;;) {
2411 if (ls->token == TK_label) { 2397 if (ls->tok == TK_label) {
2412 synlevel_begin(ls); 2398 synlevel_begin(ls);
2413 parse_label(ls); 2399 parse_label(ls);
2414 synlevel_end(ls); 2400 synlevel_end(ls);
2415 } else if (LJ_52 && ls->token == ';') { 2401 } else if (LJ_52 && ls->tok == ';') {
2416 lj_lex_next(ls); 2402 lj_lex_next(ls);
2417 } else { 2403 } else {
2418 break; 2404 break;
2419 } 2405 }
2420 } 2406 }
2421 /* Trailing label is considered to be outside of scope. */ 2407 /* Trailing label is considered to be outside of scope. */
2422 if (endofblock(ls->token) && ls->token != TK_until) 2408 if (parse_isend(ls->tok) && ls->tok != TK_until)
2423 ls->vstack[idx].slot = fs->bl->nactvar; 2409 ls->vstack[idx].slot = fs->bl->nactvar;
2424 gola_resolve(ls, fs->bl, idx); 2410 gola_resolve(ls, fs->bl, idx);
2425} 2411}
@@ -2575,7 +2561,8 @@ static void parse_for_iter(LexState *ls, GCstr *indexname)
2575 lex_check(ls, TK_in); 2561 lex_check(ls, TK_in);
2576 line = ls->linenumber; 2562 line = ls->linenumber;
2577 assign_adjust(ls, 3, expr_list(ls, &e), &e); 2563 assign_adjust(ls, 3, expr_list(ls, &e), &e);
2578 bcreg_bump(fs, 3); /* The iterator needs another 3 slots (func + 2 args). */ 2564 /* The iterator needs another 3 [4] slots (func [pc] | state ctl). */
2565 bcreg_bump(fs, 3+LJ_FR2);
2579 isnext = (nvars <= 5 && predict_next(ls, fs, exprpc)); 2566 isnext = (nvars <= 5 && predict_next(ls, fs, exprpc));
2580 var_add(ls, 3); /* Hidden control variables. */ 2567 var_add(ls, 3); /* Hidden control variables. */
2581 lex_check(ls, TK_do); 2568 lex_check(ls, TK_do);
@@ -2603,9 +2590,9 @@ static void parse_for(LexState *ls, BCLine line)
2603 fscope_begin(fs, &bl, FSCOPE_LOOP); 2590 fscope_begin(fs, &bl, FSCOPE_LOOP);
2604 lj_lex_next(ls); /* Skip 'for'. */ 2591 lj_lex_next(ls); /* Skip 'for'. */
2605 varname = lex_str(ls); /* Get first variable name. */ 2592 varname = lex_str(ls); /* Get first variable name. */
2606 if (ls->token == '=') 2593 if (ls->tok == '=')
2607 parse_for_num(ls, varname, line); 2594 parse_for_num(ls, varname, line);
2608 else if (ls->token == ',' || ls->token == TK_in) 2595 else if (ls->tok == ',' || ls->tok == TK_in)
2609 parse_for_iter(ls, varname); 2596 parse_for_iter(ls, varname);
2610 else 2597 else
2611 err_syntax(ls, LJ_ERR_XFOR); 2598 err_syntax(ls, LJ_ERR_XFOR);
@@ -2631,12 +2618,12 @@ static void parse_if(LexState *ls, BCLine line)
2631 BCPos flist; 2618 BCPos flist;
2632 BCPos escapelist = NO_JMP; 2619 BCPos escapelist = NO_JMP;
2633 flist = parse_then(ls); 2620 flist = parse_then(ls);
2634 while (ls->token == TK_elseif) { /* Parse multiple 'elseif' blocks. */ 2621 while (ls->tok == TK_elseif) { /* Parse multiple 'elseif' blocks. */
2635 jmp_append(fs, &escapelist, bcemit_jmp(fs)); 2622 jmp_append(fs, &escapelist, bcemit_jmp(fs));
2636 jmp_tohere(fs, flist); 2623 jmp_tohere(fs, flist);
2637 flist = parse_then(ls); 2624 flist = parse_then(ls);
2638 } 2625 }
2639 if (ls->token == TK_else) { /* Parse optional 'else' block. */ 2626 if (ls->tok == TK_else) { /* Parse optional 'else' block. */
2640 jmp_append(fs, &escapelist, bcemit_jmp(fs)); 2627 jmp_append(fs, &escapelist, bcemit_jmp(fs));
2641 jmp_tohere(fs, flist); 2628 jmp_tohere(fs, flist);
2642 lj_lex_next(ls); /* Skip 'else'. */ 2629 lj_lex_next(ls); /* Skip 'else'. */
@@ -2654,7 +2641,7 @@ static void parse_if(LexState *ls, BCLine line)
2654static int parse_stmt(LexState *ls) 2641static int parse_stmt(LexState *ls)
2655{ 2642{
2656 BCLine line = ls->linenumber; 2643 BCLine line = ls->linenumber;
2657 switch (ls->token) { 2644 switch (ls->tok) {
2658 case TK_if: 2645 case TK_if:
2659 parse_if(ls, line); 2646 parse_if(ls, line);
2660 break; 2647 break;
@@ -2713,11 +2700,12 @@ static void parse_chunk(LexState *ls)
2713{ 2700{
2714 int islast = 0; 2701 int islast = 0;
2715 synlevel_begin(ls); 2702 synlevel_begin(ls);
2716 while (!islast && !endofblock(ls->token)) { 2703 while (!islast && !parse_isend(ls->tok)) {
2717 islast = parse_stmt(ls); 2704 islast = parse_stmt(ls);
2718 lex_opt(ls, ';'); 2705 lex_opt(ls, ';');
2719 lua_assert(ls->fs->framesize >= ls->fs->freereg && 2706 lj_assertLS(ls->fs->framesize >= ls->fs->freereg &&
2720 ls->fs->freereg >= ls->fs->nactvar); 2707 ls->fs->freereg >= ls->fs->nactvar,
2708 "bad regalloc");
2721 ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ 2709 ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */
2722 } 2710 }
2723 synlevel_end(ls); 2711 synlevel_end(ls);
@@ -2748,13 +2736,12 @@ GCproto *lj_parse(LexState *ls)
2748 bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */ 2736 bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */
2749 lj_lex_next(ls); /* Read-ahead first token. */ 2737 lj_lex_next(ls); /* Read-ahead first token. */
2750 parse_chunk(ls); 2738 parse_chunk(ls);
2751 if (ls->token != TK_eof) 2739 if (ls->tok != TK_eof)
2752 err_token(ls, TK_eof); 2740 err_token(ls, TK_eof);
2753 pt = fs_finish(ls, ls->linenumber); 2741 pt = fs_finish(ls, ls->linenumber);
2754 L->top--; /* Drop chunkname. */ 2742 L->top--; /* Drop chunkname. */
2755 lua_assert(fs.prev == NULL); 2743 lj_assertL(fs.prev == NULL && ls->fs == NULL, "mismatched frame nesting");
2756 lua_assert(ls->fs == NULL); 2744 lj_assertL(pt->sizeuv == 0, "toplevel proto has upvalues");
2757 lua_assert(pt->sizeuv == 0);
2758 return pt; 2745 return pt;
2759} 2746}
2760 2747
diff --git a/src/lj_prng.c b/src/lj_prng.c
new file mode 100644
index 00000000..a8b8b6de
--- /dev/null
+++ b/src/lj_prng.c
@@ -0,0 +1,244 @@
1/*
2** Pseudo-random number generation.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#define lj_prng_c
7#define LUA_CORE
8
9/* To get the syscall prototype. */
10#if defined(__linux__) && !defined(_GNU_SOURCE)
11#define _GNU_SOURCE
12#endif
13
14#include "lj_def.h"
15#include "lj_arch.h"
16#include "lj_prng.h"
17
18/* -- PRNG step function -------------------------------------------------- */
19
20/* This implements a Tausworthe PRNG with period 2^223. Based on:
21** Tables of maximally-equidistributed combined LFSR generators,
22** Pierre L'Ecuyer, 1991, table 3, 1st entry.
23** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
24**
25** Important note: This PRNG is NOT suitable for cryptographic use!
26**
27** But it works fine for math.random(), which has an API that's not
28** suitable for cryptography, anyway.
29**
30** When used as a securely seeded global PRNG, it substantially raises
31** the difficulty for various attacks on the VM.
32*/
33
/* Update generator i and compute a running xor of all states. */
/*
** Parameters: rs = PRNGState*, z = caller-supplied scratch variable,
** r = running xor accumulator, i = index of the LFSR state word,
** k = effective bit width of LFSR i, q/s = shift amounts from the
** L'Ecuyer table referenced above. Each expansion advances one 64-bit
** LFSR and xors its new state into r.
*/
#define TW223_GEN(rs, z, r, i, k, q, s) \
  z = rs->u[i]; \
  z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
  r ^= z; rs->u[i] = z;

/* Advance all four component LFSRs once; r accumulates the combined output. */
#define TW223_STEP(rs, z, r) \
  TW223_GEN(rs, z, r, 0, 63, 31, 18) \
  TW223_GEN(rs, z, r, 1, 58, 19, 28) \
  TW223_GEN(rs, z, r, 2, 55, 24, 7) \
  TW223_GEN(rs, z, r, 3, 47, 21, 8)
45
46/* PRNG step function with uint64_t result. */
47LJ_NOINLINE uint64_t LJ_FASTCALL lj_prng_u64(PRNGState *rs)
48{
49 uint64_t z, r = 0;
50 TW223_STEP(rs, z, r)
51 return r;
52}
53
54/* PRNG step function with double in uint64_t result. */
55LJ_NOINLINE uint64_t LJ_FASTCALL lj_prng_u64d(PRNGState *rs)
56{
57 uint64_t z, r = 0;
58 TW223_STEP(rs, z, r)
59 /* Returns a double bit pattern in the range 1.0 <= d < 2.0. */
60 return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
61}
62
63/* Condition seed: ensure k[i] MSB of u[i] are non-zero. */
64static LJ_AINLINE void lj_prng_condition(PRNGState *rs)
65{
66 if (rs->u[0] < (1u << 1)) rs->u[0] += (1u << 1);
67 if (rs->u[1] < (1u << 6)) rs->u[1] += (1u << 6);
68 if (rs->u[2] < (1u << 9)) rs->u[2] += (1u << 9);
69 if (rs->u[3] < (1u << 17)) rs->u[3] += (1u << 17);
70}
71
72/* -- PRNG seeding from OS ------------------------------------------------ */
73
74#if LUAJIT_SECURITY_PRNG == 0
75
76/* Nothing to define. */
77
78#elif LJ_TARGET_XBOX360
79
80extern int XNetRandom(void *buf, unsigned int len);
81
82#elif LJ_TARGET_PS3
83
84extern int sys_get_random_number(void *buf, uint64_t len);
85
86#elif LJ_TARGET_PS4 || LJ_TARGET_PSVITA
87
88extern int sceRandomGetRandomNumber(void *buf, size_t len);
89
90#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOXONE
91
92#define WIN32_LEAN_AND_MEAN
93#include <windows.h>
94
95#if LJ_TARGET_UWP || LJ_TARGET_XBOXONE
96/* Must use BCryptGenRandom. */
97#include <bcrypt.h>
98#pragma comment(lib, "bcrypt.lib")
99#else
100/* If you wonder about this mess, then search online for RtlGenRandom. */
101typedef BOOLEAN (WINAPI *PRGR)(void *buf, ULONG len);
102static PRGR libfunc_rgr;
103#endif
104
105#elif LJ_TARGET_POSIX
106
107#if LJ_TARGET_LINUX
108/* Avoid a dependency on glibc 2.25+ and use the getrandom syscall instead. */
109#include <sys/syscall.h>
110#else
111
112#if LJ_TARGET_OSX
113#include <Availability.h>
114#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200 || \
115 __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000
116#define LJ_TARGET_HAS_GETENTROPY 1
117#endif
118#elif LJ_TARGET_BSD || LJ_TARGET_SOLARIS || LJ_TARGET_CYGWIN
119#define LJ_TARGET_HAS_GETENTROPY 1
120#endif
121
#if LJ_TARGET_HAS_GETENTROPY
/* Fix: no semicolon directly after the prototype. The conditional
** __attribute__((weak)) on ELF targets must attach to this declaration;
** the terminating semicolon comes after the attribute so the symbol can
** be tested for NULL at runtime when libc lacks getentropy.
*/
extern int getentropy(void *buf, size_t len)
#ifdef __ELF__
  __attribute__((weak))
#endif
;
#endif
129
130#endif
131
132/* For the /dev/urandom fallback. */
133#include <fcntl.h>
134#include <unistd.h>
135
136#endif
137
138#if LUAJIT_SECURITY_PRNG == 0
139
140/* If you really don't care about security, then define
141** LUAJIT_SECURITY_PRNG=0. This yields a predictable seed
142** and provides NO SECURITY against various attacks on the VM.
143**
144** BTW: This is NOT the way to get predictable table iteration,
145** predictable trace generation, predictable bytecode generation, etc.
146*/
/* "Secure" seeding stub for LUAJIT_SECURITY_PRNG=0 builds: fully
** deterministic, provides no security (see the warning above).
** Always reports success.
*/
int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs)
{
  lj_prng_seed_fixed(rs);  /* The fixed seed is already conditioned. */
  return 1;
}
152
153#else
154
155/* Securely seed PRNG from system entropy. Returns 0 on failure. */
156int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs)
157{
158#if LJ_TARGET_XBOX360
159
160 if (XNetRandom(rs->u, (unsigned int)sizeof(rs->u)) == 0)
161 goto ok;
162
163#elif LJ_TARGET_PS3
164
165 if (sys_get_random_number(rs->u, sizeof(rs->u)) == 0)
166 goto ok;
167
168#elif LJ_TARGET_PS4 || LJ_TARGET_PSVITA
169
170 if (sceRandomGetRandomNumber(rs->u, sizeof(rs->u) == 0)
171 goto ok;
172
173#elif LJ_TARGET_UWP || LJ_TARGET_XBOXONE
174
175 if (BCryptGenRandom(NULL, (PUCHAR)(rs->u), (ULONG)sizeof(rs->u),
176 BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0)
177 goto ok;
178
179#elif LJ_TARGET_WINDOWS
180
181 /* Keep the library loaded in case multiple VMs are started. */
182 if (!libfunc_rgr) {
183 HMODULE lib = LJ_WIN_LOADLIBA("advapi32.dll");
184 if (!lib) return 0;
185 libfunc_rgr = (PRGR)GetProcAddress(lib, "SystemFunction036");
186 if (!libfunc_rgr) return 0;
187 }
188 if (libfunc_rgr(rs->u, (ULONG)sizeof(rs->u)))
189 goto ok;
190
191#elif LJ_TARGET_POSIX
192
193#if LJ_TARGET_LINUX && defined(SYS_getrandom)
194
195 if (syscall(SYS_getrandom, rs->u, sizeof(rs->u), 0) == (long)sizeof(rs->u))
196 goto ok;
197
198#elif LJ_TARGET_HAS_GETENTROPY
199
200#ifdef __ELF__
201 if (getentropy && getentropy(rs->u, sizeof(rs->u)) == 0)
202 goto ok;
203#else
204 if (getentropy(rs->u, sizeof(rs->u)) == 0)
205 goto ok;
206#endif
207
208#endif
209
210 /* Fallback to /dev/urandom. This may fail if the device is not
211 ** existent or accessible in a chroot or container, or if the process
212 ** or the OS ran out of file descriptors.
213 */
214 {
215 int fd = open("/dev/urandom", O_RDONLY|O_CLOEXEC);
216 if (fd != -1) {
217 ssize_t n = read(fd, rs->u, sizeof(rs->u));
218 (void)close(fd);
219 if (n == (ssize_t)sizeof(rs->u))
220 goto ok;
221 }
222 }
223
224#else
225
226 /* Add an elif above for your OS with a secure PRNG seed.
227 ** Note that fiddling around with rand(), getpid(), time() or coercing
228 ** ASLR to yield a few bits of randomness is not helpful.
229 ** If you don't want any security, then don't pretend you have any
230 ** and simply define LUAJIT_SECURITY_PRNG=0 for the build.
231 */
232#error "Missing secure PRNG seed for this OS"
233
234#endif
235 return 0; /* Fail. */
236
237ok:
238 lj_prng_condition(rs);
239 (void)lj_prng_u64(rs);
240 return 1; /* Success. */
241}
242
243#endif
244
diff --git a/src/lj_prng.h b/src/lj_prng.h
new file mode 100644
index 00000000..40c34a71
--- /dev/null
+++ b/src/lj_prng.h
@@ -0,0 +1,24 @@
1/*
2** Pseudo-random number generation.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#ifndef _LJ_PRNG_H
7#define _LJ_PRNG_H
8
9#include "lj_def.h"
10
11LJ_FUNC int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs);
12LJ_FUNC uint64_t LJ_FASTCALL lj_prng_u64(PRNGState *rs);
13LJ_FUNC uint64_t LJ_FASTCALL lj_prng_u64d(PRNGState *rs);
14
/* This is just the precomputed result of lib_math.c:random_seed(rs, 0.0). */
/* Deterministic fallback seed. The values are already conditioned (per the
** comment in lj_prng.c's LUAJIT_SECURITY_PRNG=0 stub), so the generator
** may be stepped immediately after calling this.
*/
static LJ_AINLINE void lj_prng_seed_fixed(PRNGState *rs)
{
  rs->u[0] = U64x(a0d27757,0a345b8c);
  rs->u[1] = U64x(764a296c,5d4aa64f);
  rs->u[2] = U64x(51220704,070adeaa);
  rs->u[3] = U64x(2a2717b5,a7b7b927);
}
23
24#endif
diff --git a/src/lj_profile.c b/src/lj_profile.c
new file mode 100644
index 00000000..2fe40858
--- /dev/null
+++ b/src/lj_profile.c
@@ -0,0 +1,368 @@
1/*
2** Low-overhead profiling.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#define lj_profile_c
7#define LUA_CORE
8
9#include "lj_obj.h"
10
11#if LJ_HASPROFILE
12
13#include "lj_buf.h"
14#include "lj_frame.h"
15#include "lj_debug.h"
16#include "lj_dispatch.h"
17#if LJ_HASJIT
18#include "lj_jit.h"
19#include "lj_trace.h"
20#endif
21#include "lj_profile.h"
22
23#include "luajit.h"
24
25#if LJ_PROFILE_SIGPROF
26
27#include <sys/time.h>
28#include <signal.h>
29#define profile_lock(ps) UNUSED(ps)
30#define profile_unlock(ps) UNUSED(ps)
31
32#elif LJ_PROFILE_PTHREAD
33
34#include <pthread.h>
35#include <time.h>
36#if LJ_TARGET_PS3
37#include <sys/timer.h>
38#endif
39#define profile_lock(ps) pthread_mutex_lock(&ps->lock)
40#define profile_unlock(ps) pthread_mutex_unlock(&ps->lock)
41
42#elif LJ_PROFILE_WTHREAD
43
44#define WIN32_LEAN_AND_MEAN
45#if LJ_TARGET_XBOX360
46#include <xtl.h>
47#include <xbox.h>
48#else
49#include <windows.h>
50#endif
51typedef unsigned int (WINAPI *WMM_TPFUNC)(unsigned int);
52#define profile_lock(ps) EnterCriticalSection(&ps->lock)
53#define profile_unlock(ps) LeaveCriticalSection(&ps->lock)
54
55#endif
56
57/* Profiler state. */
58typedef struct ProfileState {
59 global_State *g; /* VM state that started the profiler. */
60 luaJIT_profile_callback cb; /* Profiler callback. */
61 void *data; /* Profiler callback data. */
62 SBuf sb; /* String buffer for stack dumps. */
63 int interval; /* Sample interval in milliseconds. */
64 int samples; /* Number of samples for next callback. */
65 int vmstate; /* VM state when profile timer triggered. */
66#if LJ_PROFILE_SIGPROF
67 struct sigaction oldsa; /* Previous SIGPROF state. */
68#elif LJ_PROFILE_PTHREAD
69 pthread_mutex_t lock; /* g->hookmask update lock. */
70 pthread_t thread; /* Timer thread. */
71 int abort; /* Abort timer thread. */
72#elif LJ_PROFILE_WTHREAD
73#if LJ_TARGET_WINDOWS
74 HINSTANCE wmm; /* WinMM library handle. */
75 WMM_TPFUNC wmm_tbp; /* WinMM timeBeginPeriod function. */
76 WMM_TPFUNC wmm_tep; /* WinMM timeEndPeriod function. */
77#endif
78 CRITICAL_SECTION lock; /* g->hookmask update lock. */
79 HANDLE thread; /* Timer thread. */
80 int abort; /* Abort timer thread. */
81#endif
82} ProfileState;
83
84/* Sadly, we have to use a static profiler state.
85**
86** The SIGPROF variant needs a static pointer to the global state, anyway.
87** And it would be hard to extend for multiple threads. You can still use
88** multiple VMs in multiple threads, but only profile one at a time.
89*/
90static ProfileState profile_state;
91
92/* Default sample interval in milliseconds. */
93#define LJ_PROFILE_INTERVAL_DEFAULT 10
94
95/* -- Profiler/hook interaction ------------------------------------------- */
96
97#if !LJ_PROFILE_SIGPROF
98void LJ_FASTCALL lj_profile_hook_enter(global_State *g)
99{
100 ProfileState *ps = &profile_state;
101 if (ps->g) {
102 profile_lock(ps);
103 hook_enter(g);
104 profile_unlock(ps);
105 } else {
106 hook_enter(g);
107 }
108}
109
110void LJ_FASTCALL lj_profile_hook_leave(global_State *g)
111{
112 ProfileState *ps = &profile_state;
113 if (ps->g) {
114 profile_lock(ps);
115 hook_leave(g);
116 profile_unlock(ps);
117 } else {
118 hook_leave(g);
119 }
120}
121#endif
122
123/* -- Profile callbacks --------------------------------------------------- */
124
/* Callback from profile hook (HOOK_PROFILE already cleared). */
/* Invoked via the hook mechanism (not from the timer/signal context), so
** calling back into user code here is permissible.
*/
void LJ_FASTCALL lj_profile_interpreter(lua_State *L)
{
  ProfileState *ps = &profile_state;
  global_State *g = G(L);
  uint8_t mask;
  profile_lock(ps);
  mask = (g->hookmask & ~HOOK_PROFILE);  /* Hook bits to restore afterwards. */
  if (!(mask & HOOK_VMEVENT)) {
    int samples = ps->samples;
    ps->samples = 0;  /* Consume the accumulated sample count. */
    g->hookmask = HOOK_VMEVENT;  /* Suppress hook dispatch during callback. */
    lj_dispatch_update(g);
    profile_unlock(ps);  /* Drop the lock while user code runs. */
    ps->cb(ps->data, L, samples, ps->vmstate);  /* Invoke user callback. */
    profile_lock(ps);
    /* Preserve a HOOK_PROFILE set by the timer while the callback ran. */
    mask |= (g->hookmask & HOOK_PROFILE);
  }
  g->hookmask = mask;
  lj_dispatch_update(g);
  profile_unlock(ps);
}
147
148/* Trigger profile hook. Asynchronous call from OS-specific profile timer. */
149static void profile_trigger(ProfileState *ps)
150{
151 global_State *g = ps->g;
152 uint8_t mask;
153 profile_lock(ps);
154 ps->samples++; /* Always increment number of samples. */
155 mask = g->hookmask;
156 if (!(mask & (HOOK_PROFILE|HOOK_VMEVENT|HOOK_GC))) { /* Set profile hook. */
157 int st = g->vmstate;
158 ps->vmstate = st >= 0 ? 'N' :
159 st == ~LJ_VMST_INTERP ? 'I' :
160 st == ~LJ_VMST_C ? 'C' :
161 st == ~LJ_VMST_GC ? 'G' : 'J';
162 g->hookmask = (mask | HOOK_PROFILE);
163 lj_dispatch_update(g);
164 }
165 profile_unlock(ps);
166}
167
168/* -- OS-specific profile timer handling ---------------------------------- */
169
170#if LJ_PROFILE_SIGPROF
171
/* SIGPROF handler. */
/* Runs in async-signal context: keeps work minimal and delegates the
** hookmask update to profile_trigger().
*/
static void profile_signal(int sig)
{
  UNUSED(sig);
  profile_trigger(&profile_state);
}
178
179/* Start profiling timer. */
180static void profile_timer_start(ProfileState *ps)
181{
182 int interval = ps->interval;
183 struct itimerval tm;
184 struct sigaction sa;
185 tm.it_value.tv_sec = tm.it_interval.tv_sec = interval / 1000;
186 tm.it_value.tv_usec = tm.it_interval.tv_usec = (interval % 1000) * 1000;
187 setitimer(ITIMER_PROF, &tm, NULL);
188 sa.sa_flags = SA_RESTART;
189 sa.sa_handler = profile_signal;
190 sigemptyset(&sa.sa_mask);
191 sigaction(SIGPROF, &sa, &ps->oldsa);
192}
193
194/* Stop profiling timer. */
195static void profile_timer_stop(ProfileState *ps)
196{
197 struct itimerval tm;
198 tm.it_value.tv_sec = tm.it_interval.tv_sec = 0;
199 tm.it_value.tv_usec = tm.it_interval.tv_usec = 0;
200 setitimer(ITIMER_PROF, &tm, NULL);
201 sigaction(SIGPROF, &ps->oldsa, NULL);
202}
203
204#elif LJ_PROFILE_PTHREAD
205
206/* POSIX timer thread. */
207static void *profile_thread(ProfileState *ps)
208{
209 int interval = ps->interval;
210#if !LJ_TARGET_PS3
211 struct timespec ts;
212 ts.tv_sec = interval / 1000;
213 ts.tv_nsec = (interval % 1000) * 1000000;
214#endif
215 while (1) {
216#if LJ_TARGET_PS3
217 sys_timer_usleep(interval * 1000);
218#else
219 nanosleep(&ts, NULL);
220#endif
221 if (ps->abort) break;
222 profile_trigger(ps);
223 }
224 return NULL;
225}
226
/* Start profiling timer thread. */
static void profile_timer_start(ProfileState *ps)
{
  pthread_mutex_init(&ps->lock, 0);  /* Protects g->hookmask updates. */
  ps->abort = 0;
  /* NOTE(review): pthread_create's result is unchecked -- on failure the
  ** profiler would silently never fire; confirm this is acceptable.
  */
  pthread_create(&ps->thread, NULL, (void *(*)(void *))profile_thread, ps);
}
234
/* Stop profiling timer thread. */
static void profile_timer_stop(ProfileState *ps)
{
  ps->abort = 1;  /* Thread polls this flag after each sleep interval. */
  pthread_join(ps->thread, NULL);  /* May block for up to one interval. */
  pthread_mutex_destroy(&ps->lock);
}
242
243#elif LJ_PROFILE_WTHREAD
244
245/* Windows timer thread. */
246static DWORD WINAPI profile_thread(void *psx)
247{
248 ProfileState *ps = (ProfileState *)psx;
249 int interval = ps->interval;
250#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
251 ps->wmm_tbp(interval);
252#endif
253 while (1) {
254 Sleep(interval);
255 if (ps->abort) break;
256 profile_trigger(ps);
257 }
258#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
259 ps->wmm_tep(interval);
260#endif
261 return 0;
262}
263
/* Start profiling timer thread. */
static void profile_timer_start(ProfileState *ps)
{
#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
  if (!ps->wmm) {  /* Load WinMM library on-demand. */
    ps->wmm = LJ_WIN_LOADLIBA("winmm.dll");
    if (ps->wmm) {
      ps->wmm_tbp = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeBeginPeriod");
      ps->wmm_tep = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeEndPeriod");
      if (!ps->wmm_tbp || !ps->wmm_tep) {
	/* NOTE(review): the handle is dropped without FreeLibrary and
	** ps->wmm is reset, so the load is retried on every start --
	** confirm this leak/retry behavior is intended.
	*/
	ps->wmm = NULL;
	return;  /* Don't start without timer resolution control. */
      }
    }
  }
#endif
  InitializeCriticalSection(&ps->lock);  /* Protects g->hookmask updates. */
  ps->abort = 0;
  ps->thread = CreateThread(NULL, 0, profile_thread, ps, 0, NULL);
}
284
/* Stop profiling timer thread. */
static void profile_timer_stop(ProfileState *ps)
{
  ps->abort = 1;  /* Thread polls this flag after each Sleep(). */
  WaitForSingleObject(ps->thread, INFINITE);  /* May block one interval. */
  DeleteCriticalSection(&ps->lock);
}
292
293#endif
294
295/* -- Public profiling API ------------------------------------------------ */
296
/* Start profiling. */
/* Mode string: 'i<number>' sets the sample interval in milliseconds
** (default LJ_PROFILE_INTERVAL_DEFAULT, clamped to >= 1); 'l'/'f' set a
** JIT prof_mode and flush all traces; unknown characters are ignored.
*/
LUA_API void luaJIT_profile_start(lua_State *L, const char *mode,
				  luaJIT_profile_callback cb, void *data)
{
  ProfileState *ps = &profile_state;
  int interval = LJ_PROFILE_INTERVAL_DEFAULT;
  while (*mode) {  /* Parse the mode string. */
    int m = *mode++;
    switch (m) {
    case 'i':  /* Sample interval in milliseconds. */
      interval = 0;
      while (*mode >= '0' && *mode <= '9')
	interval = interval * 10 + (*mode++ - '0');
      if (interval <= 0) interval = 1;  /* Clamp to at least 1 ms. */
      break;
#if LJ_HASJIT
    case 'l': case 'f':  /* Changing prof_mode invalidates existing traces. */
      L2J(L)->prof_mode = m;
      lj_trace_flushall(L);
      break;
#endif
    default:  /* Ignore unknown mode chars. */
      break;
    }
  }
  if (ps->g) {  /* Profiler already running? */
    luaJIT_profile_stop(L);
    if (ps->g) return;  /* Profiler in use by another VM. */
  }
  ps->g = G(L);
  ps->interval = interval;
  ps->cb = cb;
  ps->data = data;
  ps->samples = 0;
  lj_buf_init(L, &ps->sb);  /* Buffer reused by luaJIT_profile_dumpstack. */
  profile_timer_start(ps);
}
334
/* Stop profiling. */
LUA_API void luaJIT_profile_stop(lua_State *L)
{
  ProfileState *ps = &profile_state;
  global_State *g = ps->g;
  if (G(L) == g) {  /* Only stop profiler if started by this VM. */
    profile_timer_stop(ps);  /* Timer is fully stopped before teardown. */
    g->hookmask &= ~HOOK_PROFILE;
    lj_dispatch_update(g);
#if LJ_HASJIT
    G2J(g)->prof_mode = 0;  /* Drop instrumentation mode and flush traces. */
    lj_trace_flushall(L);
#endif
    lj_buf_free(g, &ps->sb);
    setmref(ps->sb.b, NULL);  /* Clear now-dangling buffer pointers. */
    setmref(ps->sb.e, NULL);
    ps->g = NULL;  /* Mark the (global) profiler as free for other VMs. */
  }
}
354
/* Return a compact stack dump. */
/* The returned pointer refers to the profiler's internal buffer: it is only
** valid until the next dump (buffer is reset below) or until profiling
** stops (buffer is freed in luaJIT_profile_stop).
*/
LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt,
					     int depth, size_t *len)
{
  ProfileState *ps = &profile_state;
  SBuf *sb = &ps->sb;
  setsbufL(sb, L);
  lj_buf_reset(sb);  /* Reuse the buffer; previous dump is overwritten. */
  lj_debug_dumpstack(L, sb, fmt, depth);
  *len = (size_t)sbuflen(sb);
  return sbufB(sb);
}
367
368#endif
diff --git a/src/lj_profile.h b/src/lj_profile.h
new file mode 100644
index 00000000..db69eb9e
--- /dev/null
+++ b/src/lj_profile.h
@@ -0,0 +1,21 @@
1/*
2** Low-overhead profiling.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#ifndef _LJ_PROFILE_H
7#define _LJ_PROFILE_H
8
9#include "lj_obj.h"
10
11#if LJ_HASPROFILE
12
13LJ_FUNC void LJ_FASTCALL lj_profile_interpreter(lua_State *L);
14#if !LJ_PROFILE_SIGPROF
15LJ_FUNC void LJ_FASTCALL lj_profile_hook_enter(global_State *g);
16LJ_FUNC void LJ_FASTCALL lj_profile_hook_leave(global_State *g);
17#endif
18
19#endif
20
21#endif
diff --git a/src/lj_record.c b/src/lj_record.c
index 69822f54..df428818 100644
--- a/src/lj_record.c
+++ b/src/lj_record.c
@@ -20,6 +20,9 @@
20#endif 20#endif
21#include "lj_bc.h" 21#include "lj_bc.h"
22#include "lj_ff.h" 22#include "lj_ff.h"
23#if LJ_HASPROFILE
24#include "lj_debug.h"
25#endif
23#include "lj_ir.h" 26#include "lj_ir.h"
24#include "lj_jit.h" 27#include "lj_jit.h"
25#include "lj_ircall.h" 28#include "lj_ircall.h"
@@ -30,6 +33,7 @@
30#include "lj_snap.h" 33#include "lj_snap.h"
31#include "lj_dispatch.h" 34#include "lj_dispatch.h"
32#include "lj_vm.h" 35#include "lj_vm.h"
36#include "lj_prng.h"
33 37
34/* Some local macros to save typing. Undef'd at the end. */ 38/* Some local macros to save typing. Undef'd at the end. */
35#define IR(ref) (&J->cur.ir[(ref)]) 39#define IR(ref) (&J->cur.ir[(ref)])
@@ -47,31 +51,52 @@
47static void rec_check_ir(jit_State *J) 51static void rec_check_ir(jit_State *J)
48{ 52{
49 IRRef i, nins = J->cur.nins, nk = J->cur.nk; 53 IRRef i, nins = J->cur.nins, nk = J->cur.nk;
50 lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536); 54 lj_assertJ(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536,
51 for (i = nins-1; i >= nk; i--) { 55 "inconsistent IR layout");
56 for (i = nk; i < nins; i++) {
52 IRIns *ir = IR(i); 57 IRIns *ir = IR(i);
53 uint32_t mode = lj_ir_mode[ir->o]; 58 uint32_t mode = lj_ir_mode[ir->o];
54 IRRef op1 = ir->op1; 59 IRRef op1 = ir->op1;
55 IRRef op2 = ir->op2; 60 IRRef op2 = ir->op2;
61 const char *err = NULL;
56 switch (irm_op1(mode)) { 62 switch (irm_op1(mode)) {
57 case IRMnone: lua_assert(op1 == 0); break; 63 case IRMnone:
58 case IRMref: lua_assert(op1 >= nk); 64 if (op1 != 0) err = "IRMnone op1 used";
59 lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break; 65 break;
66 case IRMref:
67 if (op1 < nk || (i >= REF_BIAS ? op1 >= i : op1 <= i))
68 err = "IRMref op1 out of range";
69 break;
60 case IRMlit: break; 70 case IRMlit: break;
61 case IRMcst: lua_assert(i < REF_BIAS); continue; 71 case IRMcst:
72 if (i >= REF_BIAS) { err = "constant in IR range"; break; }
73 if (irt_is64(ir->t) && ir->o != IR_KNULL)
74 i++;
75 continue;
62 } 76 }
63 switch (irm_op2(mode)) { 77 switch (irm_op2(mode)) {
64 case IRMnone: lua_assert(op2 == 0); break; 78 case IRMnone:
65 case IRMref: lua_assert(op2 >= nk); 79 if (op2) err = "IRMnone op2 used";
66 lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break; 80 break;
81 case IRMref:
82 if (op2 < nk || (i >= REF_BIAS ? op2 >= i : op2 <= i))
83 err = "IRMref op2 out of range";
84 break;
67 case IRMlit: break; 85 case IRMlit: break;
68 case IRMcst: lua_assert(0); break; 86 case IRMcst: err = "IRMcst op2"; break;
69 } 87 }
70 if (ir->prev) { 88 if (!err && ir->prev) {
71 lua_assert(ir->prev >= nk); 89 if (ir->prev < nk || (i >= REF_BIAS ? ir->prev >= i : ir->prev <= i))
72 lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i); 90 err = "chain out of range";
73 lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o); 91 else if (ir->o != IR_NOP && IR(ir->prev)->o != ir->o)
92 err = "chain to different op";
74 } 93 }
94 lj_assertJ(!err, "bad IR %04d op %d(%04d,%04d): %s",
95 i-REF_BIAS,
96 ir->o,
97 irm_op1(mode) == IRMref ? op1-REF_BIAS : op1,
98 irm_op2(mode) == IRMref ? op2-REF_BIAS : op2,
99 err);
75 } 100 }
76} 101}
77 102
@@ -81,48 +106,76 @@ static void rec_check_slots(jit_State *J)
81 BCReg s, nslots = J->baseslot + J->maxslot; 106 BCReg s, nslots = J->baseslot + J->maxslot;
82 int32_t depth = 0; 107 int32_t depth = 0;
83 cTValue *base = J->L->base - J->baseslot; 108 cTValue *base = J->L->base - J->baseslot;
84 lua_assert(J->baseslot >= 1); 109 lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot");
85 lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME)); 110 lj_assertJ(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME),
86 lua_assert(nslots <= LJ_MAX_JSLOTS); 111 "baseslot does not point to frame");
112 lj_assertJ(nslots <= LJ_MAX_JSLOTS, "slot overflow");
87 for (s = 0; s < nslots; s++) { 113 for (s = 0; s < nslots; s++) {
88 TRef tr = J->slot[s]; 114 TRef tr = J->slot[s];
89 if (tr) { 115 if (tr) {
90 cTValue *tv = &base[s]; 116 cTValue *tv = &base[s];
91 IRRef ref = tref_ref(tr); 117 IRRef ref = tref_ref(tr);
92 IRIns *ir; 118 IRIns *ir = NULL; /* Silence compiler. */
93 lua_assert(ref >= J->cur.nk && ref < J->cur.nins); 119 if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) {
94 ir = IR(ref); 120 lj_assertJ(ref >= J->cur.nk && ref < J->cur.nins,
95 lua_assert(irt_t(ir->t) == tref_t(tr)); 121 "slot %d ref %04d out of range", s, ref - REF_BIAS);
122 ir = IR(ref);
123 lj_assertJ(irt_t(ir->t) == tref_t(tr), "slot %d IR type mismatch", s);
124 }
96 if (s == 0) { 125 if (s == 0) {
97 lua_assert(tref_isfunc(tr)); 126 lj_assertJ(tref_isfunc(tr), "frame slot 0 is not a function");
127#if LJ_FR2
128 } else if (s == 1) {
129 lj_assertJ((tr & ~TREF_FRAME) == 0, "bad frame slot 1");
130#endif
98 } else if ((tr & TREF_FRAME)) { 131 } else if ((tr & TREF_FRAME)) {
99 GCfunc *fn = gco2func(frame_gc(tv)); 132 GCfunc *fn = gco2func(frame_gc(tv));
100 BCReg delta = (BCReg)(tv - frame_prev(tv)); 133 BCReg delta = (BCReg)(tv - frame_prev(tv));
101 lua_assert(tref_isfunc(tr)); 134#if LJ_FR2
102 if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir)); 135 lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
103 lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta)); 136 "frame slot %d PC mismatch", s);
137 tr = J->slot[s-1];
138 ir = IR(tref_ref(tr));
139#endif
140 lj_assertJ(tref_isfunc(tr),
141 "frame slot %d is not a function", s-LJ_FR2);
142 lj_assertJ(!tref_isk(tr) || fn == ir_kfunc(ir),
143 "frame slot %d function mismatch", s-LJ_FR2);
144 lj_assertJ(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME)
145 : (s == delta + LJ_FR2),
146 "frame slot %d broken chain", s-LJ_FR2);
104 depth++; 147 depth++;
105 } else if ((tr & TREF_CONT)) { 148 } else if ((tr & TREF_CONT)) {
106 lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void)); 149#if LJ_FR2
107 lua_assert((J->slot[s+1] & TREF_FRAME)); 150 lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
151 "cont slot %d continuation mismatch", s);
152#else
153 lj_assertJ(ir_kptr(ir) == gcrefp(tv->gcr, void),
154 "cont slot %d continuation mismatch", s);
155#endif
156 lj_assertJ((J->slot[s+1+LJ_FR2] & TREF_FRAME),
157 "cont slot %d not followed by frame", s);
108 depth++; 158 depth++;
109 } else { 159 } else {
110 if (tvisnumber(tv)) 160 /* Number repr. may differ, but other types must be the same. */
111 lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */ 161 lj_assertJ(tvisnumber(tv) ? tref_isnumber(tr) :
112 else 162 itype2irt(tv) == tref_type(tr),
113 lua_assert(itype2irt(tv) == tref_type(tr)); 163 "slot %d type mismatch: stack type %d vs IR type %d",
164 s, itypemap(tv), tref_type(tr));
114 if (tref_isk(tr)) { /* Compare constants. */ 165 if (tref_isk(tr)) { /* Compare constants. */
115 TValue tvk; 166 TValue tvk;
116 lj_ir_kvalue(J->L, &tvk, ir); 167 lj_ir_kvalue(J->L, &tvk, ir);
117 if (!(tvisnum(&tvk) && tvisnan(&tvk))) 168 lj_assertJ((tvisnum(&tvk) && tvisnan(&tvk)) ?
118 lua_assert(lj_obj_equal(tv, &tvk)); 169 (tvisnum(tv) && tvisnan(tv)) :
119 else 170 lj_obj_equal(tv, &tvk),
120 lua_assert(tvisnum(tv) && tvisnan(tv)); 171 "slot %d const mismatch: stack %016llx vs IR %016llx",
172 s, tv->u64, tvk.u64);
121 } 173 }
122 } 174 }
123 } 175 }
124 } 176 }
125 lua_assert(J->framedepth == depth); 177 lj_assertJ(J->framedepth == depth,
178 "frame depth mismatch %d vs %d", J->framedepth, depth);
126} 179}
127#endif 180#endif
128 181
@@ -156,10 +209,10 @@ static TRef sload(jit_State *J, int32_t slot)
156/* Get TRef for current function. */ 209/* Get TRef for current function. */
157static TRef getcurrf(jit_State *J) 210static TRef getcurrf(jit_State *J)
158{ 211{
159 if (J->base[-1]) 212 if (J->base[-1-LJ_FR2])
160 return J->base[-1]; 213 return J->base[-1-LJ_FR2];
161 lua_assert(J->baseslot == 1); 214 lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot");
162 return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY); 215 return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY);
163} 216}
164 217
165/* Compare for raw object equality. 218/* Compare for raw object equality.
@@ -230,8 +283,12 @@ static void canonicalize_slots(jit_State *J)
230} 283}
231 284
232/* Stop recording. */ 285/* Stop recording. */
233static void rec_stop(jit_State *J, TraceLink linktype, TraceNo lnk) 286void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
234{ 287{
288#ifdef LUAJIT_ENABLE_TABLE_BUMP
289 if (J->retryrec)
290 lj_trace_err(J, LJ_TRERR_RETRY);
291#endif
235 lj_trace_end(J); 292 lj_trace_end(J);
236 J->cur.linktype = (uint8_t)linktype; 293 J->cur.linktype = (uint8_t)linktype;
237 J->cur.link = (uint16_t)lnk; 294 J->cur.link = (uint16_t)lnk;
@@ -399,7 +456,8 @@ static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
399 TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); 456 TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
400 TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); 457 TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
401 int tc, dir = rec_for_direction(&tv[FORL_STEP]); 458 int tc, dir = rec_for_direction(&tv[FORL_STEP]);
402 lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI); 459 lj_assertJ(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI,
460 "bad bytecode %d instead of FORI/JFORI", bc_op(*fori));
403 scev->t.irt = t; 461 scev->t.irt = t;
404 scev->dir = dir; 462 scev->dir = dir;
405 scev->stop = tref_ref(stop); 463 scev->stop = tref_ref(stop);
@@ -455,7 +513,7 @@ static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
455 IRT_NUM; 513 IRT_NUM;
456 for (i = FORL_IDX; i <= FORL_STEP; i++) { 514 for (i = FORL_IDX; i <= FORL_STEP; i++) {
457 if (!tr[i]) sload(J, ra+i); 515 if (!tr[i]) sload(J, ra+i);
458 lua_assert(tref_isnumber_str(tr[i])); 516 lj_assertJ(tref_isnumber_str(tr[i]), "bad FORI argument type");
459 if (tref_isstr(tr[i])) 517 if (tref_isstr(tr[i]))
460 tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); 518 tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
461 if (t == IRT_INT) { 519 if (t == IRT_INT) {
@@ -499,8 +557,7 @@ static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
499static LoopEvent rec_iterl(jit_State *J, const BCIns iterins) 557static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
500{ 558{
501 BCReg ra = bc_a(iterins); 559 BCReg ra = bc_a(iterins);
502 lua_assert(J->base[ra] != 0); 560 if (!tref_isnil(getslot(J, ra))) { /* Looping back? */
503 if (!tref_isnil(J->base[ra])) { /* Looping back? */
504 J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */ 561 J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */
505 J->maxslot = ra-1+bc_b(J->pc[-1]); 562 J->maxslot = ra-1+bc_b(J->pc[-1]);
506 J->pc += bc_j(iterins)+1; 563 J->pc += bc_j(iterins)+1;
@@ -538,12 +595,12 @@ static int innerloopleft(jit_State *J, const BCIns *pc)
538/* Handle the case when an interpreted loop op is hit. */ 595/* Handle the case when an interpreted loop op is hit. */
539static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev) 596static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
540{ 597{
541 if (J->parent == 0) { 598 if (J->parent == 0 && J->exitno == 0) {
542 if (pc == J->startpc && J->framedepth + J->retdepth == 0) { 599 if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
543 /* Same loop? */ 600 /* Same loop? */
544 if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */ 601 if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */
545 lj_trace_err(J, LJ_TRERR_LLEAVE); 602 lj_trace_err(J, LJ_TRERR_LLEAVE);
546 rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping root trace. */ 603 lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping trace. */
547 } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */ 604 } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */
548 /* It's usually better to abort here and wait until the inner loop 605 /* It's usually better to abort here and wait until the inner loop
549 ** is traced. But if the inner loop repeatedly didn't loop back, 606 ** is traced. But if the inner loop repeatedly didn't loop back,
@@ -568,18 +625,65 @@ static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
568/* Handle the case when an already compiled loop op is hit. */ 625/* Handle the case when an already compiled loop op is hit. */
569static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev) 626static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
570{ 627{
571 if (J->parent == 0) { /* Root trace hit an inner loop. */ 628 if (J->parent == 0 && J->exitno == 0) { /* Root trace hit an inner loop. */
572 /* Better let the inner loop spawn a side trace back here. */ 629 /* Better let the inner loop spawn a side trace back here. */
573 lj_trace_err(J, LJ_TRERR_LINNER); 630 lj_trace_err(J, LJ_TRERR_LINNER);
574 } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */ 631 } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */
575 J->instunroll = 0; /* Cannot continue across a compiled loop op. */ 632 J->instunroll = 0; /* Cannot continue across a compiled loop op. */
576 if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) 633 if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
577 rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form an extra loop. */ 634 lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form extra loop. */
578 else 635 else
579 rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */ 636 lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */
580 } /* Side trace continues across a loop that's left or not entered. */ 637 } /* Side trace continues across a loop that's left or not entered. */
581} 638}
582 639
640/* -- Record profiler hook checks ----------------------------------------- */
641
642#if LJ_HASPROFILE
643
644/* Need to insert profiler hook check? */
645static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc)
646{
647 GCproto *ppt;
648 lj_assertJ(J->prof_mode == 'f' || J->prof_mode == 'l',
649 "bad profiler mode %c", J->prof_mode);
650 if (!pt)
651 return 0;
652 ppt = J->prev_pt;
653 J->prev_pt = pt;
654 if (pt != ppt && ppt) {
655 J->prev_line = -1;
656 return 1;
657 }
658 if (J->prof_mode == 'l') {
659 BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc));
660 BCLine pline = J->prev_line;
661 J->prev_line = line;
662 if (pline != line)
663 return 1;
664 }
665 return 0;
666}
667
668static void rec_profile_ins(jit_State *J, const BCIns *pc)
669{
670 if (J->prof_mode && rec_profile_need(J, J->pt, pc)) {
671 emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
672 lj_snap_add(J);
673 }
674}
675
676static void rec_profile_ret(jit_State *J)
677{
678 if (J->prof_mode == 'f') {
679 emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
680 J->prev_pt = NULL;
681 lj_snap_add(J);
682 }
683}
684
685#endif
686
583/* -- Record calls and returns -------------------------------------------- */ 687/* -- Record calls and returns -------------------------------------------- */
584 688
585/* Specialize to the runtime value of the called function or its prototype. */ 689/* Specialize to the runtime value of the called function or its prototype. */
@@ -590,11 +694,26 @@ static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
590 GCproto *pt = funcproto(fn); 694 GCproto *pt = funcproto(fn);
591 /* Too many closures created? Probably not a monomorphic function. */ 695 /* Too many closures created? Probably not a monomorphic function. */
592 if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */ 696 if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */
593 TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC); 697 TRef trpt = emitir(IRT(IR_FLOAD, IRT_PGC), tr, IRFL_FUNC_PC);
594 emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt))); 698 emitir(IRTG(IR_EQ, IRT_PGC), trpt, lj_ir_kptr(J, proto_bc(pt)));
595 (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */ 699 (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */
596 return tr; 700 return tr;
597 } 701 }
702 } else {
703 /* Don't specialize to non-monomorphic builtins. */
704 switch (fn->c.ffid) {
705 case FF_coroutine_wrap_aux:
706 case FF_string_gmatch_aux:
707 /* NYI: io_file_iter doesn't have an ffid, yet. */
708 { /* Specialize to the ffid. */
709 TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID);
710 emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid));
711 }
712 return tr;
713 default:
714 /* NYI: don't specialize to non-monomorphic C functions. */
715 break;
716 }
598 } 717 }
599 /* Otherwise specialize to the function (closure) value itself. */ 718 /* Otherwise specialize to the function (closure) value itself. */
600 kfunc = lj_ir_kfunc(J, fn); 719 kfunc = lj_ir_kfunc(J, fn);
@@ -607,21 +726,31 @@ static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
607{ 726{
608 RecordIndex ix; 727 RecordIndex ix;
609 TValue *functv = &J->L->base[func]; 728 TValue *functv = &J->L->base[func];
610 TRef *fbase = &J->base[func]; 729 TRef kfunc, *fbase = &J->base[func];
611 ptrdiff_t i; 730 ptrdiff_t i;
612 for (i = 0; i <= nargs; i++) 731 (void)getslot(J, func); /* Ensure func has a reference. */
613 (void)getslot(J, func+i); /* Ensure func and all args have a reference. */ 732 for (i = 1; i <= nargs; i++)
733 (void)getslot(J, func+LJ_FR2+i); /* Ensure all args have a reference. */
614 if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */ 734 if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */
615 ix.tab = fbase[0]; 735 ix.tab = fbase[0];
616 copyTV(J->L, &ix.tabv, functv); 736 copyTV(J->L, &ix.tabv, functv);
617 if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj)) 737 if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
618 lj_trace_err(J, LJ_TRERR_NOMM); 738 lj_trace_err(J, LJ_TRERR_NOMM);
619 for (i = ++nargs; i > 0; i--) /* Shift arguments up. */ 739 for (i = ++nargs; i > LJ_FR2; i--) /* Shift arguments up. */
620 fbase[i] = fbase[i-1]; 740 fbase[i+LJ_FR2] = fbase[i+LJ_FR2-1];
741#if LJ_FR2
742 fbase[2] = fbase[0];
743#endif
621 fbase[0] = ix.mobj; /* Replace function. */ 744 fbase[0] = ix.mobj; /* Replace function. */
622 functv = &ix.mobjv; 745 functv = &ix.mobjv;
623 } 746 }
624 fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]); 747 kfunc = rec_call_specialize(J, funcV(functv), fbase[0]);
748#if LJ_FR2
749 fbase[0] = kfunc;
750 fbase[1] = TREF_FRAME;
751#else
752 fbase[0] = kfunc | TREF_FRAME;
753#endif
625 J->maxslot = (BCReg)nargs; 754 J->maxslot = (BCReg)nargs;
626} 755}
627 756
@@ -631,8 +760,8 @@ void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
631 rec_call_setup(J, func, nargs); 760 rec_call_setup(J, func, nargs);
632 /* Bump frame. */ 761 /* Bump frame. */
633 J->framedepth++; 762 J->framedepth++;
634 J->base += func+1; 763 J->base += func+1+LJ_FR2;
635 J->baseslot += func+1; 764 J->baseslot += func+1+LJ_FR2;
636 if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS) 765 if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS)
637 lj_trace_err(J, LJ_TRERR_STACKOV); 766 lj_trace_err(J, LJ_TRERR_STACKOV);
638} 767}
@@ -650,7 +779,9 @@ void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
650 func += cbase; 779 func += cbase;
651 } 780 }
652 /* Move func + args down. */ 781 /* Move func + args down. */
653 memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1)); 782 if (LJ_FR2 && J->baseslot == 2)
783 J->base[func+1] = TREF_FRAME;
784 memmove(&J->base[-1-LJ_FR2], &J->base[func], sizeof(TRef)*(J->maxslot+1+LJ_FR2));
654 /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */ 785 /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
655 /* Tailcalls can form a loop, so count towards the loop unroll limit. */ 786 /* Tailcalls can form a loop, so count towards the loop unroll limit. */
656 if (++J->tailcalled > J->loopunroll) 787 if (++J->tailcalled > J->loopunroll)
@@ -680,6 +811,8 @@ static int check_downrec_unroll(jit_State *J, GCproto *pt)
680 return 0; 811 return 0;
681} 812}
682 813
814static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot);
815
683/* Record return. */ 816/* Record return. */
684void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) 817void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
685{ 818{
@@ -691,7 +824,7 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
691 BCReg cbase = (BCReg)frame_delta(frame); 824 BCReg cbase = (BCReg)frame_delta(frame);
692 if (--J->framedepth <= 0) 825 if (--J->framedepth <= 0)
693 lj_trace_err(J, LJ_TRERR_NYIRETL); 826 lj_trace_err(J, LJ_TRERR_NYIRETL);
694 lua_assert(J->baseslot > 1); 827 lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
695 gotresults++; 828 gotresults++;
696 rbase += cbase; 829 rbase += cbase;
697 J->baseslot -= (BCReg)cbase; 830 J->baseslot -= (BCReg)cbase;
@@ -702,19 +835,20 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
702 /* Return to lower frame via interpreter for unhandled cases. */ 835 /* Return to lower frame via interpreter for unhandled cases. */
703 if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) && 836 if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
704 (!frame_islua(frame) || 837 (!frame_islua(frame) ||
705 (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))))) { 838 (J->parent == 0 && J->exitno == 0 &&
839 !bc_isret(bc_op(J->cur.startins))))) {
706 /* NYI: specialize to frame type and return directly, not via RET*. */ 840 /* NYI: specialize to frame type and return directly, not via RET*. */
707 for (i = 0; i < (ptrdiff_t)rbase; i++) 841 for (i = 0; i < (ptrdiff_t)rbase; i++)
708 J->base[i] = 0; /* Purge dead slots. */ 842 J->base[i] = 0; /* Purge dead slots. */
709 J->maxslot = rbase + (BCReg)gotresults; 843 J->maxslot = rbase + (BCReg)gotresults;
710 rec_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */ 844 lj_record_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */
711 return; 845 return;
712 } 846 }
713 if (frame_isvarg(frame)) { 847 if (frame_isvarg(frame)) {
714 BCReg cbase = (BCReg)frame_delta(frame); 848 BCReg cbase = (BCReg)frame_delta(frame);
715 if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ 849 if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */
716 lj_trace_err(J, LJ_TRERR_NYIRETL); 850 lj_trace_err(J, LJ_TRERR_NYIRETL);
717 lua_assert(J->baseslot > 1); 851 lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
718 rbase += cbase; 852 rbase += cbase;
719 J->baseslot -= (BCReg)cbase; 853 J->baseslot -= (BCReg)cbase;
720 J->base -= cbase; 854 J->base -= cbase;
@@ -724,27 +858,28 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
724 BCIns callins = *(frame_pc(frame)-1); 858 BCIns callins = *(frame_pc(frame)-1);
725 ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults; 859 ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults;
726 BCReg cbase = bc_a(callins); 860 BCReg cbase = bc_a(callins);
727 GCproto *pt = funcproto(frame_func(frame - (cbase+1))); 861 GCproto *pt = funcproto(frame_func(frame - (cbase+1+LJ_FR2)));
728 if ((pt->flags & PROTO_NOJIT)) 862 if ((pt->flags & PROTO_NOJIT))
729 lj_trace_err(J, LJ_TRERR_CJITOFF); 863 lj_trace_err(J, LJ_TRERR_CJITOFF);
730 if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) { 864 if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
731 if (check_downrec_unroll(J, pt)) { 865 if (check_downrec_unroll(J, pt)) {
732 J->maxslot = (BCReg)(rbase + gotresults); 866 J->maxslot = (BCReg)(rbase + gotresults);
733 lj_snap_purge(J); 867 lj_snap_purge(J);
734 rec_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-recursion. */ 868 lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-rec. */
735 return; 869 return;
736 } 870 }
737 lj_snap_add(J); 871 lj_snap_add(J);
738 } 872 }
739 for (i = 0; i < nresults; i++) /* Adjust results. */ 873 for (i = 0; i < nresults; i++) /* Adjust results. */
740 J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL; 874 J->base[i-1-LJ_FR2] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
741 J->maxslot = cbase+(BCReg)nresults; 875 J->maxslot = cbase+(BCReg)nresults;
742 if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ 876 if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */
743 J->framedepth--; 877 J->framedepth--;
744 lua_assert(J->baseslot > cbase+1); 878 lj_assertJ(J->baseslot > cbase+1+LJ_FR2, "bad baseslot for return");
745 J->baseslot -= cbase+1; 879 J->baseslot -= cbase+1+LJ_FR2;
746 J->base -= cbase+1; 880 J->base -= cbase+1+LJ_FR2;
747 } else if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) { 881 } else if (J->parent == 0 && J->exitno == 0 &&
882 !bc_isret(bc_op(J->cur.startins))) {
748 /* Return to lower frame would leave the loop in a root trace. */ 883 /* Return to lower frame would leave the loop in a root trace. */
749 lj_trace_err(J, LJ_TRERR_LLEAVE); 884 lj_trace_err(J, LJ_TRERR_LLEAVE);
750 } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */ 885 } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */
@@ -752,13 +887,13 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
752 } else { /* Return to lower frame. Guard for the target we return to. */ 887 } else { /* Return to lower frame. Guard for the target we return to. */
753 TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); 888 TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
754 TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame)); 889 TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
755 emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc); 890 emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc);
756 J->retdepth++; 891 J->retdepth++;
757 J->needsnap = 1; 892 J->needsnap = 1;
758 lua_assert(J->baseslot == 1); 893 lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot for return");
759 /* Shift result slots up and clear the slots of the new frame below. */ 894 /* Shift result slots up and clear the slots of the new frame below. */
760 memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults); 895 memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults);
761 memset(J->base-1, 0, sizeof(TRef)*(cbase+1)); 896 memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2));
762 } 897 }
763 } else if (frame_iscont(frame)) { /* Return to continuation frame. */ 898 } else if (frame_iscont(frame)) { /* Return to continuation frame. */
764 ASMFunction cont = frame_contf(frame); 899 ASMFunction cont = frame_contf(frame);
@@ -767,24 +902,49 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
767 lj_trace_err(J, LJ_TRERR_NYIRETL); 902 lj_trace_err(J, LJ_TRERR_NYIRETL);
768 J->baseslot -= (BCReg)cbase; 903 J->baseslot -= (BCReg)cbase;
769 J->base -= cbase; 904 J->base -= cbase;
770 J->maxslot = cbase-2; 905 J->maxslot = cbase-(2<<LJ_FR2);
771 if (cont == lj_cont_ra) { 906 if (cont == lj_cont_ra) {
772 /* Copy result to destination slot. */ 907 /* Copy result to destination slot. */
773 BCReg dst = bc_a(*(frame_contpc(frame)-1)); 908 BCReg dst = bc_a(*(frame_contpc(frame)-1));
774 J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL; 909 J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
775 if (dst >= J->maxslot) J->maxslot = dst+1; 910 if (dst >= J->maxslot) {
911 J->maxslot = dst+1;
912 }
776 } else if (cont == lj_cont_nop) { 913 } else if (cont == lj_cont_nop) {
777 /* Nothing to do here. */ 914 /* Nothing to do here. */
778 } else if (cont == lj_cont_cat) { 915 } else if (cont == lj_cont_cat) {
779 lua_assert(0); 916 BCReg bslot = bc_b(*(frame_contpc(frame)-1));
917 TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL;
918 if (bslot != J->maxslot) { /* Concatenate the remainder. */
919 TValue *b = J->L->base, save; /* Simulate lower frame and result. */
920 J->base[J->maxslot] = tr;
921 copyTV(J->L, &save, b-(2<<LJ_FR2));
922 if (gotresults)
923 copyTV(J->L, b-(2<<LJ_FR2), b+rbase);
924 else
925 setnilV(b-(2<<LJ_FR2));
926 J->L->base = b - cbase;
927 tr = rec_cat(J, bslot, cbase-(2<<LJ_FR2));
928 b = J->L->base + cbase; /* Undo. */
929 J->L->base = b;
930 copyTV(J->L, b-(2<<LJ_FR2), &save);
931 }
932 if (tr) { /* Store final result. */
933 BCReg dst = bc_a(*(frame_contpc(frame)-1));
934 J->base[dst] = tr;
935 if (dst >= J->maxslot) {
936 J->maxslot = dst+1;
937 }
938 } /* Otherwise continue with another __concat call. */
780 } else { 939 } else {
781 /* Result type already specialized. */ 940 /* Result type already specialized. */
782 lua_assert(cont == lj_cont_condf || cont == lj_cont_condt); 941 lj_assertJ(cont == lj_cont_condf || cont == lj_cont_condt,
942 "bad continuation type");
783 } 943 }
784 } else { 944 } else {
785 lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ 945 lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */
786 } 946 }
787 lua_assert(J->baseslot >= 1); 947 lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot for return");
788} 948}
789 949
790/* -- Metamethod handling ------------------------------------------------- */ 950/* -- Metamethod handling ------------------------------------------------- */
@@ -792,19 +952,17 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
792/* Prepare to record call to metamethod. */ 952/* Prepare to record call to metamethod. */
793static BCReg rec_mm_prep(jit_State *J, ASMFunction cont) 953static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
794{ 954{
795 BCReg s, top = curr_proto(J->L)->framesize; 955 BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize;
796 TRef trcont; 956#if LJ_FR2
797 setcont(&J->L->base[top], cont); 957 J->base[top] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
798#if LJ_64 958 J->base[top+1] = TREF_CONT;
799 trcont = lj_ir_kptr(J, (void *)((int64_t)cont - (int64_t)lj_vm_asm_begin));
800#else 959#else
801 trcont = lj_ir_kptr(J, (void *)cont); 960 J->base[top] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
802#endif 961#endif
803 J->base[top] = trcont | TREF_CONT;
804 J->framedepth++; 962 J->framedepth++;
805 for (s = J->maxslot; s < top; s++) 963 for (s = J->maxslot; s < top; s++)
806 J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */ 964 J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */
807 return top+1; 965 return top+1+LJ_FR2;
808} 966}
809 967
810/* Record metamethod lookup. */ 968/* Record metamethod lookup. */
@@ -823,7 +981,7 @@ int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
823 cTValue *mo; 981 cTValue *mo;
824 if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) { 982 if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
825 /* Specialize to the C library namespace object. */ 983 /* Specialize to the C library namespace object. */
826 emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv))); 984 emitir(IRTG(IR_EQ, IRT_PGC), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
827 } else { 985 } else {
828 /* Specialize to the type of userdata. */ 986 /* Specialize to the type of userdata. */
829 TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE); 987 TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
@@ -852,7 +1010,8 @@ int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
852 } 1010 }
853 /* The cdata metatable is treated as immutable. */ 1011 /* The cdata metatable is treated as immutable. */
854 if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt; 1012 if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
855 ix->mt = mix.tab = lj_ir_ktab(J, mt); 1013 ix->mt = mix.tab = lj_ir_ggfload(J, IRT_TAB,
1014 GG_OFS(g.gcroot[GCROOT_BASEMT+itypemap(&ix->tabv)]));
856 goto nocheck; 1015 goto nocheck;
857 } 1016 }
858 ix->mt = mt ? mix.tab : TREF_NIL; 1017 ix->mt = mt ? mix.tab : TREF_NIL;
@@ -879,12 +1038,12 @@ nocheck:
879static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm) 1038static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
880{ 1039{
881 /* Set up metamethod call first to save ix->tab and ix->tabv. */ 1040 /* Set up metamethod call first to save ix->tab and ix->tabv. */
882 BCReg func = rec_mm_prep(J, lj_cont_ra); 1041 BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra);
883 TRef *base = J->base + func; 1042 TRef *base = J->base + func;
884 TValue *basev = J->L->base + func; 1043 TValue *basev = J->L->base + func;
885 base[1] = ix->tab; base[2] = ix->key; 1044 base[1+LJ_FR2] = ix->tab; base[2+LJ_FR2] = ix->key;
886 copyTV(J->L, basev+1, &ix->tabv); 1045 copyTV(J->L, basev+1+LJ_FR2, &ix->tabv);
887 copyTV(J->L, basev+2, &ix->keyv); 1046 copyTV(J->L, basev+2+LJ_FR2, &ix->keyv);
888 if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */ 1047 if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
889 if (mm != MM_unm) { 1048 if (mm != MM_unm) {
890 ix->tab = ix->key; 1049 ix->tab = ix->key;
@@ -896,6 +1055,9 @@ static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
896 } 1055 }
897ok: 1056ok:
898 base[0] = ix->mobj; 1057 base[0] = ix->mobj;
1058#if LJ_FR2
1059 base[1] = 0;
1060#endif
899 copyTV(J->L, basev+0, &ix->mobjv); 1061 copyTV(J->L, basev+0, &ix->mobjv);
900 lj_record_call(J, func, 2); 1062 lj_record_call(J, func, 2);
901 return 0; /* No result yet. */ 1063 return 0; /* No result yet. */
@@ -912,6 +1074,8 @@ static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
912 TRef *base = J->base + func; 1074 TRef *base = J->base + func;
913 TValue *basev = J->L->base + func; 1075 TValue *basev = J->L->base + func;
914 base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv); 1076 base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
1077 base += LJ_FR2;
1078 basev += LJ_FR2;
915 base[1] = tr; copyTV(J->L, basev+1, tv); 1079 base[1] = tr; copyTV(J->L, basev+1, tv);
916#if LJ_52 1080#if LJ_52
917 base[2] = tr; copyTV(J->L, basev+2, tv); 1081 base[2] = tr; copyTV(J->L, basev+2, tv);
@@ -921,7 +1085,7 @@ static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
921 lj_record_call(J, func, 2); 1085 lj_record_call(J, func, 2);
922 } else { 1086 } else {
923 if (LJ_52 && tref_istab(tr)) 1087 if (LJ_52 && tref_istab(tr))
924 return lj_ir_call(J, IRCALL_lj_tab_len, tr); 1088 return emitir(IRTI(IR_ALEN), tr, TREF_NIL);
925 lj_trace_err(J, LJ_TRERR_NOMM); 1089 lj_trace_err(J, LJ_TRERR_NOMM);
926 } 1090 }
927 return 0; /* No result yet. */ 1091 return 0; /* No result yet. */
@@ -931,10 +1095,10 @@ static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
931static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op) 1095static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
932{ 1096{
933 BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt); 1097 BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
934 TRef *base = J->base + func; 1098 TRef *base = J->base + func + LJ_FR2;
935 TValue *tv = J->L->base + func; 1099 TValue *tv = J->L->base + func + LJ_FR2;
936 base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key; 1100 base[-LJ_FR2] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
937 copyTV(J->L, tv+0, &ix->mobjv); 1101 copyTV(J->L, tv-LJ_FR2, &ix->mobjv);
938 copyTV(J->L, tv+1, &ix->valv); 1102 copyTV(J->L, tv+1, &ix->valv);
939 copyTV(J->L, tv+2, &ix->keyv); 1103 copyTV(J->L, tv+2, &ix->keyv);
940 lj_record_call(J, func, 2); 1104 lj_record_call(J, func, 2);
@@ -1030,7 +1194,7 @@ static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
1030 ix->tab = ix->val; 1194 ix->tab = ix->val;
1031 copyTV(J->L, &ix->tabv, &ix->valv); 1195 copyTV(J->L, &ix->tabv, &ix->valv);
1032 } else { 1196 } else {
1033 lua_assert(tref_iscdata(ix->key)); 1197 lj_assertJ(tref_iscdata(ix->key), "cdata expected");
1034 ix->tab = ix->key; 1198 ix->tab = ix->key;
1035 copyTV(J->L, &ix->tabv, &ix->keyv); 1199 copyTV(J->L, &ix->tabv, &ix->keyv);
1036 } 1200 }
@@ -1041,6 +1205,72 @@ static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
1041 1205
1042/* -- Indexed access ------------------------------------------------------ */ 1206/* -- Indexed access ------------------------------------------------------ */
1043 1207
1208#ifdef LUAJIT_ENABLE_TABLE_BUMP
1209/* Bump table allocations in bytecode when they grow during recording. */
1210static void rec_idx_bump(jit_State *J, RecordIndex *ix)
1211{
1212 RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))];
1213 if (tref_ref(ix->tab) == rbc->ref) {
1214 const BCIns *pc = mref(rbc->pc, const BCIns);
1215 GCtab *tb = tabV(&ix->tabv);
1216 uint32_t nhbits;
1217 IRIns *ir;
1218 if (!tvisnil(&ix->keyv))
1219 (void)lj_tab_set(J->L, tb, &ix->keyv); /* Grow table right now. */
1220 nhbits = tb->hmask > 0 ? lj_fls(tb->hmask)+1 : 0;
1221 ir = IR(tref_ref(ix->tab));
1222 if (ir->o == IR_TNEW) {
1223 uint32_t ah = bc_d(*pc);
1224 uint32_t asize = ah & 0x7ff, hbits = ah >> 11;
1225 if (nhbits > hbits) hbits = nhbits;
1226 if (tb->asize > asize) {
1227 asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff;
1228 }
1229 if ((asize | (hbits<<11)) != ah) { /* Has the size changed? */
1230 /* Patch bytecode, but continue recording (for more patching). */
1231 setbc_d(pc, (asize | (hbits<<11)));
1232 /* Patching TNEW operands is only safe if the trace is aborted. */
1233 ir->op1 = asize; ir->op2 = hbits;
1234 J->retryrec = 1; /* Abort the trace at the end of recording. */
1235 }
1236 } else if (ir->o == IR_TDUP) {
1237 GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc)));
1238 /* Grow template table, but preserve keys with nil values. */
1239 if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) ||
1240 (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) {
1241 Node *node = noderef(tpl->node);
1242 uint32_t i, hmask = tpl->hmask, asize;
1243 TValue *array;
1244 for (i = 0; i <= hmask; i++) {
1245 if (!tvisnil(&node[i].key) && tvisnil(&node[i].val))
1246 settabV(J->L, &node[i].val, tpl);
1247 }
1248 if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) {
1249 TValue *o = lj_tab_set(J->L, tpl, &ix->keyv);
1250 if (tvisnil(o)) settabV(J->L, o, tpl);
1251 }
1252 lj_tab_resize(J->L, tpl, tb->asize, nhbits);
1253 node = noderef(tpl->node);
1254 hmask = tpl->hmask;
1255 for (i = 0; i <= hmask; i++) {
1256 /* This is safe, since template tables only hold immutable values. */
1257 if (tvistab(&node[i].val))
1258 setnilV(&node[i].val);
1259 }
1260 /* The shape of the table may have changed. Clean up array part, too. */
1261 asize = tpl->asize;
1262 array = tvref(tpl->array);
1263 for (i = 0; i < asize; i++) {
1264 if (tvistab(&array[i]))
1265 setnilV(&array[i]);
1266 }
1267 J->retryrec = 1; /* Abort the trace at the end of recording. */
1268 }
1269 }
1270 }
1271}
1272#endif
1273
1044/* Record bounds-check. */ 1274/* Record bounds-check. */
1045static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize) 1275static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
1046{ 1276{
@@ -1061,7 +1291,8 @@ static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
1061 /* Got scalar evolution analysis results for this reference? */ 1291 /* Got scalar evolution analysis results for this reference? */
1062 if (ref == J->scev.idx) { 1292 if (ref == J->scev.idx) {
1063 int32_t stop; 1293 int32_t stop;
1064 lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD); 1294 lj_assertJ(irt_isint(J->scev.t) && ir->o == IR_SLOAD,
1295 "only int SCEV supported");
1065 stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); 1296 stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
1066 /* Runtime value for stop of loop is within bounds? */ 1297 /* Runtime value for stop of loop is within bounds? */
1067 if ((uint64_t)stop + ofs < (uint64_t)asize) { 1298 if ((uint64_t)stop + ofs < (uint64_t)asize) {
@@ -1080,11 +1311,14 @@ static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
1080} 1311}
1081 1312
1082/* Record indexed key lookup. */ 1313/* Record indexed key lookup. */
1083static TRef rec_idx_key(jit_State *J, RecordIndex *ix) 1314static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref,
1315 IRType1 *rbguard)
1084{ 1316{
1085 TRef key; 1317 TRef key;
1086 GCtab *t = tabV(&ix->tabv); 1318 GCtab *t = tabV(&ix->tabv);
1087 ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */ 1319 ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */
1320 *rbref = 0;
1321 rbguard->irt = 0;
1088 1322
1089 /* Integer keys are looked up in the array part first. */ 1323 /* Integer keys are looked up in the array part first. */
1090 key = ix->key; 1324 key = ix->key;
@@ -1098,8 +1332,8 @@ static TRef rec_idx_key(jit_State *J, RecordIndex *ix)
1098 if ((MSize)k < t->asize) { /* Currently an array key? */ 1332 if ((MSize)k < t->asize) { /* Currently an array key? */
1099 TRef arrayref; 1333 TRef arrayref;
1100 rec_idx_abc(J, asizeref, ikey, t->asize); 1334 rec_idx_abc(J, asizeref, ikey, t->asize);
1101 arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY); 1335 arrayref = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_ARRAY);
1102 return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey); 1336 return emitir(IRT(IR_AREF, IRT_PGC), arrayref, ikey);
1103 } else { /* Currently not in array (may be an array extension)? */ 1337 } else { /* Currently not in array (may be an array extension)? */
1104 emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */ 1338 emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */
1105 if (k == 0 && tref_isk(key)) 1339 if (k == 0 && tref_isk(key))
@@ -1134,16 +1368,18 @@ static TRef rec_idx_key(jit_State *J, RecordIndex *ix)
1134 MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val); 1368 MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
1135 if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) && 1369 if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
1136 hslot <= 65535*(MSize)sizeof(Node)) { 1370 hslot <= 65535*(MSize)sizeof(Node)) {
1137 TRef node, kslot; 1371 TRef node, kslot, hm;
1138 TRef hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK); 1372 *rbref = J->cur.nins; /* Mark possible rollback point. */
1373 *rbguard = J->guardemit;
1374 hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
1139 emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask)); 1375 emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
1140 node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE); 1376 node = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_NODE);
1141 kslot = lj_ir_kslot(J, key, hslot / sizeof(Node)); 1377 kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
1142 return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot); 1378 return emitir(IRTG(IR_HREFK, IRT_PGC), node, kslot);
1143 } 1379 }
1144 } 1380 }
1145 /* Fall back to a regular hash lookup. */ 1381 /* Fall back to a regular hash lookup. */
1146 return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key); 1382 return emitir(IRT(IR_HREF, IRT_PGC), ix->tab, key);
1147} 1383}
1148 1384
1149/* Determine whether a key is NOT one of the fast metamethod names. */ 1385/* Determine whether a key is NOT one of the fast metamethod names. */
@@ -1168,20 +1404,22 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1168{ 1404{
1169 TRef xref; 1405 TRef xref;
1170 IROp xrefop, loadop; 1406 IROp xrefop, loadop;
1407 IRRef rbref;
1408 IRType1 rbguard;
1171 cTValue *oldv; 1409 cTValue *oldv;
1172 1410
1173 while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ 1411 while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
1174 /* Never call raw lj_record_idx() on non-table. */ 1412 /* Never call raw lj_record_idx() on non-table. */
1175 lua_assert(ix->idxchain != 0); 1413 lj_assertJ(ix->idxchain != 0, "bad usage");
1176 if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) 1414 if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
1177 lj_trace_err(J, LJ_TRERR_NOMM); 1415 lj_trace_err(J, LJ_TRERR_NOMM);
1178 handlemm: 1416 handlemm:
1179 if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */ 1417 if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
1180 BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra); 1418 BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
1181 TRef *base = J->base + func; 1419 TRef *base = J->base + func + LJ_FR2;
1182 TValue *tv = J->L->base + func; 1420 TValue *tv = J->L->base + func + LJ_FR2;
1183 base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key; 1421 base[-LJ_FR2] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
1184 setfuncV(J->L, tv+0, funcV(&ix->mobjv)); 1422 setfuncV(J->L, tv-LJ_FR2, funcV(&ix->mobjv));
1185 copyTV(J->L, tv+1, &ix->tabv); 1423 copyTV(J->L, tv+1, &ix->tabv);
1186 copyTV(J->L, tv+2, &ix->keyv); 1424 copyTV(J->L, tv+2, &ix->keyv);
1187 if (ix->val) { 1425 if (ix->val) {
@@ -1213,7 +1451,7 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1213 } 1451 }
1214 1452
1215 /* Record the key lookup. */ 1453 /* Record the key lookup. */
1216 xref = rec_idx_key(J, ix); 1454 xref = rec_idx_key(J, ix, &rbref, &rbguard);
1217 xrefop = IR(tref_ref(xref))->o; 1455 xrefop = IR(tref_ref(xref))->o;
1218 loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD; 1456 loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
1219 /* The lj_meta_tset() inconsistency is gone, but better play safe. */ 1457 /* The lj_meta_tset() inconsistency is gone, but better play safe. */
@@ -1223,11 +1461,15 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1223 IRType t = itype2irt(oldv); 1461 IRType t = itype2irt(oldv);
1224 TRef res; 1462 TRef res;
1225 if (oldv == niltvg(J2G(J))) { 1463 if (oldv == niltvg(J2G(J))) {
1226 emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); 1464 emitir(IRTG(IR_EQ, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
1227 res = TREF_NIL; 1465 res = TREF_NIL;
1228 } else { 1466 } else {
1229 res = emitir(IRTG(loadop, t), xref, 0); 1467 res = emitir(IRTG(loadop, t), xref, 0);
1230 } 1468 }
1469 if (tref_ref(res) < rbref) { /* HREFK + load forwarded? */
1470 lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */
1471 J->guardemit = rbguard;
1472 }
1231 if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index)) 1473 if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
1232 goto handlemm; 1474 goto handlemm;
1233 if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */ 1475 if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */
@@ -1235,6 +1477,10 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1235 } else { /* Indexed store. */ 1477 } else { /* Indexed store. */
1236 GCtab *mt = tabref(tabV(&ix->tabv)->metatable); 1478 GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
1237 int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val); 1479 int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
1480 if (tref_ref(xref) < rbref) { /* HREFK forwarded? */
1481 lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */
1482 J->guardemit = rbguard;
1483 }
1238 if (tvisnil(oldv)) { /* Previous value was nil? */ 1484 if (tvisnil(oldv)) { /* Previous value was nil? */
1239 /* Need to duplicate the hasmm check for the early guards. */ 1485 /* Need to duplicate the hasmm check for the early guards. */
1240 int hasmm = 0; 1486 int hasmm = 0;
@@ -1245,24 +1491,28 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1245 if (hasmm) 1491 if (hasmm)
1246 emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */ 1492 emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */
1247 else if (xrefop == IR_HREF) 1493 else if (xrefop == IR_HREF)
1248 emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32), 1494 emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC),
1249 xref, lj_ir_kkptr(J, niltvg(J2G(J)))); 1495 xref, lj_ir_kkptr(J, niltvg(J2G(J))));
1250 if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { 1496 if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
1251 lua_assert(hasmm); 1497 lj_assertJ(hasmm, "inconsistent metamethod handling");
1252 goto handlemm; 1498 goto handlemm;
1253 } 1499 }
1254 lua_assert(!hasmm); 1500 lj_assertJ(!hasmm, "inconsistent metamethod handling");
1255 if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ 1501 if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */
1256 TRef key = ix->key; 1502 TRef key = ix->key;
1257 if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ 1503 if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */
1258 key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT); 1504 key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
1259 xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key); 1505 xref = emitir(IRT(IR_NEWREF, IRT_PGC), ix->tab, key);
1260 keybarrier = 0; /* NEWREF already takes care of the key barrier. */ 1506 keybarrier = 0; /* NEWREF already takes care of the key barrier. */
1507#ifdef LUAJIT_ENABLE_TABLE_BUMP
1508 if ((J->flags & JIT_F_OPT_SINK)) /* Avoid a separate flag. */
1509 rec_idx_bump(J, ix);
1510#endif
1261 } 1511 }
1262 } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) { 1512 } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
1263 /* Cannot derive that the previous value was non-nil, must do checks. */ 1513 /* Cannot derive that the previous value was non-nil, must do checks. */
1264 if (xrefop == IR_HREF) /* Guard against store to niltv. */ 1514 if (xrefop == IR_HREF) /* Guard against store to niltv. */
1265 emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J)))); 1515 emitir(IRTG(IR_NE, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
1266 if (ix->idxchain) { /* Metamethod lookup required? */ 1516 if (ix->idxchain) { /* Metamethod lookup required? */
1267 /* A check for NULL metatable is cheaper (hoistable) than a load. */ 1517 /* A check for NULL metatable is cheaper (hoistable) than a load. */
1268 if (!mt) { 1518 if (!mt) {
@@ -1284,7 +1534,7 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1284 emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0); 1534 emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
1285 /* Invalidate neg. metamethod cache for stores with certain string keys. */ 1535 /* Invalidate neg. metamethod cache for stores with certain string keys. */
1286 if (!nommstr(J, ix->key)) { 1536 if (!nommstr(J, ix->key)) {
1287 TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM); 1537 TRef fref = emitir(IRT(IR_FREF, IRT_PGC), ix->tab, IRFL_TAB_NOMM);
1288 emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0)); 1538 emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
1289 } 1539 }
1290 J->needsnap = 1; 1540 J->needsnap = 1;
@@ -1292,6 +1542,31 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1292 } 1542 }
1293} 1543}
1294 1544
1545static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i)
1546{
1547 RecordIndex ix;
1548 cTValue *basev = J->L->base;
1549 GCtab *t = tabV(&basev[ra-1]);
1550 settabV(J->L, &ix.tabv, t);
1551 ix.tab = getslot(J, ra-1);
1552 ix.idxchain = 0;
1553#ifdef LUAJIT_ENABLE_TABLE_BUMP
1554 if ((J->flags & JIT_F_OPT_SINK)) {
1555 if (t->asize < i+rn-ra)
1556 lj_tab_reasize(J->L, t, i+rn-ra);
1557 setnilV(&ix.keyv);
1558 rec_idx_bump(J, &ix);
1559 }
1560#endif
1561 for (; ra < rn; i++, ra++) {
1562 setintV(&ix.keyv, i);
1563 ix.key = lj_ir_kint(J, i);
1564 copyTV(J->L, &ix.valv, &basev[ra]);
1565 ix.val = getslot(J, ra);
1566 lj_record_idx(J, &ix);
1567 }
1568}
1569
1295/* -- Upvalue access ------------------------------------------------------ */ 1570/* -- Upvalue access ------------------------------------------------------ */
1296 1571
1297/* Check whether upvalue is immutable and ok to constify. */ 1572/* Check whether upvalue is immutable and ok to constify. */
@@ -1328,13 +1603,17 @@ static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
1328 int needbarrier = 0; 1603 int needbarrier = 0;
1329 if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */ 1604 if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */
1330 TRef tr, kfunc; 1605 TRef tr, kfunc;
1331 lua_assert(val == 0); 1606 lj_assertJ(val == 0, "bad usage");
1332 if (!tref_isk(fn)) { /* Late specialization of current function. */ 1607 if (!tref_isk(fn)) { /* Late specialization of current function. */
1333 if (J->pt->flags >= PROTO_CLC_POLY) 1608 if (J->pt->flags >= PROTO_CLC_POLY)
1334 goto noconstify; 1609 goto noconstify;
1335 kfunc = lj_ir_kfunc(J, J->fn); 1610 kfunc = lj_ir_kfunc(J, J->fn);
1336 emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc); 1611 emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
1337 J->base[-1] = TREF_FRAME | kfunc; 1612#if LJ_FR2
1613 J->base[-2] = kfunc;
1614#else
1615 J->base[-1] = kfunc | TREF_FRAME;
1616#endif
1338 fn = kfunc; 1617 fn = kfunc;
1339 } 1618 }
1340 tr = lj_record_constify(J, uvval(uvp)); 1619 tr = lj_record_constify(J, uvval(uvp));
@@ -1345,16 +1624,16 @@ noconstify:
1345 /* Note: this effectively limits LJ_MAX_UPVAL to 127. */ 1624 /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
1346 uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff); 1625 uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
1347 if (!uvp->closed) { 1626 if (!uvp->closed) {
1348 uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv)); 1627 uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_PGC), fn, uv));
1349 /* In current stack? */ 1628 /* In current stack? */
1350 if (uvval(uvp) >= tvref(J->L->stack) && 1629 if (uvval(uvp) >= tvref(J->L->stack) &&
1351 uvval(uvp) < tvref(J->L->maxstack)) { 1630 uvval(uvp) < tvref(J->L->maxstack)) {
1352 int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot)); 1631 int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
1353 if (slot >= 0) { /* Aliases an SSA slot? */ 1632 if (slot >= 0) { /* Aliases an SSA slot? */
1354 emitir(IRTG(IR_EQ, IRT_P32), 1633 emitir(IRTG(IR_EQ, IRT_PGC),
1355 REF_BASE, 1634 REF_BASE,
1356 emitir(IRT(IR_ADD, IRT_P32), uref, 1635 emitir(IRT(IR_ADD, IRT_PGC), uref,
1357 lj_ir_kint(J, (slot - 1) * -8))); 1636 lj_ir_kint(J, (slot - 1 - LJ_FR2) * -8)));
1358 slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */ 1637 slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */
1359 if (val == 0) { 1638 if (val == 0) {
1360 return getslot(J, slot); 1639 return getslot(J, slot);
@@ -1365,12 +1644,12 @@ noconstify:
1365 } 1644 }
1366 } 1645 }
1367 } 1646 }
1368 emitir(IRTG(IR_UGT, IRT_P32), 1647 emitir(IRTG(IR_UGT, IRT_PGC),
1369 emitir(IRT(IR_SUB, IRT_P32), uref, REF_BASE), 1648 emitir(IRT(IR_SUB, IRT_PGC), uref, REF_BASE),
1370 lj_ir_kint(J, (J->baseslot + J->maxslot) * 8)); 1649 lj_ir_kint(J, (J->baseslot + J->maxslot) * 8));
1371 } else { 1650 } else {
1372 needbarrier = 1; 1651 needbarrier = 1;
1373 uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv)); 1652 uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_PGC), fn, uv));
1374 } 1653 }
1375 if (val == 0) { /* Upvalue load */ 1654 if (val == 0) { /* Upvalue load */
1376 IRType t = itype2irt(uvval(uvp)); 1655 IRType t = itype2irt(uvval(uvp));
@@ -1409,16 +1688,16 @@ static void check_call_unroll(jit_State *J, TraceNo lnk)
1409 if (count + J->tailcalled > J->param[JIT_P_recunroll]) { 1688 if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
1410 J->pc++; 1689 J->pc++;
1411 if (J->framedepth + J->retdepth == 0) 1690 if (J->framedepth + J->retdepth == 0)
1412 rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-recursion. */ 1691 lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-rec. */
1413 else 1692 else
1414 rec_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */ 1693 lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */
1415 } 1694 }
1416 } else { 1695 } else {
1417 if (count > J->param[JIT_P_callunroll]) { 1696 if (count > J->param[JIT_P_callunroll]) {
1418 if (lnk) { /* Possible tail- or up-recursion. */ 1697 if (lnk) { /* Possible tail- or up-recursion. */
1419 lj_trace_flush(J, lnk); /* Flush trace that only returns. */ 1698 lj_trace_flush(J, lnk); /* Flush trace that only returns. */
1420 /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */ 1699 /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
1421 hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4)); 1700 hotcount_set(J2GG(J), J->pc+1, lj_prng_u64(&J2G(J)->prng) & 15u);
1422 } 1701 }
1423 lj_trace_err(J, LJ_TRERR_CUNROLL); 1702 lj_trace_err(J, LJ_TRERR_CUNROLL);
1424 } 1703 }
@@ -1445,11 +1724,14 @@ static void rec_func_setup(jit_State *J)
1445static void rec_func_vararg(jit_State *J) 1724static void rec_func_vararg(jit_State *J)
1446{ 1725{
1447 GCproto *pt = J->pt; 1726 GCproto *pt = J->pt;
1448 BCReg s, fixargs, vframe = J->maxslot+1; 1727 BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2;
1449 lua_assert((pt->flags & PROTO_VARARG)); 1728 lj_assertJ((pt->flags & PROTO_VARARG), "FUNCV in non-vararg function");
1450 if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) 1729 if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
1451 lj_trace_err(J, LJ_TRERR_STACKOV); 1730 lj_trace_err(J, LJ_TRERR_STACKOV);
1452 J->base[vframe-1] = J->base[-1]; /* Copy function up. */ 1731 J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2]; /* Copy function up. */
1732#if LJ_FR2
1733 J->base[vframe-1] = TREF_FRAME;
1734#endif
1453 /* Copy fixarg slots up and set their original slots to nil. */ 1735 /* Copy fixarg slots up and set their original slots to nil. */
1454 fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot; 1736 fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
1455 for (s = 0; s < fixargs; s++) { 1737 for (s = 0; s < fixargs; s++) {
@@ -1485,9 +1767,9 @@ static void rec_func_jit(jit_State *J, TraceNo lnk)
1485 } 1767 }
1486 J->instunroll = 0; /* Cannot continue across a compiled function. */ 1768 J->instunroll = 0; /* Cannot continue across a compiled function. */
1487 if (J->pc == J->startpc && J->framedepth + J->retdepth == 0) 1769 if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
1488 rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-recursion. */ 1770 lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-rec. */
1489 else 1771 else
1490 rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */ 1772 lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */
1491} 1773}
1492 1774
1493/* -- Vararg handling ----------------------------------------------------- */ 1775/* -- Vararg handling ----------------------------------------------------- */
@@ -1511,8 +1793,10 @@ static int select_detect(jit_State *J)
1511static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults) 1793static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
1512{ 1794{
1513 int32_t numparams = J->pt->numparams; 1795 int32_t numparams = J->pt->numparams;
1514 ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1; 1796 ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2;
1515 lua_assert(frame_isvarg(J->L->base-1)); 1797 lj_assertJ(frame_isvarg(J->L->base-1), "VARG in non-vararg frame");
1798 if (LJ_FR2 && dst > J->maxslot)
1799 J->base[dst-1] = 0; /* Prevent resurrection of unrelated slot. */
1516 if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ 1800 if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */
1517 ptrdiff_t i; 1801 ptrdiff_t i;
1518 if (nvararg < 0) nvararg = 0; 1802 if (nvararg < 0) nvararg = 0;
@@ -1523,10 +1807,10 @@ static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
1523 J->maxslot = dst + (BCReg)nresults; 1807 J->maxslot = dst + (BCReg)nresults;
1524 } 1808 }
1525 for (i = 0; i < nresults; i++) 1809 for (i = 0; i < nresults; i++)
1526 J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1) : TREF_NIL; 1810 J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1 - LJ_FR2) : TREF_NIL;
1527 } else { /* Unknown number of varargs passed to trace. */ 1811 } else { /* Unknown number of varargs passed to trace. */
1528 TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME); 1812 TRef fr = emitir(IRTI(IR_SLOAD), LJ_FR2, IRSLOAD_READONLY|IRSLOAD_FRAME);
1529 int32_t frofs = 8*(1+numparams)+FRAME_VARG; 1813 int32_t frofs = 8*(1+LJ_FR2+numparams)+FRAME_VARG;
1530 if (nresults >= 0) { /* Known fixed number of results. */ 1814 if (nresults >= 0) { /* Known fixed number of results. */
1531 ptrdiff_t i; 1815 ptrdiff_t i;
1532 if (nvararg > 0) { 1816 if (nvararg > 0) {
@@ -1535,12 +1819,13 @@ static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
1535 if (nvararg >= nresults) 1819 if (nvararg >= nresults)
1536 emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults)); 1820 emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
1537 else 1821 else
1538 emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1))); 1822 emitir(IRTGI(IR_EQ), fr,
1539 vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); 1823 lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1)));
1540 vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); 1824 vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
1825 vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase, lj_ir_kint(J, frofs-8));
1541 for (i = 0; i < nload; i++) { 1826 for (i = 0; i < nload; i++) {
1542 IRType t = itype2irt(&J->L->base[i-1-nvararg]); 1827 IRType t = itype2irt(&J->L->base[i-1-LJ_FR2-nvararg]);
1543 TRef aref = emitir(IRT(IR_AREF, IRT_P32), 1828 TRef aref = emitir(IRT(IR_AREF, IRT_PGC),
1544 vbase, lj_ir_kint(J, (int32_t)i)); 1829 vbase, lj_ir_kint(J, (int32_t)i));
1545 TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0); 1830 TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
1546 if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ 1831 if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
@@ -1586,15 +1871,16 @@ static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
1586 } 1871 }
1587 if (idx != 0 && idx <= nvararg) { 1872 if (idx != 0 && idx <= nvararg) {
1588 IRType t; 1873 IRType t;
1589 TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr); 1874 TRef aref, vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
1590 vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8)); 1875 vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase,
1591 t = itype2irt(&J->L->base[idx-2-nvararg]); 1876 lj_ir_kint(J, frofs-(8<<LJ_FR2)));
1592 aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx); 1877 t = itype2irt(&J->L->base[idx-2-LJ_FR2-nvararg]);
1878 aref = emitir(IRT(IR_AREF, IRT_PGC), vbase, tridx);
1593 tr = emitir(IRTG(IR_VLOAD, t), aref, 0); 1879 tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
1594 if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */ 1880 if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
1595 } 1881 }
1596 J->base[dst-2] = tr; 1882 J->base[dst-2-LJ_FR2] = tr;
1597 J->maxslot = dst-1; 1883 J->maxslot = dst-1-LJ_FR2;
1598 J->bcskip = 2; /* Skip CALLM + select. */ 1884 J->bcskip = 2; /* Skip CALLM + select. */
1599 } else { 1885 } else {
1600 nyivarg: 1886 nyivarg:
@@ -1612,8 +1898,63 @@ static TRef rec_tnew(jit_State *J, uint32_t ah)
1612{ 1898{
1613 uint32_t asize = ah & 0x7ff; 1899 uint32_t asize = ah & 0x7ff;
1614 uint32_t hbits = ah >> 11; 1900 uint32_t hbits = ah >> 11;
1901 TRef tr;
1615 if (asize == 0x7ff) asize = 0x801; 1902 if (asize == 0x7ff) asize = 0x801;
1616 return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits); 1903 tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
1904#ifdef LUAJIT_ENABLE_TABLE_BUMP
1905 J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr);
1906 setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc);
1907 setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
1908#endif
1909 return tr;
1910}
1911
1912/* -- Concatenation ------------------------------------------------------- */
1913
1914static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot)
1915{
1916 TRef *top = &J->base[topslot];
1917 TValue savetv[5];
1918 BCReg s;
1919 RecordIndex ix;
1920 lj_assertJ(baseslot < topslot, "bad CAT arg");
1921 for (s = baseslot; s <= topslot; s++)
1922 (void)getslot(J, s); /* Ensure all arguments have a reference. */
1923 if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) {
1924 TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot];
1925 /* First convert numbers to strings. */
1926 for (trp = top; trp >= base; trp--) {
1927 if (tref_isnumber(*trp))
1928 *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp,
1929 tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT);
1930 else if (!tref_isstr(*trp))
1931 break;
1932 }
1933 xbase = ++trp;
1934 tr = hdr = emitir(IRT(IR_BUFHDR, IRT_PGC),
1935 lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
1936 do {
1937 tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, *trp++);
1938 } while (trp <= top);
1939 tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
1940 J->maxslot = (BCReg)(xbase - J->base);
1941 if (xbase == base) return tr; /* Return simple concatenation result. */
1942 /* Pass partial result. */
1943 topslot = J->maxslot--;
1944 *xbase = tr;
1945 top = xbase;
1946 setstrV(J->L, &ix.keyv, &J2G(J)->strempty); /* Simulate string result. */
1947 } else {
1948 J->maxslot = topslot-1;
1949 copyTV(J->L, &ix.keyv, &J->L->base[topslot]);
1950 }
1951 copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]);
1952 ix.tab = top[-1];
1953 ix.key = top[0];
1954 memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv)); /* Save slots. */
1955 rec_mm_arith(J, &ix, MM_concat); /* Call __concat metamethod. */
1956 memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv)); /* Restore slots. */
1957 return 0; /* No result yet. */
1617} 1958}
1618 1959
1619/* -- Record bytecode ops ------------------------------------------------- */ 1960/* -- Record bytecode ops ------------------------------------------------- */
@@ -1634,7 +1975,15 @@ static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
1634 const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0); 1975 const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
1635 SnapShot *snap = &J->cur.snap[J->cur.nsnap-1]; 1976 SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
1636 /* Set PC to opposite target to avoid re-recording the comp. in side trace. */ 1977 /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
1978#if LJ_FR2
1979 SnapEntry *flink = &J->cur.snapmap[snap->mapofs + snap->nent];
1980 uint64_t pcbase;
1981 memcpy(&pcbase, flink, sizeof(uint64_t));
1982 pcbase = (pcbase & 0xff) | (u64ptr(npc) << 8);
1983 memcpy(flink, &pcbase, sizeof(uint64_t));
1984#else
1637 J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc); 1985 J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
1986#endif
1638 J->needsnap = 1; 1987 J->needsnap = 1;
1639 if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins); 1988 if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
1640 lj_snap_shrink(J); /* Shrink last snapshot if possible. */ 1989 lj_snap_shrink(J); /* Shrink last snapshot if possible. */
@@ -1654,7 +2003,7 @@ void lj_record_ins(jit_State *J)
1654 if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) { 2003 if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
1655 switch (J->postproc) { 2004 switch (J->postproc) {
1656 case LJ_POST_FIXCOMP: /* Fixup comparison. */ 2005 case LJ_POST_FIXCOMP: /* Fixup comparison. */
1657 pc = frame_pc(&J2G(J)->tmptv); 2006 pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64;
1658 rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1))); 2007 rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
1659 /* fallthrough */ 2008 /* fallthrough */
1660 case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */ 2009 case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */
@@ -1692,7 +2041,7 @@ void lj_record_ins(jit_State *J)
1692 if (bc_op(*J->pc) >= BC__MAX) 2041 if (bc_op(*J->pc) >= BC__MAX)
1693 return; 2042 return;
1694 break; 2043 break;
1695 default: lua_assert(0); break; 2044 default: lj_assertJ(0, "bad post-processing mode"); break;
1696 } 2045 }
1697 J->postproc = LJ_POST_NONE; 2046 J->postproc = LJ_POST_NONE;
1698 } 2047 }
@@ -1722,6 +2071,10 @@ void lj_record_ins(jit_State *J)
1722 rec_check_ir(J); 2071 rec_check_ir(J);
1723#endif 2072#endif
1724 2073
2074#if LJ_HASPROFILE
2075 rec_profile_ins(J, pc);
2076#endif
2077
1725 /* Keep a copy of the runtime values of var/num/str operands. */ 2078 /* Keep a copy of the runtime values of var/num/str operands. */
1726#define rav (&ix.valv) 2079#define rav (&ix.valv)
1727#define rbv (&ix.tabv) 2080#define rbv (&ix.tabv)
@@ -1748,7 +2101,7 @@ void lj_record_ins(jit_State *J)
1748 switch (bcmode_c(op)) { 2101 switch (bcmode_c(op)) {
1749 case BCMvar: 2102 case BCMvar:
1750 copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break; 2103 copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
1751 case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break; 2104 case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
1752 case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc); 2105 case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
1753 copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) : 2106 copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
1754 lj_ir_knumint(J, numV(tv)); } break; 2107 lj_ir_knumint(J, numV(tv)); } break;
@@ -1843,6 +2196,18 @@ void lj_record_ins(jit_State *J)
1843 J->maxslot = bc_a(pc[1]); /* Shrink used slots. */ 2196 J->maxslot = bc_a(pc[1]); /* Shrink used slots. */
1844 break; 2197 break;
1845 2198
2199 case BC_ISTYPE: case BC_ISNUM:
2200 /* These coercions need to correspond with lj_meta_istype(). */
2201 if (LJ_DUALNUM && rc == ~LJ_TNUMX+1)
2202 ra = lj_opt_narrow_toint(J, ra);
2203 else if (rc == ~LJ_TNUMX+2)
2204 ra = lj_ir_tonum(J, ra);
2205 else if (rc == ~LJ_TSTR+1)
2206 ra = lj_ir_tostr(J, ra);
2207 /* else: type specialization suffices. */
2208 J->base[bc_a(ins)] = ra;
2209 break;
2210
1846 /* -- Unary ops --------------------------------------------------------- */ 2211 /* -- Unary ops --------------------------------------------------------- */
1847 2212
1848 case BC_NOT: 2213 case BC_NOT:
@@ -1854,7 +2219,7 @@ void lj_record_ins(jit_State *J)
1854 if (tref_isstr(rc)) 2219 if (tref_isstr(rc))
1855 rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN); 2220 rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
1856 else if (!LJ_52 && tref_istab(rc)) 2221 else if (!LJ_52 && tref_istab(rc))
1857 rc = lj_ir_call(J, IRCALL_lj_tab_len, rc); 2222 rc = emitir(IRTI(IR_ALEN), rc, TREF_NIL);
1858 else 2223 else
1859 rc = rec_mm_len(J, rc, rcv); 2224 rc = rec_mm_len(J, rc, rcv);
1860 break; 2225 break;
@@ -1906,11 +2271,23 @@ void lj_record_ins(jit_State *J)
1906 rc = rec_mm_arith(J, &ix, MM_pow); 2271 rc = rec_mm_arith(J, &ix, MM_pow);
1907 break; 2272 break;
1908 2273
2274 /* -- Miscellaneous ops ------------------------------------------------- */
2275
2276 case BC_CAT:
2277 rc = rec_cat(J, rb, rc);
2278 break;
2279
1909 /* -- Constant and move ops --------------------------------------------- */ 2280 /* -- Constant and move ops --------------------------------------------- */
1910 2281
1911 case BC_MOV: 2282 case BC_MOV:
1912 /* Clear gap of method call to avoid resurrecting previous refs. */ 2283 /* Clear gap of method call to avoid resurrecting previous refs. */
1913 if (ra > J->maxslot) J->base[ra-1] = 0; 2284 if (ra > J->maxslot) {
2285#if LJ_FR2
2286 memset(J->base + J->maxslot, 0, (ra - J->maxslot) * sizeof(TRef));
2287#else
2288 J->base[ra-1] = 0;
2289#endif
2290 }
1914 break; 2291 break;
1915 case BC_KSTR: case BC_KNUM: case BC_KPRI: 2292 case BC_KSTR: case BC_KNUM: case BC_KPRI:
1916 break; 2293 break;
@@ -1918,6 +2295,8 @@ void lj_record_ins(jit_State *J)
1918 rc = lj_ir_kint(J, (int32_t)(int16_t)rc); 2295 rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
1919 break; 2296 break;
1920 case BC_KNIL: 2297 case BC_KNIL:
2298 if (LJ_FR2 && ra > J->maxslot)
2299 J->base[ra-1] = 0;
1921 while (ra <= rc) 2300 while (ra <= rc)
1922 J->base[ra++] = TREF_NIL; 2301 J->base[ra++] = TREF_NIL;
1923 if (rc >= J->maxslot) J->maxslot = rc+1; 2302 if (rc >= J->maxslot) J->maxslot = rc+1;
@@ -1954,6 +2333,14 @@ void lj_record_ins(jit_State *J)
1954 ix.idxchain = LJ_MAX_IDXCHAIN; 2333 ix.idxchain = LJ_MAX_IDXCHAIN;
1955 rc = lj_record_idx(J, &ix); 2334 rc = lj_record_idx(J, &ix);
1956 break; 2335 break;
2336 case BC_TGETR: case BC_TSETR:
2337 ix.idxchain = 0;
2338 rc = lj_record_idx(J, &ix);
2339 break;
2340
2341 case BC_TSETM:
2342 rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo);
2343 break;
1957 2344
1958 case BC_TNEW: 2345 case BC_TNEW:
1959 rc = rec_tnew(J, rc); 2346 rc = rec_tnew(J, rc);
@@ -1961,33 +2348,38 @@ void lj_record_ins(jit_State *J)
1961 case BC_TDUP: 2348 case BC_TDUP:
1962 rc = emitir(IRTG(IR_TDUP, IRT_TAB), 2349 rc = emitir(IRTG(IR_TDUP, IRT_TAB),
1963 lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0); 2350 lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
2351#ifdef LUAJIT_ENABLE_TABLE_BUMP
2352 J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc);
2353 setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc);
2354 setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
2355#endif
1964 break; 2356 break;
1965 2357
1966 /* -- Calls and vararg handling ----------------------------------------- */ 2358 /* -- Calls and vararg handling ----------------------------------------- */
1967 2359
1968 case BC_ITERC: 2360 case BC_ITERC:
1969 J->base[ra] = getslot(J, ra-3); 2361 J->base[ra] = getslot(J, ra-3);
1970 J->base[ra+1] = getslot(J, ra-2); 2362 J->base[ra+1+LJ_FR2] = getslot(J, ra-2);
1971 J->base[ra+2] = getslot(J, ra-1); 2363 J->base[ra+2+LJ_FR2] = getslot(J, ra-1);
1972 { /* Do the actual copy now because lj_record_call needs the values. */ 2364 { /* Do the actual copy now because lj_record_call needs the values. */
1973 TValue *b = &J->L->base[ra]; 2365 TValue *b = &J->L->base[ra];
1974 copyTV(J->L, b, b-3); 2366 copyTV(J->L, b, b-3);
1975 copyTV(J->L, b+1, b-2); 2367 copyTV(J->L, b+1+LJ_FR2, b-2);
1976 copyTV(J->L, b+2, b-1); 2368 copyTV(J->L, b+2+LJ_FR2, b-1);
1977 } 2369 }
1978 lj_record_call(J, ra, (ptrdiff_t)rc-1); 2370 lj_record_call(J, ra, (ptrdiff_t)rc-1);
1979 break; 2371 break;
1980 2372
1981 /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */ 2373 /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
1982 case BC_CALLM: 2374 case BC_CALLM:
1983 rc = (BCReg)(J->L->top - J->L->base) - ra; 2375 rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
1984 /* fallthrough */ 2376 /* fallthrough */
1985 case BC_CALL: 2377 case BC_CALL:
1986 lj_record_call(J, ra, (ptrdiff_t)rc-1); 2378 lj_record_call(J, ra, (ptrdiff_t)rc-1);
1987 break; 2379 break;
1988 2380
1989 case BC_CALLMT: 2381 case BC_CALLMT:
1990 rc = (BCReg)(J->L->top - J->L->base) - ra; 2382 rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
1991 /* fallthrough */ 2383 /* fallthrough */
1992 case BC_CALLT: 2384 case BC_CALLT:
1993 lj_record_tailcall(J, ra, (ptrdiff_t)rc-1); 2385 lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
@@ -2004,6 +2396,9 @@ void lj_record_ins(jit_State *J)
2004 rc = (BCReg)(J->L->top - J->L->base) - ra + 1; 2396 rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
2005 /* fallthrough */ 2397 /* fallthrough */
2006 case BC_RET: case BC_RET0: case BC_RET1: 2398 case BC_RET: case BC_RET0: case BC_RET1:
2399#if LJ_HASPROFILE
2400 rec_profile_ret(J);
2401#endif
2007 lj_record_ret(J, ra, (ptrdiff_t)rc-1); 2402 lj_record_ret(J, ra, (ptrdiff_t)rc-1);
2008 break; 2403 break;
2009 2404
@@ -2014,9 +2409,10 @@ void lj_record_ins(jit_State *J)
2014 J->loopref = J->cur.nins; 2409 J->loopref = J->cur.nins;
2015 break; 2410 break;
2016 case BC_JFORI: 2411 case BC_JFORI:
2017 lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL); 2412 lj_assertJ(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL,
2413 "JFORI does not point to JFORL");
2018 if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */ 2414 if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */
2019 rec_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); 2415 lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
2020 /* Continue tracing if the loop is not entered. */ 2416 /* Continue tracing if the loop is not entered. */
2021 break; 2417 break;
2022 2418
@@ -2067,7 +2463,8 @@ void lj_record_ins(jit_State *J)
2067 rec_func_lua(J); 2463 rec_func_lua(J);
2068 break; 2464 break;
2069 case BC_JFUNCV: 2465 case BC_JFUNCV:
2070 lua_assert(0); /* Cannot happen. No hotcall counting for varag funcs. */ 2466 /* Cannot happen. No hotcall counting for varag funcs. */
2467 lj_assertJ(0, "unsupported vararg hotcall");
2071 break; 2468 break;
2072 2469
2073 case BC_FUNCC: 2470 case BC_FUNCC:
@@ -2083,10 +2480,8 @@ void lj_record_ins(jit_State *J)
2083 /* fallthrough */ 2480 /* fallthrough */
2084 case BC_ITERN: 2481 case BC_ITERN:
2085 case BC_ISNEXT: 2482 case BC_ISNEXT:
2086 case BC_CAT:
2087 case BC_UCLO: 2483 case BC_UCLO:
2088 case BC_FNEW: 2484 case BC_FNEW:
2089 case BC_TSETM:
2090 setintV(&J->errinfo, (int32_t)op); 2485 setintV(&J->errinfo, (int32_t)op);
2091 lj_trace_err_info(J, LJ_TRERR_NYIBC); 2486 lj_trace_err_info(J, LJ_TRERR_NYIBC);
2092 break; 2487 break;
@@ -2095,15 +2490,21 @@ void lj_record_ins(jit_State *J)
2095 /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */ 2490 /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
2096 if (bcmode_a(op) == BCMdst && rc) { 2491 if (bcmode_a(op) == BCMdst && rc) {
2097 J->base[ra] = rc; 2492 J->base[ra] = rc;
2098 if (ra >= J->maxslot) J->maxslot = ra+1; 2493 if (ra >= J->maxslot) {
2494#if LJ_FR2
2495 if (ra > J->maxslot) J->base[ra-1] = 0;
2496#endif
2497 J->maxslot = ra+1;
2498 }
2099 } 2499 }
2100 2500
2101#undef rav 2501#undef rav
2102#undef rbv 2502#undef rbv
2103#undef rcv 2503#undef rcv
2104 2504
2105 /* Limit the number of recorded IR instructions. */ 2505 /* Limit the number of recorded IR instructions and constants. */
2106 if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord]) 2506 if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord] ||
2507 J->cur.nk < REF_BIAS-(IRRef)J->param[JIT_P_maxirconst])
2107 lj_trace_err(J, LJ_TRERR_TRACEOV); 2508 lj_trace_err(J, LJ_TRERR_TRACEOV);
2108} 2509}
2109 2510
@@ -2123,11 +2524,11 @@ static const BCIns *rec_setup_root(jit_State *J)
2123 J->bc_min = pc; 2524 J->bc_min = pc;
2124 break; 2525 break;
2125 case BC_ITERL: 2526 case BC_ITERL:
2126 lua_assert(bc_op(pc[-1]) == BC_ITERC); 2527 lj_assertJ(bc_op(pc[-1]) == BC_ITERC, "no ITERC before ITERL");
2127 J->maxslot = ra + bc_b(pc[-1]) - 1; 2528 J->maxslot = ra + bc_b(pc[-1]) - 1;
2128 J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); 2529 J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
2129 pc += 1+bc_j(ins); 2530 pc += 1+bc_j(ins);
2130 lua_assert(bc_op(pc[-1]) == BC_JMP); 2531 lj_assertJ(bc_op(pc[-1]) == BC_JMP, "ITERL does not point to JMP+1");
2131 J->bc_min = pc; 2532 J->bc_min = pc;
2132 break; 2533 break;
2133 case BC_LOOP: 2534 case BC_LOOP:
@@ -2152,8 +2553,14 @@ static const BCIns *rec_setup_root(jit_State *J)
2152 J->maxslot = J->pt->numparams; 2553 J->maxslot = J->pt->numparams;
2153 pc++; 2554 pc++;
2154 break; 2555 break;
2556 case BC_CALLM:
2557 case BC_CALL:
2558 case BC_ITERC:
2559 /* No bytecode range check for stitched traces. */
2560 pc++;
2561 break;
2155 default: 2562 default:
2156 lua_assert(0); 2563 lj_assertJ(0, "bad root trace start bytecode %d", bc_op(ins));
2157 break; 2564 break;
2158 } 2565 }
2159 return pc; 2566 return pc;
@@ -2167,11 +2574,14 @@ void lj_record_setup(jit_State *J)
2167 /* Initialize state related to current trace. */ 2574 /* Initialize state related to current trace. */
2168 memset(J->slot, 0, sizeof(J->slot)); 2575 memset(J->slot, 0, sizeof(J->slot));
2169 memset(J->chain, 0, sizeof(J->chain)); 2576 memset(J->chain, 0, sizeof(J->chain));
2577#ifdef LUAJIT_ENABLE_TABLE_BUMP
2578 memset(J->rbchash, 0, sizeof(J->rbchash));
2579#endif
2170 memset(J->bpropcache, 0, sizeof(J->bpropcache)); 2580 memset(J->bpropcache, 0, sizeof(J->bpropcache));
2171 J->scev.idx = REF_NIL; 2581 J->scev.idx = REF_NIL;
2172 setmref(J->scev.pc, NULL); 2582 setmref(J->scev.pc, NULL);
2173 2583
2174 J->baseslot = 1; /* Invoking function is at base[-1]. */ 2584 J->baseslot = 1+LJ_FR2; /* Invoking function is at base[-1-LJ_FR2]. */
2175 J->base = J->slot + J->baseslot; 2585 J->base = J->slot + J->baseslot;
2176 J->maxslot = 0; 2586 J->maxslot = 0;
2177 J->framedepth = 0; 2587 J->framedepth = 0;
@@ -2186,7 +2596,7 @@ void lj_record_setup(jit_State *J)
2186 J->bc_extent = ~(MSize)0; 2596 J->bc_extent = ~(MSize)0;
2187 2597
2188 /* Emit instructions for fixed references. Also triggers initial IR alloc. */ 2598 /* Emit instructions for fixed references. Also triggers initial IR alloc. */
2189 emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno); 2599 emitir_raw(IRT(IR_BASE, IRT_PGC), J->parent, J->exitno);
2190 for (i = 0; i <= 2; i++) { 2600 for (i = 0; i <= 2; i++) {
2191 IRIns *ir = IR(REF_NIL-i); 2601 IRIns *ir = IR(REF_NIL-i);
2192 ir->i = 0; 2602 ir->i = 0;
@@ -2220,7 +2630,7 @@ void lj_record_setup(jit_State *J)
2220 if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] || 2630 if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
2221 T->snap[J->exitno].count >= J->param[JIT_P_hotexit] + 2631 T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
2222 J->param[JIT_P_tryside]) { 2632 J->param[JIT_P_tryside]) {
2223 rec_stop(J, LJ_TRLINK_INTERP, 0); 2633 lj_record_stop(J, LJ_TRLINK_INTERP, 0);
2224 } 2634 }
2225 } else { /* Root trace. */ 2635 } else { /* Root trace. */
2226 J->cur.root = 0; 2636 J->cur.root = 0;
@@ -2232,9 +2642,15 @@ void lj_record_setup(jit_State *J)
2232 lj_snap_add(J); 2642 lj_snap_add(J);
2233 if (bc_op(J->cur.startins) == BC_FORL) 2643 if (bc_op(J->cur.startins) == BC_FORL)
2234 rec_for_loop(J, J->pc-1, &J->scev, 1); 2644 rec_for_loop(J, J->pc-1, &J->scev, 1);
2645 else if (bc_op(J->cur.startins) == BC_ITERC)
2646 J->startpc = NULL;
2235 if (1 + J->pt->framesize >= LJ_MAX_JSLOTS) 2647 if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
2236 lj_trace_err(J, LJ_TRERR_STACKOV); 2648 lj_trace_err(J, LJ_TRERR_STACKOV);
2237 } 2649 }
2650#if LJ_HASPROFILE
2651 J->prev_pt = NULL;
2652 J->prev_line = -1;
2653#endif
2238#ifdef LUAJIT_ENABLE_CHECKHOOK 2654#ifdef LUAJIT_ENABLE_CHECKHOOK
2239 /* Regularly check for instruction/line hooks from compiled code and 2655 /* Regularly check for instruction/line hooks from compiled code and
2240 ** exit to the interpreter if the hooks are set. 2656 ** exit to the interpreter if the hooks are set.
diff --git a/src/lj_record.h b/src/lj_record.h
index 8ef8b2a7..e7d24fae 100644
--- a/src/lj_record.h
+++ b/src/lj_record.h
@@ -28,6 +28,7 @@ typedef struct RecordIndex {
28 28
29LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b, 29LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
30 cTValue *av, cTValue *bv); 30 cTValue *av, cTValue *bv);
31LJ_FUNC void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk);
31LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o); 32LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o);
32 33
33LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs); 34LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
diff --git a/src/lj_snap.c b/src/lj_snap.c
index de8068ac..a21894f6 100644
--- a/src/lj_snap.c
+++ b/src/lj_snap.c
@@ -68,10 +68,22 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
68 for (s = 0; s < nslots; s++) { 68 for (s = 0; s < nslots; s++) {
69 TRef tr = J->slot[s]; 69 TRef tr = J->slot[s];
70 IRRef ref = tref_ref(tr); 70 IRRef ref = tref_ref(tr);
71#if LJ_FR2
72 if (s == 1) { /* Ignore slot 1 in LJ_FR2 mode, except if tailcalled. */
73 if ((tr & TREF_FRAME))
74 map[n++] = SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL);
75 continue;
76 }
77 if ((tr & (TREF_FRAME | TREF_CONT)) && !ref) {
78 cTValue *base = J->L->base - J->baseslot;
79 tr = J->slot[s] = (tr & 0xff0000) | lj_ir_k64(J, IR_KNUM, base[s].u64);
80 ref = tref_ref(tr);
81 }
82#endif
71 if (ref) { 83 if (ref) {
72 SnapEntry sn = SNAP_TR(s, tr); 84 SnapEntry sn = SNAP_TR(s, tr);
73 IRIns *ir = &J->cur.ir[ref]; 85 IRIns *ir = &J->cur.ir[ref];
74 if (!(sn & (SNAP_CONT|SNAP_FRAME)) && 86 if ((LJ_FR2 || !(sn & (SNAP_CONT|SNAP_FRAME))) &&
75 ir->o == IR_SLOAD && ir->op1 == s && ref > retf) { 87 ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
76 /* No need to snapshot unmodified non-inherited slots. */ 88 /* No need to snapshot unmodified non-inherited slots. */
77 if (!(ir->op2 & IRSLOAD_INHERIT)) 89 if (!(ir->op2 & IRSLOAD_INHERIT))
@@ -81,7 +93,7 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
81 (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT) 93 (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
82 sn |= SNAP_NORESTORE; 94 sn |= SNAP_NORESTORE;
83 } 95 }
84 if (LJ_SOFTFP && irt_isnum(ir->t)) 96 if (LJ_SOFTFP32 && irt_isnum(ir->t))
85 sn |= SNAP_SOFTFPNUM; 97 sn |= SNAP_SOFTFPNUM;
86 map[n++] = sn; 98 map[n++] = sn;
87 } 99 }
@@ -90,32 +102,51 @@ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
90} 102}
91 103
92/* Add frame links at the end of the snapshot. */ 104/* Add frame links at the end of the snapshot. */
93static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map) 105static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot)
94{ 106{
95 cTValue *frame = J->L->base - 1; 107 cTValue *frame = J->L->base - 1;
96 cTValue *lim = J->L->base - J->baseslot; 108 cTValue *lim = J->L->base - J->baseslot + LJ_FR2;
97 cTValue *ftop = frame + funcproto(frame_func(frame))->framesize; 109 GCfunc *fn = frame_func(frame);
110 cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top;
111#if LJ_FR2
112 uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2);
113 lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot");
114 memcpy(map, &pcbase, sizeof(uint64_t));
115#else
98 MSize f = 0; 116 MSize f = 0;
99 map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */ 117 map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */
118#endif
100 while (frame > lim) { /* Backwards traversal of all frames above base. */ 119 while (frame > lim) { /* Backwards traversal of all frames above base. */
101 if (frame_islua(frame)) { 120 if (frame_islua(frame)) {
121#if !LJ_FR2
102 map[f++] = SNAP_MKPC(frame_pc(frame)); 122 map[f++] = SNAP_MKPC(frame_pc(frame));
123#endif
103 frame = frame_prevl(frame); 124 frame = frame_prevl(frame);
104 } else if (frame_iscont(frame)) { 125 } else if (frame_iscont(frame)) {
126#if !LJ_FR2
105 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); 127 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
106 map[f++] = SNAP_MKPC(frame_contpc(frame)); 128 map[f++] = SNAP_MKPC(frame_contpc(frame));
129#endif
107 frame = frame_prevd(frame); 130 frame = frame_prevd(frame);
108 } else { 131 } else {
109 lua_assert(!frame_isc(frame)); 132 lj_assertJ(!frame_isc(frame), "broken frame chain");
133#if !LJ_FR2
110 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); 134 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
135#endif
111 frame = frame_prevd(frame); 136 frame = frame_prevd(frame);
112 continue; 137 continue;
113 } 138 }
114 if (frame + funcproto(frame_func(frame))->framesize > ftop) 139 if (frame + funcproto(frame_func(frame))->framesize > ftop)
115 ftop = frame + funcproto(frame_func(frame))->framesize; 140 ftop = frame + funcproto(frame_func(frame))->framesize;
116 } 141 }
117 lua_assert(f == (MSize)(1 + J->framedepth)); 142 *topslot = (uint8_t)(ftop - lim);
118 return (BCReg)(ftop - lim); 143#if LJ_FR2
144 lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def");
145 return 2;
146#else
147 lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size");
148 return f;
149#endif
119} 150}
120 151
121/* Take a snapshot of the current stack. */ 152/* Take a snapshot of the current stack. */
@@ -125,16 +156,16 @@ static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
125 MSize nent; 156 MSize nent;
126 SnapEntry *p; 157 SnapEntry *p;
127 /* Conservative estimate. */ 158 /* Conservative estimate. */
128 lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1); 159 lj_snap_grow_map(J, nsnapmap + nslots + (MSize)(LJ_FR2?2:J->framedepth+1));
129 p = &J->cur.snapmap[nsnapmap]; 160 p = &J->cur.snapmap[nsnapmap];
130 nent = snapshot_slots(J, p, nslots); 161 nent = snapshot_slots(J, p, nslots);
131 snap->topslot = (uint8_t)snapshot_framelinks(J, p + nent); 162 snap->nent = (uint8_t)nent;
163 nent += snapshot_framelinks(J, p + nent, &snap->topslot);
132 snap->mapofs = (uint32_t)nsnapmap; 164 snap->mapofs = (uint32_t)nsnapmap;
133 snap->ref = (IRRef1)J->cur.nins; 165 snap->ref = (IRRef1)J->cur.nins;
134 snap->nent = (uint8_t)nent;
135 snap->nslots = (uint8_t)nslots; 166 snap->nslots = (uint8_t)nslots;
136 snap->count = 0; 167 snap->count = 0;
137 J->cur.nsnapmap = (uint32_t)(nsnapmap + nent + 1 + J->framedepth); 168 J->cur.nsnapmap = (uint32_t)(nsnapmap + nent);
138} 169}
139 170
140/* Add or merge a snapshot. */ 171/* Add or merge a snapshot. */
@@ -143,8 +174,8 @@ void lj_snap_add(jit_State *J)
143 MSize nsnap = J->cur.nsnap; 174 MSize nsnap = J->cur.nsnap;
144 MSize nsnapmap = J->cur.nsnapmap; 175 MSize nsnapmap = J->cur.nsnapmap;
145 /* Merge if no ins. inbetween or if requested and no guard inbetween. */ 176 /* Merge if no ins. inbetween or if requested and no guard inbetween. */
146 if (J->mergesnap ? !irt_isguard(J->guardemit) : 177 if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
147 (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) { 178 (J->mergesnap && !irt_isguard(J->guardemit))) {
148 if (nsnap == 1) { /* But preserve snap #0 PC. */ 179 if (nsnap == 1) { /* But preserve snap #0 PC. */
149 emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0); 180 emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
150 goto nomerge; 181 goto nomerge;
@@ -191,7 +222,8 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf,
191#define DEF_SLOT(s) udf[(s)] *= 3 222#define DEF_SLOT(s) udf[(s)] *= 3
192 223
193 /* Scan through following bytecode and check for uses/defs. */ 224 /* Scan through following bytecode and check for uses/defs. */
194 lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); 225 lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
226 "snapshot PC out of range");
195 for (;;) { 227 for (;;) {
196 BCIns ins = *pc++; 228 BCIns ins = *pc++;
197 BCOp op = bc_op(ins); 229 BCOp op = bc_op(ins);
@@ -202,7 +234,7 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf,
202 switch (bcmode_c(op)) { 234 switch (bcmode_c(op)) {
203 case BCMvar: USE_SLOT(bc_c(ins)); break; 235 case BCMvar: USE_SLOT(bc_c(ins)); break;
204 case BCMrbase: 236 case BCMrbase:
205 lua_assert(op == BC_CAT); 237 lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op);
206 for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); 238 for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
207 for (; s < maxslot; s++) DEF_SLOT(s); 239 for (; s < maxslot; s++) DEF_SLOT(s);
208 break; 240 break;
@@ -237,7 +269,8 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf,
237 case BCMbase: 269 case BCMbase:
238 if (op >= BC_CALLM && op <= BC_VARG) { 270 if (op >= BC_CALLM && op <= BC_VARG) {
239 BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ? 271 BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
240 maxslot : (bc_a(ins) + bc_c(ins)); 272 maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2);
273 if (LJ_FR2) DEF_SLOT(bc_a(ins)+1);
241 s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0); 274 s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
242 for (; s < top; s++) USE_SLOT(s); 275 for (; s < top; s++) USE_SLOT(s);
243 for (; s < maxslot; s++) DEF_SLOT(s); 276 for (; s < maxslot; s++) DEF_SLOT(s);
@@ -253,7 +286,8 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf,
253 break; 286 break;
254 default: break; 287 default: break;
255 } 288 }
256 lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); 289 lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
290 "use/def analysis PC out of range");
257 } 291 }
258 292
259#undef USE_SLOT 293#undef USE_SLOT
@@ -281,8 +315,8 @@ void lj_snap_shrink(jit_State *J)
281 MSize n, m, nlim, nent = snap->nent; 315 MSize n, m, nlim, nent = snap->nent;
282 uint8_t udf[SNAP_USEDEF_SLOTS]; 316 uint8_t udf[SNAP_USEDEF_SLOTS];
283 BCReg maxslot = J->maxslot; 317 BCReg maxslot = J->maxslot;
284 BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot);
285 BCReg baseslot = J->baseslot; 318 BCReg baseslot = J->baseslot;
319 BCReg minslot = snap_usedef(J, udf, snap_pc(&map[nent]), maxslot);
286 maxslot += baseslot; 320 maxslot += baseslot;
287 minslot += baseslot; 321 minslot += baseslot;
288 snap->nslots = (uint8_t)maxslot; 322 snap->nslots = (uint8_t)maxslot;
@@ -324,25 +358,26 @@ static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
324} 358}
325 359
326/* Copy RegSP from parent snapshot to the parent links of the IR. */ 360/* Copy RegSP from parent snapshot to the parent links of the IR. */
327IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir) 361IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir)
328{ 362{
329 SnapShot *snap = &T->snap[snapno]; 363 SnapShot *snap = &T->snap[snapno];
330 SnapEntry *map = &T->snapmap[snap->mapofs]; 364 SnapEntry *map = &T->snapmap[snap->mapofs];
331 BloomFilter rfilt = snap_renamefilter(T, snapno); 365 BloomFilter rfilt = snap_renamefilter(T, snapno);
332 MSize n = 0; 366 MSize n = 0;
333 IRRef ref = 0; 367 IRRef ref = 0;
368 UNUSED(J);
334 for ( ; ; ir++) { 369 for ( ; ; ir++) {
335 uint32_t rs; 370 uint32_t rs;
336 if (ir->o == IR_SLOAD) { 371 if (ir->o == IR_SLOAD) {
337 if (!(ir->op2 & IRSLOAD_PARENT)) break; 372 if (!(ir->op2 & IRSLOAD_PARENT)) break;
338 for ( ; ; n++) { 373 for ( ; ; n++) {
339 lua_assert(n < snap->nent); 374 lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1);
340 if (snap_slot(map[n]) == ir->op1) { 375 if (snap_slot(map[n]) == ir->op1) {
341 ref = snap_ref(map[n++]); 376 ref = snap_ref(map[n++]);
342 break; 377 break;
343 } 378 }
344 } 379 }
345 } else if (LJ_SOFTFP && ir->o == IR_HIOP) { 380 } else if (LJ_SOFTFP32 && ir->o == IR_HIOP) {
346 ref++; 381 ref++;
347 } else if (ir->o == IR_PVAL) { 382 } else if (ir->o == IR_PVAL) {
348 ref = ir->op1 + REF_BIAS; 383 ref = ir->op1 + REF_BIAS;
@@ -353,7 +388,7 @@ IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir)
353 if (bloomtest(rfilt, ref)) 388 if (bloomtest(rfilt, ref))
354 rs = snap_renameref(T, snapno, ref, rs); 389 rs = snap_renameref(T, snapno, ref, rs);
355 ir->prev = (uint16_t)rs; 390 ir->prev = (uint16_t)rs;
356 lua_assert(regsp_used(rs)); 391 lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS);
357 } 392 }
358 return ir; 393 return ir;
359} 394}
@@ -368,10 +403,10 @@ static TRef snap_replay_const(jit_State *J, IRIns *ir)
368 case IR_KPRI: return TREF_PRI(irt_type(ir->t)); 403 case IR_KPRI: return TREF_PRI(irt_type(ir->t));
369 case IR_KINT: return lj_ir_kint(J, ir->i); 404 case IR_KINT: return lj_ir_kint(J, ir->i);
370 case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t)); 405 case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
371 case IR_KNUM: return lj_ir_k64(J, IR_KNUM, ir_knum(ir)); 406 case IR_KNUM: case IR_KINT64:
372 case IR_KINT64: return lj_ir_k64(J, IR_KINT64, ir_kint64(ir)); 407 return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64);
373 case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */ 408 case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */
374 default: lua_assert(0); return TREF_NIL; break; 409 default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL;
375 } 410 }
376} 411}
377 412
@@ -442,21 +477,25 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
442 goto setslot; 477 goto setslot;
443 bloomset(seen, ref); 478 bloomset(seen, ref);
444 if (irref_isk(ref)) { 479 if (irref_isk(ref)) {
445 tr = snap_replay_const(J, ir); 480 /* See special treatment of LJ_FR2 slot 1 in snapshot_slots() above. */
481 if (LJ_FR2 && (sn == SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)))
482 tr = 0;
483 else
484 tr = snap_replay_const(J, ir);
446 } else if (!regsp_used(ir->prev)) { 485 } else if (!regsp_used(ir->prev)) {
447 pass23 = 1; 486 pass23 = 1;
448 lua_assert(s != 0); 487 lj_assertJ(s != 0, "unused slot 0 in snapshot");
449 tr = s; 488 tr = s;
450 } else { 489 } else {
451 IRType t = irt_type(ir->t); 490 IRType t = irt_type(ir->t);
452 uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT; 491 uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
453 if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM; 492 if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
454 if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY); 493 if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
455 tr = emitir_raw(IRT(IR_SLOAD, t), s, mode); 494 tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
456 } 495 }
457 setslot: 496 setslot:
458 J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */ 497 J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */
459 J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s); 498 J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && (s != LJ_FR2));
460 if ((sn & SNAP_FRAME)) 499 if ((sn & SNAP_FRAME))
461 J->baseslot = s+1; 500 J->baseslot = s+1;
462 } 501 }
@@ -471,8 +510,9 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
471 if (regsp_reg(ir->r) == RID_SUNK) { 510 if (regsp_reg(ir->r) == RID_SUNK) {
472 if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue; 511 if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
473 pass23 = 1; 512 pass23 = 1;
474 lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || 513 lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
475 ir->o == IR_CNEW || ir->o == IR_CNEWI); 514 ir->o == IR_CNEW || ir->o == IR_CNEWI,
515 "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
476 if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1); 516 if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
477 if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2); 517 if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
478 if (LJ_HASFFI && ir->o == IR_CNEWI) { 518 if (LJ_HASFFI && ir->o == IR_CNEWI) {
@@ -484,13 +524,14 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
484 if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) { 524 if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
485 if (snap_pref(J, T, map, nent, seen, irs->op2) == 0) 525 if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
486 snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1); 526 snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
487 else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && 527 else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
488 irs+1 < irlast && (irs+1)->o == IR_HIOP) 528 irs+1 < irlast && (irs+1)->o == IR_HIOP)
489 snap_pref(J, T, map, nent, seen, (irs+1)->op2); 529 snap_pref(J, T, map, nent, seen, (irs+1)->op2);
490 } 530 }
491 } 531 }
492 } else if (!irref_isk(refp) && !regsp_used(ir->prev)) { 532 } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
493 lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); 533 lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
534 "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
494 J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1); 535 J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
495 } 536 }
496 } 537 }
@@ -540,20 +581,21 @@ void lj_snap_replay(jit_State *J, GCtrace *T)
540 val = snap_pref(J, T, map, nent, seen, irs->op2); 581 val = snap_pref(J, T, map, nent, seen, irs->op2);
541 if (val == 0) { 582 if (val == 0) {
542 IRIns *irc = &T->ir[irs->op2]; 583 IRIns *irc = &T->ir[irs->op2];
543 lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT); 584 lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT,
585 "sunk store for parent IR %04d with bad op %d",
586 refp - REF_BIAS, irc->o);
544 val = snap_pref(J, T, map, nent, seen, irc->op1); 587 val = snap_pref(J, T, map, nent, seen, irc->op1);
545 val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); 588 val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
546 } else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) && 589 } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
547 irs+1 < irlast && (irs+1)->o == IR_HIOP) { 590 irs+1 < irlast && (irs+1)->o == IR_HIOP) {
548 IRType t = IRT_I64; 591 IRType t = IRT_I64;
549 if (LJ_SOFTFP && irt_type((irs+1)->t) == IRT_SOFTFP) 592 if (LJ_SOFTFP32 && irt_type((irs+1)->t) == IRT_SOFTFP)
550 t = IRT_NUM; 593 t = IRT_NUM;
551 lj_needsplit(J); 594 lj_needsplit(J);
552 if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) { 595 if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
553 uint64_t k = (uint32_t)T->ir[irs->op2].i + 596 uint64_t k = (uint32_t)T->ir[irs->op2].i +
554 ((uint64_t)T->ir[(irs+1)->op2].i << 32); 597 ((uint64_t)T->ir[(irs+1)->op2].i << 32);
555 val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, 598 val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, k);
556 lj_ir_k64_find(J, k));
557 } else { 599 } else {
558 val = emitir_raw(IRT(IR_HIOP, t), val, 600 val = emitir_raw(IRT(IR_HIOP, t), val,
559 snap_pref(J, T, map, nent, seen, (irs+1)->op2)); 601 snap_pref(J, T, map, nent, seen, (irs+1)->op2));
@@ -600,22 +642,24 @@ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
600 int32_t *sps = &ex->spill[regsp_spill(rs)]; 642 int32_t *sps = &ex->spill[regsp_spill(rs)];
601 if (irt_isinteger(t)) { 643 if (irt_isinteger(t)) {
602 setintV(o, *sps); 644 setintV(o, *sps);
603#if !LJ_SOFTFP 645#if !LJ_SOFTFP32
604 } else if (irt_isnum(t)) { 646 } else if (irt_isnum(t)) {
605 o->u64 = *(uint64_t *)sps; 647 o->u64 = *(uint64_t *)sps;
606#endif 648#endif
607 } else if (LJ_64 && irt_islightud(t)) { 649#if LJ_64 && !LJ_GC64
650 } else if (irt_islightud(t)) {
608 /* 64 bit lightuserdata which may escape already has the tag bits. */ 651 /* 64 bit lightuserdata which may escape already has the tag bits. */
609 o->u64 = *(uint64_t *)sps; 652 o->u64 = *(uint64_t *)sps;
653#endif
610 } else { 654 } else {
611 lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */ 655 lj_assertJ(!irt_ispri(t), "PRI ref with spill slot");
612 setgcrefi(o->gcr, *sps); 656 setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t));
613 setitype(o, irt_toitype(t));
614 } 657 }
615 } else { /* Restore from register. */ 658 } else { /* Restore from register. */
616 Reg r = regsp_reg(rs); 659 Reg r = regsp_reg(rs);
617 if (ra_noreg(r)) { 660 if (ra_noreg(r)) {
618 lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); 661 lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
662 "restore from IR %04d has no reg", ref - REF_BIAS);
619 snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o); 663 snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
620 if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o)); 664 if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
621 return; 665 return;
@@ -624,21 +668,26 @@ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
624#if !LJ_SOFTFP 668#if !LJ_SOFTFP
625 } else if (irt_isnum(t)) { 669 } else if (irt_isnum(t)) {
626 setnumV(o, ex->fpr[r-RID_MIN_FPR]); 670 setnumV(o, ex->fpr[r-RID_MIN_FPR]);
671#elif LJ_64 /* && LJ_SOFTFP */
672 } else if (irt_isnum(t)) {
673 o->u64 = ex->gpr[r-RID_MIN_GPR];
627#endif 674#endif
628 } else if (LJ_64 && irt_islightud(t)) { 675#if LJ_64 && !LJ_GC64
629 /* 64 bit lightuserdata which may escape already has the tag bits. */ 676 } else if (irt_is64(t)) {
677 /* 64 bit values that already have the tag bits. */
630 o->u64 = ex->gpr[r-RID_MIN_GPR]; 678 o->u64 = ex->gpr[r-RID_MIN_GPR];
679#endif
680 } else if (irt_ispri(t)) {
681 setpriV(o, irt_toitype(t));
631 } else { 682 } else {
632 if (!irt_ispri(t)) 683 setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t));
633 setgcrefi(o->gcr, ex->gpr[r-RID_MIN_GPR]);
634 setitype(o, irt_toitype(t));
635 } 684 }
636 } 685 }
637} 686}
638 687
639#if LJ_HASFFI 688#if LJ_HASFFI
640/* Restore raw data from the trace exit state. */ 689/* Restore raw data from the trace exit state. */
641static void snap_restoredata(GCtrace *T, ExitState *ex, 690static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex,
642 SnapNo snapno, BloomFilter rfilt, 691 SnapNo snapno, BloomFilter rfilt,
643 IRRef ref, void *dst, CTSize sz) 692 IRRef ref, void *dst, CTSize sz)
644{ 693{
@@ -646,9 +695,10 @@ static void snap_restoredata(GCtrace *T, ExitState *ex,
646 RegSP rs = ir->prev; 695 RegSP rs = ir->prev;
647 int32_t *src; 696 int32_t *src;
648 uint64_t tmp; 697 uint64_t tmp;
698 UNUSED(J);
649 if (irref_isk(ref)) { 699 if (irref_isk(ref)) {
650 if (ir->o == IR_KNUM || ir->o == IR_KINT64) { 700 if (ir_isk64(ir)) {
651 src = mref(ir->ptr, int32_t); 701 src = (int32_t *)&ir[1];
652 } else if (sz == 8) { 702 } else if (sz == 8) {
653 tmp = (uint64_t)(uint32_t)ir->i; 703 tmp = (uint64_t)(uint32_t)ir->i;
654 src = (int32_t *)&tmp; 704 src = (int32_t *)&tmp;
@@ -668,8 +718,9 @@ static void snap_restoredata(GCtrace *T, ExitState *ex,
668 Reg r = regsp_reg(rs); 718 Reg r = regsp_reg(rs);
669 if (ra_noreg(r)) { 719 if (ra_noreg(r)) {
670 /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */ 720 /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
671 lua_assert(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); 721 lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
672 snap_restoredata(T, ex, snapno, rfilt, ir->op1, dst, 4); 722 "restore from IR %04d has no reg", ref - REF_BIAS);
723 snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4);
673 *(lua_Number *)dst = (lua_Number)*(int32_t *)dst; 724 *(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
674 return; 725 return;
675 } 726 }
@@ -685,11 +736,13 @@ static void snap_restoredata(GCtrace *T, ExitState *ex,
685#else 736#else
686 if (LJ_BE && sz == 4) src++; 737 if (LJ_BE && sz == 4) src++;
687#endif 738#endif
688 } 739 } else
689#endif 740#endif
741 if (LJ_64 && LJ_BE && sz == 4) src++;
690 } 742 }
691 } 743 }
692 lua_assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); 744 lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8,
745 "restore from IR %04d with bad size %d", ref - REF_BIAS, sz);
693 if (sz == 4) *(int32_t *)dst = *src; 746 if (sz == 4) *(int32_t *)dst = *src;
694 else if (sz == 8) *(int64_t *)dst = *(int64_t *)src; 747 else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
695 else if (sz == 1) *(int8_t *)dst = (int8_t)*src; 748 else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
@@ -702,24 +755,27 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
702 SnapNo snapno, BloomFilter rfilt, 755 SnapNo snapno, BloomFilter rfilt,
703 IRIns *ir, TValue *o) 756 IRIns *ir, TValue *o)
704{ 757{
705 lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || 758 lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
706 ir->o == IR_CNEW || ir->o == IR_CNEWI); 759 ir->o == IR_CNEW || ir->o == IR_CNEWI,
760 "sunk allocation with bad op %d", ir->o);
707#if LJ_HASFFI 761#if LJ_HASFFI
708 if (ir->o == IR_CNEW || ir->o == IR_CNEWI) { 762 if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
709 CTState *cts = ctype_cts(J->L); 763 CTState *cts = ctype_cts(J->L);
710 CTypeID id = (CTypeID)T->ir[ir->op1].i; 764 CTypeID id = (CTypeID)T->ir[ir->op1].i;
711 CTSize sz = lj_ctype_size(cts, id); 765 CTSize sz;
712 GCcdata *cd = lj_cdata_new(cts, id, sz); 766 CTInfo info = lj_ctype_info(cts, id, &sz);
767 GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
713 setcdataV(J->L, o, cd); 768 setcdataV(J->L, o, cd);
714 if (ir->o == IR_CNEWI) { 769 if (ir->o == IR_CNEWI) {
715 uint8_t *p = (uint8_t *)cdataptr(cd); 770 uint8_t *p = (uint8_t *)cdataptr(cd);
716 lua_assert(sz == 4 || sz == 8); 771 lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz);
717 if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) { 772 if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
718 snap_restoredata(T, ex, snapno, rfilt, (ir+1)->op2, LJ_LE?p+4:p, 4); 773 snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2,
774 LJ_LE ? p+4 : p, 4);
719 if (LJ_BE) p += 4; 775 if (LJ_BE) p += 4;
720 sz = 4; 776 sz = 4;
721 } 777 }
722 snap_restoredata(T, ex, snapno, rfilt, ir->op2, p, sz); 778 snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz);
723 } else { 779 } else {
724 IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; 780 IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
725 for (irs = ir+1; irs < irlast; irs++) 781 for (irs = ir+1; irs < irlast; irs++)
@@ -727,8 +783,11 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
727 IRIns *iro = &T->ir[T->ir[irs->op1].op2]; 783 IRIns *iro = &T->ir[T->ir[irs->op1].op2];
728 uint8_t *p = (uint8_t *)cd; 784 uint8_t *p = (uint8_t *)cd;
729 CTSize szs; 785 CTSize szs;
730 lua_assert(irs->o == IR_XSTORE && T->ir[irs->op1].o == IR_ADD); 786 lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o);
731 lua_assert(iro->o == IR_KINT || iro->o == IR_KINT64); 787 lj_assertJ(T->ir[irs->op1].o == IR_ADD,
788 "sunk store with bad add op %d", T->ir[irs->op1].o);
789 lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64,
790 "sunk store with bad const offset op %d", iro->o);
732 if (irt_is64(irs->t)) szs = 8; 791 if (irt_is64(irs->t)) szs = 8;
733 else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1; 792 else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
734 else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2; 793 else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
@@ -737,14 +796,16 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
737 p += (int64_t)ir_k64(iro)->u64; 796 p += (int64_t)ir_k64(iro)->u64;
738 else 797 else
739 p += iro->i; 798 p += iro->i;
740 lua_assert(p >= (uint8_t *)cdataptr(cd) && 799 lj_assertJ(p >= (uint8_t *)cdataptr(cd) &&
741 p + szs <= (uint8_t *)cdataptr(cd) + sz); 800 p + szs <= (uint8_t *)cdataptr(cd) + sz,
801 "sunk store with offset out of range");
742 if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { 802 if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
743 lua_assert(szs == 4); 803 lj_assertJ(szs == 4, "sunk store with bad size %d", szs);
744 snap_restoredata(T, ex, snapno, rfilt, (irs+1)->op2, LJ_LE?p+4:p,4); 804 snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2,
805 LJ_LE ? p+4 : p, 4);
745 if (LJ_BE) p += 4; 806 if (LJ_BE) p += 4;
746 } 807 }
747 snap_restoredata(T, ex, snapno, rfilt, irs->op2, p, szs); 808 snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs);
748 } 809 }
749 } 810 }
750 } else 811 } else
@@ -759,10 +820,12 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
759 if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) { 820 if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
760 IRIns *irk = &T->ir[irs->op1]; 821 IRIns *irk = &T->ir[irs->op1];
761 TValue tmp, *val; 822 TValue tmp, *val;
762 lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || 823 lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
763 irs->o == IR_FSTORE); 824 irs->o == IR_FSTORE,
825 "sunk store with bad op %d", irs->o);
764 if (irk->o == IR_FREF) { 826 if (irk->o == IR_FREF) {
765 lua_assert(irk->op2 == IRFL_TAB_META); 827 lj_assertJ(irk->op2 == IRFL_TAB_META,
828 "sunk store with bad field %d", irk->op2);
766 snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp); 829 snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
767 /* NOBARRIER: The table is new (marked white). */ 830 /* NOBARRIER: The table is new (marked white). */
768 setgcref(t->metatable, obj2gco(tabV(&tmp))); 831 setgcref(t->metatable, obj2gco(tabV(&tmp)));
@@ -773,7 +836,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
773 val = lj_tab_set(J->L, t, &tmp); 836 val = lj_tab_set(J->L, t, &tmp);
774 /* NOBARRIER: The table is new (marked white). */ 837 /* NOBARRIER: The table is new (marked white). */
775 snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val); 838 snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
776 if (LJ_SOFTFP && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { 839 if (LJ_SOFTFP32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
777 snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp); 840 snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
778 val->u32.hi = tmp.u32.lo; 841 val->u32.hi = tmp.u32.lo;
779 } 842 }
@@ -791,11 +854,15 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
791 SnapShot *snap = &T->snap[snapno]; 854 SnapShot *snap = &T->snap[snapno];
792 MSize n, nent = snap->nent; 855 MSize n, nent = snap->nent;
793 SnapEntry *map = &T->snapmap[snap->mapofs]; 856 SnapEntry *map = &T->snapmap[snap->mapofs];
794 SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1]; 857#if !LJ_FR2 || defined(LUA_USE_ASSERT)
795 int32_t ftsz0; 858 SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1-LJ_FR2];
859#endif
860#if !LJ_FR2
861 ptrdiff_t ftsz0;
862#endif
796 TValue *frame; 863 TValue *frame;
797 BloomFilter rfilt = snap_renamefilter(T, snapno); 864 BloomFilter rfilt = snap_renamefilter(T, snapno);
798 const BCIns *pc = snap_pc(map[nent]); 865 const BCIns *pc = snap_pc(&map[nent]);
799 lua_State *L = J->L; 866 lua_State *L = J->L;
800 867
801 /* Set interpreter PC to the next PC to get correct error messages. */ 868 /* Set interpreter PC to the next PC to get correct error messages. */
@@ -808,8 +875,10 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
808 } 875 }
809 876
810 /* Fill stack slots with data from the registers and spill slots. */ 877 /* Fill stack slots with data from the registers and spill slots. */
811 frame = L->base-1; 878 frame = L->base-1-LJ_FR2;
879#if !LJ_FR2
812 ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */ 880 ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */
881#endif
813 for (n = 0; n < nent; n++) { 882 for (n = 0; n < nent; n++) {
814 SnapEntry sn = map[n]; 883 SnapEntry sn = map[n];
815 if (!(sn & SNAP_NORESTORE)) { 884 if (!(sn & SNAP_NORESTORE)) {
@@ -828,18 +897,23 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
828 continue; 897 continue;
829 } 898 }
830 snap_restoreval(J, T, ex, snapno, rfilt, ref, o); 899 snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
831 if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && tvisint(o)) { 900 if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
832 TValue tmp; 901 TValue tmp;
833 snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp); 902 snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
834 o->u32.hi = tmp.u32.lo; 903 o->u32.hi = tmp.u32.lo;
904#if !LJ_FR2
835 } else if ((sn & (SNAP_CONT|SNAP_FRAME))) { 905 } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
836 /* Overwrite tag with frame link. */ 906 /* Overwrite tag with frame link. */
837 o->fr.tp.ftsz = snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0; 907 setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0);
838 L->base = o+1; 908 L->base = o+1;
909#endif
839 } 910 }
840 } 911 }
841 } 912 }
842 lua_assert(map + nent == flinks); 913#if LJ_FR2
914 L->base += (map[nent+LJ_BE] & 0xff);
915#endif
916 lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot");
843 917
844 /* Compute current stack top. */ 918 /* Compute current stack top. */
845 switch (bc_op(*pc)) { 919 switch (bc_op(*pc)) {
diff --git a/src/lj_snap.h b/src/lj_snap.h
index 816a9b79..f1760b05 100644
--- a/src/lj_snap.h
+++ b/src/lj_snap.h
@@ -13,7 +13,8 @@
13LJ_FUNC void lj_snap_add(jit_State *J); 13LJ_FUNC void lj_snap_add(jit_State *J);
14LJ_FUNC void lj_snap_purge(jit_State *J); 14LJ_FUNC void lj_snap_purge(jit_State *J);
15LJ_FUNC void lj_snap_shrink(jit_State *J); 15LJ_FUNC void lj_snap_shrink(jit_State *J);
16LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir); 16LJ_FUNC IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno,
17 IRIns *ir);
17LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T); 18LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
18LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); 19LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
19LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); 20LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
diff --git a/src/lj_state.c b/src/lj_state.c
index ab064266..4f77e71f 100644
--- a/src/lj_state.c
+++ b/src/lj_state.c
@@ -12,6 +12,7 @@
12#include "lj_obj.h" 12#include "lj_obj.h"
13#include "lj_gc.h" 13#include "lj_gc.h"
14#include "lj_err.h" 14#include "lj_err.h"
15#include "lj_buf.h"
15#include "lj_str.h" 16#include "lj_str.h"
16#include "lj_tab.h" 17#include "lj_tab.h"
17#include "lj_func.h" 18#include "lj_func.h"
@@ -24,8 +25,10 @@
24#include "lj_trace.h" 25#include "lj_trace.h"
25#include "lj_dispatch.h" 26#include "lj_dispatch.h"
26#include "lj_vm.h" 27#include "lj_vm.h"
28#include "lj_prng.h"
27#include "lj_lex.h" 29#include "lj_lex.h"
28#include "lj_alloc.h" 30#include "lj_alloc.h"
31#include "luajit.h"
29 32
30/* -- Stack handling ------------------------------------------------------ */ 33/* -- Stack handling ------------------------------------------------------ */
31 34
@@ -47,6 +50,7 @@
47** one extra slot if mobj is not a function. Only lj_meta_tset needs 5 50** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
48** slots above top, but then mobj is always a function. So we can get by 51** slots above top, but then mobj is always a function. So we can get by
49** with 5 extra slots. 52** with 5 extra slots.
53** LJ_FR2: We need 2 more slots for the frame PC and the continuation PC.
50*/ 54*/
51 55
52/* Resize stack slots and adjust pointers in state. */ 56/* Resize stack slots and adjust pointers in state. */
@@ -57,9 +61,10 @@ static void resizestack(lua_State *L, MSize n)
57 MSize oldsize = L->stacksize; 61 MSize oldsize = L->stacksize;
58 MSize realsize = n + 1 + LJ_STACK_EXTRA; 62 MSize realsize = n + 1 + LJ_STACK_EXTRA;
59 GCobj *up; 63 GCobj *up;
60 lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1); 64 lj_assertL((MSize)(tvref(L->maxstack)-oldst) == L->stacksize-LJ_STACK_EXTRA-1,
65 "inconsistent stack size");
61 st = (TValue *)lj_mem_realloc(L, tvref(L->stack), 66 st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
62 (MSize)(L->stacksize*sizeof(TValue)), 67 (MSize)(oldsize*sizeof(TValue)),
63 (MSize)(realsize*sizeof(TValue))); 68 (MSize)(realsize*sizeof(TValue)));
64 setmref(L->stack, st); 69 setmref(L->stack, st);
65 delta = (char *)st - (char *)oldst; 70 delta = (char *)st - (char *)oldst;
@@ -67,12 +72,12 @@ static void resizestack(lua_State *L, MSize n)
67 while (oldsize < realsize) /* Clear new slots. */ 72 while (oldsize < realsize) /* Clear new slots. */
68 setnilV(st + oldsize++); 73 setnilV(st + oldsize++);
69 L->stacksize = realsize; 74 L->stacksize = realsize;
75 if ((size_t)(mref(G(L)->jit_base, char) - (char *)oldst) < oldsize)
76 setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
70 L->base = (TValue *)((char *)L->base + delta); 77 L->base = (TValue *)((char *)L->base + delta);
71 L->top = (TValue *)((char *)L->top + delta); 78 L->top = (TValue *)((char *)L->top + delta);
72 for (up = gcref(L->openupval); up != NULL; up = gcnext(up)) 79 for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
73 setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta)); 80 setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
74 if (obj2gco(L) == gcref(G(L)->jit_L))
75 setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
76} 81}
77 82
78/* Relimit stack after error, in case the limit was overdrawn. */ 83/* Relimit stack after error, in case the limit was overdrawn. */
@@ -89,7 +94,8 @@ void lj_state_shrinkstack(lua_State *L, MSize used)
89 return; /* Avoid stack shrinking while handling stack overflow. */ 94 return; /* Avoid stack shrinking while handling stack overflow. */
90 if (4*used < L->stacksize && 95 if (4*used < L->stacksize &&
91 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize && 96 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
92 obj2gco(L) != gcref(G(L)->jit_L)) /* Don't shrink stack of live trace. */ 97 /* Don't shrink stack of live trace. */
98 (tvref(G(L)->jit_base) == NULL || obj2gco(L) != gcref(G(L)->cur_L)))
93 resizestack(L, L->stacksize >> 1); 99 resizestack(L, L->stacksize >> 1);
94} 100}
95 101
@@ -125,8 +131,9 @@ static void stack_init(lua_State *L1, lua_State *L)
125 L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA; 131 L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
126 stend = st + L1->stacksize; 132 stend = st + L1->stacksize;
127 setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1); 133 setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
128 L1->base = L1->top = st+1; 134 setthreadV(L1, st++, L1); /* Needed for curr_funcisL() on empty stack. */
129 setthreadV(L1, st, L1); /* Needed for curr_funcisL() on empty stack. */ 135 if (LJ_FR2) setnilV(st++);
136 L1->base = L1->top = st;
130 while (st < stend) /* Clear new slots. */ 137 while (st < stend) /* Clear new slots. */
131 setnilV(st++); 138 setnilV(st++);
132} 139}
@@ -143,7 +150,7 @@ static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
143 /* NOBARRIER: State initialization, all objects are white. */ 150 /* NOBARRIER: State initialization, all objects are white. */
144 setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL))); 151 setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
145 settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY)); 152 settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
146 lj_str_resize(L, LJ_MIN_STRTAB-1); 153 lj_str_init(L);
147 lj_meta_init(L); 154 lj_meta_init(L);
148 lj_lex_init(L); 155 lj_lex_init(L);
149 fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */ 156 fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */
@@ -157,16 +164,19 @@ static void close_state(lua_State *L)
157 global_State *g = G(L); 164 global_State *g = G(L);
158 lj_func_closeuv(L, tvref(L->stack)); 165 lj_func_closeuv(L, tvref(L->stack));
159 lj_gc_freeall(g); 166 lj_gc_freeall(g);
160 lua_assert(gcref(g->gc.root) == obj2gco(L)); 167 lj_assertG(gcref(g->gc.root) == obj2gco(L),
161 lua_assert(g->strnum == 0); 168 "main thread is not first GC object");
169 lj_assertG(g->str.num == 0, "leaked %d strings", g->str.num);
162 lj_trace_freestate(g); 170 lj_trace_freestate(g);
163#if LJ_HASFFI 171#if LJ_HASFFI
164 lj_ctype_freestate(g); 172 lj_ctype_freestate(g);
165#endif 173#endif
166 lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); 174 lj_str_freetab(g);
167 lj_str_freebuf(g, &g->tmpbuf); 175 lj_buf_free(g, &g->tmpbuf);
168 lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); 176 lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
169 lua_assert(g->gc.total == sizeof(GG_State)); 177 lj_assertG(g->gc.total == sizeof(GG_State),
178 "memory leak of %lld bytes",
179 (long long)(g->gc.total - sizeof(GG_State)));
170#ifndef LUAJIT_USE_SYSMALLOC 180#ifndef LUAJIT_USE_SYSMALLOC
171 if (g->allocf == lj_alloc_f) 181 if (g->allocf == lj_alloc_f)
172 lj_alloc_destroy(g->allocd); 182 lj_alloc_destroy(g->allocd);
@@ -175,17 +185,34 @@ static void close_state(lua_State *L)
175 g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0); 185 g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
176} 186}
177 187
178#if LJ_64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC)) 188#if LJ_64 && !LJ_GC64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
179lua_State *lj_state_newstate(lua_Alloc f, void *ud) 189lua_State *lj_state_newstate(lua_Alloc allocf, void *allocd)
180#else 190#else
181LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud) 191LUA_API lua_State *lua_newstate(lua_Alloc allocf, void *allocd)
182#endif 192#endif
183{ 193{
184 GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State)); 194 PRNGState prng;
185 lua_State *L = &GG->L; 195 GG_State *GG;
186 global_State *g = &GG->g; 196 lua_State *L;
187 if (GG == NULL || !checkptr32(GG)) return NULL; 197 global_State *g;
198 /* We need the PRNG for the memory allocator, so initialize this first. */
199 if (!lj_prng_seed_secure(&prng)) {
200 lj_assertX(0, "secure PRNG seeding failed");
201 /* Can only return NULL here, so this errors with "not enough memory". */
202 return NULL;
203 }
204#ifndef LUAJIT_USE_SYSMALLOC
205 if (allocf == LJ_ALLOCF_INTERNAL) {
206 allocd = lj_alloc_create(&prng);
207 if (!allocd) return NULL;
208 allocf = lj_alloc_f;
209 }
210#endif
211 GG = (GG_State *)allocf(allocd, NULL, 0, sizeof(GG_State));
212 if (GG == NULL || !checkptrGC(GG)) return NULL;
188 memset(GG, 0, sizeof(GG_State)); 213 memset(GG, 0, sizeof(GG_State));
214 L = &GG->L;
215 g = &GG->g;
189 L->gct = ~LJ_TTHREAD; 216 L->gct = ~LJ_TTHREAD;
190 L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */ 217 L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */
191 L->dummy_ffid = FF_C; 218 L->dummy_ffid = FF_C;
@@ -193,17 +220,25 @@ LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
193 g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED; 220 g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
194 g->strempty.marked = LJ_GC_WHITE0; 221 g->strempty.marked = LJ_GC_WHITE0;
195 g->strempty.gct = ~LJ_TSTR; 222 g->strempty.gct = ~LJ_TSTR;
196 g->allocf = f; 223 g->allocf = allocf;
197 g->allocd = ud; 224 g->allocd = allocd;
225 g->prng = prng;
226#ifndef LUAJIT_USE_SYSMALLOC
227 if (allocf == lj_alloc_f) {
228 lj_alloc_setprng(allocd, &g->prng);
229 }
230#endif
198 setgcref(g->mainthref, obj2gco(L)); 231 setgcref(g->mainthref, obj2gco(L));
199 setgcref(g->uvhead.prev, obj2gco(&g->uvhead)); 232 setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
200 setgcref(g->uvhead.next, obj2gco(&g->uvhead)); 233 setgcref(g->uvhead.next, obj2gco(&g->uvhead));
201 g->strmask = ~(MSize)0; 234 g->str.mask = ~(MSize)0;
202 setnilV(registry(L)); 235 setnilV(registry(L));
203 setnilV(&g->nilnode.val); 236 setnilV(&g->nilnode.val);
204 setnilV(&g->nilnode.key); 237 setnilV(&g->nilnode.key);
238#if !LJ_GC64
205 setmref(g->nilnode.freetop, &g->nilnode); 239 setmref(g->nilnode.freetop, &g->nilnode);
206 lj_str_initbuf(&g->tmpbuf); 240#endif
241 lj_buf_init(NULL, &g->tmpbuf);
207 g->gc.state = GCSpause; 242 g->gc.state = GCSpause;
208 setgcref(g->gc.root, obj2gco(L)); 243 setgcref(g->gc.root, obj2gco(L));
209 setmref(g->gc.sweep, &g->gc.root); 244 setmref(g->gc.sweep, &g->gc.root);
@@ -217,7 +252,7 @@ LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
217 close_state(L); 252 close_state(L);
218 return NULL; 253 return NULL;
219 } 254 }
220 L->status = 0; 255 L->status = LUA_OK;
221 return L; 256 return L;
222} 257}
223 258
@@ -236,6 +271,10 @@ LUA_API void lua_close(lua_State *L)
236 global_State *g = G(L); 271 global_State *g = G(L);
237 int i; 272 int i;
238 L = mainthread(g); /* Only the main thread can be closed. */ 273 L = mainthread(g); /* Only the main thread can be closed. */
274#if LJ_HASPROFILE
275 luaJIT_profile_stop(L);
276#endif
277 setgcrefnull(g->cur_L);
239 lj_func_closeuv(L, tvref(L->stack)); 278 lj_func_closeuv(L, tvref(L->stack));
240 lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */ 279 lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */
241#if LJ_HASJIT 280#if LJ_HASJIT
@@ -245,10 +284,10 @@ LUA_API void lua_close(lua_State *L)
245#endif 284#endif
246 for (i = 0;;) { 285 for (i = 0;;) {
247 hook_enter(g); 286 hook_enter(g);
248 L->status = 0; 287 L->status = LUA_OK;
288 L->base = L->top = tvref(L->stack) + 1 + LJ_FR2;
249 L->cframe = NULL; 289 L->cframe = NULL;
250 L->base = L->top = tvref(L->stack) + 1; 290 if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == LUA_OK) {
251 if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) {
252 if (++i >= 10) break; 291 if (++i >= 10) break;
253 lj_gc_separateudata(g, 1); /* Separate udata again. */ 292 lj_gc_separateudata(g, 1); /* Separate udata again. */
254 if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */ 293 if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */
@@ -263,7 +302,7 @@ lua_State *lj_state_new(lua_State *L)
263 lua_State *L1 = lj_mem_newobj(L, lua_State); 302 lua_State *L1 = lj_mem_newobj(L, lua_State);
264 L1->gct = ~LJ_TTHREAD; 303 L1->gct = ~LJ_TTHREAD;
265 L1->dummy_ffid = FF_C; 304 L1->dummy_ffid = FF_C;
266 L1->status = 0; 305 L1->status = LUA_OK;
267 L1->stacksize = 0; 306 L1->stacksize = 0;
268 setmref(L1->stack, NULL); 307 setmref(L1->stack, NULL);
269 L1->cframe = NULL; 308 L1->cframe = NULL;
@@ -272,15 +311,17 @@ lua_State *lj_state_new(lua_State *L)
272 setmrefr(L1->glref, L->glref); 311 setmrefr(L1->glref, L->glref);
273 setgcrefr(L1->env, L->env); 312 setgcrefr(L1->env, L->env);
274 stack_init(L1, L); /* init stack */ 313 stack_init(L1, L); /* init stack */
275 lua_assert(iswhite(obj2gco(L1))); 314 lj_assertL(iswhite(obj2gco(L1)), "new thread object is not white");
276 return L1; 315 return L1;
277} 316}
278 317
279void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) 318void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
280{ 319{
281 lua_assert(L != mainthread(g)); 320 lj_assertG(L != mainthread(g), "free of main thread");
321 if (obj2gco(L) == gcref(g->cur_L))
322 setgcrefnull(g->cur_L);
282 lj_func_closeuv(L, tvref(L->stack)); 323 lj_func_closeuv(L, tvref(L->stack));
283 lua_assert(gcref(L->openupval) == NULL); 324 lj_assertG(gcref(L->openupval) == NULL, "stale open upvalues");
284 lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); 325 lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
285 lj_mem_freet(g, L); 326 lj_mem_freet(g, L);
286} 327}
diff --git a/src/lj_state.h b/src/lj_state.h
index 18afe55c..50fe9000 100644
--- a/src/lj_state.h
+++ b/src/lj_state.h
@@ -28,8 +28,10 @@ static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
28 28
29LJ_FUNC lua_State *lj_state_new(lua_State *L); 29LJ_FUNC lua_State *lj_state_new(lua_State *L);
30LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L); 30LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
31#if LJ_64 31#if LJ_64 && !LJ_GC64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
32LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud); 32LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
33#endif 33#endif
34 34
35#define LJ_ALLOCF_INTERNAL ((lua_Alloc)(void *)(uintptr_t)(1237<<4))
36
35#endif 37#endif
diff --git a/src/lj_str.c b/src/lj_str.c
index 279c5cc3..5bf8426c 100644
--- a/src/lj_str.c
+++ b/src/lj_str.c
@@ -1,13 +1,8 @@
1/* 1/*
2** String handling. 2** String handling.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h 3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4**
5** Portions taken verbatim or adapted from the Lua interpreter.
6** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7*/ 4*/
8 5
9#include <stdio.h>
10
11#define lj_str_c 6#define lj_str_c
12#define LUA_CORE 7#define LUA_CORE
13 8
@@ -15,10 +10,10 @@
15#include "lj_gc.h" 10#include "lj_gc.h"
16#include "lj_err.h" 11#include "lj_err.h"
17#include "lj_str.h" 12#include "lj_str.h"
18#include "lj_state.h"
19#include "lj_char.h" 13#include "lj_char.h"
14#include "lj_prng.h"
20 15
21/* -- String interning ---------------------------------------------------- */ 16/* -- String helpers ------------------------------------------------------ */
22 17
23/* Ordered compare of strings. Assumes string data is 4-byte aligned. */ 18/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
24int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b) 19int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
@@ -43,297 +38,333 @@ int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
43 return (int32_t)(a->len - b->len); 38 return (int32_t)(a->len - b->len);
44} 39}
45 40
46/* Fast string data comparison. Caveat: unaligned access to 1st string! */ 41/* Find fixed string p inside string s. */
47static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) 42const char *lj_str_find(const char *s, const char *p, MSize slen, MSize plen)
48{ 43{
49 MSize i = 0; 44 if (plen <= slen) {
50 lua_assert(len > 0); 45 if (plen == 0) {
51 lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4); 46 return s;
52 do { /* Note: innocuous access up to end of string + 3. */ 47 } else {
53 uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); 48 int c = *(const uint8_t *)p++;
54 if (v) { 49 plen--; slen -= plen;
55 i -= len; 50 while (slen) {
56#if LJ_LE 51 const char *q = (const char *)memchr(s, c, slen);
57 return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1; 52 if (!q) break;
58#else 53 if (memcmp(q+1, p, plen) == 0) return q;
59 return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1; 54 q++; slen -= (MSize)(q-s); s = q;
60#endif 55 }
61 } 56 }
62 i += 4; 57 }
63 } while (i < len); 58 return NULL;
64 return 0;
65} 59}
66 60
67/* Resize the string hash table (grow and shrink). */ 61/* Check whether a string has a pattern matching character. */
68void lj_str_resize(lua_State *L, MSize newmask) 62int lj_str_haspattern(GCstr *s)
69{ 63{
70 global_State *g = G(L); 64 const char *p = strdata(s), *q = p + s->len;
71 GCRef *newhash; 65 while (p < q) {
72 MSize i; 66 int c = *(const uint8_t *)p++;
73 if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1) 67 if (lj_char_ispunct(c) && strchr("^$*+?.([%-", c))
74 return; /* No resizing during GC traversal or if already too big. */ 68 return 1; /* Found a pattern matching char. */
75 newhash = lj_mem_newvec(L, newmask+1, GCRef);
76 memset(newhash, 0, (newmask+1)*sizeof(GCRef));
77 for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */
78 GCobj *p = gcref(g->strhash[i]);
79 while (p) { /* Follow each hash chain and reinsert all strings. */
80 MSize h = gco2str(p)->hash & newmask;
81 GCobj *next = gcnext(p);
82 /* NOBARRIER: The string table is a GC root. */
83 setgcrefr(p->gch.nextgc, newhash[h]);
84 setgcref(newhash[h], p);
85 p = next;
86 }
87 } 69 }
88 lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); 70 return 0; /* No pattern matching chars found. */
89 g->strmask = newmask;
90 g->strhash = newhash;
91} 71}
92 72
93/* Intern a string and return string object. */ 73/* -- String hashing ------------------------------------------------------ */
94GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx) 74
75/* Keyed sparse ARX string hash. Constant time. */
76static StrHash hash_sparse(uint64_t seed, const char *str, MSize len)
95{ 77{
96 global_State *g; 78 /* Constants taken from lookup3 hash by Bob Jenkins. */
97 GCstr *s; 79 StrHash a, b, h = len ^ (StrHash)seed;
98 GCobj *o;
99 MSize len = (MSize)lenx;
100 MSize a, b, h = len;
101 if (lenx >= LJ_MAX_STR)
102 lj_err_msg(L, LJ_ERR_STROV);
103 g = G(L);
104 /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */
105 if (len >= 4) { /* Caveat: unaligned access! */ 80 if (len >= 4) { /* Caveat: unaligned access! */
106 a = lj_getu32(str); 81 a = lj_getu32(str);
107 h ^= lj_getu32(str+len-4); 82 h ^= lj_getu32(str+len-4);
108 b = lj_getu32(str+(len>>1)-2); 83 b = lj_getu32(str+(len>>1)-2);
109 h ^= b; h -= lj_rol(b, 14); 84 h ^= b; h -= lj_rol(b, 14);
110 b += lj_getu32(str+(len>>2)-1); 85 b += lj_getu32(str+(len>>2)-1);
111 } else if (len > 0) { 86 } else {
112 a = *(const uint8_t *)str; 87 a = *(const uint8_t *)str;
113 h ^= *(const uint8_t *)(str+len-1); 88 h ^= *(const uint8_t *)(str+len-1);
114 b = *(const uint8_t *)(str+(len>>1)); 89 b = *(const uint8_t *)(str+(len>>1));
115 h ^= b; h -= lj_rol(b, 14); 90 h ^= b; h -= lj_rol(b, 14);
116 } else {
117 return &g->strempty;
118 } 91 }
119 a ^= h; a -= lj_rol(h, 11); 92 a ^= h; a -= lj_rol(h, 11);
120 b ^= a; b -= lj_rol(a, 25); 93 b ^= a; b -= lj_rol(a, 25);
121 h ^= b; h -= lj_rol(b, 16); 94 h ^= b; h -= lj_rol(b, 16);
122 /* Check if the string has already been interned. */ 95 return h;
123 o = gcref(g->strhash[h & g->strmask]);
124 if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) {
125 while (o != NULL) {
126 GCstr *sx = gco2str(o);
127 if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) {
128 /* Resurrect if dead. Can only happen with fixstring() (keywords). */
129 if (isdead(g, o)) flipwhite(o);
130 return sx; /* Return existing string. */
131 }
132 o = gcnext(o);
133 }
134 } else { /* Slow path: end of string is too close to a page boundary. */
135 while (o != NULL) {
136 GCstr *sx = gco2str(o);
137 if (sx->len == len && memcmp(str, strdata(sx), len) == 0) {
138 /* Resurrect if dead. Can only happen with fixstring() (keywords). */
139 if (isdead(g, o)) flipwhite(o);
140 return sx; /* Return existing string. */
141 }
142 o = gcnext(o);
143 }
144 }
145 /* Nope, create a new string. */
146 s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr);
147 newwhite(g, s);
148 s->gct = ~LJ_TSTR;
149 s->len = len;
150 s->hash = h;
151 s->reserved = 0;
152 memcpy(strdatawr(s), str, len);
153 strdatawr(s)[len] = '\0'; /* Zero-terminate string. */
154 /* Add it to string hash table. */
155 h &= g->strmask;
156 s->nextgc = g->strhash[h];
157 /* NOBARRIER: The string table is a GC root. */
158 setgcref(g->strhash[h], obj2gco(s));
159 if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */
160 lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */
161 return s; /* Return newly interned string. */
162} 96}
163 97
164void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s) 98#if LUAJIT_SECURITY_STRHASH
99/* Keyed dense ARX string hash. Linear time. */
100static LJ_NOINLINE StrHash hash_dense(uint64_t seed, StrHash h,
101 const char *str, MSize len)
165{ 102{
166 g->strnum--; 103 StrHash b = lj_bswap(lj_rol(h ^ (StrHash)(seed >> 32), 4));
167 lj_mem_free(g, s, sizestring(s)); 104 if (len > 12) {
105 StrHash a = (StrHash)seed;
106 const char *pe = str+len-12, *p = pe, *q = str;
107 do {
108 a += lj_getu32(p);
109 b += lj_getu32(p+4);
110 h += lj_getu32(p+8);
111 p = q; q += 12;
112 h ^= b; h -= lj_rol(b, 14);
113 a ^= h; a -= lj_rol(h, 11);
114 b ^= a; b -= lj_rol(a, 25);
115 } while (p < pe);
116 h ^= b; h -= lj_rol(b, 16);
117 a ^= h; a -= lj_rol(h, 4);
118 b ^= a; b -= lj_rol(a, 14);
119 }
120 return b;
168} 121}
122#endif
169 123
170/* -- Type conversions ---------------------------------------------------- */ 124/* -- String interning ---------------------------------------------------- */
171 125
172/* Print number to buffer. Canonicalizes non-finite values. */ 126#define LJ_STR_MAXCOLL 32
173size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o)
174{
175 if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */
176 lua_Number n = o->n;
177#if __BIONIC__
178 if (tvismzero(o)) { s[0] = '-'; s[1] = '0'; return 2; }
179#endif
180 return (size_t)lua_number2str(s, n);
181 } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) {
182 s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3;
183 } else if ((o->u32.hi & 0x80000000) == 0) {
184 s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3;
185 } else {
186 s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4;
187 }
188}
189 127
190/* Print integer to buffer. Returns pointer to start. */ 128/* Resize the string interning hash table (grow and shrink). */
191char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k) 129void lj_str_resize(lua_State *L, MSize newmask)
192{ 130{
193 uint32_t u = (uint32_t)(k < 0 ? -k : k); 131 global_State *g = G(L);
194 p += 1+10; 132 GCRef *newtab, *oldtab = g->str.tab;
195 do { *--p = (char)('0' + u % 10); } while (u /= 10); 133 MSize i;
196 if (k < 0) *--p = '-';
197 return p;
198}
199 134
200/* Convert number to string. */ 135 /* No resizing during GC traversal or if already too big. */
201GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np) 136 if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
202{ 137 return;
203 char buf[LJ_STR_NUMBUF];
204 size_t len = lj_str_bufnum(buf, (TValue *)np);
205 return lj_str_new(L, buf, len);
206}
207 138
208/* Convert integer to string. */ 139 newtab = lj_mem_newvec(L, newmask+1, GCRef);
209GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k) 140 memset(newtab, 0, (newmask+1)*sizeof(GCRef));
210{
211 char s[1+10];
212 char *p = lj_str_bufint(s, k);
213 return lj_str_new(L, p, (size_t)(s+sizeof(s)-p));
214}
215 141
216GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o) 142#if LUAJIT_SECURITY_STRHASH
217{ 143 /* Check which chains need secondary hashes. */
218 return tvisint(o) ? lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n); 144 if (g->str.second) {
219} 145 int newsecond = 0;
146 /* Compute primary chain lengths. */
147 for (i = g->str.mask; i != ~(MSize)0; i--) {
148 GCobj *o = (GCobj *)(gcrefu(oldtab[i]) & ~(uintptr_t)1);
149 while (o) {
150 GCstr *s = gco2str(o);
151 MSize hash = s->hashalg ? hash_sparse(g->str.seed, strdata(s), s->len) :
152 s->hash;
153 hash &= newmask;
154 setgcrefp(newtab[hash], gcrefu(newtab[hash]) + 1);
155 o = gcnext(o);
156 }
157 }
158 /* Mark secondary chains. */
159 for (i = newmask; i != ~(MSize)0; i--) {
160 int secondary = gcrefu(newtab[i]) > LJ_STR_MAXCOLL;
161 newsecond |= secondary;
162 setgcrefp(newtab[i], secondary);
163 }
164 g->str.second = newsecond;
165 }
166#endif
220 167
221/* -- String formatting --------------------------------------------------- */ 168 /* Reinsert all strings from the old table into the new table. */
169 for (i = g->str.mask; i != ~(MSize)0; i--) {
170 GCobj *o = (GCobj *)(gcrefu(oldtab[i]) & ~(uintptr_t)1);
171 while (o) {
172 GCobj *next = gcnext(o);
173 GCstr *s = gco2str(o);
174 MSize hash = s->hash;
175#if LUAJIT_SECURITY_STRHASH
176 uintptr_t u;
177 if (LJ_LIKELY(!s->hashalg)) { /* String hashed with primary hash. */
178 hash &= newmask;
179 u = gcrefu(newtab[hash]);
180 if (LJ_UNLIKELY(u & 1)) { /* Switch string to secondary hash. */
181 s->hash = hash = hash_dense(g->str.seed, s->hash, strdata(s), s->len);
182 s->hashalg = 1;
183 hash &= newmask;
184 u = gcrefu(newtab[hash]);
185 }
186 } else { /* String hashed with secondary hash. */
187 MSize shash = hash_sparse(g->str.seed, strdata(s), s->len);
188 u = gcrefu(newtab[shash & newmask]);
189 if (u & 1) {
190 hash &= newmask;
191 u = gcrefu(newtab[hash]);
192 } else { /* Revert string back to primary hash. */
193 s->hash = shash;
194 s->hashalg = 0;
195 hash = (shash & newmask);
196 }
197 }
198 /* NOBARRIER: The string table is a GC root. */
199 setgcrefp(o->gch.nextgc, (u & ~(uintptr_t)1));
200 setgcrefp(newtab[hash], ((uintptr_t)o | (u & 1)));
201#else
202 hash &= newmask;
203 /* NOBARRIER: The string table is a GC root. */
204 setgcrefr(o->gch.nextgc, newtab[hash]);
205 setgcref(newtab[hash], o);
206#endif
207 o = next;
208 }
209 }
210
211 /* Free old table and replace with new table. */
212 lj_str_freetab(g);
213 g->str.tab = newtab;
214 g->str.mask = newmask;
215}
222 216
223static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len) 217#if LUAJIT_SECURITY_STRHASH
218/* Rehash and rechain all strings in a chain. */
219static LJ_NOINLINE GCstr *lj_str_rehash_chain(lua_State *L, StrHash hashc,
220 const char *str, MSize len)
224{ 221{
225 char *p; 222 global_State *g = G(L);
226 MSize i; 223 int ow = g->gc.state == GCSsweepstring ? otherwhite(g) : 0; /* Sweeping? */
227 if (sb->n + len > sb->sz) { 224 GCRef *strtab = g->str.tab;
228 MSize sz = sb->sz * 2; 225 MSize strmask = g->str.mask;
229 while (sb->n + len > sz) sz = sz * 2; 226 GCobj *o = gcref(strtab[hashc & strmask]);
230 lj_str_resizebuf(L, sb, sz); 227 setgcrefp(strtab[hashc & strmask], (void *)((uintptr_t)1));
228 g->str.second = 1;
229 while (o) {
230 uintptr_t u;
231 GCobj *next = gcnext(o);
232 GCstr *s = gco2str(o);
233 StrHash hash;
234 if (ow) { /* Must sweep while rechaining. */
235 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* String alive? */
236 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
237 "sweep of undead string");
238 makewhite(g, o);
239 } else { /* Free dead string. */
240 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
241 "sweep of unlive string");
242 lj_str_free(g, s);
243 o = next;
244 continue;
245 }
246 }
247 hash = s->hash;
248 if (!s->hashalg) { /* Rehash with secondary hash. */
249 hash = hash_dense(g->str.seed, hash, strdata(s), s->len);
250 s->hash = hash;
251 s->hashalg = 1;
252 }
253 /* Rechain. */
254 hash &= strmask;
255 u = gcrefu(strtab[hash]);
256 setgcrefp(o->gch.nextgc, (u & ~(uintptr_t)1));
257 setgcrefp(strtab[hash], ((uintptr_t)o | (u & 1)));
258 o = next;
231 } 259 }
232 p = sb->buf + sb->n; 260 /* Try to insert the pending string again. */
233 sb->n += len; 261 return lj_str_new(L, str, len);
234 for (i = 0; i < len; i++) p[i] = str[i];
235} 262}
263#endif
264
265/* Reseed String ID from PRNG after random interval < 2^bits. */
266#if LUAJIT_SECURITY_STRID == 1
267#define STRID_RESEED_INTERVAL 8
268#elif LUAJIT_SECURITY_STRID == 2
269#define STRID_RESEED_INTERVAL 4
270#elif LUAJIT_SECURITY_STRID >= 3
271#define STRID_RESEED_INTERVAL 0
272#endif
236 273
237static void addchar(lua_State *L, SBuf *sb, int c) 274/* Allocate a new string and add to string interning table. */
275static GCstr *lj_str_alloc(lua_State *L, const char *str, MSize len,
276 StrHash hash, int hashalg)
238{ 277{
239 if (sb->n + 1 > sb->sz) { 278 GCstr *s = lj_mem_newt(L, lj_str_size(len), GCstr);
240 MSize sz = sb->sz * 2; 279 global_State *g = G(L);
241 lj_str_resizebuf(L, sb, sz); 280 uintptr_t u;
281 newwhite(g, s);
282 s->gct = ~LJ_TSTR;
283 s->len = len;
284 s->hash = hash;
285#ifndef STRID_RESEED_INTERVAL
286 s->sid = g->str.id++;
287#elif STRID_RESEED_INTERVAL
288 if (!g->str.idreseed--) {
289 uint64_t r = lj_prng_u64(&g->prng);
290 g->str.id = (StrID)r;
291 g->str.idreseed = (uint8_t)(r >> (64 - STRID_RESEED_INTERVAL));
242 } 292 }
243 sb->buf[sb->n++] = (char)c; 293 s->sid = g->str.id++;
294#else
295 s->sid = (StrID)lj_prng_u64(&g->prng);
296#endif
297 s->reserved = 0;
298 s->hashalg = (uint8_t)hashalg;
299 /* Clear last 4 bytes of allocated memory. Implies zero-termination, too. */
300 *(uint32_t *)(strdatawr(s)+(len & ~(MSize)3)) = 0;
301 memcpy(strdatawr(s), str, len);
302 /* Add to string hash table. */
303 hash &= g->str.mask;
304 u = gcrefu(g->str.tab[hash]);
305 setgcrefp(s->nextgc, (u & ~(uintptr_t)1));
306 /* NOBARRIER: The string table is a GC root. */
307 setgcrefp(g->str.tab[hash], ((uintptr_t)s | (u & 1)));
308 if (g->str.num++ > g->str.mask) /* Allow a 100% load factor. */
309 lj_str_resize(L, (g->str.mask<<1)+1); /* Grow string table. */
310 return s; /* Return newly interned string. */
244} 311}
245 312
246/* Push formatted message as a string object to Lua stack. va_list variant. */ 313/* Intern a string and return string object. */
247const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp) 314GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
248{ 315{
249 SBuf *sb = &G(L)->tmpbuf; 316 global_State *g = G(L);
250 lj_str_needbuf(L, sb, (MSize)strlen(fmt)); 317 if (lenx-1 < LJ_MAX_STR-1) {
251 lj_str_resetbuf(sb); 318 MSize len = (MSize)lenx;
252 for (;;) { 319 StrHash hash = hash_sparse(g->str.seed, str, len);
253 const char *e = strchr(fmt, '%'); 320 MSize coll = 0;
254 if (e == NULL) break; 321 int hashalg = 0;
255 addstr(L, sb, fmt, (MSize)(e-fmt)); 322 /* Check if the string has already been interned. */
256 /* This function only handles %s, %c, %d, %f and %p formats. */ 323 GCobj *o = gcref(g->str.tab[hash & g->str.mask]);
257 switch (e[1]) { 324#if LUAJIT_SECURITY_STRHASH
258 case 's': { 325 if (LJ_UNLIKELY((uintptr_t)o & 1)) { /* Secondary hash for this chain? */
259 const char *s = va_arg(argp, char *); 326 hashalg = 1;
260 if (s == NULL) s = "(null)"; 327 hash = hash_dense(g->str.seed, hash, str, len);
261 addstr(L, sb, s, (MSize)strlen(s)); 328 o = (GCobj *)(gcrefu(g->str.tab[hash & g->str.mask]) & ~(uintptr_t)1);
262 break; 329 }
263 }
264 case 'c':
265 addchar(L, sb, va_arg(argp, int));
266 break;
267 case 'd': {
268 char buf[LJ_STR_INTBUF];
269 char *p = lj_str_bufint(buf, va_arg(argp, int32_t));
270 addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p));
271 break;
272 }
273 case 'f': {
274 char buf[LJ_STR_NUMBUF];
275 TValue tv;
276 MSize len;
277 tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER));
278 len = (MSize)lj_str_bufnum(buf, &tv);
279 addstr(L, sb, buf, len);
280 break;
281 }
282 case 'p': {
283#define FMTP_CHARS (2*sizeof(ptrdiff_t))
284 char buf[2+FMTP_CHARS];
285 ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *));
286 ptrdiff_t i, lasti = 2+FMTP_CHARS;
287 if (p == 0) {
288 addstr(L, sb, "NULL", 4);
289 break;
290 }
291#if LJ_64
292 /* Shorten output for 64 bit pointers. */
293 lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0);
294#endif 330#endif
295 buf[0] = '0'; 331 while (o != NULL) {
296 buf[1] = 'x'; 332 GCstr *sx = gco2str(o);
297 for (i = lasti-1; i >= 2; i--, p >>= 4) 333 if (sx->hash == hash && sx->len == len) {
298 buf[i] = "0123456789abcdef"[(p & 15)]; 334 if (memcmp(str, strdata(sx), len) == 0) {
299 addstr(L, sb, buf, (MSize)lasti); 335 if (isdead(g, o)) flipwhite(o); /* Resurrect if dead. */
300 break; 336 return sx; /* Return existing string. */
337 }
338 coll++;
301 } 339 }
302 case '%': 340 coll++;
303 addchar(L, sb, '%'); 341 o = gcnext(o);
304 break; 342 }
305 default: 343#if LUAJIT_SECURITY_STRHASH
306 addchar(L, sb, '%'); 344 /* Rehash chain if there are too many collisions. */
307 addchar(L, sb, e[1]); 345 if (LJ_UNLIKELY(coll > LJ_STR_MAXCOLL) && !hashalg) {
308 break; 346 return lj_str_rehash_chain(L, hash, str, len);
309 } 347 }
310 fmt = e+2; 348#endif
349 /* Otherwise allocate a new string. */
350 return lj_str_alloc(L, str, len, hash, hashalg);
351 } else {
352 if (lenx)
353 lj_err_msg(L, LJ_ERR_STROV);
354 return &g->strempty;
311 } 355 }
312 addstr(L, sb, fmt, (MSize)strlen(fmt));
313 setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n));
314 incr_top(L);
315 return strVdata(L->top - 1);
316} 356}
317 357
318/* Push formatted message as a string object to Lua stack. Vararg variant. */ 358void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
319const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
320{ 359{
321 const char *msg; 360 g->str.num--;
322 va_list argp; 361 lj_mem_free(g, s, lj_str_size(s->len));
323 va_start(argp, fmt);
324 msg = lj_str_pushvf(L, fmt, argp);
325 va_end(argp);
326 return msg;
327} 362}
328 363
329/* -- Buffer handling ----------------------------------------------------- */ 364void LJ_FASTCALL lj_str_init(lua_State *L)
330
331char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz)
332{ 365{
333 if (sz > sb->sz) { 366 global_State *g = G(L);
334 if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF; 367 g->str.seed = lj_prng_u64(&g->prng);
335 lj_str_resizebuf(L, sb, sz); 368 lj_str_resize(L, LJ_MIN_STRTAB-1);
336 }
337 return sb->buf;
338} 369}
339 370
diff --git a/src/lj_str.h b/src/lj_str.h
index e7687cb1..01c6ba6b 100644
--- a/src/lj_str.h
+++ b/src/lj_str.h
@@ -10,41 +10,22 @@
10 10
11#include "lj_obj.h" 11#include "lj_obj.h"
12 12
13/* String interning. */ 13/* String helpers. */
14LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b); 14LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
15LJ_FUNC const char *lj_str_find(const char *s, const char *f,
16 MSize slen, MSize flen);
17LJ_FUNC int lj_str_haspattern(GCstr *s);
18
19/* String interning. */
15LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask); 20LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
16LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len); 21LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
17LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s); 22LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
23LJ_FUNC void LJ_FASTCALL lj_str_init(lua_State *L);
24#define lj_str_freetab(g) \
25 (lj_mem_freevec(g, g->str.tab, g->str.mask+1, GCRef))
18 26
19#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s))) 27#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
20#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1)) 28#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
21 29#define lj_str_size(len) (sizeof(GCstr) + (((len)+4) & ~(MSize)3))
22/* Type conversions. */
23LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
24LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
25LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
26LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
27LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
28
29#define LJ_STR_INTBUF (1+10)
30#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR
31
32/* String formatting. */
33LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
34LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
35#if defined(__GNUC__)
36 __attribute__ ((format (printf, 2, 3)))
37#endif
38 ;
39
40/* Resizable string buffers. Struct definition in lj_obj.h. */
41LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
42
43#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0)
44#define lj_str_resetbuf(sb) ((sb)->n = 0)
45#define lj_str_resizebuf(L, sb, size) \
46 ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
47 (sb)->sz = (size))
48#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
49 30
50#endif 31#endif
diff --git a/src/lj_strfmt.c b/src/lj_strfmt.c
new file mode 100644
index 00000000..331d9474
--- /dev/null
+++ b/src/lj_strfmt.c
@@ -0,0 +1,472 @@
1/*
2** String formatting.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#include <stdio.h>
7
8#define lj_strfmt_c
9#define LUA_CORE
10
11#include "lj_obj.h"
12#include "lj_buf.h"
13#include "lj_str.h"
14#include "lj_state.h"
15#include "lj_char.h"
16#include "lj_strfmt.h"
17
18/* -- Format parser ------------------------------------------------------- */
19
/* Maps a conversion character, offset from 'A', to its SFormat type code.
** Indexed 'A'..'x': 26 uppercase entries, 6 filler entries for the
** non-letter gap '['..'`', then the lowercase entries up to 'x'.
** A zero entry marks an invalid conversion character.
*/
static const uint8_t strfmt_map[('x'-'A')+1] = {
  STRFMT_A,0,0,0,STRFMT_E,STRFMT_F,STRFMT_G,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,STRFMT_X,0,0,
  0,0,0,0,0,0,
  STRFMT_A,0,STRFMT_C,STRFMT_D,STRFMT_E,STRFMT_F,STRFMT_G,0,STRFMT_I,0,0,0,0,
  0,STRFMT_O,STRFMT_P,STRFMT_Q,0,STRFMT_S,0,STRFMT_U,0,0,STRFMT_X
};
27
/* Parse the next element of a format string.
** Returns STRFMT_LIT for a literal run (fs->str/fs->len describe it),
** a combined SFormat value for a conversion (type | flags | width | prec),
** STRFMT_ERR for a bad conversion (fs->len = offset past the bad char),
** or STRFMT_EOF when the format string is exhausted.
*/
SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs)
{
  const uint8_t *p = fs->p, *e = fs->e;
  fs->str = (const char *)p;
  for (; p < e; p++) {
    if (*p == '%') {  /* Escape char? */
      if (p[1] == '%') {  /* '%%'? */
	fs->p = ++p+1;  /* Literal includes the first '%'; resume after both. */
	goto retlit;
      } else {
	SFormat sf = 0;
	uint32_t c;
	/* Flush any pending literal text first; the conversion is
	** re-parsed on the next call (p then equals fs->str).
	*/
	if (p != (const uint8_t *)fs->str)
	  break;
	/* Flag characters all lie in the range ' '..'0'. */
	for (p++; (uint32_t)*p - ' ' <= (uint32_t)('0' - ' '); p++) {
	  /* Parse flags. */
	  if (*p == '-') sf |= STRFMT_F_LEFT;
	  else if (*p == '+') sf |= STRFMT_F_PLUS;
	  else if (*p == '0') sf |= STRFMT_F_ZERO;
	  else if (*p == ' ') sf |= STRFMT_F_SPACE;
	  else if (*p == '#') sf |= STRFMT_F_ALT;
	  else break;
	}
	if ((uint32_t)*p - '0' < 10) {  /* Parse width (at most 2 digits). */
	  uint32_t width = (uint32_t)*p++ - '0';
	  if ((uint32_t)*p - '0' < 10)
	    width = (uint32_t)*p++ - '0' + width*10;
	  sf |= (width << STRFMT_SH_WIDTH);
	}
	if (*p == '.') {  /* Parse precision (at most 2 digits). */
	  uint32_t prec = 0;
	  p++;
	  if ((uint32_t)*p - '0' < 10) {
	    prec = (uint32_t)*p++ - '0';
	    if ((uint32_t)*p - '0' < 10)
	      prec = (uint32_t)*p++ - '0' + prec*10;
	  }
	  sf |= ((prec+1) << STRFMT_SH_PREC);  /* Stored biased by 1. */
	}
	/* Parse conversion. */
	c = (uint32_t)*p - 'A';
	if (LJ_LIKELY(c <= (uint32_t)('x' - 'A'))) {
	  uint32_t sx = strfmt_map[c];
	  if (sx) {
	    fs->p = p+1;
	    /* Bit 0x20 distinguishes lowercase from uppercase letters. */
	    return (sf | sx | ((c & 0x20) ? 0 : STRFMT_F_UPPER));
	  }
	}
	/* Return error location. */
	if (*p >= 32) p++;
	fs->len = (MSize)(p - (const uint8_t *)fs->str);
	fs->p = fs->e;  /* Terminate parsing on error. */
	return STRFMT_ERR;
      }
    }
  }
  fs->p = p;
retlit:
  fs->len = (MSize)(p - (const uint8_t *)fs->str);
  return fs->len ? STRFMT_LIT : STRFMT_EOF;
}
89
90/* -- Raw conversions ----------------------------------------------------- */
91
/* Emit one decimal digit of x by fixed-point reciprocal multiplication:
** ((1<<sh)+sc-1)/sc is ceil(2^sh/sc), so d == x/sc for the ranges used.
*/
#define WINT_R(x, sh, sc) \
  { uint32_t d = (x*(((1<<sh)+sc-1)/sc))>>sh; x -= d*sc; *p++ = (char)('0'+d); }

/* Write integer to buffer; returns a pointer past the last written char.
** The value is split into up to three 4-digit groups, and the magnitude
** comparisons branch directly into the matching digit-emit sequence.
*/
char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k)
{
  uint32_t u = (uint32_t)k;
  if (k < 0) { u = (uint32_t)-k; *p++ = '-'; }
  if (u < 10000) {  /* 1-4 digits. */
    if (u < 10) goto dig1;
    if (u < 100) goto dig2;
    if (u < 1000) goto dig3;
  } else {  /* 5 or more digits. */
    uint32_t v = u / 10000; u -= v * 10000;
    if (v < 10000) {  /* 5-8 digits. */
      if (v < 10) goto dig5;
      if (v < 100) goto dig6;
      if (v < 1000) goto dig7;
    } else {  /* 9-10 digits. */
      uint32_t w = v / 10000; v -= w * 10000;
      if (w >= 10) WINT_R(w, 10, 10)
      *p++ = (char)('0'+w);
    }
    WINT_R(v, 23, 1000)
    dig7: WINT_R(v, 12, 100)
    dig6: WINT_R(v, 10, 10)
    dig5: *p++ = (char)('0'+v);
  }
  WINT_R(u, 23, 1000)
  dig3: WINT_R(u, 12, 100)
  dig2: WINT_R(u, 10, 10)
  dig1: *p++ = (char)('0'+u);
  return p;
}
#undef WINT_R
127
128/* Write pointer to buffer. */
129char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v)
130{
131 ptrdiff_t x = (ptrdiff_t)v;
132 MSize i, n = STRFMT_MAXBUF_PTR;
133 if (x == 0) {
134 *p++ = 'N'; *p++ = 'U'; *p++ = 'L'; *p++ = 'L';
135 return p;
136 }
137#if LJ_64
138 /* Shorten output for 64 bit pointers. */
139 n = 2+2*4+((x >> 32) ? 2+2*(lj_fls((uint32_t)(x >> 32))>>3) : 0);
140#endif
141 p[0] = '0';
142 p[1] = 'x';
143 for (i = n-1; i >= 2; i--, x >>= 4)
144 p[i] = "0123456789abcdef"[(x & 15)];
145 return p+n;
146}
147
148/* Write ULEB128 to buffer. */
149char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v)
150{
151 for (; v >= 0x80; v >>= 7)
152 *p++ = (char)((v & 0x7f) | 0x80);
153 *p++ = (char)v;
154 return p;
155}
156
157/* Return string or write number to tmp buffer and return pointer to start. */
158const char *lj_strfmt_wstrnum(lua_State *L, cTValue *o, MSize *lenp)
159{
160 SBuf *sb;
161 if (tvisstr(o)) {
162 *lenp = strV(o)->len;
163 return strVdata(o);
164 } else if (tvisint(o)) {
165 sb = lj_strfmt_putint(lj_buf_tmp_(L), intV(o));
166 } else if (tvisnum(o)) {
167 sb = lj_strfmt_putfnum(lj_buf_tmp_(L), STRFMT_G14, o->n);
168 } else {
169 return NULL;
170 }
171 *lenp = sbuflen(sb);
172 return sbufB(sb);
173}
174
175/* -- Unformatted conversions to buffer ----------------------------------- */
176
177/* Add integer to buffer. */
178SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k)
179{
180 setsbufP(sb, lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT), k));
181 return sb;
182}
183
#if LJ_HASJIT
/* Add number to buffer, formatted as %.14g (STRFMT_G14). */
SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o)
{
  return lj_strfmt_putfnum(sb, STRFMT_G14, o->n);
}
#endif
191
192SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v)
193{
194 setsbufP(sb, lj_strfmt_wptr(lj_buf_more(sb, STRFMT_MAXBUF_PTR), v));
195 return sb;
196}
197
/* Add quoted string to buffer: wraps str in double quotes and escapes
** '"', '\\', '\n' with a backslash and control chars as decimal "\ddd".
*/
SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str)
{
  const char *s = strdata(str);
  MSize len = str->len;
  lj_buf_putb(sb, '"');
  while (len--) {
    uint32_t c = (uint32_t)(uint8_t)*s++;
    char *p = lj_buf_more(sb, 4);  /* Worst case "\ddd" is 4 chars. */
    if (c == '"' || c == '\\' || c == '\n') {
      *p++ = '\\';
    } else if (lj_char_iscntrl(c)) {  /* This can only be 0-31 or 127. */
      uint32_t d;
      *p++ = '\\';
      /* Emit a full-width escape when the following char is a digit,
      ** so "\1" followed by '2' can't be misread as "\12".
      */
      if (c >= 100 || lj_char_isdigit((uint8_t)*s)) {
	*p++ = (char)('0'+(c >= 100)); if (c >= 100) c -= 100;
	goto tens;
      } else if (c >= 10) {
      tens:
	d = (c * 205) >> 11; c -= d * 10; *p++ = (char)('0'+d);  /* d = c/10. */
      }
      c += '0';  /* Remaining ones digit, as a character. */
    }
    *p++ = (char)c;
    setsbufP(sb, p);
  }
  lj_buf_putb(sb, '"');
  return sb;
}
227
228/* -- Formatted conversions to buffer ------------------------------------- */
229
230/* Add formatted char to buffer. */
231SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat sf, int32_t c)
232{
233 MSize width = STRFMT_WIDTH(sf);
234 char *p = lj_buf_more(sb, width > 1 ? width : 1);
235 if ((sf & STRFMT_F_LEFT)) *p++ = (char)c;
236 while (width-- > 1) *p++ = ' ';
237 if (!(sf & STRFMT_F_LEFT)) *p++ = (char)c;
238 setsbufP(sb, p);
239 return sb;
240}
241
242/* Add formatted string to buffer. */
243SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat sf, GCstr *str)
244{
245 MSize len = str->len <= STRFMT_PREC(sf) ? str->len : STRFMT_PREC(sf);
246 MSize width = STRFMT_WIDTH(sf);
247 char *p = lj_buf_more(sb, width > len ? width : len);
248 if ((sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len);
249 while (width-- > len) *p++ = ' ';
250 if (!(sf & STRFMT_F_LEFT)) p = lj_buf_wmem(p, strdata(str), len);
251 setsbufP(sb, p);
252 return sb;
253}
254
/* Add formatted signed/unsigned integer to buffer.
** Handles STRFMT_INT (%d/%i) and STRFMT_UINT with optional hex/octal
** subtype, plus flags, width and precision. The digits are first written
** in reverse order into a fixed local buffer, then copied out with the
** required prefix and padding.
*/
SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k)
{
  char buf[STRFMT_MAXBUF_XINT], *q = buf + sizeof(buf), *p;
#ifdef LUA_USE_ASSERT
  char *ps;
#endif
  MSize prefix = 0, len, prec, pprec, width, need;

  /* Figure out signed prefixes. Encoding: high byte of prefix is its
  ** length in characters (1 or 2), low byte is the (last) prefix char.
  */
  if (STRFMT_TYPE(sf) == STRFMT_INT) {
    if ((int64_t)k < 0) {
      k = (uint64_t)-(int64_t)k;
      prefix = 256 + '-';
    } else if ((sf & STRFMT_F_PLUS)) {
      prefix = 256 + '+';
    } else if ((sf & STRFMT_F_SPACE)) {
      prefix = 256 + ' ';
    }
  }

  /* Convert number and store to fixed-size buffer in reverse order. */
  prec = STRFMT_PREC(sf);
  /* An explicit precision disables the '0' flag ((int32_t)prec < 0 means
  ** no precision was given, due to the bias in STRFMT_PREC).
  */
  if ((int32_t)prec >= 0) sf &= ~STRFMT_F_ZERO;
  if (k == 0) {  /* Special-case zero argument. */
    if (prec != 0 ||
	(sf & (STRFMT_T_OCT|STRFMT_F_ALT)) == (STRFMT_T_OCT|STRFMT_F_ALT))
      *--q = '0';
  } else if (!(sf & (STRFMT_T_HEX|STRFMT_T_OCT))) {  /* Decimal. */
    uint32_t k2;
    /* Use 64 bit div/mod only while needed, then switch to 32 bit. */
    while ((k >> 32)) { *--q = (char)('0' + k % 10); k /= 10; }
    k2 = (uint32_t)k;
    do { *--q = (char)('0' + k2 % 10); k2 /= 10; } while (k2);
  } else if ((sf & STRFMT_T_HEX)) {  /* Hex. */
    const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEF" :
						 "0123456789abcdef";
    do { *--q = hexdig[(k & 15)]; k >>= 4; } while (k);
    if ((sf & STRFMT_F_ALT)) prefix = 512 + ((sf & STRFMT_F_UPPER) ? 'X' : 'x');
  } else {  /* Octal. */
    do { *--q = (char)('0' + (uint32_t)(k & 7)); k >>= 3; } while (k);
    if ((sf & STRFMT_F_ALT)) *--q = '0';  /* '#' flag: leading zero. */
  }

  /* Calculate sizes. */
  len = (MSize)(buf + sizeof(buf) - q);
  if ((int32_t)len >= (int32_t)prec) prec = len;
  width = STRFMT_WIDTH(sf);
  pprec = prec + (prefix >> 8);  /* Digit count plus prefix length. */
  need = width > pprec ? width : pprec;
  p = lj_buf_more(sb, need);
#ifdef LUA_USE_ASSERT
  ps = p;
#endif

  /* Format number with leading/trailing whitespace and zeros. */
  if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == 0)
    while (width-- > pprec) *p++ = ' ';
  if (prefix) {
    /* Only 'x'/'X' compare >= 'X' here: emit the '0' of "0x"/"0X". */
    if ((char)prefix >= 'X') *p++ = '0';
    *p++ = (char)prefix;
  }
  if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == STRFMT_F_ZERO)
    while (width-- > pprec) *p++ = '0';
  while (prec-- > len) *p++ = '0';  /* Zero-pad to precision. */
  while (q < buf + sizeof(buf)) *p++ = *q++;  /* Add number itself. */
  if ((sf & STRFMT_F_LEFT))
    while (width-- > pprec) *p++ = ' ';

  lj_assertX(need == (MSize)(p - ps), "miscalculated format size");
  setsbufP(sb, p);
  return sb;
}
327
328/* Add number formatted as signed integer to buffer. */
329SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n)
330{
331 int64_t k = (int64_t)n;
332 if (checki32(k) && sf == STRFMT_INT)
333 return lj_strfmt_putint(sb, (int32_t)k); /* Shortcut for plain %d. */
334 else
335 return lj_strfmt_putfxint(sb, sf, (uint64_t)k);
336}
337
338/* Add number formatted as unsigned integer to buffer. */
339SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n)
340{
341 int64_t k;
342 if (n >= 9223372036854775808.0)
343 k = (int64_t)(n - 18446744073709551616.0);
344 else
345 k = (int64_t)n;
346 return lj_strfmt_putfxint(sb, sf, (uint64_t)k);
347}
348
349/* -- Conversions to strings ---------------------------------------------- */
350
351/* Convert integer to string. */
352GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k)
353{
354 char buf[STRFMT_MAXBUF_INT];
355 MSize len = (MSize)(lj_strfmt_wint(buf, k) - buf);
356 return lj_str_new(L, buf, len);
357}
358
359/* Convert integer or number to string. */
360GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o)
361{
362 return tvisint(o) ? lj_strfmt_int(L, intV(o)) : lj_strfmt_num(L, o);
363}
364
#if LJ_HASJIT
/* Convert char value to a one-character interned string. */
GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c)
{
  char ch = (char)c;
  return lj_str_new(L, &ch, 1);
}
#endif
374
/* Raw conversion of object to string (no metamethod lookup).
** Strings pass through, numbers are formatted, nil/false/true get their
** literal names; anything else prints as "typename: 0xaddr", or
** "typename: builtin#id" for fast functions.
*/
GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o)
{
  if (tvisstr(o)) {
    return strV(o);
  } else if (tvisnumber(o)) {
    return lj_strfmt_number(L, o);
  } else if (tvisnil(o)) {
    return lj_str_newlit(L, "nil");
  } else if (tvisfalse(o)) {
    return lj_str_newlit(L, "false");
  } else if (tvistrue(o)) {
    return lj_str_newlit(L, "true");
  } else {
    /* Sized for "builtin#"/type name + ": " + "0x" + 16 hex digits. */
    char buf[8+2+2+16], *p = buf;
    p = lj_buf_wmem(p, lj_typename(o), (MSize)strlen(lj_typename(o)));
    *p++ = ':'; *p++ = ' ';
    if (tvisfunc(o) && isffunc(funcV(o))) {
      p = lj_buf_wmem(p, "builtin#", 8);
      p = lj_strfmt_wint(p, funcV(o)->c.ffid);
    } else {
      p = lj_strfmt_wptr(p, lj_obj_ptr(o));
    }
    return lj_str_new(L, buf, (size_t)(p - buf));
  }
}
401
402/* -- Internal string formatting ------------------------------------------ */
403
404/*
405** These functions are only used for lua_pushfstring(), lua_pushvfstring()
406** and for internal string formatting (e.g. error messages). Caveat: unlike
407** string.format(), only a limited subset of formats and flags are supported!
408**
409** LuaJIT has support for a couple more formats than Lua 5.1/5.2:
410** - %d %u %o %x with full formatting, 32 bit integers only.
411** - %f and other FP formats are really %.14g.
412** - %s %c %p without formatting.
413*/
414
/* Push formatted message as a string object to Lua stack. va_list variant.
** Only the limited format subset documented above is handled; a bad
** conversion emits '?' (and trips an assertion in checked builds).
** Returns the C string data of the pushed, GC-anchored string.
*/
const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, va_list argp)
{
  SBuf *sb = lj_buf_tmp_(L);
  FormatState fs;
  SFormat sf;
  GCstr *str;
  lj_strfmt_init(&fs, fmt, (MSize)strlen(fmt));
  while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) {
    switch (STRFMT_TYPE(sf)) {
    case STRFMT_LIT:  /* Literal text between conversions. */
      lj_buf_putmem(sb, fs.str, fs.len);
      break;
    case STRFMT_INT:  /* 32 bit integers only; sign-extends to uint64_t. */
      lj_strfmt_putfxint(sb, sf, va_arg(argp, int32_t));
      break;
    case STRFMT_UINT:
      lj_strfmt_putfxint(sb, sf, va_arg(argp, uint32_t));
      break;
    case STRFMT_NUM:  /* All FP formats degrade to %.14g here. */
      lj_strfmt_putfnum(sb, STRFMT_G14, va_arg(argp, lua_Number));
      break;
    case STRFMT_STR: {
      const char *s = va_arg(argp, char *);
      if (s == NULL) s = "(null)";
      lj_buf_putmem(sb, s, (MSize)strlen(s));
      break;
    }
    case STRFMT_CHAR:
      lj_buf_putb(sb, va_arg(argp, int));
      break;
    case STRFMT_PTR:
      lj_strfmt_putptr(sb, va_arg(argp, void *));
      break;
    case STRFMT_ERR:
    default:
      lj_buf_putb(sb, '?');
      lj_assertL(0, "bad string format near offset %d", fs.len);
      break;
    }
  }
  str = lj_buf_str(L, sb);
  setstrV(L, L->top, str);
  incr_top(L);  /* Anchor result on the stack against GC. */
  return strdata(str);
}
461
/* Push formatted message as a string object to Lua stack. Vararg variant.
** Thin wrapper that forwards to lj_strfmt_pushvf().
*/
const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...)
{
  const char *msg;
  va_list argp;
  va_start(argp, fmt);
  msg = lj_strfmt_pushvf(L, fmt, argp);
  va_end(argp);
  return msg;
}
472
diff --git a/src/lj_strfmt.h b/src/lj_strfmt.h
new file mode 100644
index 00000000..9fe46d67
--- /dev/null
+++ b/src/lj_strfmt.h
@@ -0,0 +1,126 @@
1/*
2** String formatting.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#ifndef _LJ_STRFMT_H
7#define _LJ_STRFMT_H
8
9#include "lj_obj.h"
10
11typedef uint32_t SFormat; /* Format indicator. */
12
13/* Format parser state. */
14typedef struct FormatState {
15 const uint8_t *p; /* Current format string pointer. */
16 const uint8_t *e; /* End of format string. */
17 const char *str; /* Returned literal string. */
18 MSize len; /* Size of literal string. */
19} FormatState;
20
21/* Format types (max. 16). */
22typedef enum FormatType {
23 STRFMT_EOF, STRFMT_ERR, STRFMT_LIT,
24 STRFMT_INT, STRFMT_UINT, STRFMT_NUM, STRFMT_STR, STRFMT_CHAR, STRFMT_PTR
25} FormatType;
26
27/* Format subtypes (bits are reused). */
28#define STRFMT_T_HEX 0x0010 /* STRFMT_UINT */
29#define STRFMT_T_OCT 0x0020 /* STRFMT_UINT */
30#define STRFMT_T_FP_A 0x0000 /* STRFMT_NUM */
31#define STRFMT_T_FP_E 0x0010 /* STRFMT_NUM */
32#define STRFMT_T_FP_F 0x0020 /* STRFMT_NUM */
33#define STRFMT_T_FP_G 0x0030 /* STRFMT_NUM */
34#define STRFMT_T_QUOTED 0x0010 /* STRFMT_STR */
35
36/* Format flags. */
37#define STRFMT_F_LEFT 0x0100
38#define STRFMT_F_PLUS 0x0200
39#define STRFMT_F_ZERO 0x0400
40#define STRFMT_F_SPACE 0x0800
41#define STRFMT_F_ALT 0x1000
42#define STRFMT_F_UPPER 0x2000
43
44/* Format indicator fields. */
45#define STRFMT_SH_WIDTH 16
46#define STRFMT_SH_PREC 24
47
48#define STRFMT_TYPE(sf) ((FormatType)((sf) & 15))
49#define STRFMT_WIDTH(sf) (((sf) >> STRFMT_SH_WIDTH) & 255u)
50#define STRFMT_PREC(sf) ((((sf) >> STRFMT_SH_PREC) & 255u) - 1u)
51#define STRFMT_FP(sf) (((sf) >> 4) & 3)
52
53/* Formats for conversion characters. */
54#define STRFMT_A (STRFMT_NUM|STRFMT_T_FP_A)
55#define STRFMT_C (STRFMT_CHAR)
56#define STRFMT_D (STRFMT_INT)
57#define STRFMT_E (STRFMT_NUM|STRFMT_T_FP_E)
58#define STRFMT_F (STRFMT_NUM|STRFMT_T_FP_F)
59#define STRFMT_G (STRFMT_NUM|STRFMT_T_FP_G)
60#define STRFMT_I STRFMT_D
61#define STRFMT_O (STRFMT_UINT|STRFMT_T_OCT)
62#define STRFMT_P (STRFMT_PTR)
63#define STRFMT_Q (STRFMT_STR|STRFMT_T_QUOTED)
64#define STRFMT_S (STRFMT_STR)
65#define STRFMT_U (STRFMT_UINT)
66#define STRFMT_X (STRFMT_UINT|STRFMT_T_HEX)
67#define STRFMT_G14 (STRFMT_G | ((14+1) << STRFMT_SH_PREC))
68
69/* Maximum buffer sizes for conversions. */
70#define STRFMT_MAXBUF_XINT (1+22) /* '0' prefix + uint64_t in octal. */
71#define STRFMT_MAXBUF_INT (1+10) /* Sign + int32_t in decimal. */
72#define STRFMT_MAXBUF_NUM 32 /* Must correspond with STRFMT_G14. */
73#define STRFMT_MAXBUF_PTR (2+2*sizeof(ptrdiff_t)) /* "0x" + hex ptr. */
74
75/* Format parser. */
76LJ_FUNC SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs);
77
/* Initialize a format parser state over fmt[0..len-1]. */
static LJ_AINLINE void lj_strfmt_init(FormatState *fs, const char *p, MSize len)
{
  fs->p = (const uint8_t *)p;
  fs->e = (const uint8_t *)p + len;
  /* Must be NUL-terminated. May have NULs inside, too. */
  lj_assertX(*fs->e == 0, "format not NUL-terminated");
}
85
86/* Raw conversions. */
87LJ_FUNC char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k);
88LJ_FUNC char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v);
89LJ_FUNC char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v);
90LJ_FUNC const char *lj_strfmt_wstrnum(lua_State *L, cTValue *o, MSize *lenp);
91
92/* Unformatted conversions to buffer. */
93LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k);
94#if LJ_HASJIT
95LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o);
96#endif
97LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v);
98LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str);
99
100/* Formatted conversions to buffer. */
101LJ_FUNC SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k);
102LJ_FUNC SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n);
103LJ_FUNC SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n);
104LJ_FUNC SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat, lua_Number n);
105LJ_FUNC SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat, int32_t c);
106LJ_FUNC SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat, GCstr *str);
107
108/* Conversions to strings. */
109LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k);
110LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o);
111LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o);
112#if LJ_HASJIT
113LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c);
114#endif
115LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o);
116
117/* Internal string formatting. */
118LJ_FUNC const char *lj_strfmt_pushvf(lua_State *L, const char *fmt,
119 va_list argp);
120LJ_FUNC const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...)
121#if defined(__GNUC__) || defined(__clang__)
122 __attribute__ ((format (printf, 2, 3)))
123#endif
124 ;
125
126#endif
diff --git a/src/lj_strfmt_num.c b/src/lj_strfmt_num.c
new file mode 100644
index 00000000..8cb5d47f
--- /dev/null
+++ b/src/lj_strfmt_num.c
@@ -0,0 +1,592 @@
1/*
2** String formatting for floating-point numbers.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4** Contributed by Peter Cawley.
5*/
6
7#include <stdio.h>
8
9#define lj_strfmt_num_c
10#define LUA_CORE
11
12#include "lj_obj.h"
13#include "lj_buf.h"
14#include "lj_str.h"
15#include "lj_strfmt.h"
16
17/* -- Precomputed tables -------------------------------------------------- */
18
/* Rescale factors to push the exponent of a number towards zero. */
/*
** Indexed by the top bits of the biased binary exponent (e >> 6, giving
** 0..31 for the 11-bit exponent). Each P(x)/N(x) entry expands two ways:
** in rescale_e, P is unary '-' and N is unary '+', yielding the decimal
** exponent bias; in rescale_n, token-pasting builds the literals 1e+0X
** and 1e-0X, the matching power-of-ten doubles used to rescale.
*/
#define RESCALE_EXPONENTS(P, N) \
  P(308), P(289), P(270), P(250), P(231), P(212), P(193), P(173), P(154), \
  P(135), P(115), P(96), P(77), P(58), P(38), P(0), P(0), P(0), N(39), N(58), \
  N(77), N(96), N(116), N(135), N(154), N(174), N(193), N(212), N(231), \
  N(251), N(270), N(289)

#define ONE_E_P(X) 1e+0 ## X
#define ONE_E_N(X) 1e-0 ## X
static const int16_t rescale_e[] = { RESCALE_EXPONENTS(-, +) };
static const double rescale_n[] = { RESCALE_EXPONENTS(ONE_E_P, ONE_E_N) };
#undef ONE_E_N
#undef ONE_E_P
32
/*
** For p in range -70 through 57, this table encodes pairs (m, e) such that
** 4*2^p <= (uint8_t)m*10^e, and is the smallest value for which this holds.
** Entries are addressed as four_ulp_m_e + eidx*2 with eidx in [0, 128)
** (see the ndebias check in lj_strfmt_wfnum). The m byte is stored as
** int8_t but read back as uint8_t, so negative bytes encode m >= 128.
*/
static const int8_t four_ulp_m_e[] = {
  34, -21, 68, -21, 14, -20, 28, -20, 55, -20, 2, -19, 3, -19, 5, -19, 9, -19,
  -82, -18, 35, -18, 7, -17, -117, -17, 28, -17, 56, -17, 112, -16, -33, -16,
  45, -16, 89, -16, -78, -15, 36, -15, 72, -15, -113, -14, 29, -14, 57, -14,
  114, -13, -28, -13, 46, -13, 91, -12, -74, -12, 37, -12, 73, -12, 15, -11, 3,
  -11, 59, -11, 2, -10, 3, -10, 5, -10, 1, -9, -69, -9, 38, -9, 75, -9, 15, -7,
  3, -7, 6, -7, 12, -6, -17, -7, 48, -7, 96, -7, -65, -6, 39, -6, 77, -6, -103,
  -5, 31, -5, 62, -5, 123, -4, -11, -4, 49, -4, 98, -4, -60, -3, 4, -2, 79, -3,
  16, -2, 32, -2, 63, -2, 2, -1, 25, 0, 5, 1, 1, 2, 2, 2, 4, 2, 8, 2, 16, 2,
  32, 2, 64, 2, -128, 2, 26, 2, 52, 2, 103, 3, -51, 3, 41, 4, 82, 4, -92, 4,
  33, 4, 66, 4, -124, 5, 27, 5, 53, 5, 105, 6, 21, 6, 42, 6, 84, 6, 17, 7, 34,
  7, 68, 7, 2, 8, 3, 8, 6, 8, 108, 9, -41, 9, 43, 10, 86, 9, -84, 10, 35, 10,
  69, 10, -118, 11, 28, 11, 55, 12, 11, 13, 22, 13, 44, 13, 88, 13, -80, 13,
  36, 13, 71, 13, -115, 14, 29, 14, 57, 14, 113, 15, -30, 15, 46, 15, 91, 15,
  19, 16, 37, 16, 73, 16, 2, 17, 3, 17, 6, 17
};
53
/*
** min(2^32-1, 10^e-1) for e in range 0 through 10, i.e. the largest
** uint32_t with at most e decimal digits. The table is only ever read
** (by ndigits_dec and nd_add_m10e), so it is const-qualified to allow
** placement in read-only data.
*/
static const uint32_t ndigits_dec_threshold[] = {
  0, 9U, 99U, 999U, 9999U, 99999U, 999999U,
  9999999U, 99999999U, 999999999U, 0xffffffffU
};
59
60/* -- Helper functions ---------------------------------------------------- */
61
62/* Compute the number of digits in the decimal representation of x. */
63static MSize ndigits_dec(uint32_t x)
64{
65 MSize t = ((lj_fls(x | 1) * 77) >> 8) + 1; /* 2^8/77 is roughly log2(10) */
66 return t + (x > ndigits_dec_threshold[t]);
67}
68
/*
** Write exactly 9 decimal digits of u to buffer p (zero-padded on the
** left, like "%09u" for u < 1e9). Returns the write pointer p + 9.
*/
static char *lj_strfmt_wuint9(char *p, uint32_t u)
{
  uint32_t pow10 = 100000000;  /* 10^8: weight of the leading digit. */
  do {
    uint32_t d = u / pow10;
    u -= d * pow10;
    *p++ = (char)('0' + d);
    pow10 /= 10;
  } while (pow10);
  return p;
}
91
92/* -- Extended precision arithmetic --------------------------------------- */
93
94/*
95** The "nd" format is a fixed-precision decimal representation for numbers. It
96** consists of up to 64 uint32_t values, with each uint32_t storing a value
97** in the range [0, 1e9). A number in "nd" format consists of three variables:
98**
99** uint32_t nd[64];
100** uint32_t ndlo;
101** uint32_t ndhi;
102**
103** The integral part of the number is stored in nd[0 ... ndhi], the value of
104** which is sum{i in [0, ndhi] | nd[i] * 10^(9*i)}. If the fractional part of
105** the number is zero, ndlo is zero. Otherwise, the fractional part is stored
106** in nd[ndlo ... 63], the value of which is taken to be
107** sum{i in [ndlo, 63] | nd[i] * 10^(9*(i-64))}.
108**
109** If the array part had 128 elements rather than 64, then every double would
110** have an exact representation in "nd" format. With 64 elements, all integral
111** doubles have an exact representation, and all non-integral doubles have
112** enough digits to make both %.99e and %.99f do the right thing.
113*/
114
/*
** Per-pass shift width and matching divide-by-1e9 for nd_mul2k.
** On 64-bit targets a full 64-bit division by 1e9 is used directly; on
** 32-bit targets 2^9 is shifted out first (10^9 == 2^9 * 1953125) so the
** division operates on a narrower value, with a correspondingly smaller
** maximum shift per pass.
*/
#if LJ_64
#define ND_MUL2K_MAX_SHIFT	29
#define ND_MUL2K_DIV1E9(val)	((uint32_t)((val) / 1000000000))
#else
#define ND_MUL2K_MAX_SHIFT	11
#define ND_MUL2K_DIV1E9(val)	((uint32_t)((val) >> 9) / 1953125)
#endif

/* Multiply nd by 2^k and add carry_in (ndlo is assumed to be zero). */
static uint32_t nd_mul2k(uint32_t* nd, uint32_t ndhi, uint32_t k,
			 uint32_t carry_in, SFormat sf)
{
  uint32_t i, ndlo = 0, start = 1;
  /* Performance hacks. */
  if (k > ND_MUL2K_MAX_SHIFT*2 && STRFMT_FP(sf) != STRFMT_FP(STRFMT_T_FP_F)) {
    /* For %e/%g with bounded precision, low-order elements far below the
    ** leading digits cannot affect the printed result; `start` tracks the
    ** point past which ndlo is allowed to advance as the number grows. */
    start = ndhi - (STRFMT_PREC(sf) + 17) / 8;
  }
  /* Real logic. */
  while (k >= ND_MUL2K_MAX_SHIFT) {
    /* Shift each base-1e9 element, propagating overflow upward as carry. */
    for (i = ndlo; i <= ndhi; i++) {
      uint64_t val = ((uint64_t)nd[i] << ND_MUL2K_MAX_SHIFT) | carry_in;
      carry_in = ND_MUL2K_DIV1E9(val);
      nd[i] = (uint32_t)val - carry_in * 1000000000;
    }
    if (carry_in) {
      nd[++ndhi] = carry_in; carry_in = 0;
      if (start++ == ndlo) ++ndlo;  /* Drop an element no longer needed. */
    }
    k -= ND_MUL2K_MAX_SHIFT;
  }
  if (k) {
    /* Final pass for the remaining shift of less than MAX_SHIFT bits. */
    for (i = ndlo; i <= ndhi; i++) {
      uint64_t val = ((uint64_t)nd[i] << k) | carry_in;
      carry_in = ND_MUL2K_DIV1E9(val);
      nd[i] = (uint32_t)val - carry_in * 1000000000;
    }
    if (carry_in) nd[++ndhi] = carry_in;
  }
  return ndhi;
}
155
/* Divide nd by 2^k (ndlo is assumed to be zero). Returns the new ndlo. */
static uint32_t nd_div2k(uint32_t* nd, uint32_t ndhi, uint32_t k, SFormat sf)
{
  uint32_t ndlo = 0, stop1 = ~0, stop2 = ~0;
  /* Performance hacks. */
  if (!ndhi) {
    /* Single-element value: shift out trailing zero bits directly first. */
    if (!nd[0]) {
      return 0;
    } else {
      uint32_t s = lj_ffs(nd[0]);
      if (s >= k) { nd[0] >>= k; return 0; }
      nd[0] >>= s; k -= s;
    }
  }
  if (k > 18) {
    /* stop1/stop2 bound how far ndlo may extend downward: fractional
    ** elements beyond the requested precision are discarded (see the
    ** checks against stop1/stop2 in the loop below). */
    if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_F)) {
      stop1 = 63 - (int32_t)STRFMT_PREC(sf) / 9;
    } else {
      /* 0.30103... == log10(2): estimate the decimal magnitude. */
      int32_t floorlog2 = ndhi * 29 + lj_fls(nd[ndhi]) - k;
      int32_t floorlog10 = (int32_t)(floorlog2 * 0.30102999566398114);
      stop1 = 62 + (floorlog10 - (int32_t)STRFMT_PREC(sf)) / 9;
      stop2 = 61 + ndhi - (int32_t)STRFMT_PREC(sf) / 8;
    }
  }
  /* Real logic. */
  while (k >= 9) {
    /* Divide by 2^9 per pass: 10^9 == 2^9 * 1953125, so each element's
    ** remainder scales exactly into the element below it. */
    uint32_t i = ndhi, carry = 0;
    for (;;) {
      uint32_t val = nd[i];
      nd[i] = (val >> 9) + carry;
      carry = (val & 0x1ff) * 1953125;
      if (i == ndlo) break;
      i = (i - 1) & 0x3f;  /* nd is a 64-element ring buffer. */
    }
    if (ndlo != stop1 && ndlo != stop2) {
      if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; }
      if (!nd[ndhi]) { ndhi = (ndhi - 1) & 0x3f; stop2--; }
    } else if (!nd[ndhi]) {
      if (ndhi != ndlo) { ndhi = (ndhi - 1) & 0x3f; stop2--; }
      else return ndlo;
    }
    k -= 9;
  }
  if (k) {
    /* Remaining shift of less than 9 bits: same scheme with 1e9 >> k. */
    uint32_t mask = (1U << k) - 1, mul = 1000000000 >> k, i = ndhi, carry = 0;
    for (;;) {
      uint32_t val = nd[i];
      nd[i] = (val >> k) + carry;
      carry = (val & mask) * mul;
      if (i == ndlo) break;
      i = (i - 1) & 0x3f;
    }
    if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; }
  }
  return ndlo;
}
212
213/* Add m*10^e to nd (assumes ndlo <= e/9 <= ndhi and 0 <= m <= 9). */
214static uint32_t nd_add_m10e(uint32_t* nd, uint32_t ndhi, uint8_t m, int32_t e)
215{
216 uint32_t i, carry;
217 if (e >= 0) {
218 i = (uint32_t)e/9;
219 carry = m * (ndigits_dec_threshold[e - (int32_t)i*9] + 1);
220 } else {
221 int32_t f = (e-8)/9;
222 i = (uint32_t)(64 + f);
223 carry = m * (ndigits_dec_threshold[e - f*9] + 1);
224 }
225 for (;;) {
226 uint32_t val = nd[i] + carry;
227 if (LJ_UNLIKELY(val >= 1000000000)) {
228 val -= 1000000000;
229 nd[i] = val;
230 if (LJ_UNLIKELY(i == ndhi)) {
231 ndhi = (ndhi + 1) & 0x3f;
232 nd[ndhi] = 1;
233 break;
234 }
235 carry = 1;
236 i = (i + 1) & 0x3f;
237 } else {
238 nd[i] = val;
239 break;
240 }
241 }
242 return ndhi;
243}
244
/*
** Test whether two "nd" values are equal in their most significant digits.
** nd[ndhi] and below is compared against ref (read downward in memory);
** hilen is the digit count of the top element. Returns nonzero iff the
** leading prec digits match AND both values round the same way at digit
** prec (the '< 5' comparison on the following digit).
*/
static int nd_similar(uint32_t* nd, uint32_t ndhi, uint32_t* ref, MSize hilen,
		      MSize prec)
{
  char nd9[9], ref9[9];
  if (hilen <= prec) {
    /* Compare whole 9-digit elements first, reducing prec accordingly. */
    if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0;
    prec -= hilen; ref--; ndhi = (ndhi - 1) & 0x3f;
    if (prec >= 9) {
      if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0;
      prec -= 9; ref--; ndhi = (ndhi - 1) & 0x3f;
    }
  } else {
    prec -= hilen - 9;
  }
  lj_assertX(prec < 9, "bad precision %d", prec);
  /* Render the deciding elements as digit strings for a partial compare. */
  lj_strfmt_wuint9(nd9, nd[ndhi]);
  lj_strfmt_wuint9(ref9, *ref);
  return !memcmp(nd9, ref9, prec) && (nd9[prec] < '5') == (ref9[prec] < '5');
}
265
266/* -- Formatted conversions to buffer ------------------------------------- */
267
/*
** Write formatted floating-point number to either sb or p.
**
** sf selects the conversion (%a/%e/%f/%g via STRFMT_FP) plus flags, field
** width and precision. If p is NULL, output is appended to the string
** buffer sb (grown via lj_buf_more); otherwise output is written directly
** to p, which must be large enough for the result (STRFMT_MAXBUF_NUM
** suffices for the default %.14g, see lj_strfmt_num). Returns the write
** pointer just past the last emitted character.
*/
static char *lj_strfmt_wfnum(SBuf *sb, SFormat sf, lua_Number n, char *p)
{
  MSize width = STRFMT_WIDTH(sf), prec = STRFMT_PREC(sf), len;
  TValue t;
  t.n = n;
  if (LJ_UNLIKELY((t.u32.hi << 1) >= 0xffe00000)) {
    /* Handle non-finite values uniformly for %a, %e, %f, %g. */
    /* ch packs the three output chars; XOR with 0x202020 upcases them. */
    int prefix = 0, ch = (sf & STRFMT_F_UPPER) ? 0x202020 : 0;
    if (((t.u32.hi & 0x000fffff) | t.u32.lo) != 0) {
      ch ^= ('n' << 16) | ('a' << 8) | 'n';
      if ((sf & STRFMT_F_SPACE)) prefix = ' ';
    } else {
      ch ^= ('i' << 16) | ('n' << 8) | 'f';
      if ((t.u32.hi & 0x80000000)) prefix = '-';
      else if ((sf & STRFMT_F_PLUS)) prefix = '+';
      else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
    }
    len = 3 + (prefix != 0);
    if (!p) p = lj_buf_more(sb, width > len ? width : len);
    if (!(sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' ';
    if (prefix) *p++ = prefix;
    *p++ = (char)(ch >> 16); *p++ = (char)(ch >> 8); *p++ = (char)ch;
  } else if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_A)) {
    /* %a */
    const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEFPX"
					       : "0123456789abcdefpx";
    int32_t e = (t.u32.hi >> 20) & 0x7ff;
    char prefix = 0, eprefix = '+';
    if (t.u32.hi & 0x80000000) prefix = '-';
    else if ((sf & STRFMT_F_PLUS)) prefix = '+';
    else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
    t.u32.hi &= 0xfffff;
    if (e) {
      t.u32.hi |= 0x100000;
      e -= 1023;
    } else if (t.u32.lo | t.u32.hi) {
      /* Non-zero denormal - normalise it. */
      uint32_t shift = t.u32.hi ? 20-lj_fls(t.u32.hi) : 52-lj_fls(t.u32.lo);
      e = -1022 - shift;
      t.u64 <<= shift;
    }
    /* abs(n) == t.u64 * 2^(e - 52) */
    /* If n != 0, bit 52 of t.u64 is set, and is the highest set bit. */
    if ((int32_t)prec < 0) {
      /* Default precision: use smallest precision giving exact result. */
      prec = t.u32.lo ? 13-lj_ffs(t.u32.lo)/4 : 5-lj_ffs(t.u32.hi|0x100000)/4;
    } else if (prec < 13) {
      /* Precision is sufficiently low as to maybe require rounding. */
      t.u64 += (((uint64_t)1) << (51 - prec*4));
    }
    if (e < 0) {
      eprefix = '-';
      e = -e;
    }
    /* 5 accounts for '0', 'x', the leading digit, 'p' and the exp sign. */
    len = 5 + ndigits_dec((uint32_t)e) + prec + (prefix != 0)
	+ ((prec | (sf & STRFMT_F_ALT)) != 0);
    if (!p) p = lj_buf_more(sb, width > len ? width : len);
    if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
      while (width-- > len) *p++ = ' ';
    }
    if (prefix) *p++ = prefix;
    *p++ = '0';
    *p++ = hexdig[17]; /* x or X */
    if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
      while (width-- > len) *p++ = '0';
    }
    *p++ = '0' + (t.u32.hi >> 20); /* Usually '1', sometimes '0' or '2'. */
    if ((prec | (sf & STRFMT_F_ALT))) {
      /* Emit fractional part. */
      char *q = p + 1 + prec;
      *p = '.';
      if (prec < 13) t.u64 >>= (52 - prec*4);
      else while (prec > 13) p[prec--] = '0';
      while (prec) { p[prec--] = hexdig[t.u64 & 15]; t.u64 >>= 4; }
      p = q;
    }
    *p++ = hexdig[16]; /* p or P */
    *p++ = eprefix; /* + or - */
    p = lj_strfmt_wint(p, e);
  } else {
    /* %e or %f or %g - begin by converting n to "nd" format. */
    uint32_t nd[64];
    uint32_t ndhi = 0, ndlo, i;
    int32_t e = (t.u32.hi >> 20) & 0x7ff, ndebias = 0;
    char prefix = 0, *q;
    if (t.u32.hi & 0x80000000) prefix = '-';
    else if ((sf & STRFMT_F_PLUS)) prefix = '+';
    else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
    prec += ((int32_t)prec >> 31) & 7; /* Default precision is 6. */
    if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_G)) {
      /* %g - decrement precision if non-zero (to make it like %e). */
      prec--;
      prec ^= (uint32_t)((int32_t)prec >> 31);
    }
    if ((sf & STRFMT_T_FP_E) && prec < 14 && n != 0) {
      /* Precision is sufficiently low that rescaling will probably work. */
      if ((ndebias = rescale_e[e >> 6])) {
	t.n = n * rescale_n[e >> 6];
	if (LJ_UNLIKELY(!e)) t.n *= 1e10, ndebias -= 10;
	t.u64 -= 2; /* Convert 2ulp below (later we convert 2ulp above). */
	nd[0] = 0x100000 | (t.u32.hi & 0xfffff);
	e = ((t.u32.hi >> 20) & 0x7ff) - 1075 - (ND_MUL2K_MAX_SHIFT < 29);
	goto load_t_lo; rescale_failed:
	/* Rescaling introduced too much error; redo without rescaling. */
	t.n = n;
	e = (t.u32.hi >> 20) & 0x7ff;
	ndebias = ndhi = 0;
      }
    }
    nd[0] = t.u32.hi & 0xfffff;
    if (e == 0) e++; else nd[0] |= 0x100000;
    e -= 1043;
    if (t.u32.lo) {
      e -= 32 + (ND_MUL2K_MAX_SHIFT < 29); load_t_lo:
#if ND_MUL2K_MAX_SHIFT >= 29
      nd[0] = (nd[0] << 3) | (t.u32.lo >> 29);
      ndhi = nd_mul2k(nd, ndhi, 29, t.u32.lo & 0x1fffffff, sf);
#elif ND_MUL2K_MAX_SHIFT >= 11
      ndhi = nd_mul2k(nd, ndhi, 11, t.u32.lo >> 21, sf);
      ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo >> 10) & 0x7ff, sf);
      ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo << 1) & 0x7ff, sf);
#else
#error "ND_MUL2K_MAX_SHIFT too small"
#endif
    }
    if (e >= 0) {
      ndhi = nd_mul2k(nd, ndhi, (uint32_t)e, 0, sf);
      ndlo = 0;
    } else {
      ndlo = nd_div2k(nd, ndhi, (uint32_t)-e, sf);
      if (ndhi && !nd[ndhi]) ndhi--;
    }
    /* abs(n) == nd * 10^ndebias (for slightly loose interpretation of ==) */
    if ((sf & STRFMT_T_FP_E)) {
      /* %e or %g - assume %e and start by calculating nd's exponent (nde). */
      char eprefix = '+';
      int32_t nde = -1;
      MSize hilen;
      if (ndlo && !nd[ndhi]) {
	/* Value < 1: find the highest non-zero fractional element. */
	ndhi = 64; do {} while (!nd[--ndhi]);
	nde -= 64 * 9;
      }
      hilen = ndigits_dec(nd[ndhi]);
      nde += ndhi * 9 + hilen;
      if (ndebias) {
	/*
	** Rescaling was performed, but this introduced some error, and might
	** have pushed us across a rounding boundary. We check whether this
	** error affected the result by introducing even more error (2ulp in
	** either direction), and seeing whether a rounding boundary was
	** crossed. Having already converted the -2ulp case, we save off its
	** most significant digits, convert the +2ulp case, and compare them.
	*/
	int32_t eidx = e + 70 + (ND_MUL2K_MAX_SHIFT < 29)
		     + (t.u32.lo >= 0xfffffffe && !(~t.u32.hi << 12));
	const int8_t *m_e = four_ulp_m_e + eidx * 2;
	lj_assertG_(G(sbufL(sb)), 0 <= eidx && eidx < 128, "bad eidx %d", eidx);
	nd[33] = nd[ndhi];
	nd[32] = nd[(ndhi - 1) & 0x3f];
	nd[31] = nd[(ndhi - 2) & 0x3f];
	nd_add_m10e(nd, ndhi, (uint8_t)*m_e, m_e[1]);
	if (LJ_UNLIKELY(!nd_similar(nd, ndhi, nd + 33, hilen, prec + 1))) {
	  goto rescale_failed;
	}
      }
      if ((int32_t)(prec - nde) < (0x3f & -(int32_t)ndlo) * 9) {
	/* Precision is sufficiently low as to maybe require rounding. */
	ndhi = nd_add_m10e(nd, ndhi, 5, nde - prec - 1);
	nde += (hilen != ndigits_dec(nd[ndhi]));
      }
      nde += ndebias;
      if ((sf & STRFMT_T_FP_F)) {
	/* %g */
	if ((int32_t)prec >= nde && nde >= -4) {
	  if (nde < 0) ndhi = 0;
	  prec -= nde;
	  goto g_format_like_f;
	} else if (!(sf & STRFMT_F_ALT) && prec && width > 5) {
	  /* Decrease precision in order to strip trailing zeroes. */
	  char tail[9];
	  uint32_t maxprec = hilen - 1 + ((ndhi - ndlo) & 0x3f) * 9;
	  if (prec >= maxprec) prec = maxprec;
	  else ndlo = (ndhi - (((int32_t)(prec - hilen) + 9) / 9)) & 0x3f;
	  i = prec - hilen - (((ndhi - ndlo) & 0x3f) * 9) + 10;
	  lj_strfmt_wuint9(tail, nd[ndlo]);
	  while (prec && tail[--i] == '0') {
	    prec--;
	    if (!i) {
	      if (ndlo == ndhi) { prec = 0; break; }
	      lj_strfmt_wuint9(tail, nd[++ndlo]);
	      i = 9;
	    }
	  }
	}
      }
      if (nde < 0) {
	/* Make nde non-negative. */
	eprefix = '-';
	nde = -nde;
      }
      len = 3 + prec + (prefix != 0) + ndigits_dec((uint32_t)nde) + (nde < 10)
	  + ((prec | (sf & STRFMT_F_ALT)) != 0);
      if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 5);
      if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
	while (width-- > len) *p++ = ' ';
      }
      if (prefix) *p++ = prefix;
      if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
	while (width-- > len) *p++ = '0';
      }
      q = lj_strfmt_wint(p + 1, nd[ndhi]);
      p[0] = p[1]; /* Put leading digit in the correct place. */
      if ((prec | (sf & STRFMT_F_ALT))) {
	/* Emit fractional part. */
	p[1] = '.'; p += 2;
	prec -= (MSize)(q - p); p = q; /* Account for digits already emitted. */
	/* Then emit chunks of 9 digits (this may emit 8 digits too many). */
	for (i = ndhi; (int32_t)prec > 0 && i != ndlo; prec -= 9) {
	  i = (i - 1) & 0x3f;
	  p = lj_strfmt_wuint9(p, nd[i]);
	}
	if ((sf & STRFMT_T_FP_F) && !(sf & STRFMT_F_ALT)) {
	  /* %g (and not %#g) - strip trailing zeroes. */
	  p += (int32_t)prec & ((int32_t)prec >> 31);
	  while (p[-1] == '0') p--;
	  if (p[-1] == '.') p--;
	} else {
	  /* %e (or %#g) - emit trailing zeroes. */
	  while ((int32_t)prec > 0) { *p++ = '0'; prec--; }
	  p += (int32_t)prec;
	}
      } else {
	p++;
      }
      *p++ = (sf & STRFMT_F_UPPER) ? 'E' : 'e';
      *p++ = eprefix; /* + or - */
      if (nde < 10) *p++ = '0'; /* Always at least two digits of exponent. */
      p = lj_strfmt_wint(p, nde);
    } else {
      /* %f (or, shortly, %g in %f style) */
      if (prec < (MSize)(0x3f & -(int32_t)ndlo) * 9) {
	/* Precision is sufficiently low as to maybe require rounding. */
	ndhi = nd_add_m10e(nd, ndhi, 5, 0 - prec - 1);
      }
      g_format_like_f:
      if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT) && prec && width) {
	/* Decrease precision in order to strip trailing zeroes. */
	if (ndlo) {
	  /* nd has a fractional part; we need to look at its digits. */
	  char tail[9];
	  uint32_t maxprec = (64 - ndlo) * 9;
	  if (prec >= maxprec) prec = maxprec;
	  else ndlo = 64 - (prec + 8) / 9;
	  i = prec - ((63 - ndlo) * 9);
	  lj_strfmt_wuint9(tail, nd[ndlo]);
	  while (prec && tail[--i] == '0') {
	    prec--;
	    if (!i) {
	      if (ndlo == 63) { prec = 0; break; }
	      lj_strfmt_wuint9(tail, nd[++ndlo]);
	      i = 9;
	    }
	  }
	} else {
	  /* nd has no fractional part, so precision goes straight to zero. */
	  prec = 0;
	}
      }
      len = ndhi * 9 + ndigits_dec(nd[ndhi]) + prec + (prefix != 0)
	  + ((prec | (sf & STRFMT_F_ALT)) != 0);
      if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 8);
      if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
	while (width-- > len) *p++ = ' ';
      }
      if (prefix) *p++ = prefix;
      if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
	while (width-- > len) *p++ = '0';
      }
      /* Emit integer part. */
      p = lj_strfmt_wint(p, nd[ndhi]);
      i = ndhi;
      while (i) p = lj_strfmt_wuint9(p, nd[--i]);
      if ((prec | (sf & STRFMT_F_ALT))) {
	/* Emit fractional part. */
	*p++ = '.';
	/* Emit chunks of 9 digits (this may emit 8 digits too many). */
	while ((int32_t)prec > 0 && i != ndlo) {
	  i = (i - 1) & 0x3f;
	  p = lj_strfmt_wuint9(p, nd[i]);
	  prec -= 9;
	}
	if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT)) {
	  /* %g (and not %#g) - strip trailing zeroes. */
	  p += (int32_t)prec & ((int32_t)prec >> 31);
	  while (p[-1] == '0') p--;
	  if (p[-1] == '.') p--;
	} else {
	  /* %f (or %#g) - emit trailing zeroes. */
	  while ((int32_t)prec > 0) { *p++ = '0'; prec--; }
	  p += (int32_t)prec;
	}
      }
    }
  }
  if ((sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' ';
  return p;
}
575
576/* Add formatted floating-point number to buffer. */
577SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat sf, lua_Number n)
578{
579 setsbufP(sb, lj_strfmt_wfnum(sb, sf, n, NULL));
580 return sb;
581}
582
583/* -- Conversions to strings ---------------------------------------------- */
584
585/* Convert number to string. */
586GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o)
587{
588 char buf[STRFMT_MAXBUF_NUM];
589 MSize len = (MSize)(lj_strfmt_wfnum(NULL, STRFMT_G14, o->n, buf) - buf);
590 return lj_str_new(L, buf, len);
591}
592
diff --git a/src/lj_strscan.c b/src/lj_strscan.c
index 8614facd..11abd526 100644
--- a/src/lj_strscan.c
+++ b/src/lj_strscan.c
@@ -79,7 +79,7 @@ static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
79 /* Avoid double rounding for denormals. */ 79 /* Avoid double rounding for denormals. */
80 if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) { 80 if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) {
81 /* NYI: all of this generates way too much code on 32 bit CPUs. */ 81 /* NYI: all of this generates way too much code on 32 bit CPUs. */
82#if defined(__GNUC__) && LJ_64 82#if (defined(__GNUC__) || defined(__clang__)) && LJ_64
83 int32_t b = (int32_t)(__builtin_clzll(x)^63); 83 int32_t b = (int32_t)(__builtin_clzll(x)^63);
84#else 84#else
85 int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) : 85 int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) :
@@ -93,7 +93,7 @@ static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
93 } 93 }
94 94
95 /* Convert to double using a signed int64_t conversion, then rescale. */ 95 /* Convert to double using a signed int64_t conversion, then rescale. */
96 lua_assert((int64_t)x >= 0); 96 lj_assertX((int64_t)x >= 0, "bad double conversion");
97 n = (double)(int64_t)x; 97 n = (double)(int64_t)x;
98 if (neg) n = -n; 98 if (neg) n = -n;
99 if (ex2) n = ldexp(n, ex2); 99 if (ex2) n = ldexp(n, ex2);
@@ -140,7 +140,7 @@ static StrScanFmt strscan_hex(const uint8_t *p, TValue *o,
140 break; 140 break;
141 } 141 }
142 142
143 /* Reduce range then convert to double. */ 143 /* Reduce range, then convert to double. */
144 if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; } 144 if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
145 strscan_double(x, o, ex2, neg); 145 strscan_double(x, o, ex2, neg);
146 return fmt; 146 return fmt;
@@ -262,7 +262,7 @@ static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
262 uint32_t hi = 0, lo = (uint32_t)(xip-xi); 262 uint32_t hi = 0, lo = (uint32_t)(xip-xi);
263 int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1); 263 int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1);
264 264
265 lua_assert(lo > 0 && (ex10 & 1) == 0); 265 lj_assertX(lo > 0 && (ex10 & 1) == 0, "bad lo %d ex10 %d", lo, ex10);
266 266
267 /* Handle simple overflow/underflow. */ 267 /* Handle simple overflow/underflow. */
268 if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; } 268 if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; }
@@ -326,10 +326,55 @@ static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
326 return fmt; 326 return fmt;
327} 327}
328 328
329/* Parse binary number. */
330static StrScanFmt strscan_bin(const uint8_t *p, TValue *o,
331 StrScanFmt fmt, uint32_t opt,
332 int32_t ex2, int32_t neg, uint32_t dig)
333{
334 uint64_t x = 0;
335 uint32_t i;
336
337 if (ex2 || dig > 64) return STRSCAN_ERROR;
338
339 /* Scan binary digits. */
340 for (i = dig; i; i--, p++) {
341 if ((*p & ~1) != '0') return STRSCAN_ERROR;
342 x = (x << 1) | (*p & 1);
343 }
344
345 /* Format-specific handling. */
346 switch (fmt) {
347 case STRSCAN_INT:
348 if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
349 o->i = neg ? -(int32_t)x : (int32_t)x;
350 return STRSCAN_INT; /* Fast path for 32 bit integers. */
351 }
352 if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
353 /* fallthrough */
354 case STRSCAN_U32:
355 if (dig > 32) return STRSCAN_ERROR;
356 o->i = neg ? -(int32_t)x : (int32_t)x;
357 return STRSCAN_U32;
358 case STRSCAN_I64:
359 case STRSCAN_U64:
360 o->u64 = neg ? (uint64_t)-(int64_t)x : x;
361 return fmt;
362 default:
363 break;
364 }
365
366 /* Reduce range, then convert to double. */
367 if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
368 strscan_double(x, o, ex2, neg);
369 return fmt;
370}
371
329/* Scan string containing a number. Returns format. Returns value in o. */ 372/* Scan string containing a number. Returns format. Returns value in o. */
330StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt) 373StrScanFmt lj_strscan_scan(const uint8_t *p, MSize len, TValue *o,
374 uint32_t opt)
331{ 375{
332 int32_t neg = 0; 376 int32_t neg = 0;
377 const uint8_t *pe = p + len;
333 378
334 /* Remove leading space, parse sign and non-numbers. */ 379 /* Remove leading space, parse sign and non-numbers. */
335 if (LJ_UNLIKELY(!lj_char_isdigit(*p))) { 380 if (LJ_UNLIKELY(!lj_char_isdigit(*p))) {
@@ -347,7 +392,7 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
347 p += 3; 392 p += 3;
348 } 393 }
349 while (lj_char_isspace(*p)) p++; 394 while (lj_char_isspace(*p)) p++;
350 if (*p) return STRSCAN_ERROR; 395 if (*p || p < pe) return STRSCAN_ERROR;
351 o->u64 = tmp.u64; 396 o->u64 = tmp.u64;
352 return STRSCAN_NUM; 397 return STRSCAN_NUM;
353 } 398 }
@@ -364,8 +409,12 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
364 409
365 /* Determine base and skip leading zeros. */ 410 /* Determine base and skip leading zeros. */
366 if (LJ_UNLIKELY(*p <= '0')) { 411 if (LJ_UNLIKELY(*p <= '0')) {
367 if (*p == '0' && casecmp(p[1], 'x')) 412 if (*p == '0') {
368 base = 16, cmask = LJ_CHAR_XDIGIT, p += 2; 413 if (casecmp(p[1], 'x'))
414 base = 16, cmask = LJ_CHAR_XDIGIT, p += 2;
415 else if (casecmp(p[1], 'b'))
416 base = 2, cmask = LJ_CHAR_DIGIT, p += 2;
417 }
369 for ( ; ; p++) { 418 for ( ; ; p++) {
370 if (*p == '0') { 419 if (*p == '0') {
371 hasdig = 1; 420 hasdig = 1;
@@ -403,7 +452,7 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
403 } 452 }
404 453
405 /* Parse exponent. */ 454 /* Parse exponent. */
406 if (casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) { 455 if (base >= 10 && casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) {
407 uint32_t xx; 456 uint32_t xx;
408 int negx = 0; 457 int negx = 0;
409 fmt = STRSCAN_NUM; p++; 458 fmt = STRSCAN_NUM; p++;
@@ -441,6 +490,7 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
441 while (lj_char_isspace(*p)) p++; 490 while (lj_char_isspace(*p)) p++;
442 if (*p) return STRSCAN_ERROR; 491 if (*p) return STRSCAN_ERROR;
443 } 492 }
493 if (p < pe) return STRSCAN_ERROR;
444 494
445 /* Fast path for decimal 32 bit integers. */ 495 /* Fast path for decimal 32 bit integers. */
446 if (fmt == STRSCAN_INT && base == 10 && 496 if (fmt == STRSCAN_INT && base == 10 &&
@@ -459,6 +509,8 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
459 return strscan_oct(sp, o, fmt, neg, dig); 509 return strscan_oct(sp, o, fmt, neg, dig);
460 if (base == 16) 510 if (base == 16)
461 fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig); 511 fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig);
512 else if (base == 2)
513 fmt = strscan_bin(sp, o, fmt, opt, ex, neg, dig);
462 else 514 else
463 fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig); 515 fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig);
464 516
@@ -474,18 +526,19 @@ StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
474 526
475int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o) 527int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o)
476{ 528{
477 StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, 529 StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o,
478 STRSCAN_OPT_TONUM); 530 STRSCAN_OPT_TONUM);
479 lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM); 531 lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM, "bad scan format");
480 return (fmt != STRSCAN_ERROR); 532 return (fmt != STRSCAN_ERROR);
481} 533}
482 534
483#if LJ_DUALNUM 535#if LJ_DUALNUM
484int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o) 536int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o)
485{ 537{
486 StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o, 538 StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o,
487 STRSCAN_OPT_TOINT); 539 STRSCAN_OPT_TOINT);
488 lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT); 540 lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT,
541 "bad scan format");
489 if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM); 542 if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM);
490 return (fmt != STRSCAN_ERROR); 543 return (fmt != STRSCAN_ERROR);
491} 544}
diff --git a/src/lj_strscan.h b/src/lj_strscan.h
index 42aa1455..30e271b2 100644
--- a/src/lj_strscan.h
+++ b/src/lj_strscan.h
@@ -22,7 +22,8 @@ typedef enum {
22 STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64, 22 STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
23} StrScanFmt; 23} StrScanFmt;
24 24
25LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt); 25LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, MSize len, TValue *o,
26 uint32_t opt);
26LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o); 27LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
27#if LJ_DUALNUM 28#if LJ_DUALNUM
28LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o); 29LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
diff --git a/src/lj_tab.c b/src/lj_tab.c
index a45ddaca..982b0763 100644
--- a/src/lj_tab.c
+++ b/src/lj_tab.c
@@ -23,18 +23,22 @@ static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
23 return &n[hash & t->hmask]; 23 return &n[hash & t->hmask];
24} 24}
25 25
26/* String hashes are precomputed when they are interned. */ 26/* String IDs are generated when a string is interned. */
27#define hashstr(t, s) hashmask(t, (s)->hash) 27#define hashstr(t, s) hashmask(t, (s)->sid)
28 28
29#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi))) 29#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi)))
30#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1)) 30#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
31#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS) 31#if LJ_GC64
32#define hashgcref(t, r) \
33 hashlohi((t), (uint32_t)gcrefu(r), (uint32_t)(gcrefu(r) >> 32))
34#else
32#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS) 35#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
36#endif
33 37
34/* Hash an arbitrary key and return its anchor position in the hash table. */ 38/* Hash an arbitrary key and return its anchor position in the hash table. */
35static Node *hashkey(const GCtab *t, cTValue *key) 39static Node *hashkey(const GCtab *t, cTValue *key)
36{ 40{
37 lua_assert(!tvisint(key)); 41 lj_assertX(!tvisint(key), "attempt to hash integer");
38 if (tvisstr(key)) 42 if (tvisstr(key))
39 return hashstr(t, strV(key)); 43 return hashstr(t, strV(key));
40 else if (tvisnum(key)) 44 else if (tvisnum(key))
@@ -53,13 +57,13 @@ static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
53{ 57{
54 uint32_t hsize; 58 uint32_t hsize;
55 Node *node; 59 Node *node;
56 lua_assert(hbits != 0); 60 lj_assertL(hbits != 0, "zero hash size");
57 if (hbits > LJ_MAX_HBITS) 61 if (hbits > LJ_MAX_HBITS)
58 lj_err_msg(L, LJ_ERR_TABOV); 62 lj_err_msg(L, LJ_ERR_TABOV);
59 hsize = 1u << hbits; 63 hsize = 1u << hbits;
60 node = lj_mem_newvec(L, hsize, Node); 64 node = lj_mem_newvec(L, hsize, Node);
61 setmref(node->freetop, &node[hsize]);
62 setmref(t->node, node); 65 setmref(t->node, node);
66 setfreetop(t, node, &node[hsize]);
63 t->hmask = hsize-1; 67 t->hmask = hsize-1;
64} 68}
65 69
@@ -74,7 +78,7 @@ static LJ_AINLINE void clearhpart(GCtab *t)
74{ 78{
75 uint32_t i, hmask = t->hmask; 79 uint32_t i, hmask = t->hmask;
76 Node *node = noderef(t->node); 80 Node *node = noderef(t->node);
77 lua_assert(t->hmask != 0); 81 lj_assertX(t->hmask != 0, "empty hash part");
78 for (i = 0; i <= hmask; i++) { 82 for (i = 0; i <= hmask; i++) {
79 Node *n = &node[i]; 83 Node *n = &node[i];
80 setmref(n->next, NULL); 84 setmref(n->next, NULL);
@@ -98,7 +102,8 @@ static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
98 GCtab *t; 102 GCtab *t;
99 /* First try to colocate the array part. */ 103 /* First try to colocate the array part. */
100 if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { 104 if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
101 lua_assert((sizeof(GCtab) & 7) == 0); 105 Node *nilnode;
106 lj_assertL((sizeof(GCtab) & 7) == 0, "bad GCtab size");
102 t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); 107 t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
103 t->gct = ~LJ_TTAB; 108 t->gct = ~LJ_TTAB;
104 t->nomm = (uint8_t)~0; 109 t->nomm = (uint8_t)~0;
@@ -107,8 +112,13 @@ static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
107 setgcrefnull(t->metatable); 112 setgcrefnull(t->metatable);
108 t->asize = asize; 113 t->asize = asize;
109 t->hmask = 0; 114 t->hmask = 0;
110 setmref(t->node, &G(L)->nilnode); 115 nilnode = &G(L)->nilnode;
116 setmref(t->node, nilnode);
117#if LJ_GC64
118 setmref(t->freetop, nilnode);
119#endif
111 } else { /* Otherwise separately allocate the array part. */ 120 } else { /* Otherwise separately allocate the array part. */
121 Node *nilnode;
112 t = lj_mem_newobj(L, GCtab); 122 t = lj_mem_newobj(L, GCtab);
113 t->gct = ~LJ_TTAB; 123 t->gct = ~LJ_TTAB;
114 t->nomm = (uint8_t)~0; 124 t->nomm = (uint8_t)~0;
@@ -117,7 +127,11 @@ static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
117 setgcrefnull(t->metatable); 127 setgcrefnull(t->metatable);
118 t->asize = 0; /* In case the array allocation fails. */ 128 t->asize = 0; /* In case the array allocation fails. */
119 t->hmask = 0; 129 t->hmask = 0;
120 setmref(t->node, &G(L)->nilnode); 130 nilnode = &G(L)->nilnode;
131 setmref(t->node, nilnode);
132#if LJ_GC64
133 setmref(t->freetop, nilnode);
134#endif
121 if (asize > 0) { 135 if (asize > 0) {
122 if (asize > LJ_MAX_ASIZE) 136 if (asize > LJ_MAX_ASIZE)
123 lj_err_msg(L, LJ_ERR_TABOV); 137 lj_err_msg(L, LJ_ERR_TABOV);
@@ -149,6 +163,12 @@ GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
149 return t; 163 return t;
150} 164}
151 165
166/* The API of this function conforms to lua_createtable(). */
167GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h)
168{
169 return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h));
170}
171
152#if LJ_HASJIT 172#if LJ_HASJIT
153GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize) 173GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
154{ 174{
@@ -165,7 +185,8 @@ GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
165 GCtab *t; 185 GCtab *t;
166 uint32_t asize, hmask; 186 uint32_t asize, hmask;
167 t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); 187 t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
168 lua_assert(kt->asize == t->asize && kt->hmask == t->hmask); 188 lj_assertL(kt->asize == t->asize && kt->hmask == t->hmask,
189 "mismatched size of table and template");
169 t->nomm = 0; /* Keys with metamethod names may be present. */ 190 t->nomm = 0; /* Keys with metamethod names may be present. */
170 asize = kt->asize; 191 asize = kt->asize;
171 if (asize > 0) { 192 if (asize > 0) {
@@ -185,7 +206,7 @@ GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
185 Node *node = noderef(t->node); 206 Node *node = noderef(t->node);
186 Node *knode = noderef(kt->node); 207 Node *knode = noderef(kt->node);
187 ptrdiff_t d = (char *)node - (char *)knode; 208 ptrdiff_t d = (char *)node - (char *)knode;
188 setmref(node->freetop, (Node *)((char *)noderef(knode->freetop) + d)); 209 setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d));
189 for (i = 0; i <= hmask; i++) { 210 for (i = 0; i <= hmask; i++) {
190 Node *kn = &knode[i]; 211 Node *kn = &knode[i];
191 Node *n = &node[i]; 212 Node *n = &node[i];
@@ -198,6 +219,17 @@ GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
198 return t; 219 return t;
199} 220}
200 221
222/* Clear a table. */
223void LJ_FASTCALL lj_tab_clear(GCtab *t)
224{
225 clearapart(t);
226 if (t->hmask > 0) {
227 Node *node = noderef(t->node);
228 setfreetop(t, node, &node[t->hmask+1]);
229 clearhpart(t);
230 }
231}
232
201/* Free a table. */ 233/* Free a table. */
202void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t) 234void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
203{ 235{
@@ -214,7 +246,7 @@ void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
214/* -- Table resizing ------------------------------------------------------ */ 246/* -- Table resizing ------------------------------------------------------ */
215 247
216/* Resize a table to fit the new array/hash part sizes. */ 248/* Resize a table to fit the new array/hash part sizes. */
217static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits) 249void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
218{ 250{
219 Node *oldnode = noderef(t->node); 251 Node *oldnode = noderef(t->node);
220 uint32_t oldasize = t->asize; 252 uint32_t oldasize = t->asize;
@@ -247,6 +279,9 @@ static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
247 } else { 279 } else {
248 global_State *g = G(L); 280 global_State *g = G(L);
249 setmref(t->node, &g->nilnode); 281 setmref(t->node, &g->nilnode);
282#if LJ_GC64
283 setmref(t->freetop, &g->nilnode);
284#endif
250 t->hmask = 0; 285 t->hmask = 0;
251 } 286 }
252 if (asize < oldasize) { /* Array part shrinks? */ 287 if (asize < oldasize) { /* Array part shrinks? */
@@ -276,7 +311,7 @@ static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
276 311
277static uint32_t countint(cTValue *key, uint32_t *bins) 312static uint32_t countint(cTValue *key, uint32_t *bins)
278{ 313{
279 lua_assert(!tvisint(key)); 314 lj_assertX(!tvisint(key), "bad integer key");
280 if (tvisnum(key)) { 315 if (tvisnum(key)) {
281 lua_Number nk = numV(key); 316 lua_Number nk = numV(key);
282 int32_t k = lj_num2int(nk); 317 int32_t k = lj_num2int(nk);
@@ -348,7 +383,7 @@ static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
348 asize += countint(ek, bins); 383 asize += countint(ek, bins);
349 na = bestasize(bins, &asize); 384 na = bestasize(bins, &asize);
350 total -= na; 385 total -= na;
351 resizetab(L, t, asize, hsize2hbits(total)); 386 lj_tab_resize(L, t, asize, hsize2hbits(total));
352} 387}
353 388
354#if LJ_HASFFI 389#if LJ_HASFFI
@@ -360,7 +395,7 @@ void lj_tab_rehash(lua_State *L, GCtab *t)
360 395
361void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize) 396void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
362{ 397{
363 resizetab(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0); 398 lj_tab_resize(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
364} 399}
365 400
366/* -- Table getters ------------------------------------------------------- */ 401/* -- Table getters ------------------------------------------------------- */
@@ -428,16 +463,17 @@ TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
428 Node *n = hashkey(t, key); 463 Node *n = hashkey(t, key);
429 if (!tvisnil(&n->val) || t->hmask == 0) { 464 if (!tvisnil(&n->val) || t->hmask == 0) {
430 Node *nodebase = noderef(t->node); 465 Node *nodebase = noderef(t->node);
431 Node *collide, *freenode = noderef(nodebase->freetop); 466 Node *collide, *freenode = getfreetop(t, nodebase);
432 lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1); 467 lj_assertL(freenode >= nodebase && freenode <= nodebase+t->hmask+1,
468 "bad freenode");
433 do { 469 do {
434 if (freenode == nodebase) { /* No free node found? */ 470 if (freenode == nodebase) { /* No free node found? */
435 rehashtab(L, t, key); /* Rehash table. */ 471 rehashtab(L, t, key); /* Rehash table. */
436 return lj_tab_set(L, t, key); /* Retry key insertion. */ 472 return lj_tab_set(L, t, key); /* Retry key insertion. */
437 } 473 }
438 } while (!tvisnil(&(--freenode)->key)); 474 } while (!tvisnil(&(--freenode)->key));
439 setmref(nodebase->freetop, freenode); 475 setfreetop(t, nodebase, freenode);
440 lua_assert(freenode != &G(L)->nilnode); 476 lj_assertL(freenode != &G(L)->nilnode, "store to fallback hash");
441 collide = hashkey(t, &n->key); 477 collide = hashkey(t, &n->key);
442 if (collide != n) { /* Colliding node not the main node? */ 478 if (collide != n) { /* Colliding node not the main node? */
443 while (noderef(collide->next) != n) /* Find predecessor. */ 479 while (noderef(collide->next) != n) /* Find predecessor. */
@@ -493,7 +529,7 @@ TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
493 if (LJ_UNLIKELY(tvismzero(&n->key))) 529 if (LJ_UNLIKELY(tvismzero(&n->key)))
494 n->key.u64 = 0; 530 n->key.u64 = 0;
495 lj_gc_anybarriert(L, t); 531 lj_gc_anybarriert(L, t);
496 lua_assert(tvisnil(&n->val)); 532 lj_assertL(tvisnil(&n->val), "new hash slot is not empty");
497 return &n->val; 533 return &n->val;
498} 534}
499 535
@@ -605,49 +641,62 @@ int lj_tab_next(lua_State *L, GCtab *t, TValue *key)
605 641
606/* -- Table length calculation -------------------------------------------- */ 642/* -- Table length calculation -------------------------------------------- */
607 643
608static MSize unbound_search(GCtab *t, MSize j) 644/* Compute table length. Slow path with mixed array/hash lookups. */
645LJ_NOINLINE static MSize tab_len_slow(GCtab *t, size_t hi)
609{ 646{
610 cTValue *tv; 647 cTValue *tv;
611 MSize i = j; /* i is zero or a present index */ 648 size_t lo = hi;
612 j++; 649 hi++;
613 /* find `i' and `j' such that i is present and j is not */ 650 /* Widening search for an upper bound. */
614 while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) { 651 while ((tv = lj_tab_getint(t, (int32_t)hi)) && !tvisnil(tv)) {
615 i = j; 652 lo = hi;
616 j *= 2; 653 hi += hi;
617 if (j > (MSize)(INT_MAX-2)) { /* overflow? */ 654 if (hi > (size_t)(INT_MAX-2)) { /* Punt and do a linear search. */
618 /* table was built with bad purposes: resort to linear search */ 655 lo = 1;
619 i = 1; 656 while ((tv = lj_tab_getint(t, (int32_t)lo)) && !tvisnil(tv)) lo++;
620 while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++; 657 return (MSize)(lo - 1);
621 return i - 1;
622 } 658 }
623 } 659 }
624 /* now do a binary search between them */ 660 /* Binary search to find a non-nil to nil transition. */
625 while (j - i > 1) { 661 while (hi - lo > 1) {
626 MSize m = (i+j)/2; 662 size_t mid = (lo+hi) >> 1;
627 cTValue *tvb = lj_tab_getint(t, (int32_t)m); 663 cTValue *tvb = lj_tab_getint(t, (int32_t)mid);
628 if (tvb && !tvisnil(tvb)) i = m; else j = m; 664 if (tvb && !tvisnil(tvb)) lo = mid; else hi = mid;
629 } 665 }
630 return i; 666 return (MSize)lo;
631} 667}
632 668
633/* 669/* Compute table length. Fast path. */
634** Try to find a boundary in table `t'. A `boundary' is an integer index
635** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
636*/
637MSize LJ_FASTCALL lj_tab_len(GCtab *t) 670MSize LJ_FASTCALL lj_tab_len(GCtab *t)
638{ 671{
639 MSize j = (MSize)t->asize; 672 size_t hi = (size_t)t->asize;
640 if (j > 1 && tvisnil(arrayslot(t, j-1))) { 673 if (hi) hi--;
641 MSize i = 1; 674 /* In a growing array the last array element is very likely nil. */
642 while (j - i > 1) { 675 if (hi > 0 && LJ_LIKELY(tvisnil(arrayslot(t, hi)))) {
643 MSize m = (i+j)/2; 676 /* Binary search to find a non-nil to nil transition in the array. */
644 if (tvisnil(arrayslot(t, m-1))) j = m; else i = m; 677 size_t lo = 0;
678 while (hi - lo > 1) {
679 size_t mid = (lo+hi) >> 1;
680 if (tvisnil(arrayslot(t, mid))) hi = mid; else lo = mid;
645 } 681 }
646 return i-1; 682 return (MSize)lo;
647 } 683 }
648 if (j) j--; 684 /* Without a hash part, there's an implicit nil after the last element. */
649 if (t->hmask <= 0) 685 return t->hmask ? tab_len_slow(t, hi) : (MSize)hi;
650 return j;
651 return unbound_search(t, j);
652} 686}
653 687
688#if LJ_HASJIT
689/* Verify hinted table length or compute it. */
690MSize LJ_FASTCALL lj_tab_len_hint(GCtab *t, size_t hint)
691{
692 size_t asize = (size_t)t->asize;
693 cTValue *tv = arrayslot(t, hint);
694 if (LJ_LIKELY(hint+1 < asize)) {
695 if (LJ_LIKELY(!tvisnil(tv) && tvisnil(tv+1))) return (MSize)hint;
696 } else if (hint+1 <= asize && LJ_LIKELY(t->hmask == 0) && !tvisnil(tv)) {
697 return (MSize)hint;
698 }
699 return lj_tab_len(t);
700}
701#endif
702
diff --git a/src/lj_tab.h b/src/lj_tab.h
index dc3c8dc1..f31590cd 100644
--- a/src/lj_tab.h
+++ b/src/lj_tab.h
@@ -34,14 +34,17 @@ static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
34#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0) 34#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
35 35
36LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits); 36LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
37LJ_FUNC GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h);
37#if LJ_HASJIT 38#if LJ_HASJIT
38LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize); 39LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
39#endif 40#endif
40LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt); 41LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
42LJ_FUNC void LJ_FASTCALL lj_tab_clear(GCtab *t);
41LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t); 43LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
42#if LJ_HASFFI 44#if LJ_HASFFI
43LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t); 45LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t);
44#endif 46#endif
47LJ_FUNC void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits);
45LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize); 48LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
46 49
47/* Caveat: all getters except lj_tab_get() can return NULL! */ 50/* Caveat: all getters except lj_tab_get() can return NULL! */
@@ -53,7 +56,7 @@ LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
53/* Caveat: all setters require a write barrier for the stored value. */ 56/* Caveat: all setters require a write barrier for the stored value. */
54 57
55LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key); 58LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
56LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key); 59LJ_FUNCA TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
57LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key); 60LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key);
58LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key); 61LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
59 62
@@ -66,5 +69,8 @@ LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
66 69
67LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key); 70LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key);
68LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t); 71LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
72#if LJ_HASJIT
73LJ_FUNC MSize LJ_FASTCALL lj_tab_len_hint(GCtab *t, size_t hint);
74#endif
69 75
70#endif 76#endif
diff --git a/src/lj_target.h b/src/lj_target.h
index a8182596..ce67d000 100644
--- a/src/lj_target.h
+++ b/src/lj_target.h
@@ -55,7 +55,7 @@ typedef uint32_t RegSP;
55/* Bitset for registers. 32 registers suffice for most architectures. 55/* Bitset for registers. 32 registers suffice for most architectures.
56** Note that one set holds bits for both GPRs and FPRs. 56** Note that one set holds bits for both GPRs and FPRs.
57*/ 57*/
58#if LJ_TARGET_PPC || LJ_TARGET_MIPS 58#if LJ_TARGET_PPC || LJ_TARGET_MIPS || LJ_TARGET_ARM64
59typedef uint64_t RegSet; 59typedef uint64_t RegSet;
60#else 60#else
61typedef uint32_t RegSet; 61typedef uint32_t RegSet;
@@ -69,7 +69,7 @@ typedef uint32_t RegSet;
69#define rset_set(rs, r) (rs |= RID2RSET(r)) 69#define rset_set(rs, r) (rs |= RID2RSET(r))
70#define rset_clear(rs, r) (rs &= ~RID2RSET(r)) 70#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
71#define rset_exclude(rs, r) (rs & ~RID2RSET(r)) 71#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
72#if LJ_TARGET_PPC || LJ_TARGET_MIPS 72#if LJ_TARGET_PPC || LJ_TARGET_MIPS || LJ_TARGET_ARM64
73#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63)) 73#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
74#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs)) 74#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
75#else 75#else
@@ -138,6 +138,8 @@ typedef uint32_t RegCost;
138#include "lj_target_x86.h" 138#include "lj_target_x86.h"
139#elif LJ_TARGET_ARM 139#elif LJ_TARGET_ARM
140#include "lj_target_arm.h" 140#include "lj_target_arm.h"
141#elif LJ_TARGET_ARM64
142#include "lj_target_arm64.h"
141#elif LJ_TARGET_PPC 143#elif LJ_TARGET_PPC
142#include "lj_target_ppc.h" 144#include "lj_target_ppc.h"
143#elif LJ_TARGET_MIPS 145#elif LJ_TARGET_MIPS
@@ -150,7 +152,8 @@ typedef uint32_t RegCost;
150/* Return the address of an exit stub. */ 152/* Return the address of an exit stub. */
151static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno) 153static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
152{ 154{
153 lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL); 155 lj_assertX(group[exitno / EXITSTUBS_PER_GROUP] != NULL,
156 "exit stub group for exit %d uninitialized", exitno);
154 return (char *)group[exitno / EXITSTUBS_PER_GROUP] + 157 return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
155 EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP); 158 EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
156} 159}
diff --git a/src/lj_target_arm.h b/src/lj_target_arm.h
index 4d292dc9..48e50fe9 100644
--- a/src/lj_target_arm.h
+++ b/src/lj_target_arm.h
@@ -243,10 +243,6 @@ typedef enum ARMIns {
243 ARMI_VCVT_S32_F64 = 0xeebd0bc0, 243 ARMI_VCVT_S32_F64 = 0xeebd0bc0,
244 ARMI_VCVT_U32_F32 = 0xeebc0ac0, 244 ARMI_VCVT_U32_F32 = 0xeebc0ac0,
245 ARMI_VCVT_U32_F64 = 0xeebc0bc0, 245 ARMI_VCVT_U32_F64 = 0xeebc0bc0,
246 ARMI_VCVTR_S32_F32 = 0xeebd0a40,
247 ARMI_VCVTR_S32_F64 = 0xeebd0b40,
248 ARMI_VCVTR_U32_F32 = 0xeebc0a40,
249 ARMI_VCVTR_U32_F64 = 0xeebc0b40,
250 ARMI_VCVT_F32_S32 = 0xeeb80ac0, 246 ARMI_VCVT_F32_S32 = 0xeeb80ac0,
251 ARMI_VCVT_F64_S32 = 0xeeb80bc0, 247 ARMI_VCVT_F64_S32 = 0xeeb80bc0,
252 ARMI_VCVT_F32_U32 = 0xeeb80a40, 248 ARMI_VCVT_F32_U32 = 0xeeb80a40,
diff --git a/src/lj_target_arm64.h b/src/lj_target_arm64.h
new file mode 100644
index 00000000..d729e178
--- /dev/null
+++ b/src/lj_target_arm64.h
@@ -0,0 +1,332 @@
1/*
2** Definitions for ARM64 CPUs.
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#ifndef _LJ_TARGET_ARM64_H
7#define _LJ_TARGET_ARM64_H
8
9/* -- Registers IDs ------------------------------------------------------- */
10
11#define GPRDEF(_) \
12 _(X0) _(X1) _(X2) _(X3) _(X4) _(X5) _(X6) _(X7) \
13 _(X8) _(X9) _(X10) _(X11) _(X12) _(X13) _(X14) _(X15) \
14 _(X16) _(X17) _(X18) _(X19) _(X20) _(X21) _(X22) _(X23) \
15 _(X24) _(X25) _(X26) _(X27) _(X28) _(FP) _(LR) _(SP)
16#define FPRDEF(_) \
17 _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
18 _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) \
19 _(D16) _(D17) _(D18) _(D19) _(D20) _(D21) _(D22) _(D23) \
20 _(D24) _(D25) _(D26) _(D27) _(D28) _(D29) _(D30) _(D31)
21#define VRIDDEF(_)
22
23#define RIDENUM(name) RID_##name,
24
25enum {
26 GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
27 FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
28 RID_MAX,
29 RID_TMP = RID_LR,
30 RID_ZERO = RID_SP,
31
32 /* Calling conventions. */
33 RID_RET = RID_X0,
34 RID_FPRET = RID_D0,
35
36 /* These definitions must match with the *.dasc file(s): */
37 RID_BASE = RID_X19, /* Interpreter BASE. */
38 RID_LPC = RID_X21, /* Interpreter PC. */
39 RID_GL = RID_X22, /* Interpreter GL. */
40 RID_LREG = RID_X23, /* Interpreter L. */
41
42 /* Register ranges [min, max) and number of registers. */
43 RID_MIN_GPR = RID_X0,
44 RID_MAX_GPR = RID_SP+1,
45 RID_MIN_FPR = RID_MAX_GPR,
46 RID_MAX_FPR = RID_D31+1,
47 RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
48 RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
49};
50
51#define RID_NUM_KREF RID_NUM_GPR
52#define RID_MIN_KREF RID_X0
53
54/* -- Register sets ------------------------------------------------------- */
55
56/* Make use of all registers, except for x18, fp, lr and sp. */
57#define RSET_FIXED \
58 (RID2RSET(RID_X18)|RID2RSET(RID_FP)|RID2RSET(RID_LR)|RID2RSET(RID_SP)|\
59 RID2RSET(RID_GL))
60#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
61#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
62#define RSET_ALL (RSET_GPR|RSET_FPR)
63#define RSET_INIT RSET_ALL
64
65/* lr is an implicit scratch register. */
66#define RSET_SCRATCH_GPR (RSET_RANGE(RID_X0, RID_X17+1))
67#define RSET_SCRATCH_FPR \
68 (RSET_RANGE(RID_D0, RID_D7+1)|RSET_RANGE(RID_D16, RID_D31+1))
69#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
70#define REGARG_FIRSTGPR RID_X0
71#define REGARG_LASTGPR RID_X7
72#define REGARG_NUMGPR 8
73#define REGARG_FIRSTFPR RID_D0
74#define REGARG_LASTFPR RID_D7
75#define REGARG_NUMFPR 8
76
77/* -- Spill slots --------------------------------------------------------- */
78
79/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
80**
81** SPS_FIXED: Available fixed spill slots in interpreter frame.
82** This definition must match with the vm_arm64.dasc file.
83** Pre-allocate some slots to avoid sp adjust in every root trace.
84**
85** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
86*/
87#define SPS_FIXED 4
88#define SPS_FIRST 2
89
90#define SPOFS_TMP 0
91
92#define sps_scale(slot) (4 * (int32_t)(slot))
93#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
94
95/* -- Exit state ---------------------------------------------------------- */
96
97/* This definition must match with the *.dasc file(s). */
98typedef struct {
99 lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
100 intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
101 int32_t spill[256]; /* Spill slots. */
102} ExitState;
103
104/* Highest exit + 1 indicates stack check. */
105#define EXITSTATE_CHECKEXIT 1
106
107/* Return the address of a per-trace exit stub. */
108static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
109{
110 while (*p == (LJ_LE ? 0xd503201f : 0x1f2003d5)) p++; /* Skip A64I_NOP. */
111 return p + 3 + exitno;
112}
113/* Avoid dependence on lj_jit.h if only including lj_target.h. */
114#define exitstub_trace_addr(T, exitno) \
115 exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
116
117/* -- Instructions -------------------------------------------------------- */
118
119/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
120#if LJ_BE
121#define A64I_LE(x) (lj_bswap(x))
122#else
123#define A64I_LE(x) (x)
124#endif
125
126/* Instruction fields. */
127#define A64F_D(r) (r)
128#define A64F_N(r) ((r) << 5)
129#define A64F_A(r) ((r) << 10)
130#define A64F_M(r) ((r) << 16)
131#define A64F_IMMS(x) ((x) << 10)
132#define A64F_IMMR(x) ((x) << 16)
133#define A64F_U16(x) ((x) << 5)
134#define A64F_U12(x) ((x) << 10)
135#define A64F_S26(x) (((uint32_t)(x) & 0x03ffffffu))
136#define A64F_S19(x) (((uint32_t)(x) & 0x7ffffu) << 5)
137#define A64F_S14(x) (((uint32_t)(x) & 0x3fffu) << 5)
138#define A64F_S9(x) ((x) << 12)
139#define A64F_BIT(x) ((x) << 19)
140#define A64F_SH(sh, x) (((sh) << 22) | ((x) << 10))
141#define A64F_EX(ex) (A64I_EX | ((ex) << 13))
142#define A64F_EXSH(ex,x) (A64I_EX | ((ex) << 13) | ((x) << 10))
143#define A64F_FP8(x) ((x) << 13)
144#define A64F_CC(cc) ((cc) << 12)
145#define A64F_LSL16(x) (((x) / 16) << 21)
146#define A64F_BSH(sh) ((sh) << 10)
147
148/* Check for valid field range. */
149#define A64F_S_OK(x, b) ((((x) + (1 << (b-1))) >> (b)) == 0)
150
151typedef enum A64Ins {
152 A64I_S = 0x20000000,
153 A64I_X = 0x80000000,
154 A64I_EX = 0x00200000,
155 A64I_ON = 0x00200000,
156 A64I_K12 = 0x1a000000,
157 A64I_K13 = 0x18000000,
158 A64I_LS_U = 0x01000000,
159 A64I_LS_S = 0x00800000,
160 A64I_LS_R = 0x01200800,
161 A64I_LS_SH = 0x00001000,
162 A64I_LS_UXTWx = 0x00004000,
163 A64I_LS_SXTWx = 0x0000c000,
164 A64I_LS_SXTXx = 0x0000e000,
165 A64I_LS_LSLx = 0x00006000,
166
167 A64I_ADDw = 0x0b000000,
168 A64I_ADDx = 0x8b000000,
169 A64I_ADDSw = 0x2b000000,
170 A64I_ADDSx = 0xab000000,
171 A64I_NEGw = 0x4b0003e0,
172 A64I_NEGx = 0xcb0003e0,
173 A64I_SUBw = 0x4b000000,
174 A64I_SUBx = 0xcb000000,
175 A64I_SUBSw = 0x6b000000,
176 A64I_SUBSx = 0xeb000000,
177
178 A64I_MULw = 0x1b007c00,
179 A64I_MULx = 0x9b007c00,
180 A64I_SMULL = 0x9b207c00,
181
182 A64I_ANDw = 0x0a000000,
183 A64I_ANDx = 0x8a000000,
184 A64I_ANDSw = 0x6a000000,
185 A64I_ANDSx = 0xea000000,
186 A64I_EORw = 0x4a000000,
187 A64I_EORx = 0xca000000,
188 A64I_ORRw = 0x2a000000,
189 A64I_ORRx = 0xaa000000,
190 A64I_TSTw = 0x6a00001f,
191 A64I_TSTx = 0xea00001f,
192
193 A64I_CMPw = 0x6b00001f,
194 A64I_CMPx = 0xeb00001f,
195 A64I_CMNw = 0x2b00001f,
196 A64I_CMNx = 0xab00001f,
197 A64I_CCMPw = 0x7a400000,
198 A64I_CCMPx = 0xfa400000,
199 A64I_CSELw = 0x1a800000,
200 A64I_CSELx = 0x9a800000,
201
202 A64I_ASRw = 0x13007c00,
203 A64I_ASRx = 0x9340fc00,
204 A64I_LSLx = 0xd3400000,
205 A64I_LSRx = 0xd340fc00,
206 A64I_SHRw = 0x1ac02000,
207 A64I_SHRx = 0x9ac02000, /* lsl/lsr/asr/ror x0, x0, x0 */
208 A64I_REVw = 0x5ac00800,
209 A64I_REVx = 0xdac00c00,
210
211 A64I_EXTRw = 0x13800000,
212 A64I_EXTRx = 0x93c00000,
213 A64I_SBFMw = 0x13000000,
214 A64I_SBFMx = 0x93400000,
215 A64I_SXTBw = 0x13001c00,
216 A64I_SXTHw = 0x13003c00,
217 A64I_SXTW = 0x93407c00,
218 A64I_UBFMw = 0x53000000,
219 A64I_UBFMx = 0xd3400000,
220 A64I_UXTBw = 0x53001c00,
221 A64I_UXTHw = 0x53003c00,
222
223 A64I_MOVw = 0x2a0003e0,
224 A64I_MOVx = 0xaa0003e0,
225 A64I_MVNw = 0x2a2003e0,
226 A64I_MVNx = 0xaa2003e0,
227 A64I_MOVKw = 0x72800000,
228 A64I_MOVKx = 0xf2800000,
229 A64I_MOVZw = 0x52800000,
230 A64I_MOVZx = 0xd2800000,
231 A64I_MOVNw = 0x12800000,
232 A64I_MOVNx = 0x92800000,
233
234 A64I_LDRB = 0x39400000,
235 A64I_LDRH = 0x79400000,
236 A64I_LDRw = 0xb9400000,
237 A64I_LDRx = 0xf9400000,
238 A64I_LDRLw = 0x18000000,
239 A64I_LDRLx = 0x58000000,
240 A64I_STRB = 0x39000000,
241 A64I_STRH = 0x79000000,
242 A64I_STRw = 0xb9000000,
243 A64I_STRx = 0xf9000000,
244 A64I_STPw = 0x29000000,
245 A64I_STPx = 0xa9000000,
246 A64I_LDPw = 0x29400000,
247 A64I_LDPx = 0xa9400000,
248
249 A64I_B = 0x14000000,
250 A64I_BCC = 0x54000000,
251 A64I_BL = 0x94000000,
252 A64I_BR = 0xd61f0000,
253 A64I_BLR = 0xd63f0000,
254 A64I_TBZ = 0x36000000,
255 A64I_TBNZ = 0x37000000,
256 A64I_CBZ = 0x34000000,
257 A64I_CBNZ = 0x35000000,
258
259 A64I_NOP = 0xd503201f,
260
261 /* FP */
262 A64I_FADDd = 0x1e602800,
263 A64I_FSUBd = 0x1e603800,
264 A64I_FMADDd = 0x1f400000,
265 A64I_FMSUBd = 0x1f408000,
266 A64I_FNMADDd = 0x1f600000,
267 A64I_FNMSUBd = 0x1f608000,
268 A64I_FMULd = 0x1e600800,
269 A64I_FDIVd = 0x1e601800,
270 A64I_FNEGd = 0x1e614000,
271 A64I_FABS = 0x1e60c000,
272 A64I_FSQRTd = 0x1e61c000,
273 A64I_LDRs = 0xbd400000,
274 A64I_LDRd = 0xfd400000,
275 A64I_STRs = 0xbd000000,
276 A64I_STRd = 0xfd000000,
277 A64I_LDPs = 0x2d400000,
278 A64I_LDPd = 0x6d400000,
279 A64I_STPs = 0x2d000000,
280 A64I_STPd = 0x6d000000,
281 A64I_FCMPd = 0x1e602000,
282 A64I_FCMPZd = 0x1e602008,
283 A64I_FCSELd = 0x1e600c00,
284 A64I_FRINTMd = 0x1e654000,
285 A64I_FRINTPd = 0x1e64c000,
286 A64I_FRINTZd = 0x1e65c000,
287
288 A64I_FCVT_F32_F64 = 0x1e624000,
289 A64I_FCVT_F64_F32 = 0x1e22c000,
290 A64I_FCVT_F32_S32 = 0x1e220000,
291 A64I_FCVT_F64_S32 = 0x1e620000,
292 A64I_FCVT_F32_U32 = 0x1e230000,
293 A64I_FCVT_F64_U32 = 0x1e630000,
294 A64I_FCVT_F32_S64 = 0x9e220000,
295 A64I_FCVT_F64_S64 = 0x9e620000,
296 A64I_FCVT_F32_U64 = 0x9e230000,
297 A64I_FCVT_F64_U64 = 0x9e630000,
298 A64I_FCVT_S32_F64 = 0x1e780000,
299 A64I_FCVT_S32_F32 = 0x1e380000,
300 A64I_FCVT_U32_F64 = 0x1e790000,
301 A64I_FCVT_U32_F32 = 0x1e390000,
302 A64I_FCVT_S64_F64 = 0x9e780000,
303 A64I_FCVT_S64_F32 = 0x9e380000,
304 A64I_FCVT_U64_F64 = 0x9e790000,
305 A64I_FCVT_U64_F32 = 0x9e390000,
306
307 A64I_FMOV_S = 0x1e204000,
308 A64I_FMOV_D = 0x1e604000,
309 A64I_FMOV_R_S = 0x1e260000,
310 A64I_FMOV_S_R = 0x1e270000,
311 A64I_FMOV_R_D = 0x9e660000,
312 A64I_FMOV_D_R = 0x9e670000,
313 A64I_FMOV_DI = 0x1e601000,
314} A64Ins;
315
316typedef enum A64Shift {
317 A64SH_LSL, A64SH_LSR, A64SH_ASR, A64SH_ROR
318} A64Shift;
319
320typedef enum A64Extend {
321 A64EX_UXTB, A64EX_UXTH, A64EX_UXTW, A64EX_UXTX,
322 A64EX_SXTB, A64EX_SXTH, A64EX_SXTW, A64EX_SXTX,
323} A64Extend;
324
325/* ARM condition codes. */
326typedef enum A64CC {
327 CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
328 CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
329 CC_HS = CC_CS, CC_LO = CC_CC
330} A64CC;
331
332#endif
diff --git a/src/lj_target_mips.h b/src/lj_target_mips.h
index 4bbdc743..6e436967 100644
--- a/src/lj_target_mips.h
+++ b/src/lj_target_mips.h
@@ -13,11 +13,15 @@
13 _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \ 13 _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
14 _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \ 14 _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
15 _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA) 15 _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
16#if LJ_SOFTFP
17#define FPRDEF(_)
18#else
16#define FPRDEF(_) \ 19#define FPRDEF(_) \
17 _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \ 20 _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
18 _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \ 21 _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
19 _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \ 22 _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
20 _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31) 23 _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
24#endif
21#define VRIDDEF(_) 25#define VRIDDEF(_)
22 26
23#define RIDENUM(name) RID_##name, 27#define RIDENUM(name) RID_##name,
@@ -39,7 +43,11 @@ enum {
39 RID_RETHI = RID_R2, 43 RID_RETHI = RID_R2,
40 RID_RETLO = RID_R3, 44 RID_RETLO = RID_R3,
41#endif 45#endif
46#if LJ_SOFTFP
47 RID_FPRET = RID_R2,
48#else
42 RID_FPRET = RID_F0, 49 RID_FPRET = RID_F0,
50#endif
43 RID_CFUNCADDR = RID_R25, 51 RID_CFUNCADDR = RID_R25,
44 52
45 /* These definitions must match with the *.dasc file(s): */ 53 /* These definitions must match with the *.dasc file(s): */
@@ -52,8 +60,12 @@ enum {
52 /* Register ranges [min, max) and number of registers. */ 60 /* Register ranges [min, max) and number of registers. */
53 RID_MIN_GPR = RID_R0, 61 RID_MIN_GPR = RID_R0,
54 RID_MAX_GPR = RID_RA+1, 62 RID_MAX_GPR = RID_RA+1,
55 RID_MIN_FPR = RID_F0, 63 RID_MIN_FPR = RID_MAX_GPR,
64#if LJ_SOFTFP
65 RID_MAX_FPR = RID_MIN_FPR,
66#else
56 RID_MAX_FPR = RID_F31+1, 67 RID_MAX_FPR = RID_F31+1,
68#endif
57 RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR, 69 RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
58 RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */ 70 RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */
59}; 71};
@@ -68,28 +80,60 @@ enum {
68 (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\ 80 (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
69 RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)|RID2RSET(RID_GP)) 81 RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)|RID2RSET(RID_GP))
70#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED) 82#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
83#if LJ_SOFTFP
84#define RSET_FPR 0
85#else
86#if LJ_32
71#define RSET_FPR \ 87#define RSET_FPR \
72 (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ 88 (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
73 RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ 89 RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
74 RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\ 90 RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
75 RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30)) 91 RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
76#define RSET_ALL (RSET_GPR|RSET_FPR) 92#else
77#define RSET_INIT RSET_ALL 93#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
94#endif
95#endif
96#define RSET_ALL (RSET_GPR|RSET_FPR)
97#define RSET_INIT RSET_ALL
78 98
79#define RSET_SCRATCH_GPR \ 99#define RSET_SCRATCH_GPR \
80 (RSET_RANGE(RID_R1, RID_R15+1)|\ 100 (RSET_RANGE(RID_R1, RID_R15+1)|\
81 RID2RSET(RID_R24)|RID2RSET(RID_R25)) 101 RID2RSET(RID_R24)|RID2RSET(RID_R25))
102#if LJ_SOFTFP
103#define RSET_SCRATCH_FPR 0
104#else
105#if LJ_32
82#define RSET_SCRATCH_FPR \ 106#define RSET_SCRATCH_FPR \
83 (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\ 107 (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
84 RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\ 108 RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
85 RID2RSET(RID_F16)|RID2RSET(RID_F18)) 109 RID2RSET(RID_F16)|RID2RSET(RID_F18))
110#else
111#define RSET_SCRATCH_FPR RSET_RANGE(RID_F0, RID_F24)
112#endif
113#endif
86#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR) 114#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
87#define REGARG_FIRSTGPR RID_R4 115#define REGARG_FIRSTGPR RID_R4
116#if LJ_32
88#define REGARG_LASTGPR RID_R7 117#define REGARG_LASTGPR RID_R7
89#define REGARG_NUMGPR 4 118#define REGARG_NUMGPR 4
119#else
120#define REGARG_LASTGPR RID_R11
121#define REGARG_NUMGPR 8
122#endif
123#if LJ_ABI_SOFTFP
124#define REGARG_FIRSTFPR 0
125#define REGARG_LASTFPR 0
126#define REGARG_NUMFPR 0
127#else
90#define REGARG_FIRSTFPR RID_F12 128#define REGARG_FIRSTFPR RID_F12
129#if LJ_32
91#define REGARG_LASTFPR RID_F14 130#define REGARG_LASTFPR RID_F14
92#define REGARG_NUMFPR 2 131#define REGARG_NUMFPR 2
132#else
133#define REGARG_LASTFPR RID_F19
134#define REGARG_NUMFPR 8
135#endif
136#endif
93 137
94/* -- Spill slots --------------------------------------------------------- */ 138/* -- Spill slots --------------------------------------------------------- */
95 139
@@ -100,7 +144,11 @@ enum {
100** 144**
101** SPS_FIRST: First spill slot for general use. 145** SPS_FIRST: First spill slot for general use.
102*/ 146*/
147#if LJ_32
103#define SPS_FIXED 5 148#define SPS_FIXED 5
149#else
150#define SPS_FIXED 4
151#endif
104#define SPS_FIRST 4 152#define SPS_FIRST 4
105 153
106#define SPOFS_TMP 0 154#define SPOFS_TMP 0
@@ -112,8 +160,10 @@ enum {
112 160
113/* This definition must match with the *.dasc file(s). */ 161/* This definition must match with the *.dasc file(s). */
114typedef struct { 162typedef struct {
163#if !LJ_SOFTFP
115 lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ 164 lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
116 int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ 165#endif
166 intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
117 int32_t spill[256]; /* Spill slots. */ 167 int32_t spill[256]; /* Spill slots. */
118} ExitState; 168} ExitState;
119 169
@@ -142,52 +192,85 @@ static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
142#define MIPSF_F(r) ((r) << 6) 192#define MIPSF_F(r) ((r) << 6)
143#define MIPSF_A(n) ((n) << 6) 193#define MIPSF_A(n) ((n) << 6)
144#define MIPSF_M(n) ((n) << 11) 194#define MIPSF_M(n) ((n) << 11)
195#define MIPSF_L(n) ((n) << 6)
145 196
146typedef enum MIPSIns { 197typedef enum MIPSIns {
198 MIPSI_D = 0x38,
199 MIPSI_DV = 0x10,
200 MIPSI_D32 = 0x3c,
147 /* Integer instructions. */ 201 /* Integer instructions. */
148 MIPSI_MOVE = 0x00000021, 202 MIPSI_MOVE = 0x00000025,
149 MIPSI_NOP = 0x00000000, 203 MIPSI_NOP = 0x00000000,
150 204
151 MIPSI_LI = 0x24000000, 205 MIPSI_LI = 0x24000000,
152 MIPSI_LU = 0x34000000, 206 MIPSI_LU = 0x34000000,
153 MIPSI_LUI = 0x3c000000, 207 MIPSI_LUI = 0x3c000000,
154 208
155 MIPSI_ADDIU = 0x24000000, 209 MIPSI_AND = 0x00000024,
156 MIPSI_ANDI = 0x30000000, 210 MIPSI_ANDI = 0x30000000,
211 MIPSI_OR = 0x00000025,
157 MIPSI_ORI = 0x34000000, 212 MIPSI_ORI = 0x34000000,
213 MIPSI_XOR = 0x00000026,
158 MIPSI_XORI = 0x38000000, 214 MIPSI_XORI = 0x38000000,
215 MIPSI_NOR = 0x00000027,
216
217 MIPSI_SLT = 0x0000002a,
218 MIPSI_SLTU = 0x0000002b,
159 MIPSI_SLTI = 0x28000000, 219 MIPSI_SLTI = 0x28000000,
160 MIPSI_SLTIU = 0x2c000000, 220 MIPSI_SLTIU = 0x2c000000,
161 221
162 MIPSI_ADDU = 0x00000021, 222 MIPSI_ADDU = 0x00000021,
223 MIPSI_ADDIU = 0x24000000,
224 MIPSI_SUB = 0x00000022,
163 MIPSI_SUBU = 0x00000023, 225 MIPSI_SUBU = 0x00000023,
226
227#if !LJ_TARGET_MIPSR6
164 MIPSI_MUL = 0x70000002, 228 MIPSI_MUL = 0x70000002,
165 MIPSI_AND = 0x00000024, 229 MIPSI_DIV = 0x0000001a,
166 MIPSI_OR = 0x00000025, 230 MIPSI_DIVU = 0x0000001b,
167 MIPSI_XOR = 0x00000026, 231
168 MIPSI_NOR = 0x00000027,
169 MIPSI_SLT = 0x0000002a,
170 MIPSI_SLTU = 0x0000002b,
171 MIPSI_MOVZ = 0x0000000a, 232 MIPSI_MOVZ = 0x0000000a,
172 MIPSI_MOVN = 0x0000000b, 233 MIPSI_MOVN = 0x0000000b,
234 MIPSI_MFHI = 0x00000010,
235 MIPSI_MFLO = 0x00000012,
236 MIPSI_MULT = 0x00000018,
237#else
238 MIPSI_MUL = 0x00000098,
239 MIPSI_MUH = 0x000000d8,
240 MIPSI_DIV = 0x0000009a,
241 MIPSI_DIVU = 0x0000009b,
242
243 MIPSI_SELEQZ = 0x00000035,
244 MIPSI_SELNEZ = 0x00000037,
245#endif
173 246
174 MIPSI_SLL = 0x00000000, 247 MIPSI_SLL = 0x00000000,
175 MIPSI_SRL = 0x00000002, 248 MIPSI_SRL = 0x00000002,
176 MIPSI_SRA = 0x00000003, 249 MIPSI_SRA = 0x00000003,
177 MIPSI_ROTR = 0x00200002, /* MIPS32R2 */ 250 MIPSI_ROTR = 0x00200002, /* MIPSXXR2 */
251 MIPSI_DROTR = 0x0020003a,
252 MIPSI_DROTR32 = 0x0020003e,
178 MIPSI_SLLV = 0x00000004, 253 MIPSI_SLLV = 0x00000004,
179 MIPSI_SRLV = 0x00000006, 254 MIPSI_SRLV = 0x00000006,
180 MIPSI_SRAV = 0x00000007, 255 MIPSI_SRAV = 0x00000007,
181 MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */ 256 MIPSI_ROTRV = 0x00000046, /* MIPSXXR2 */
257 MIPSI_DROTRV = 0x00000056,
182 258
183 MIPSI_SEB = 0x7c000420, /* MIPS32R2 */ 259 MIPSI_SEB = 0x7c000420, /* MIPSXXR2 */
184 MIPSI_SEH = 0x7c000620, /* MIPS32R2 */ 260 MIPSI_SEH = 0x7c000620, /* MIPSXXR2 */
185 MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */ 261 MIPSI_WSBH = 0x7c0000a0, /* MIPSXXR2 */
262 MIPSI_DSBH = 0x7c0000a4,
186 263
187 MIPSI_B = 0x10000000, 264 MIPSI_B = 0x10000000,
188 MIPSI_J = 0x08000000, 265 MIPSI_J = 0x08000000,
189 MIPSI_JAL = 0x0c000000, 266 MIPSI_JAL = 0x0c000000,
267#if !LJ_TARGET_MIPSR6
268 MIPSI_JALX = 0x74000000,
190 MIPSI_JR = 0x00000008, 269 MIPSI_JR = 0x00000008,
270#else
271 MIPSI_JR = 0x00000009,
272 MIPSI_BALC = 0xe8000000,
273#endif
191 MIPSI_JALR = 0x0000f809, 274 MIPSI_JALR = 0x0000f809,
192 275
193 MIPSI_BEQ = 0x10000000, 276 MIPSI_BEQ = 0x10000000,
@@ -199,7 +282,9 @@ typedef enum MIPSIns {
199 282
200 /* Load/store instructions. */ 283 /* Load/store instructions. */
201 MIPSI_LW = 0x8c000000, 284 MIPSI_LW = 0x8c000000,
285 MIPSI_LD = 0xdc000000,
202 MIPSI_SW = 0xac000000, 286 MIPSI_SW = 0xac000000,
287 MIPSI_SD = 0xfc000000,
203 MIPSI_LB = 0x80000000, 288 MIPSI_LB = 0x80000000,
204 MIPSI_SB = 0xa0000000, 289 MIPSI_SB = 0xa0000000,
205 MIPSI_LH = 0x84000000, 290 MIPSI_LH = 0x84000000,
@@ -211,11 +296,69 @@ typedef enum MIPSIns {
211 MIPSI_LDC1 = 0xd4000000, 296 MIPSI_LDC1 = 0xd4000000,
212 MIPSI_SDC1 = 0xf4000000, 297 MIPSI_SDC1 = 0xf4000000,
213 298
299 /* MIPS64 instructions. */
300 MIPSI_DADD = 0x0000002c,
301 MIPSI_DADDU = 0x0000002d,
302 MIPSI_DADDIU = 0x64000000,
303 MIPSI_DSUB = 0x0000002e,
304 MIPSI_DSUBU = 0x0000002f,
305#if !LJ_TARGET_MIPSR6
306 MIPSI_DDIV = 0x0000001e,
307 MIPSI_DDIVU = 0x0000001f,
308 MIPSI_DMULT = 0x0000001c,
309 MIPSI_DMULTU = 0x0000001d,
310#else
311 MIPSI_DDIV = 0x0000009e,
312 MIPSI_DMOD = 0x000000de,
313 MIPSI_DDIVU = 0x0000009f,
314 MIPSI_DMODU = 0x000000df,
315 MIPSI_DMUL = 0x0000009c,
316 MIPSI_DMUH = 0x000000dc,
317#endif
318
319 MIPSI_DSLL = 0x00000038,
320 MIPSI_DSRL = 0x0000003a,
321 MIPSI_DSLLV = 0x00000014,
322 MIPSI_DSRLV = 0x00000016,
323 MIPSI_DSRA = 0x0000003b,
324 MIPSI_DSRAV = 0x00000017,
325 MIPSI_DSRA32 = 0x0000003f,
326 MIPSI_DSLL32 = 0x0000003c,
327 MIPSI_DSRL32 = 0x0000003e,
328 MIPSI_DSHD = 0x7c000164,
329
330 MIPSI_AADDU = LJ_32 ? MIPSI_ADDU : MIPSI_DADDU,
331 MIPSI_AADDIU = LJ_32 ? MIPSI_ADDIU : MIPSI_DADDIU,
332 MIPSI_ASUBU = LJ_32 ? MIPSI_SUBU : MIPSI_DSUBU,
333 MIPSI_AL = LJ_32 ? MIPSI_LW : MIPSI_LD,
334 MIPSI_AS = LJ_32 ? MIPSI_SW : MIPSI_SD,
335#if LJ_TARGET_MIPSR6
336 MIPSI_LSA = 0x00000005,
337 MIPSI_DLSA = 0x00000015,
338 MIPSI_ALSA = LJ_32 ? MIPSI_LSA : MIPSI_DLSA,
339#endif
340
341 /* Extract/insert instructions. */
342 MIPSI_DEXTM = 0x7c000001,
343 MIPSI_DEXTU = 0x7c000002,
344 MIPSI_DEXT = 0x7c000003,
345 MIPSI_DINSM = 0x7c000005,
346 MIPSI_DINSU = 0x7c000006,
347 MIPSI_DINS = 0x7c000007,
348
349 MIPSI_FLOOR_D = 0x4620000b,
350
214 /* FP instructions. */ 351 /* FP instructions. */
215 MIPSI_MOV_S = 0x46000006, 352 MIPSI_MOV_S = 0x46000006,
216 MIPSI_MOV_D = 0x46200006, 353 MIPSI_MOV_D = 0x46200006,
354#if !LJ_TARGET_MIPSR6
217 MIPSI_MOVT_D = 0x46210011, 355 MIPSI_MOVT_D = 0x46210011,
218 MIPSI_MOVF_D = 0x46200011, 356 MIPSI_MOVF_D = 0x46200011,
357#else
358 MIPSI_MIN_D = 0x4620001C,
359 MIPSI_MAX_D = 0x4620001E,
360 MIPSI_SEL_D = 0x46200010,
361#endif
219 362
220 MIPSI_ABS_D = 0x46200005, 363 MIPSI_ABS_D = 0x46200005,
221 MIPSI_NEG_D = 0x46200007, 364 MIPSI_NEG_D = 0x46200007,
@@ -235,23 +378,37 @@ typedef enum MIPSIns {
235 MIPSI_CVT_W_D = 0x46200024, 378 MIPSI_CVT_W_D = 0x46200024,
236 MIPSI_CVT_S_W = 0x46800020, 379 MIPSI_CVT_S_W = 0x46800020,
237 MIPSI_CVT_D_W = 0x46800021, 380 MIPSI_CVT_D_W = 0x46800021,
381 MIPSI_CVT_S_L = 0x46a00020,
382 MIPSI_CVT_D_L = 0x46a00021,
238 383
239 MIPSI_TRUNC_W_S = 0x4600000d, 384 MIPSI_TRUNC_W_S = 0x4600000d,
240 MIPSI_TRUNC_W_D = 0x4620000d, 385 MIPSI_TRUNC_W_D = 0x4620000d,
386 MIPSI_TRUNC_L_S = 0x46000009,
387 MIPSI_TRUNC_L_D = 0x46200009,
241 MIPSI_FLOOR_W_S = 0x4600000f, 388 MIPSI_FLOOR_W_S = 0x4600000f,
242 MIPSI_FLOOR_W_D = 0x4620000f, 389 MIPSI_FLOOR_W_D = 0x4620000f,
243 390
244 MIPSI_MFC1 = 0x44000000, 391 MIPSI_MFC1 = 0x44000000,
245 MIPSI_MTC1 = 0x44800000, 392 MIPSI_MTC1 = 0x44800000,
393 MIPSI_DMTC1 = 0x44a00000,
394 MIPSI_DMFC1 = 0x44200000,
246 395
396#if !LJ_TARGET_MIPSR6
247 MIPSI_BC1F = 0x45000000, 397 MIPSI_BC1F = 0x45000000,
248 MIPSI_BC1T = 0x45010000, 398 MIPSI_BC1T = 0x45010000,
249
250 MIPSI_C_EQ_D = 0x46200032, 399 MIPSI_C_EQ_D = 0x46200032,
400 MIPSI_C_OLT_S = 0x46000034,
251 MIPSI_C_OLT_D = 0x46200034, 401 MIPSI_C_OLT_D = 0x46200034,
252 MIPSI_C_ULT_D = 0x46200035, 402 MIPSI_C_ULT_D = 0x46200035,
253 MIPSI_C_OLE_D = 0x46200036, 403 MIPSI_C_OLE_D = 0x46200036,
254 MIPSI_C_ULE_D = 0x46200037, 404 MIPSI_C_ULE_D = 0x46200037,
405#else
406 MIPSI_BC1EQZ = 0x45200000,
407 MIPSI_BC1NEZ = 0x45a00000,
408 MIPSI_CMP_EQ_D = 0x46a00002,
409 MIPSI_CMP_LT_S = 0x46800004,
410 MIPSI_CMP_LT_D = 0x46a00004,
411#endif
255 412
256} MIPSIns; 413} MIPSIns;
257 414
diff --git a/src/lj_target_ppc.h b/src/lj_target_ppc.h
index 580995d5..c7d4c229 100644
--- a/src/lj_target_ppc.h
+++ b/src/lj_target_ppc.h
@@ -104,7 +104,7 @@ enum {
104/* This definition must match with the *.dasc file(s). */ 104/* This definition must match with the *.dasc file(s). */
105typedef struct { 105typedef struct {
106 lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */ 106 lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
107 int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */ 107 intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
108 int32_t spill[256]; /* Spill slots. */ 108 int32_t spill[256]; /* Spill slots. */
109} ExitState; 109} ExitState;
110 110
diff --git a/src/lj_target_x86.h b/src/lj_target_x86.h
index 8a96cbf2..fd72c71d 100644
--- a/src/lj_target_x86.h
+++ b/src/lj_target_x86.h
@@ -22,7 +22,7 @@
22 _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) 22 _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
23#endif 23#endif
24#define VRIDDEF(_) \ 24#define VRIDDEF(_) \
25 _(MRM) 25 _(MRM) _(RIP)
26 26
27#define RIDENUM(name) RID_##name, 27#define RIDENUM(name) RID_##name,
28 28
@@ -31,8 +31,10 @@ enum {
31 FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */ 31 FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
32 RID_MAX, 32 RID_MAX,
33 RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */ 33 RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
34 RID_RIP = RID_MAX+5, /* Pseudo-id for RIP (x64 only), rm bits = 5. */
34 35
35 /* Calling conventions. */ 36 /* Calling conventions. */
37 RID_SP = RID_ESP,
36 RID_RET = RID_EAX, 38 RID_RET = RID_EAX,
37#if LJ_64 39#if LJ_64
38 RID_FPRET = RID_XMM0, 40 RID_FPRET = RID_XMM0,
@@ -62,8 +64,10 @@ enum {
62 64
63/* -- Register sets ------------------------------------------------------- */ 65/* -- Register sets ------------------------------------------------------- */
64 66
65/* Make use of all registers, except the stack pointer. */ 67/* Make use of all registers, except the stack pointer (and maybe DISPATCH). */
66#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP)) 68#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) \
69 - RID2RSET(RID_ESP) \
70 - LJ_GC64*RID2RSET(RID_DISPATCH))
67#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)) 71#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
68#define RSET_ALL (RSET_GPR|RSET_FPR) 72#define RSET_ALL (RSET_GPR|RSET_FPR)
69#define RSET_INIT RSET_ALL 73#define RSET_INIT RSET_ALL
@@ -131,7 +135,11 @@ enum {
131#define SPS_FIXED (4*2) 135#define SPS_FIXED (4*2)
132#define SPS_FIRST (4*2) /* Don't use callee register save area. */ 136#define SPS_FIRST (4*2) /* Don't use callee register save area. */
133#else 137#else
138#if LJ_GC64
139#define SPS_FIXED 2
140#else
134#define SPS_FIXED 4 141#define SPS_FIXED 4
142#endif
135#define SPS_FIRST 2 143#define SPS_FIRST 2
136#endif 144#endif
137#else 145#else
@@ -184,12 +192,18 @@ typedef struct {
184#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24))) 192#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
185#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24))) 193#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
186 194
195#define XV_660f38(o) ((uint32_t)(0x79e2c4 + (0x##o<<24)))
196#define XV_f20f38(o) ((uint32_t)(0x7be2c4 + (0x##o<<24)))
197#define XV_f20f3a(o) ((uint32_t)(0x7be3c4 + (0x##o<<24)))
198#define XV_f30f38(o) ((uint32_t)(0x7ae2c4 + (0x##o<<24)))
199
187/* This list of x86 opcodes is not intended to be complete. Opcodes are only 200/* This list of x86 opcodes is not intended to be complete. Opcodes are only
188** included when needed. Take a look at DynASM or jit.dis_x86 to see the 201** included when needed. Take a look at DynASM or jit.dis_x86 to see the
189** whole mess. 202** whole mess.
190*/ 203*/
191typedef enum { 204typedef enum {
192 /* Fixed length opcodes. XI_* prefix. */ 205 /* Fixed length opcodes. XI_* prefix. */
206 XI_O16 = 0x66,
193 XI_NOP = 0x90, 207 XI_NOP = 0x90,
194 XI_XCHGa = 0x90, 208 XI_XCHGa = 0x90,
195 XI_CALL = 0xe8, 209 XI_CALL = 0xe8,
@@ -207,26 +221,28 @@ typedef enum {
207 XI_PUSHi8 = 0x6a, 221 XI_PUSHi8 = 0x6a,
208 XI_TESTb = 0x84, 222 XI_TESTb = 0x84,
209 XI_TEST = 0x85, 223 XI_TEST = 0x85,
224 XI_INT3 = 0xcc,
210 XI_MOVmi = 0xc7, 225 XI_MOVmi = 0xc7,
211 XI_GROUP5 = 0xff, 226 XI_GROUP5 = 0xff,
212 227
213 /* Note: little-endian byte-order! */ 228 /* Note: little-endian byte-order! */
214 XI_FLDZ = 0xeed9, 229 XI_FLDZ = 0xeed9,
215 XI_FLD1 = 0xe8d9, 230 XI_FLD1 = 0xe8d9,
216 XI_FLDLG2 = 0xecd9,
217 XI_FLDLN2 = 0xedd9,
218 XI_FDUP = 0xc0d9, /* Really fld st0. */ 231 XI_FDUP = 0xc0d9, /* Really fld st0. */
219 XI_FPOP = 0xd8dd, /* Really fstp st0. */ 232 XI_FPOP = 0xd8dd, /* Really fstp st0. */
220 XI_FPOP1 = 0xd9dd, /* Really fstp st1. */ 233 XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
221 XI_FRNDINT = 0xfcd9, 234 XI_FRNDINT = 0xfcd9,
222 XI_FSIN = 0xfed9,
223 XI_FCOS = 0xffd9,
224 XI_FPTAN = 0xf2d9,
225 XI_FPATAN = 0xf3d9,
226 XI_FSCALE = 0xfdd9, 235 XI_FSCALE = 0xfdd9,
227 XI_FYL2X = 0xf1d9, 236 XI_FYL2X = 0xf1d9,
228 237
238 /* VEX-encoded instructions. XV_* prefix. */
239 XV_RORX = XV_f20f3a(f0),
240 XV_SARX = XV_f30f38(f7),
241 XV_SHLX = XV_660f38(f7),
242 XV_SHRX = XV_f20f38(f7),
243
229 /* Variable-length opcodes. XO_* prefix. */ 244 /* Variable-length opcodes. XO_* prefix. */
245 XO_OR = XO_(0b),
230 XO_MOV = XO_(8b), 246 XO_MOV = XO_(8b),
231 XO_MOVto = XO_(89), 247 XO_MOVto = XO_(89),
232 XO_MOVtow = XO_66(89), 248 XO_MOVtow = XO_66(89),
@@ -277,10 +293,8 @@ typedef enum {
277 XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */ 293 XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
278 XO_UCOMISD = XO_660f(2e), 294 XO_UCOMISD = XO_660f(2e),
279 XO_CVTSI2SD = XO_f20f(2a), 295 XO_CVTSI2SD = XO_f20f(2a),
280 XO_CVTSD2SI = XO_f20f(2d),
281 XO_CVTTSD2SI= XO_f20f(2c), 296 XO_CVTTSD2SI= XO_f20f(2c),
282 XO_CVTSI2SS = XO_f30f(2a), 297 XO_CVTSI2SS = XO_f30f(2a),
283 XO_CVTSS2SI = XO_f30f(2d),
284 XO_CVTTSS2SI= XO_f30f(2c), 298 XO_CVTTSS2SI= XO_f30f(2c),
285 XO_CVTSS2SD = XO_f30f(5a), 299 XO_CVTSS2SD = XO_f30f(5a),
286 XO_CVTSD2SS = XO_f20f(5a), 300 XO_CVTSD2SS = XO_f20f(5a),
diff --git a/src/lj_trace.c b/src/lj_trace.c
index c7f3f52d..a3980891 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -30,6 +30,7 @@
30#include "lj_vm.h" 30#include "lj_vm.h"
31#include "lj_vmevent.h" 31#include "lj_vmevent.h"
32#include "lj_target.h" 32#include "lj_target.h"
33#include "lj_prng.h"
33 34
34/* -- Error handling ------------------------------------------------------ */ 35/* -- Error handling ------------------------------------------------------ */
35 36
@@ -104,7 +105,8 @@ static void perftools_addtrace(GCtrace *T)
104 name++; 105 name++;
105 else 106 else
106 name = "(string)"; 107 name = "(string)";
107 lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); 108 lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
109 "trace PC out of range");
108 lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); 110 lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
109 if (!fp) { 111 if (!fp) {
110 char fname[40]; 112 char fname[40];
@@ -117,15 +119,26 @@ static void perftools_addtrace(GCtrace *T)
117} 119}
118#endif 120#endif
119 121
120/* Allocate space for copy of trace. */ 122/* Allocate space for copy of T. */
121static GCtrace *trace_save_alloc(jit_State *J) 123GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
122{ 124{
123 size_t sztr = ((sizeof(GCtrace)+7)&~7); 125 size_t sztr = ((sizeof(GCtrace)+7)&~7);
124 size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns); 126 size_t szins = (T->nins-T->nk)*sizeof(IRIns);
125 size_t sz = sztr + szins + 127 size_t sz = sztr + szins +
126 J->cur.nsnap*sizeof(SnapShot) + 128 T->nsnap*sizeof(SnapShot) +
127 J->cur.nsnapmap*sizeof(SnapEntry); 129 T->nsnapmap*sizeof(SnapEntry);
128 return lj_mem_newt(J->L, (MSize)sz, GCtrace); 130 GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
131 char *p = (char *)T2 + sztr;
132 T2->gct = ~LJ_TTRACE;
133 T2->marked = 0;
134 T2->traceno = 0;
135 T2->ir = (IRIns *)p - T->nk;
136 T2->nins = T->nins;
137 T2->nk = T->nk;
138 T2->nsnap = T->nsnap;
139 T2->nsnapmap = T->nsnapmap;
140 memcpy(p, T->ir + T->nk, szins);
141 return T2;
129} 142}
130 143
131/* Save current trace by copying and compacting it. */ 144/* Save current trace by copying and compacting it. */
@@ -139,12 +152,12 @@ static void trace_save(jit_State *J, GCtrace *T)
139 setgcrefp(J2G(J)->gc.root, T); 152 setgcrefp(J2G(J)->gc.root, T);
140 newwhite(J2G(J), T); 153 newwhite(J2G(J), T);
141 T->gct = ~LJ_TTRACE; 154 T->gct = ~LJ_TTRACE;
142 T->ir = (IRIns *)p - J->cur.nk; 155 T->ir = (IRIns *)p - J->cur.nk; /* The IR has already been copied above. */
143 memcpy(p, J->cur.ir+J->cur.nk, szins);
144 p += szins; 156 p += szins;
145 TRACE_APPENDVEC(snap, nsnap, SnapShot) 157 TRACE_APPENDVEC(snap, nsnap, SnapShot)
146 TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry) 158 TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
147 J->cur.traceno = 0; 159 J->cur.traceno = 0;
160 J->curfinal = NULL;
148 setgcrefp(J->trace[T->traceno], T); 161 setgcrefp(J->trace[T->traceno], T);
149 lj_gc_barriertrace(J2G(J), T->traceno); 162 lj_gc_barriertrace(J2G(J), T->traceno);
150 lj_gdbjit_addtrace(J, T); 163 lj_gdbjit_addtrace(J, T);
@@ -172,7 +185,7 @@ void lj_trace_reenableproto(GCproto *pt)
172{ 185{
173 if ((pt->flags & PROTO_ILOOP)) { 186 if ((pt->flags & PROTO_ILOOP)) {
174 BCIns *bc = proto_bc(pt); 187 BCIns *bc = proto_bc(pt);
175 BCPos i, sizebc = pt->sizebc;; 188 BCPos i, sizebc = pt->sizebc;
176 pt->flags &= ~PROTO_ILOOP; 189 pt->flags &= ~PROTO_ILOOP;
177 if (bc_op(bc[0]) == BC_IFUNCF) 190 if (bc_op(bc[0]) == BC_IFUNCF)
178 setbc_op(&bc[0], BC_FUNCF); 191 setbc_op(&bc[0], BC_FUNCF);
@@ -194,27 +207,28 @@ static void trace_unpatch(jit_State *J, GCtrace *T)
194 return; /* No need to unpatch branches in parent traces (yet). */ 207 return; /* No need to unpatch branches in parent traces (yet). */
195 switch (bc_op(*pc)) { 208 switch (bc_op(*pc)) {
196 case BC_JFORL: 209 case BC_JFORL:
197 lua_assert(traceref(J, bc_d(*pc)) == T); 210 lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
198 *pc = T->startins; 211 *pc = T->startins;
199 pc += bc_j(T->startins); 212 pc += bc_j(T->startins);
200 lua_assert(bc_op(*pc) == BC_JFORI); 213 lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
201 setbc_op(pc, BC_FORI); 214 setbc_op(pc, BC_FORI);
202 break; 215 break;
203 case BC_JITERL: 216 case BC_JITERL:
204 case BC_JLOOP: 217 case BC_JLOOP:
205 lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op)); 218 lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op),
219 "bad original bytecode %d", op);
206 *pc = T->startins; 220 *pc = T->startins;
207 break; 221 break;
208 case BC_JMP: 222 case BC_JMP:
209 lua_assert(op == BC_ITERL); 223 lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
210 pc += bc_j(*pc)+2; 224 pc += bc_j(*pc)+2;
211 if (bc_op(*pc) == BC_JITERL) { 225 if (bc_op(*pc) == BC_JITERL) {
212 lua_assert(traceref(J, bc_d(*pc)) == T); 226 lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
213 *pc = T->startins; 227 *pc = T->startins;
214 } 228 }
215 break; 229 break;
216 case BC_JFUNCF: 230 case BC_JFUNCF:
217 lua_assert(op == BC_FUNCF); 231 lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
218 *pc = T->startins; 232 *pc = T->startins;
219 break; 233 break;
220 default: /* Already unpatched. */ 234 default: /* Already unpatched. */
@@ -226,7 +240,8 @@ static void trace_unpatch(jit_State *J, GCtrace *T)
226static void trace_flushroot(jit_State *J, GCtrace *T) 240static void trace_flushroot(jit_State *J, GCtrace *T)
227{ 241{
228 GCproto *pt = &gcref(T->startpt)->pt; 242 GCproto *pt = &gcref(T->startpt)->pt;
229 lua_assert(T->root == 0 && pt != NULL); 243 lj_assertJ(T->root == 0, "not a root trace");
244 lj_assertJ(pt != NULL, "trace has no prototype");
230 /* First unpatch any modified bytecode. */ 245 /* First unpatch any modified bytecode. */
231 trace_unpatch(J, T); 246 trace_unpatch(J, T);
232 /* Unlink root trace from chain anchored in prototype. */ 247 /* Unlink root trace from chain anchored in prototype. */
@@ -274,7 +289,7 @@ int lj_trace_flushall(lua_State *L)
274 if (T->root == 0) 289 if (T->root == 0)
275 trace_flushroot(J, T); 290 trace_flushroot(J, T);
276 lj_gdbjit_deltrace(J, T); 291 lj_gdbjit_deltrace(J, T);
277 T->traceno = 0; 292 T->traceno = T->link = 0; /* Blacklist the link for cont_stitch. */
278 setgcrefnull(J->trace[i]); 293 setgcrefnull(J->trace[i]);
279 } 294 }
280 } 295 }
@@ -296,13 +311,42 @@ void lj_trace_initstate(global_State *g)
296{ 311{
297 jit_State *J = G2J(g); 312 jit_State *J = G2J(g);
298 TValue *tv; 313 TValue *tv;
299 /* Initialize SIMD constants. */ 314
315 /* Initialize aligned SIMD constants. */
300 tv = LJ_KSIMD(J, LJ_KSIMD_ABS); 316 tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
301 tv[0].u64 = U64x(7fffffff,ffffffff); 317 tv[0].u64 = U64x(7fffffff,ffffffff);
302 tv[1].u64 = U64x(7fffffff,ffffffff); 318 tv[1].u64 = U64x(7fffffff,ffffffff);
303 tv = LJ_KSIMD(J, LJ_KSIMD_NEG); 319 tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
304 tv[0].u64 = U64x(80000000,00000000); 320 tv[0].u64 = U64x(80000000,00000000);
305 tv[1].u64 = U64x(80000000,00000000); 321 tv[1].u64 = U64x(80000000,00000000);
322
323 /* Initialize 32/64 bit constants. */
324#if LJ_TARGET_X86ORX64
325 J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
326#if LJ_32
327 J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
328#endif
329 J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
330 J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
331#endif
332#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
333 J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
334#endif
335#if LJ_TARGET_PPC
336 J->k32[LJ_K32_2P52_2P31] = 0x59800004;
337 J->k32[LJ_K32_2P52] = 0x59800000;
338#endif
339#if LJ_TARGET_PPC || LJ_TARGET_MIPS
340 J->k32[LJ_K32_2P31] = 0x4f000000;
341#endif
342#if LJ_TARGET_MIPS
343 J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
344#if LJ_64
345 J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
346 J->k32[LJ_K32_2P63] = 0x5f000000;
347 J->k32[LJ_K32_M2P64] = 0xdf800000;
348#endif
349#endif
306} 350}
307 351
308/* Free everything associated with the JIT compiler state. */ 352/* Free everything associated with the JIT compiler state. */
@@ -313,11 +357,11 @@ void lj_trace_freestate(global_State *g)
313 { /* This assumes all traces have already been freed. */ 357 { /* This assumes all traces have already been freed. */
314 ptrdiff_t i; 358 ptrdiff_t i;
315 for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) 359 for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
316 lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL); 360 lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
361 "trace still allocated");
317 } 362 }
318#endif 363#endif
319 lj_mcode_free(J); 364 lj_mcode_free(J);
320 lj_ir_k64_freeall(J);
321 lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry); 365 lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
322 lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot); 366 lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
323 lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns); 367 lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
@@ -341,7 +385,7 @@ static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
341 if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */ 385 if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */
342 /* First try to bump its hotcount several times. */ 386 /* First try to bump its hotcount several times. */
343 val = ((uint32_t)J->penalty[i].val << 1) + 387 val = ((uint32_t)J->penalty[i].val << 1) +
344 LJ_PRNG_BITS(J, PENALTY_RNDBITS); 388 (lj_prng_u64(&J2G(J)->prng) & ((1u<<PENALTY_RNDBITS)-1));
345 if (val > PENALTY_MAX) { 389 if (val > PENALTY_MAX) {
346 blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */ 390 blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */
347 return; 391 return;
@@ -367,10 +411,11 @@ static void trace_start(jit_State *J)
367 TraceNo traceno; 411 TraceNo traceno;
368 412
369 if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */ 413 if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */
370 if (J->parent == 0) { 414 if (J->parent == 0 && J->exitno == 0) {
371 /* Lazy bytecode patching to disable hotcount events. */ 415 /* Lazy bytecode patching to disable hotcount events. */
372 lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || 416 lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
373 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF); 417 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
418 "bad hot bytecode %d", bc_op(*J->pc));
374 setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); 419 setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
375 J->pt->flags |= PROTO_ILOOP; 420 J->pt->flags |= PROTO_ILOOP;
376 } 421 }
@@ -381,7 +426,8 @@ static void trace_start(jit_State *J)
381 /* Get a new trace number. */ 426 /* Get a new trace number. */
382 traceno = trace_findfree(J); 427 traceno = trace_findfree(J);
383 if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ 428 if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */
384 lua_assert((J2G(J)->hookmask & HOOK_GC) == 0); 429 lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
430 "recorder called from GC hook");
385 lj_trace_flushall(J->L); 431 lj_trace_flushall(J->L);
386 J->state = LJ_TRACE_IDLE; /* Silently ignored. */ 432 J->state = LJ_TRACE_IDLE; /* Silently ignored. */
387 return; 433 return;
@@ -401,6 +447,8 @@ static void trace_start(jit_State *J)
401 J->guardemit.irt = 0; 447 J->guardemit.irt = 0;
402 J->postproc = LJ_POST_NONE; 448 J->postproc = LJ_POST_NONE;
403 lj_resetsplit(J); 449 lj_resetsplit(J);
450 J->retryrec = 0;
451 J->ktrace = 0;
404 setgcref(J->cur.startpt, obj2gco(J->pt)); 452 setgcref(J->cur.startpt, obj2gco(J->pt));
405 453
406 L = J->L; 454 L = J->L;
@@ -412,6 +460,12 @@ static void trace_start(jit_State *J)
412 if (J->parent) { 460 if (J->parent) {
413 setintV(L->top++, J->parent); 461 setintV(L->top++, J->parent);
414 setintV(L->top++, J->exitno); 462 setintV(L->top++, J->exitno);
463 } else {
464 BCOp op = bc_op(*J->pc);
465 if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
466 setintV(L->top++, J->exitno); /* Parent of stitched trace. */
467 setintV(L->top++, -1);
468 }
415 } 469 }
416 ); 470 );
417 lj_record_setup(J); 471 lj_record_setup(J);
@@ -424,7 +478,7 @@ static void trace_stop(jit_State *J)
424 BCOp op = bc_op(J->cur.startins); 478 BCOp op = bc_op(J->cur.startins);
425 GCproto *pt = &gcref(J->cur.startpt)->pt; 479 GCproto *pt = &gcref(J->cur.startpt)->pt;
426 TraceNo traceno = J->cur.traceno; 480 TraceNo traceno = J->cur.traceno;
427 GCtrace *T = trace_save_alloc(J); /* Do this first. May throw OOM. */ 481 GCtrace *T = J->curfinal;
428 lua_State *L; 482 lua_State *L;
429 483
430 switch (op) { 484 switch (op) {
@@ -449,7 +503,7 @@ static void trace_stop(jit_State *J)
449 goto addroot; 503 goto addroot;
450 case BC_JMP: 504 case BC_JMP:
451 /* Patch exit branch in parent to side trace entry. */ 505 /* Patch exit branch in parent to side trace entry. */
452 lua_assert(J->parent != 0 && J->cur.root != 0); 506 lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
453 lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); 507 lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
454 /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ 508 /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
455 traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; 509 traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE;
@@ -461,8 +515,14 @@ static void trace_stop(jit_State *J)
461 root->nextside = (TraceNo1)traceno; 515 root->nextside = (TraceNo1)traceno;
462 } 516 }
463 break; 517 break;
518 case BC_CALLM:
519 case BC_CALL:
520 case BC_ITERC:
521 /* Trace stitching: patch link of previous trace. */
522 traceref(J, J->exitno)->link = traceno;
523 break;
464 default: 524 default:
465 lua_assert(0); 525 lj_assertJ(0, "bad stop bytecode %d", op);
466 break; 526 break;
467 } 527 }
468 528
@@ -475,6 +535,7 @@ static void trace_stop(jit_State *J)
475 lj_vmevent_send(L, TRACE, 535 lj_vmevent_send(L, TRACE,
476 setstrV(L, L->top++, lj_str_newlit(L, "stop")); 536 setstrV(L, L->top++, lj_str_newlit(L, "stop"));
477 setintV(L->top++, traceno); 537 setintV(L->top++, traceno);
538 setfuncV(L, L->top++, J->fn);
478 ); 539 );
479} 540}
480 541
@@ -482,8 +543,8 @@ static void trace_stop(jit_State *J)
482static int trace_downrec(jit_State *J) 543static int trace_downrec(jit_State *J)
483{ 544{
484 /* Restart recording at the return instruction. */ 545 /* Restart recording at the return instruction. */
485 lua_assert(J->pt != NULL); 546 lj_assertJ(J->pt != NULL, "no active prototype");
486 lua_assert(bc_isret(bc_op(*J->pc))); 547 lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
487 if (bc_op(*J->pc) == BC_RETM) 548 if (bc_op(*J->pc) == BC_RETM)
488 return 0; /* NYI: down-recursion with RETM. */ 549 return 0; /* NYI: down-recursion with RETM. */
489 J->parent = 0; 550 J->parent = 0;
@@ -502,6 +563,10 @@ static int trace_abort(jit_State *J)
502 563
503 J->postproc = LJ_POST_NONE; 564 J->postproc = LJ_POST_NONE;
504 lj_mcode_abort(J); 565 lj_mcode_abort(J);
566 if (J->curfinal) {
567 lj_trace_free(J2G(J), J->curfinal);
568 J->curfinal = NULL;
569 }
505 if (tvisnumber(L->top-1)) 570 if (tvisnumber(L->top-1))
506 e = (TraceError)numberVint(L->top-1); 571 e = (TraceError)numberVint(L->top-1);
507 if (e == LJ_TRERR_MCODELM) { 572 if (e == LJ_TRERR_MCODELM) {
@@ -510,8 +575,17 @@ static int trace_abort(jit_State *J)
510 return 1; /* Retry ASM with new MCode area. */ 575 return 1; /* Retry ASM with new MCode area. */
511 } 576 }
512 /* Penalize or blacklist starting bytecode instruction. */ 577 /* Penalize or blacklist starting bytecode instruction. */
513 if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) 578 if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
514 penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e); 579 if (J->exitno == 0) {
580 BCIns *startpc = mref(J->cur.startpc, BCIns);
581 if (e == LJ_TRERR_RETRY)
582 hotcount_set(J2GG(J), startpc+1, 1); /* Immediate retry. */
583 else
584 penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
585 } else {
586 traceref(J, J->exitno)->link = J->exitno; /* Self-link is blacklisted. */
587 }
588 }
515 589
516 /* Is there anything to abort? */ 590 /* Is there anything to abort? */
517 traceno = J->cur.traceno; 591 traceno = J->cur.traceno;
@@ -680,15 +754,30 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
680{ 754{
681 SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno]; 755 SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
682 if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) && 756 if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
757 isluafunc(curr_func(J->L)) &&
683 snap->count != SNAPCOUNT_DONE && 758 snap->count != SNAPCOUNT_DONE &&
684 ++snap->count >= J->param[JIT_P_hotexit]) { 759 ++snap->count >= J->param[JIT_P_hotexit]) {
685 lua_assert(J->state == LJ_TRACE_IDLE); 760 lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
686 /* J->parent is non-zero for a side trace. */ 761 /* J->parent is non-zero for a side trace. */
687 J->state = LJ_TRACE_START; 762 J->state = LJ_TRACE_START;
688 lj_trace_ins(J, pc); 763 lj_trace_ins(J, pc);
689 } 764 }
690} 765}
691 766
767/* Stitch a new trace to the previous trace. */
768void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
769{
770 /* Only start a new trace if not recording or inside __gc call or vmevent. */
771 if (J->state == LJ_TRACE_IDLE &&
772 !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
773 J->parent = 0; /* Have to treat it like a root trace. */
774 /* J->exitno is set to the invoking trace. */
775 J->state = LJ_TRACE_START;
776 lj_trace_ins(J, pc);
777 }
778}
779
780
692/* Tiny struct to pass data to protected call. */ 781/* Tiny struct to pass data to protected call. */
693typedef struct ExitDataCP { 782typedef struct ExitDataCP {
694 jit_State *J; 783 jit_State *J;
@@ -742,7 +831,7 @@ static TraceNo trace_exit_find(jit_State *J, MCode *pc)
742 if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) 831 if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
743 return traceno; 832 return traceno;
744 } 833 }
745 lua_assert(0); 834 lj_assertJ(0, "bad exit pc");
746 return 0; 835 return 0;
747} 836}
748#endif 837#endif
@@ -764,30 +853,33 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
764 T = traceref(J, J->parent); UNUSED(T); 853 T = traceref(J, J->parent); UNUSED(T);
765#ifdef EXITSTATE_CHECKEXIT 854#ifdef EXITSTATE_CHECKEXIT
766 if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */ 855 if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */
767 lua_assert(T->root != 0); 856 lj_assertJ(T->root != 0, "stack check in root trace");
768 J->exitno = T->ir[REF_BASE].op2; 857 J->exitno = T->ir[REF_BASE].op2;
769 J->parent = T->ir[REF_BASE].op1; 858 J->parent = T->ir[REF_BASE].op1;
770 T = traceref(J, J->parent); 859 T = traceref(J, J->parent);
771 } 860 }
772#endif 861#endif
773 lua_assert(T != NULL && J->exitno < T->nsnap); 862 lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
774 exd.J = J; 863 exd.J = J;
775 exd.exptr = exptr; 864 exd.exptr = exptr;
776 errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); 865 errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
777 if (errcode) 866 if (errcode)
778 return -errcode; /* Return negated error code. */ 867 return -errcode; /* Return negated error code. */
779 868
780 lj_vmevent_send(L, TEXIT, 869 if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
781 lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK); 870 lj_vmevent_send(L, TEXIT,
782 setintV(L->top++, J->parent); 871 lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
783 setintV(L->top++, J->exitno); 872 setintV(L->top++, J->parent);
784 trace_exit_regs(L, ex); 873 setintV(L->top++, J->exitno);
785 ); 874 trace_exit_regs(L, ex);
875 );
786 876
787 pc = exd.pc; 877 pc = exd.pc;
788 cf = cframe_raw(L->cframe); 878 cf = cframe_raw(L->cframe);
789 setcframe_pc(cf, pc); 879 setcframe_pc(cf, pc);
790 if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) { 880 if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
881 /* Just exit to interpreter. */
882 } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
791 if (!(G(L)->hookmask & HOOK_GC)) 883 if (!(G(L)->hookmask & HOOK_GC))
792 lj_gc_step(L); /* Exited because of GC: drive GC forward. */ 884 lj_gc_step(L); /* Exited because of GC: drive GC forward. */
793 } else { 885 } else {
@@ -811,7 +903,7 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
811 ERRNO_RESTORE 903 ERRNO_RESTORE
812 switch (bc_op(*pc)) { 904 switch (bc_op(*pc)) {
813 case BC_CALLM: case BC_CALLMT: 905 case BC_CALLM: case BC_CALLMT:
814 return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc)); 906 return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
815 case BC_RETM: 907 case BC_RETM:
816 return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc)); 908 return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
817 case BC_TSETM: 909 case BC_TSETM:
diff --git a/src/lj_trace.h b/src/lj_trace.h
index 460f10a1..93d7aea1 100644
--- a/src/lj_trace.h
+++ b/src/lj_trace.h
@@ -23,6 +23,7 @@ LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
23LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e); 23LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
24 24
25/* Trace management. */ 25/* Trace management. */
26LJ_FUNC GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T);
26LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T); 27LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
27LJ_FUNC void lj_trace_reenableproto(GCproto *pt); 28LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
28LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt); 29LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
@@ -34,6 +35,7 @@ LJ_FUNC void lj_trace_freestate(global_State *g);
34/* Event handling. */ 35/* Event handling. */
35LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc); 36LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
36LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc); 37LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
38LJ_FUNCA void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc);
37LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr); 39LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
38 40
39/* Signal asynchronous abort of trace or end of trace. */ 41/* Signal asynchronous abort of trace or end of trace. */
diff --git a/src/lj_traceerr.h b/src/lj_traceerr.h
index ecba11a6..0156a664 100644
--- a/src/lj_traceerr.h
+++ b/src/lj_traceerr.h
@@ -7,10 +7,12 @@
7 7
8/* Recording. */ 8/* Recording. */
9TREDEF(RECERR, "error thrown or hook called during recording") 9TREDEF(RECERR, "error thrown or hook called during recording")
10TREDEF(TRACEUV, "trace too short")
10TREDEF(TRACEOV, "trace too long") 11TREDEF(TRACEOV, "trace too long")
11TREDEF(STACKOV, "trace too deep") 12TREDEF(STACKOV, "trace too deep")
12TREDEF(SNAPOV, "too many snapshots") 13TREDEF(SNAPOV, "too many snapshots")
13TREDEF(BLACKL, "blacklisted") 14TREDEF(BLACKL, "blacklisted")
15TREDEF(RETRY, "retry recording")
14TREDEF(NYIBC, "NYI: bytecode %d") 16TREDEF(NYIBC, "NYI: bytecode %d")
15 17
16/* Recording loop ops. */ 18/* Recording loop ops. */
@@ -23,8 +25,6 @@ TREDEF(BADTYPE, "bad argument type")
23TREDEF(CJITOFF, "JIT compilation disabled for function") 25TREDEF(CJITOFF, "JIT compilation disabled for function")
24TREDEF(CUNROLL, "call unroll limit reached") 26TREDEF(CUNROLL, "call unroll limit reached")
25TREDEF(DOWNREC, "down-recursion, restarting") 27TREDEF(DOWNREC, "down-recursion, restarting")
26TREDEF(NYICF, "NYI: C function %s")
27TREDEF(NYIFF, "NYI: FastFunc %s")
28TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s") 28TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s")
29TREDEF(NYIRETL, "NYI: return to lower frame") 29TREDEF(NYIRETL, "NYI: return to lower frame")
30 30
diff --git a/src/lj_vm.h b/src/lj_vm.h
index 5b10adf3..d572e7d7 100644
--- a/src/lj_vm.h
+++ b/src/lj_vm.h
@@ -17,6 +17,10 @@ LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
17LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef); 17LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
18LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode); 18LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
19LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe); 19LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
20#if LJ_ABI_WIN && LJ_TARGET_X86
21LJ_ASMF_NORET void LJ_FASTCALL lj_vm_rtlunwind(void *cframe, void *excptrec,
22 void *unwinder, int errcode);
23#endif
20LJ_ASMF void lj_vm_unwind_c_eh(void); 24LJ_ASMF void lj_vm_unwind_c_eh(void);
21LJ_ASMF void lj_vm_unwind_ff_eh(void); 25LJ_ASMF void lj_vm_unwind_ff_eh(void);
22#if LJ_TARGET_X86ORX64 26#if LJ_TARGET_X86ORX64
@@ -43,13 +47,14 @@ LJ_ASMF void lj_vm_record(void);
43LJ_ASMF void lj_vm_inshook(void); 47LJ_ASMF void lj_vm_inshook(void);
44LJ_ASMF void lj_vm_rethook(void); 48LJ_ASMF void lj_vm_rethook(void);
45LJ_ASMF void lj_vm_callhook(void); 49LJ_ASMF void lj_vm_callhook(void);
50LJ_ASMF void lj_vm_profhook(void);
46 51
47/* Trace exit handling. */ 52/* Trace exit handling. */
48LJ_ASMF void lj_vm_exit_handler(void); 53LJ_ASMF void lj_vm_exit_handler(void);
49LJ_ASMF void lj_vm_exit_interp(void); 54LJ_ASMF void lj_vm_exit_interp(void);
50 55
51/* Internal math helper functions. */ 56/* Internal math helper functions. */
52#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC 57#if LJ_TARGET_PPC || LJ_TARGET_ARM64 || (LJ_TARGET_MIPS && LJ_ABI_SOFTFP)
53#define lj_vm_floor floor 58#define lj_vm_floor floor
54#define lj_vm_ceil ceil 59#define lj_vm_ceil ceil
55#else 60#else
@@ -60,23 +65,26 @@ LJ_ASMF double lj_vm_floor_sf(double);
60LJ_ASMF double lj_vm_ceil_sf(double); 65LJ_ASMF double lj_vm_ceil_sf(double);
61#endif 66#endif
62#endif 67#endif
63#if defined(LUAJIT_NO_LOG2) || LJ_TARGET_X86ORX64 68#ifdef LUAJIT_NO_LOG2
64LJ_ASMF double lj_vm_log2(double); 69LJ_ASMF double lj_vm_log2(double);
65#else 70#else
66#define lj_vm_log2 log2 71#define lj_vm_log2 log2
67#endif 72#endif
73#if !(defined(_LJ_DISPATCH_H) && LJ_TARGET_MIPS)
74LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
75#endif
68 76
69#if LJ_HASJIT 77#if LJ_HASJIT
70#if LJ_TARGET_X86ORX64 78#if LJ_TARGET_X86ORX64
71LJ_ASMF void lj_vm_floor_sse(void); 79LJ_ASMF void lj_vm_floor_sse(void);
72LJ_ASMF void lj_vm_ceil_sse(void); 80LJ_ASMF void lj_vm_ceil_sse(void);
73LJ_ASMF void lj_vm_trunc_sse(void); 81LJ_ASMF void lj_vm_trunc_sse(void);
74LJ_ASMF void lj_vm_exp_x87(void);
75LJ_ASMF void lj_vm_exp2_x87(void);
76LJ_ASMF void lj_vm_pow_sse(void);
77LJ_ASMF void lj_vm_powi_sse(void); 82LJ_ASMF void lj_vm_powi_sse(void);
83#define lj_vm_powi NULL
78#else 84#else
79#if LJ_TARGET_PPC 85LJ_ASMF double lj_vm_powi(double, int32_t);
86#endif
87#if LJ_TARGET_PPC || LJ_TARGET_ARM64
80#define lj_vm_trunc trunc 88#define lj_vm_trunc trunc
81#else 89#else
82LJ_ASMF double lj_vm_trunc(double); 90LJ_ASMF double lj_vm_trunc(double);
@@ -84,14 +92,6 @@ LJ_ASMF double lj_vm_trunc(double);
84LJ_ASMF double lj_vm_trunc_sf(double); 92LJ_ASMF double lj_vm_trunc_sf(double);
85#endif 93#endif
86#endif 94#endif
87LJ_ASMF double lj_vm_powi(double, int32_t);
88#ifdef LUAJIT_NO_EXP2
89LJ_ASMF double lj_vm_exp2(double);
90#else
91#define lj_vm_exp2 exp2
92#endif
93#endif
94LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
95#if LJ_HASFFI 95#if LJ_HASFFI
96LJ_ASMF int lj_vm_errno(void); 96LJ_ASMF int lj_vm_errno(void);
97#endif 97#endif
@@ -104,8 +104,7 @@ LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
104LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */ 104LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
105LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */ 105LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
106LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */ 106LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
107 107LJ_ASMF void lj_cont_stitch(void); /* Trace stitching. */
108enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
109 108
110/* Start of the ASM code. */ 109/* Start of the ASM code. */
111LJ_ASMF char lj_vm_asm_begin[]; 110LJ_ASMF char lj_vm_asm_begin[];
diff --git a/src/lj_vmevent.c b/src/lj_vmevent.c
index 1d496748..8b442a44 100644
--- a/src/lj_vmevent.c
+++ b/src/lj_vmevent.c
@@ -27,6 +27,7 @@ ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
27 if (tv && tvisfunc(tv)) { 27 if (tv && tvisfunc(tv)) {
28 lj_state_checkstack(L, LUA_MINSTACK); 28 lj_state_checkstack(L, LUA_MINSTACK);
29 setfuncV(L, L->top++, funcV(tv)); 29 setfuncV(L, L->top++, funcV(tv));
30 if (LJ_FR2) setnilV(L->top++);
30 return savestack(L, L->top); 31 return savestack(L, L->top);
31 } 32 }
32 } 33 }
diff --git a/src/lj_vmmath.c b/src/lj_vmmath.c
index 50a2cbba..9ed37bf2 100644
--- a/src/lj_vmmath.c
+++ b/src/lj_vmmath.c
@@ -13,16 +13,29 @@
13#include "lj_ir.h" 13#include "lj_ir.h"
14#include "lj_vm.h" 14#include "lj_vm.h"
15 15
16/* -- Helper functions for generated machine code ------------------------- */ 16/* -- Wrapper functions --------------------------------------------------- */
17 17
18#if LJ_TARGET_X86ORX64 18#if LJ_TARGET_X86 && __ELF__ && __PIC__
19/* Wrapper functions to avoid linker issues on OSX. */ 19/* Wrapper functions to deal with the ELF/x86 PIC disaster. */
20LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); } 20LJ_FUNCA double lj_wrap_log(double x) { return log(x); }
21LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); } 21LJ_FUNCA double lj_wrap_log10(double x) { return log10(x); }
22LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); } 22LJ_FUNCA double lj_wrap_exp(double x) { return exp(x); }
23LJ_FUNCA double lj_wrap_sin(double x) { return sin(x); }
24LJ_FUNCA double lj_wrap_cos(double x) { return cos(x); }
25LJ_FUNCA double lj_wrap_tan(double x) { return tan(x); }
26LJ_FUNCA double lj_wrap_asin(double x) { return asin(x); }
27LJ_FUNCA double lj_wrap_acos(double x) { return acos(x); }
28LJ_FUNCA double lj_wrap_atan(double x) { return atan(x); }
29LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); }
30LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); }
31LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); }
32LJ_FUNCA double lj_wrap_atan2(double x, double y) { return atan2(x, y); }
33LJ_FUNCA double lj_wrap_pow(double x, double y) { return pow(x, y); }
34LJ_FUNCA double lj_wrap_fmod(double x, double y) { return fmod(x, y); }
23#endif 35#endif
24 36
25#if !LJ_TARGET_X86ORX64 37/* -- Helper functions for generated machine code ------------------------- */
38
26double lj_vm_foldarith(double x, double y, int op) 39double lj_vm_foldarith(double x, double y, int op)
27{ 40{
28 switch (op) { 41 switch (op) {
@@ -35,37 +48,20 @@ double lj_vm_foldarith(double x, double y, int op)
35 case IR_NEG - IR_ADD: return -x; break; 48 case IR_NEG - IR_ADD: return -x; break;
36 case IR_ABS - IR_ADD: return fabs(x); break; 49 case IR_ABS - IR_ADD: return fabs(x); break;
37#if LJ_HASJIT 50#if LJ_HASJIT
38 case IR_ATAN2 - IR_ADD: return atan2(x, y); break;
39 case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break; 51 case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break;
40 case IR_MIN - IR_ADD: return x > y ? y : x; break; 52 case IR_MIN - IR_ADD: return x < y ? x : y; break;
41 case IR_MAX - IR_ADD: return x < y ? y : x; break; 53 case IR_MAX - IR_ADD: return x > y ? x : y; break;
42#endif 54#endif
43 default: return x; 55 default: return x;
44 } 56 }
45} 57}
46#endif
47
48#if LJ_HASJIT
49 58
50#ifdef LUAJIT_NO_LOG2 59#if (LJ_HASJIT && !(LJ_TARGET_ARM || LJ_TARGET_ARM64 || LJ_TARGET_PPC)) || LJ_TARGET_MIPS
51double lj_vm_log2(double a)
52{
53 return log(a) * 1.4426950408889634074;
54}
55#endif
56
57#ifdef LUAJIT_NO_EXP2
58double lj_vm_exp2(double a)
59{
60 return exp(a * 0.6931471805599453);
61}
62#endif
63
64#if !(LJ_TARGET_ARM || LJ_TARGET_PPC)
65int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) 60int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
66{ 61{
67 uint32_t y, ua, ub; 62 uint32_t y, ua, ub;
68 lua_assert(b != 0); /* This must be checked before using this function. */ 63 /* This must be checked before using this function. */
64 lj_assertX(b != 0, "modulo with zero divisor");
69 ua = a < 0 ? (uint32_t)-a : (uint32_t)a; 65 ua = a < 0 ? (uint32_t)-a : (uint32_t)a;
70 ub = b < 0 ? (uint32_t)-b : (uint32_t)b; 66 ub = b < 0 ? (uint32_t)-b : (uint32_t)b;
71 y = ua % ub; 67 y = ua % ub;
@@ -75,12 +71,21 @@ int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
75} 71}
76#endif 72#endif
77 73
74#if LJ_HASJIT
75
76#ifdef LUAJIT_NO_LOG2
77double lj_vm_log2(double a)
78{
79 return log(a) * 1.4426950408889634074;
80}
81#endif
82
78#if !LJ_TARGET_X86ORX64 83#if !LJ_TARGET_X86ORX64
79/* Unsigned x^k. */ 84/* Unsigned x^k. */
80static double lj_vm_powui(double x, uint32_t k) 85static double lj_vm_powui(double x, uint32_t k)
81{ 86{
82 double y; 87 double y;
83 lua_assert(k != 0); 88 lj_assertX(k != 0, "pow with zero exponent");
84 for (; (k & 1) == 0; k >>= 1) x *= x; 89 for (; (k & 1) == 0; k >>= 1) x *= x;
85 y = x; 90 y = x;
86 if ((k >>= 1) != 0) { 91 if ((k >>= 1) != 0) {
@@ -107,6 +112,7 @@ double lj_vm_powi(double x, int32_t k)
107 else 112 else
108 return 1.0 / lj_vm_powui(x, (uint32_t)-k); 113 return 1.0 / lj_vm_powui(x, (uint32_t)-k);
109} 114}
115#endif
110 116
111/* Computes fpm(x) for extended math functions. */ 117/* Computes fpm(x) for extended math functions. */
112double lj_vm_foldfpm(double x, int fpm) 118double lj_vm_foldfpm(double x, int fpm)
@@ -116,19 +122,12 @@ double lj_vm_foldfpm(double x, int fpm)
116 case IRFPM_CEIL: return lj_vm_ceil(x); 122 case IRFPM_CEIL: return lj_vm_ceil(x);
117 case IRFPM_TRUNC: return lj_vm_trunc(x); 123 case IRFPM_TRUNC: return lj_vm_trunc(x);
118 case IRFPM_SQRT: return sqrt(x); 124 case IRFPM_SQRT: return sqrt(x);
119 case IRFPM_EXP: return exp(x);
120 case IRFPM_EXP2: return lj_vm_exp2(x);
121 case IRFPM_LOG: return log(x); 125 case IRFPM_LOG: return log(x);
122 case IRFPM_LOG2: return lj_vm_log2(x); 126 case IRFPM_LOG2: return lj_vm_log2(x);
123 case IRFPM_LOG10: return log10(x); 127 default: lj_assertX(0, "bad fpm %d", fpm);
124 case IRFPM_SIN: return sin(x);
125 case IRFPM_COS: return cos(x);
126 case IRFPM_TAN: return tan(x);
127 default: lua_assert(0);
128 } 128 }
129 return 0; 129 return 0;
130} 130}
131#endif
132 131
133#if LJ_HASFFI 132#if LJ_HASFFI
134int lj_vm_errno(void) 133int lj_vm_errno(void)
diff --git a/src/ljamalg.c b/src/ljamalg.c
index 21b46314..56585e6d 100644
--- a/src/ljamalg.c
+++ b/src/ljamalg.c
@@ -3,16 +3,6 @@
3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h 3** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4*/ 4*/
5 5
6/*
7+--------------------------------------------------------------------------+
8| WARNING: Compiling the amalgamation needs a lot of virtual memory |
9| (around 300 MB with GCC 4.x)! If you don't have enough physical memory |
10| your machine will start swapping to disk and the compile will not finish |
11| within a reasonable amount of time. |
12| So either compile on a bigger machine or use the non-amalgamated build. |
13+--------------------------------------------------------------------------+
14*/
15
16#define ljamalg_c 6#define ljamalg_c
17#define LUA_CORE 7#define LUA_CORE
18 8
@@ -28,23 +18,29 @@
28#include "lua.h" 18#include "lua.h"
29#include "lauxlib.h" 19#include "lauxlib.h"
30 20
21#include "lj_assert.c"
31#include "lj_gc.c" 22#include "lj_gc.c"
32#include "lj_err.c" 23#include "lj_err.c"
33#include "lj_char.c" 24#include "lj_char.c"
34#include "lj_bc.c" 25#include "lj_bc.c"
35#include "lj_obj.c" 26#include "lj_obj.c"
27#include "lj_buf.c"
36#include "lj_str.c" 28#include "lj_str.c"
37#include "lj_tab.c" 29#include "lj_tab.c"
38#include "lj_func.c" 30#include "lj_func.c"
39#include "lj_udata.c" 31#include "lj_udata.c"
40#include "lj_meta.c" 32#include "lj_meta.c"
41#include "lj_debug.c" 33#include "lj_debug.c"
34#include "lj_prng.c"
42#include "lj_state.c" 35#include "lj_state.c"
43#include "lj_dispatch.c" 36#include "lj_dispatch.c"
44#include "lj_vmevent.c" 37#include "lj_vmevent.c"
45#include "lj_vmmath.c" 38#include "lj_vmmath.c"
46#include "lj_strscan.c" 39#include "lj_strscan.c"
40#include "lj_strfmt.c"
41#include "lj_strfmt_num.c"
47#include "lj_api.c" 42#include "lj_api.c"
43#include "lj_profile.c"
48#include "lj_lex.c" 44#include "lj_lex.c"
49#include "lj_parse.c" 45#include "lj_parse.c"
50#include "lj_bcread.c" 46#include "lj_bcread.c"
diff --git a/src/lua.h b/src/lua.h
index c83fd3bb..850bd796 100644
--- a/src/lua.h
+++ b/src/lua.h
@@ -39,7 +39,8 @@
39#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) 39#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
40 40
41 41
42/* thread status; 0 is OK */ 42/* thread status */
43#define LUA_OK 0
43#define LUA_YIELD 1 44#define LUA_YIELD 1
44#define LUA_ERRRUN 2 45#define LUA_ERRRUN 2
45#define LUA_ERRSYNTAX 3 46#define LUA_ERRSYNTAX 3
@@ -226,6 +227,7 @@ LUA_API int (lua_status) (lua_State *L);
226#define LUA_GCSTEP 5 227#define LUA_GCSTEP 5
227#define LUA_GCSETPAUSE 6 228#define LUA_GCSETPAUSE 6
228#define LUA_GCSETSTEPMUL 7 229#define LUA_GCSETSTEPMUL 7
230#define LUA_GCISRUNNING 9
229 231
230LUA_API int (lua_gc) (lua_State *L, int what, int data); 232LUA_API int (lua_gc) (lua_State *L, int what, int data);
231 233
@@ -346,6 +348,13 @@ LUA_API void *lua_upvalueid (lua_State *L, int idx, int n);
346LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2); 348LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2);
347LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt, 349LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt,
348 const char *chunkname, const char *mode); 350 const char *chunkname, const char *mode);
351LUA_API const lua_Number *lua_version (lua_State *L);
352LUA_API void lua_copy (lua_State *L, int fromidx, int toidx);
353LUA_API lua_Number lua_tonumberx (lua_State *L, int idx, int *isnum);
354LUA_API lua_Integer lua_tointegerx (lua_State *L, int idx, int *isnum);
355
356/* From Lua 5.3. */
357LUA_API int lua_isyieldable (lua_State *L);
349 358
350 359
351struct lua_Debug { 360struct lua_Debug {
diff --git a/src/luaconf.h b/src/luaconf.h
index 20feaca8..18fb961d 100644
--- a/src/luaconf.h
+++ b/src/luaconf.h
@@ -37,7 +37,7 @@
37#endif 37#endif
38#define LUA_LROOT "/usr/local" 38#define LUA_LROOT "/usr/local"
39#define LUA_LUADIR "/lua/5.1/" 39#define LUA_LUADIR "/lua/5.1/"
40#define LUA_LJDIR "/luajit-2.0.5/" 40#define LUA_LJDIR "/luajit-2.1.0-beta3/"
41 41
42#ifdef LUA_ROOT 42#ifdef LUA_ROOT
43#define LUA_JROOT LUA_ROOT 43#define LUA_JROOT LUA_ROOT
@@ -79,7 +79,7 @@
79#define LUA_IGMARK "-" 79#define LUA_IGMARK "-"
80#define LUA_PATH_CONFIG \ 80#define LUA_PATH_CONFIG \
81 LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \ 81 LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
82 LUA_EXECDIR "\n" LUA_IGMARK 82 LUA_EXECDIR "\n" LUA_IGMARK "\n"
83 83
84/* Quoting in error messages. */ 84/* Quoting in error messages. */
85#define LUA_QL(x) "'" x "'" 85#define LUA_QL(x) "'" x "'"
@@ -92,10 +92,6 @@
92#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */ 92#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
93#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */ 93#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
94 94
95/* Compatibility with older library function names. */
96#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */
97#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */
98
99/* Configuration for the frontend (the luajit executable). */ 95/* Configuration for the frontend (the luajit executable). */
100#if defined(luajit_c) 96#if defined(luajit_c)
101#define LUA_PROGNAME "luajit" /* Fallback frontend name. */ 97#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
@@ -140,7 +136,7 @@
140 136
141#define LUALIB_API LUA_API 137#define LUALIB_API LUA_API
142 138
143/* Support for internal assertions. */ 139/* Compatibility support for assertions. */
144#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) 140#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
145#include <assert.h> 141#include <assert.h>
146#endif 142#endif
diff --git a/src/luajit.c b/src/luajit.c
index 3901762d..53902480 100644
--- a/src/luajit.c
+++ b/src/luajit.c
@@ -61,8 +61,9 @@ static void laction(int i)
61 61
62static void print_usage(void) 62static void print_usage(void)
63{ 63{
64 fprintf(stderr, 64 fputs("usage: ", stderr);
65 "usage: %s [options]... [script [args]...].\n" 65 fputs(progname, stderr);
66 fputs(" [options]... [script [args]...].\n"
66 "Available options are:\n" 67 "Available options are:\n"
67 " -e chunk Execute string " LUA_QL("chunk") ".\n" 68 " -e chunk Execute string " LUA_QL("chunk") ".\n"
68 " -l name Require library " LUA_QL("name") ".\n" 69 " -l name Require library " LUA_QL("name") ".\n"
@@ -73,16 +74,14 @@ static void print_usage(void)
73 " -v Show version information.\n" 74 " -v Show version information.\n"
74 " -E Ignore environment variables.\n" 75 " -E Ignore environment variables.\n"
75 " -- Stop handling options.\n" 76 " -- Stop handling options.\n"
76 " - Execute stdin and stop handling options.\n" 77 " - Execute stdin and stop handling options.\n", stderr);
77 ,
78 progname);
79 fflush(stderr); 78 fflush(stderr);
80} 79}
81 80
82static void l_message(const char *pname, const char *msg) 81static void l_message(const char *pname, const char *msg)
83{ 82{
84 if (pname) fprintf(stderr, "%s: ", pname); 83 if (pname) { fputs(pname, stderr); fputc(':', stderr); fputc(' ', stderr); }
85 fprintf(stderr, "%s\n", msg); 84 fputs(msg, stderr); fputc('\n', stderr);
86 fflush(stderr); 85 fflush(stderr);
87} 86}
88 87
@@ -125,7 +124,7 @@ static int docall(lua_State *L, int narg, int clear)
125#endif 124#endif
126 lua_remove(L, base); /* remove traceback function */ 125 lua_remove(L, base); /* remove traceback function */
127 /* force a complete garbage collection in case of errors */ 126 /* force a complete garbage collection in case of errors */
128 if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); 127 if (status != LUA_OK) lua_gc(L, LUA_GCCOLLECT, 0);
129 return status; 128 return status;
130} 129}
131 130
@@ -154,22 +153,15 @@ static void print_jit_status(lua_State *L)
154 lua_settop(L, 0); /* clear stack */ 153 lua_settop(L, 0); /* clear stack */
155} 154}
156 155
157static int getargs(lua_State *L, char **argv, int n) 156static void createargtable(lua_State *L, char **argv, int argc, int argf)
158{ 157{
159 int narg;
160 int i; 158 int i;
161 int argc = 0; 159 lua_createtable(L, argc - argf, argf);
162 while (argv[argc]) argc++; /* count total number of arguments */
163 narg = argc - (n + 1); /* number of arguments to the script */
164 luaL_checkstack(L, narg + 3, "too many arguments to script");
165 for (i = n+1; i < argc; i++)
166 lua_pushstring(L, argv[i]);
167 lua_createtable(L, narg, n + 1);
168 for (i = 0; i < argc; i++) { 160 for (i = 0; i < argc; i++) {
169 lua_pushstring(L, argv[i]); 161 lua_pushstring(L, argv[i]);
170 lua_rawseti(L, -2, i - n); 162 lua_rawseti(L, -2, i - argf);
171 } 163 }
172 return narg; 164 lua_setglobal(L, "arg");
173} 165}
174 166
175static int dofile(lua_State *L, const char *name) 167static int dofile(lua_State *L, const char *name)
@@ -258,9 +250,9 @@ static void dotty(lua_State *L)
258 const char *oldprogname = progname; 250 const char *oldprogname = progname;
259 progname = NULL; 251 progname = NULL;
260 while ((status = loadline(L)) != -1) { 252 while ((status = loadline(L)) != -1) {
261 if (status == 0) status = docall(L, 0, 0); 253 if (status == LUA_OK) status = docall(L, 0, 0);
262 report(L, status); 254 report(L, status);
263 if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */ 255 if (status == LUA_OK && lua_gettop(L) > 0) { /* any result to print? */
264 lua_getglobal(L, "print"); 256 lua_getglobal(L, "print");
265 lua_insert(L, 1); 257 lua_insert(L, 1);
266 if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) 258 if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
@@ -275,21 +267,30 @@ static void dotty(lua_State *L)
275 progname = oldprogname; 267 progname = oldprogname;
276} 268}
277 269
278static int handle_script(lua_State *L, char **argv, int n) 270static int handle_script(lua_State *L, char **argx)
279{ 271{
280 int status; 272 int status;
281 const char *fname; 273 const char *fname = argx[0];
282 int narg = getargs(L, argv, n); /* collect arguments */ 274 if (strcmp(fname, "-") == 0 && strcmp(argx[-1], "--") != 0)
283 lua_setglobal(L, "arg");
284 fname = argv[n];
285 if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
286 fname = NULL; /* stdin */ 275 fname = NULL; /* stdin */
287 status = luaL_loadfile(L, fname); 276 status = luaL_loadfile(L, fname);
288 lua_insert(L, -(narg+1)); 277 if (status == LUA_OK) {
289 if (status == 0) 278 /* Fetch args from arg table. LUA_INIT or -e might have changed them. */
279 int narg = 0;
280 lua_getglobal(L, "arg");
281 if (lua_istable(L, -1)) {
282 do {
283 narg++;
284 lua_rawgeti(L, -narg, narg);
285 } while (!lua_isnil(L, -1));
286 lua_pop(L, 1);
287 lua_remove(L, -narg);
288 narg--;
289 } else {
290 lua_pop(L, 1);
291 }
290 status = docall(L, narg, 0); 292 status = docall(L, narg, 0);
291 else 293 }
292 lua_pop(L, narg);
293 return report(L, status); 294 return report(L, status);
294} 295}
295 296
@@ -386,7 +387,8 @@ static int dobytecode(lua_State *L, char **argv)
386 } 387 }
387 for (argv++; *argv != NULL; narg++, argv++) 388 for (argv++; *argv != NULL; narg++, argv++)
388 lua_pushstring(L, *argv); 389 lua_pushstring(L, *argv);
389 return report(L, lua_pcall(L, narg, 0, 0)); 390 report(L, lua_pcall(L, narg, 0, 0));
391 return -1;
390} 392}
391 393
392/* check that argument has no extra characters at the end */ 394/* check that argument has no extra characters at the end */
@@ -407,7 +409,7 @@ static int collectargs(char **argv, int *flags)
407 switch (argv[i][1]) { /* Check option. */ 409 switch (argv[i][1]) { /* Check option. */
408 case '-': 410 case '-':
409 notail(argv[i]); 411 notail(argv[i]);
410 return (argv[i+1] != NULL ? i+1 : 0); 412 return i+1;
411 case '\0': 413 case '\0':
412 return i; 414 return i;
413 case 'i': 415 case 'i':
@@ -433,23 +435,23 @@ static int collectargs(char **argv, int *flags)
433 case 'b': /* LuaJIT extension */ 435 case 'b': /* LuaJIT extension */
434 if (*flags) return -1; 436 if (*flags) return -1;
435 *flags |= FLAGS_EXEC; 437 *flags |= FLAGS_EXEC;
436 return 0; 438 return i+1;
437 case 'E': 439 case 'E':
438 *flags |= FLAGS_NOENV; 440 *flags |= FLAGS_NOENV;
439 break; 441 break;
440 default: return -1; /* invalid option */ 442 default: return -1; /* invalid option */
441 } 443 }
442 } 444 }
443 return 0; 445 return i;
444} 446}
445 447
446static int runargs(lua_State *L, char **argv, int n) 448static int runargs(lua_State *L, char **argv, int argn)
447{ 449{
448 int i; 450 int i;
449 for (i = 1; i < n; i++) { 451 for (i = 1; i < argn; i++) {
450 if (argv[i] == NULL) continue; 452 if (argv[i] == NULL) continue;
451 lua_assert(argv[i][0] == '-'); 453 lua_assert(argv[i][0] == '-');
452 switch (argv[i][1]) { /* option */ 454 switch (argv[i][1]) {
453 case 'e': { 455 case 'e': {
454 const char *chunk = argv[i] + 2; 456 const char *chunk = argv[i] + 2;
455 if (*chunk == '\0') chunk = argv[++i]; 457 if (*chunk == '\0') chunk = argv[++i];
@@ -463,10 +465,10 @@ static int runargs(lua_State *L, char **argv, int n)
463 if (*filename == '\0') filename = argv[++i]; 465 if (*filename == '\0') filename = argv[++i];
464 lua_assert(filename != NULL); 466 lua_assert(filename != NULL);
465 if (dolibrary(L, filename)) 467 if (dolibrary(L, filename))
466 return 1; /* stop if file fails */ 468 return 1;
467 break; 469 break;
468 } 470 }
469 case 'j': { /* LuaJIT extension */ 471 case 'j': { /* LuaJIT extension. */
470 const char *cmd = argv[i] + 2; 472 const char *cmd = argv[i] + 2;
471 if (*cmd == '\0') cmd = argv[++i]; 473 if (*cmd == '\0') cmd = argv[++i];
472 lua_assert(cmd != NULL); 474 lua_assert(cmd != NULL);
@@ -474,16 +476,16 @@ static int runargs(lua_State *L, char **argv, int n)
474 return 1; 476 return 1;
475 break; 477 break;
476 } 478 }
477 case 'O': /* LuaJIT extension */ 479 case 'O': /* LuaJIT extension. */
478 if (dojitopt(L, argv[i] + 2)) 480 if (dojitopt(L, argv[i] + 2))
479 return 1; 481 return 1;
480 break; 482 break;
481 case 'b': /* LuaJIT extension */ 483 case 'b': /* LuaJIT extension. */
482 return dobytecode(L, argv+i); 484 return dobytecode(L, argv+i);
483 default: break; 485 default: break;
484 } 486 }
485 } 487 }
486 return 0; 488 return LUA_OK;
487} 489}
488 490
489static int handle_luainit(lua_State *L) 491static int handle_luainit(lua_State *L)
@@ -494,7 +496,7 @@ static int handle_luainit(lua_State *L)
494 const char *init = getenv(LUA_INIT); 496 const char *init = getenv(LUA_INIT);
495#endif 497#endif
496 if (init == NULL) 498 if (init == NULL)
497 return 0; /* status OK */ 499 return LUA_OK;
498 else if (init[0] == '@') 500 else if (init[0] == '@')
499 return dofile(L, init+1); 501 return dofile(L, init+1);
500 else 502 else
@@ -511,45 +513,57 @@ static int pmain(lua_State *L)
511{ 513{
512 struct Smain *s = &smain; 514 struct Smain *s = &smain;
513 char **argv = s->argv; 515 char **argv = s->argv;
514 int script; 516 int argn;
515 int flags = 0; 517 int flags = 0;
516 globalL = L; 518 globalL = L;
517 if (argv[0] && argv[0][0]) progname = argv[0]; 519 if (argv[0] && argv[0][0]) progname = argv[0];
518 LUAJIT_VERSION_SYM(); /* linker-enforced version check */ 520
519 script = collectargs(argv, &flags); 521 LUAJIT_VERSION_SYM(); /* Linker-enforced version check. */
520 if (script < 0) { /* invalid args? */ 522
523 argn = collectargs(argv, &flags);
524 if (argn < 0) { /* Invalid args? */
521 print_usage(); 525 print_usage();
522 s->status = 1; 526 s->status = 1;
523 return 0; 527 return 0;
524 } 528 }
529
525 if ((flags & FLAGS_NOENV)) { 530 if ((flags & FLAGS_NOENV)) {
526 lua_pushboolean(L, 1); 531 lua_pushboolean(L, 1);
527 lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV"); 532 lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
528 } 533 }
529 lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ 534
530 luaL_openlibs(L); /* open libraries */ 535 /* Stop collector during library initialization. */
536 lua_gc(L, LUA_GCSTOP, 0);
537 luaL_openlibs(L);
531 lua_gc(L, LUA_GCRESTART, -1); 538 lua_gc(L, LUA_GCRESTART, -1);
539
540 createargtable(L, argv, s->argc, argn);
541
532 if (!(flags & FLAGS_NOENV)) { 542 if (!(flags & FLAGS_NOENV)) {
533 s->status = handle_luainit(L); 543 s->status = handle_luainit(L);
534 if (s->status != 0) return 0; 544 if (s->status != LUA_OK) return 0;
535 } 545 }
546
536 if ((flags & FLAGS_VERSION)) print_version(); 547 if ((flags & FLAGS_VERSION)) print_version();
537 s->status = runargs(L, argv, (script > 0) ? script : s->argc); 548
538 if (s->status != 0) return 0; 549 s->status = runargs(L, argv, argn);
539 if (script) { 550 if (s->status != LUA_OK) return 0;
540 s->status = handle_script(L, argv, script); 551
541 if (s->status != 0) return 0; 552 if (s->argc > argn) {
553 s->status = handle_script(L, argv + argn);
554 if (s->status != LUA_OK) return 0;
542 } 555 }
556
543 if ((flags & FLAGS_INTERACTIVE)) { 557 if ((flags & FLAGS_INTERACTIVE)) {
544 print_jit_status(L); 558 print_jit_status(L);
545 dotty(L); 559 dotty(L);
546 } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) { 560 } else if (s->argc == argn && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) {
547 if (lua_stdin_is_tty()) { 561 if (lua_stdin_is_tty()) {
548 print_version(); 562 print_version();
549 print_jit_status(L); 563 print_jit_status(L);
550 dotty(L); 564 dotty(L);
551 } else { 565 } else {
552 dofile(L, NULL); /* executes stdin as a file */ 566 dofile(L, NULL); /* Executes stdin as a file. */
553 } 567 }
554 } 568 }
555 return 0; 569 return 0;
@@ -558,7 +572,7 @@ static int pmain(lua_State *L)
558int main(int argc, char **argv) 572int main(int argc, char **argv)
559{ 573{
560 int status; 574 int status;
561 lua_State *L = lua_open(); /* create state */ 575 lua_State *L = lua_open();
562 if (L == NULL) { 576 if (L == NULL) {
563 l_message(argv[0], "cannot create state: not enough memory"); 577 l_message(argv[0], "cannot create state: not enough memory");
564 return EXIT_FAILURE; 578 return EXIT_FAILURE;
@@ -568,6 +582,6 @@ int main(int argc, char **argv)
568 status = lua_cpcall(L, pmain, NULL); 582 status = lua_cpcall(L, pmain, NULL);
569 report(L, status); 583 report(L, status);
570 lua_close(L); 584 lua_close(L);
571 return (status || smain.status) ? EXIT_FAILURE : EXIT_SUCCESS; 585 return (status || smain.status > 0) ? EXIT_FAILURE : EXIT_SUCCESS;
572} 586}
573 587
diff --git a/src/luajit.h b/src/luajit.h
index 5f5b3887..600031a1 100644
--- a/src/luajit.h
+++ b/src/luajit.h
@@ -30,9 +30,9 @@
30 30
31#include "lua.h" 31#include "lua.h"
32 32
33#define LUAJIT_VERSION "LuaJIT 2.0.5" 33#define LUAJIT_VERSION "LuaJIT 2.1.0-beta3"
34#define LUAJIT_VERSION_NUM 20005 /* Version 2.0.5 = 02.00.05. */ 34#define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */
35#define LUAJIT_VERSION_SYM luaJIT_version_2_0_5 35#define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta3
36#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2020 Mike Pall" 36#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2020 Mike Pall"
37#define LUAJIT_URL "http://luajit.org/" 37#define LUAJIT_URL "http://luajit.org/"
38 38
@@ -64,6 +64,15 @@ enum {
64/* Control the JIT engine. */ 64/* Control the JIT engine. */
65LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode); 65LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
66 66
67/* Low-overhead profiling API. */
68typedef void (*luaJIT_profile_callback)(void *data, lua_State *L,
69 int samples, int vmstate);
70LUA_API void luaJIT_profile_start(lua_State *L, const char *mode,
71 luaJIT_profile_callback cb, void *data);
72LUA_API void luaJIT_profile_stop(lua_State *L);
73LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt,
74 int depth, size_t *len);
75
67/* Enforce (dynamic) linker error for version mismatches. Call from main. */ 76/* Enforce (dynamic) linker error for version mismatches. Call from main. */
68LUA_API void LUAJIT_VERSION_SYM(void); 77LUA_API void LUAJIT_VERSION_SYM(void);
69 78
diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat
index 499b5f12..ae035dc6 100644
--- a/src/msvcbuild.bat
+++ b/src/msvcbuild.bat
@@ -5,6 +5,7 @@
5@rem Then cd to this directory and run this script. Use the following 5@rem Then cd to this directory and run this script. Use the following
6@rem options (in order), if needed. The default is a dynamic release build. 6@rem options (in order), if needed. The default is a dynamic release build.
7@rem 7@rem
8@rem nogc64 disable LJ_GC64 mode for x64
8@rem debug emit debug symbols 9@rem debug emit debug symbols
9@rem amalg amalgamated build 10@rem amalg amalgamated build
10@rem static static linkage 11@rem static static linkage
@@ -20,6 +21,7 @@
20@set LJLIB=lib /nologo /nodefaultlib 21@set LJLIB=lib /nologo /nodefaultlib
21@set DASMDIR=..\dynasm 22@set DASMDIR=..\dynasm
22@set DASM=%DASMDIR%\dynasm.lua 23@set DASM=%DASMDIR%\dynasm.lua
24@set DASC=vm_x64.dasc
23@set LJDLLNAME=lua51.dll 25@set LJDLLNAME=lua51.dll
24@set LJLIBNAME=lua51.lib 26@set LJLIBNAME=lua51.lib
25@set BUILDTYPE=release 27@set BUILDTYPE=release
@@ -36,10 +38,17 @@ if exist minilua.exe.manifest^
36@set LJARCH=x64 38@set LJARCH=x64
37@minilua 39@minilua
38@if errorlevel 8 goto :X64 40@if errorlevel 8 goto :X64
41@set DASC=vm_x86.dasc
39@set DASMFLAGS=-D WIN -D JIT -D FFI 42@set DASMFLAGS=-D WIN -D JIT -D FFI
40@set LJARCH=x86 43@set LJARCH=x86
44@set LJCOMPILE=%LJCOMPILE% /arch:SSE2
41:X64 45:X64
42minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc 46@if "%1" neq "nogc64" goto :GC64
47@shift
48@set DASC=vm_x86.dasc
49@set LJCOMPILE=%LJCOMPILE% /DLUAJIT_DISABLE_GC64
50:GC64
51minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
43@if errorlevel 1 goto :BAD 52@if errorlevel 1 goto :BAD
44 53
45%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c 54%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c
@@ -68,6 +77,7 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
68@shift 77@shift
69@set BUILDTYPE=debug 78@set BUILDTYPE=debug
70@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% 79@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS%
80@set LJLINK=%LJLINK% /opt:ref /opt:icf /incremental:no
71:NODEBUG 81:NODEBUG
72@set LJLINK=%LJLINK% /%BUILDTYPE% 82@set LJLINK=%LJLINK% /%BUILDTYPE%
73@if "%1"=="amalg" goto :AMALGDLL 83@if "%1"=="amalg" goto :AMALGDLL
diff --git a/src/ps4build.bat b/src/ps4build.bat
index 337a44fa..e83c674a 100644
--- a/src/ps4build.bat
+++ b/src/ps4build.bat
@@ -2,7 +2,19 @@
2@rem Donated to the public domain. 2@rem Donated to the public domain.
3@rem 3@rem
4@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) 4@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler)
5@rem or "VS2015 x64 Native Tools Command Prompt".
6@rem
5@rem Then cd to this directory and run this script. 7@rem Then cd to this directory and run this script.
8@rem
9@rem Recommended invocation:
10@rem
11@rem ps4build release build, amalgamated, 64-bit GC
12@rem ps4build debug debug build, amalgamated, 64-bit GC
13@rem
14@rem Additional command-line options (not generally recommended):
15@rem
16@rem gc32 (before debug) 32-bit GC
17@rem noamalg (after debug) non-amalgamated build
6 18
7@if not defined INCLUDE goto :FAIL 19@if not defined INCLUDE goto :FAIL
8@if not defined SCE_ORBIS_SDK_DIR goto :FAIL 20@if not defined SCE_ORBIS_SDK_DIR goto :FAIL
@@ -15,6 +27,14 @@
15@set DASMDIR=..\dynasm 27@set DASMDIR=..\dynasm
16@set DASM=%DASMDIR%\dynasm.lua 28@set DASM=%DASMDIR%\dynasm.lua
17@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c 29@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c
30@set GC64=
31@set DASC=vm_x64.dasc
32
33@if "%1" neq "gc32" goto :NOGC32
34@shift
35@set GC64=-DLUAJIT_DISABLE_GC64
36@set DASC=vm_x86.dasc
37:NOGC32
18 38
19%LJCOMPILE% host\minilua.c 39%LJCOMPILE% host\minilua.c
20@if errorlevel 1 goto :BAD 40@if errorlevel 1 goto :BAD
@@ -28,10 +48,10 @@ if exist minilua.exe.manifest^
28@if not errorlevel 8 goto :FAIL 48@if not errorlevel 8 goto :FAIL
29 49
30@set DASMFLAGS=-D P64 -D NO_UNWIND 50@set DASMFLAGS=-D P64 -D NO_UNWIND
31minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x86.dasc 51minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
32@if errorlevel 1 goto :BAD 52@if errorlevel 1 goto :BAD
33 53
34%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c 54%LJCOMPILE% /I "." /I %DASMDIR% %GC64% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c
35@if errorlevel 1 goto :BAD 55@if errorlevel 1 goto :BAD
36%LJLINK% /out:buildvm.exe buildvm*.obj 56%LJLINK% /out:buildvm.exe buildvm*.obj
37@if errorlevel 1 goto :BAD 57@if errorlevel 1 goto :BAD
@@ -54,7 +74,7 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
54@if errorlevel 1 goto :BAD 74@if errorlevel 1 goto :BAD
55 75
56@rem ---- Cross compiler ---- 76@rem ---- Cross compiler ----
57@set LJCOMPILE="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-clang" -c -Wall -DLUAJIT_DISABLE_FFI 77@set LJCOMPILE="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-clang" -c -Wall -DLUAJIT_DISABLE_FFI %GC64%
58@set LJLIB="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-ar" rcus 78@set LJLIB="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-ar" rcus
59@set INCLUDE="" 79@set INCLUDE=""
60 80
@@ -63,14 +83,14 @@ orbis-as -o lj_vm.o lj_vm.s
63@if "%1" neq "debug" goto :NODEBUG 83@if "%1" neq "debug" goto :NODEBUG
64@shift 84@shift
65@set LJCOMPILE=%LJCOMPILE% -g -O0 85@set LJCOMPILE=%LJCOMPILE% -g -O0
66@set TARGETLIB=libluajitD.a 86@set TARGETLIB=libluajitD_ps4.a
67goto :BUILD 87goto :BUILD
68:NODEBUG 88:NODEBUG
69@set LJCOMPILE=%LJCOMPILE% -O2 89@set LJCOMPILE=%LJCOMPILE% -O2
70@set TARGETLIB=libluajit.a 90@set TARGETLIB=libluajit_ps4.a
71:BUILD 91:BUILD
72del %TARGETLIB% 92del %TARGETLIB%
73@if "%1"=="amalg" goto :AMALG 93@if "%1" neq "noamalg" goto :AMALG
74for %%f in (lj_*.c lib_*.c) do ( 94for %%f in (lj_*.c lib_*.c) do (
75 %LJCOMPILE% %%f 95 %LJCOMPILE% %%f
76 @if errorlevel 1 goto :BAD 96 @if errorlevel 1 goto :BAD
diff --git a/src/vm_arm.dasc b/src/vm_arm.dasc
index dcfb10b3..6b5572b7 100644
--- a/src/vm_arm.dasc
+++ b/src/vm_arm.dasc
@@ -99,6 +99,7 @@
99|.type NODE, Node 99|.type NODE, Node
100|.type NARGS8, int 100|.type NARGS8, int
101|.type TRACE, GCtrace 101|.type TRACE, GCtrace
102|.type SBUF, SBuf
102| 103|
103|//----------------------------------------------------------------------- 104|//-----------------------------------------------------------------------
104| 105|
@@ -372,6 +373,17 @@ static void build_subroutines(BuildCtx *ctx)
372 | st_vmstate CARG2 373 | st_vmstate CARG2
373 | b ->vm_returnc 374 | b ->vm_returnc
374 | 375 |
376 |->vm_unwind_ext: // Complete external unwind.
377#if !LJ_NO_UNWIND
378 | push {r0, r1, r2, lr}
379 | bl extern _Unwind_Complete
380 | ldr r0, [sp]
381 | bl extern _Unwind_DeleteException
382 | pop {r0, r1, r2, lr}
383 | mov r0, r1
384 | bx r2
385#endif
386 |
375 |//----------------------------------------------------------------------- 387 |//-----------------------------------------------------------------------
376 |//-- Grow stack for calls ----------------------------------------------- 388 |//-- Grow stack for calls -----------------------------------------------
377 |//----------------------------------------------------------------------- 389 |//-----------------------------------------------------------------------
@@ -418,13 +430,14 @@ static void build_subroutines(BuildCtx *ctx)
418 | add CARG2, sp, #CFRAME_RESUME 430 | add CARG2, sp, #CFRAME_RESUME
419 | ldrb CARG1, L->status 431 | ldrb CARG1, L->status
420 | str CARG3, SAVE_ERRF 432 | str CARG3, SAVE_ERRF
421 | str CARG2, L->cframe 433 | str L, SAVE_PC // Any value outside of bytecode is ok.
422 | str CARG3, SAVE_CFRAME 434 | str CARG3, SAVE_CFRAME
423 | cmp CARG1, #0 435 | cmp CARG1, #0
424 | str L, SAVE_PC // Any value outside of bytecode is ok. 436 | str CARG2, L->cframe
425 | beq >3 437 | beq >3
426 | 438 |
427 | // Resume after yield (like a return). 439 | // Resume after yield (like a return).
440 | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
428 | mov RA, BASE 441 | mov RA, BASE
429 | ldr BASE, L->base 442 | ldr BASE, L->base
430 | ldr CARG1, L->top 443 | ldr CARG1, L->top
@@ -458,14 +471,15 @@ static void build_subroutines(BuildCtx *ctx)
458 | str CARG3, SAVE_NRES 471 | str CARG3, SAVE_NRES
459 | mov L, CARG1 472 | mov L, CARG1
460 | str CARG1, SAVE_L 473 | str CARG1, SAVE_L
461 | mov BASE, CARG2
462 | str sp, L->cframe // Add our C frame to cframe chain.
463 | ldr DISPATCH, L->glref // Setup pointer to dispatch table. 474 | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
475 | mov BASE, CARG2
464 | str CARG1, SAVE_PC // Any value outside of bytecode is ok. 476 | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
465 | str RC, SAVE_CFRAME 477 | str RC, SAVE_CFRAME
466 | add DISPATCH, DISPATCH, #GG_G2DISP 478 | add DISPATCH, DISPATCH, #GG_G2DISP
479 | str sp, L->cframe // Add our C frame to cframe chain.
467 | 480 |
468 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). 481 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
482 | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
469 | ldr RB, L->base // RB = old base (for vmeta_call). 483 | ldr RB, L->base // RB = old base (for vmeta_call).
470 | ldr CARG1, L->top 484 | ldr CARG1, L->top
471 | mov MASKR8, #255 485 | mov MASKR8, #255
@@ -491,20 +505,21 @@ static void build_subroutines(BuildCtx *ctx)
491 | mov L, CARG1 505 | mov L, CARG1
492 | ldr RA, L:CARG1->stack 506 | ldr RA, L:CARG1->stack
493 | str CARG1, SAVE_L 507 | str CARG1, SAVE_L
508 | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
494 | ldr RB, L->top 509 | ldr RB, L->top
495 | str CARG1, SAVE_PC // Any value outside of bytecode is ok. 510 | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
496 | ldr RC, L->cframe 511 | ldr RC, L->cframe
512 | add DISPATCH, DISPATCH, #GG_G2DISP
497 | sub RA, RA, RB // Compute -savestack(L, L->top). 513 | sub RA, RA, RB // Compute -savestack(L, L->top).
498 | str sp, L->cframe // Add our C frame to cframe chain.
499 | mov RB, #0 514 | mov RB, #0
500 | str RA, SAVE_NRES // Neg. delta means cframe w/o frame. 515 | str RA, SAVE_NRES // Neg. delta means cframe w/o frame.
501 | str RB, SAVE_ERRF // No error function. 516 | str RB, SAVE_ERRF // No error function.
502 | str RC, SAVE_CFRAME 517 | str RC, SAVE_CFRAME
518 | str sp, L->cframe // Add our C frame to cframe chain.
519 | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
503 | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud) 520 | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud)
504 | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
505 | movs BASE, CRET1 521 | movs BASE, CRET1
506 | mov PC, #FRAME_CP 522 | mov PC, #FRAME_CP
507 | add DISPATCH, DISPATCH, #GG_G2DISP
508 | bne <3 // Else continue with the call. 523 | bne <3 // Else continue with the call.
509 | b ->vm_leave_cp // No base? Just remove C frame. 524 | b ->vm_leave_cp // No base? Just remove C frame.
510 | 525 |
@@ -614,6 +629,16 @@ static void build_subroutines(BuildCtx *ctx)
614 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. 629 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
615 | b ->vm_call_dispatch_f 630 | b ->vm_call_dispatch_f
616 | 631 |
632 |->vmeta_tgetr:
633 | .IOS mov RC, BASE
634 | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
635 | // Returns cTValue * or NULL.
636 | .IOS mov BASE, RC
637 | cmp CRET1, #0
638 | ldrdne CARG12, [CRET1]
639 | mvneq CARG2, #~LJ_TNIL
640 | b ->BC_TGETR_Z
641 |
617 |//----------------------------------------------------------------------- 642 |//-----------------------------------------------------------------------
618 | 643 |
619 |->vmeta_tsets1: 644 |->vmeta_tsets1:
@@ -671,6 +696,16 @@ static void build_subroutines(BuildCtx *ctx)
671 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here. 696 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
672 | b ->vm_call_dispatch_f 697 | b ->vm_call_dispatch_f
673 | 698 |
699 |->vmeta_tsetr:
700 | str BASE, L->base
701 | .IOS mov RC, BASE
702 | mov CARG1, L
703 | str PC, SAVE_PC
704 | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
705 | // Returns TValue *.
706 | .IOS mov BASE, RC
707 | b ->BC_TSETR_Z
708 |
674 |//-- Comparison metamethods --------------------------------------------- 709 |//-- Comparison metamethods ---------------------------------------------
675 | 710 |
676 |->vmeta_comp: 711 |->vmeta_comp:
@@ -735,6 +770,17 @@ static void build_subroutines(BuildCtx *ctx)
735 | b <3 770 | b <3
736 |.endif 771 |.endif
737 | 772 |
773 |->vmeta_istype:
774 | sub PC, PC, #4
775 | str BASE, L->base
776 | mov CARG1, L
777 | lsr CARG2, RA, #3
778 | mov CARG3, RC
779 | str PC, SAVE_PC
780 | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
781 | .IOS ldr BASE, L->base
782 | b ->cont_nop
783 |
738 |//-- Arithmetic metamethods --------------------------------------------- 784 |//-- Arithmetic metamethods ---------------------------------------------
739 | 785 |
740 |->vmeta_arith_vn: 786 |->vmeta_arith_vn:
@@ -966,9 +1012,9 @@ static void build_subroutines(BuildCtx *ctx)
966 | cmp TAB:RB, #0 1012 | cmp TAB:RB, #0
967 | beq ->fff_restv 1013 | beq ->fff_restv
968 | ldr CARG3, TAB:RB->hmask 1014 | ldr CARG3, TAB:RB->hmask
969 | ldr CARG4, STR:RC->hash 1015 | ldr CARG4, STR:RC->sid
970 | ldr NODE:INS, TAB:RB->node 1016 | ldr NODE:INS, TAB:RB->node
971 | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask 1017 | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
972 | add CARG3, CARG3, CARG3, lsl #1 1018 | add CARG3, CARG3, CARG3, lsl #1
973 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 1019 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
974 |3: // Rearranged logic, because we expect _not_ to find the key. 1020 |3: // Rearranged logic, because we expect _not_ to find the key.
@@ -1052,7 +1098,7 @@ static void build_subroutines(BuildCtx *ctx)
1052 | ffgccheck 1098 | ffgccheck
1053 | mov CARG1, L 1099 | mov CARG1, L
1054 | mov CARG2, BASE 1100 | mov CARG2, BASE
1055 | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o) 1101 | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
1056 | // Returns GCstr *. 1102 | // Returns GCstr *.
1057 | ldr BASE, L->base 1103 | ldr BASE, L->base
1058 | mvn CARG2, #~LJ_TSTR 1104 | mvn CARG2, #~LJ_TSTR
@@ -1230,9 +1276,10 @@ static void build_subroutines(BuildCtx *ctx)
1230 | ldr CARG3, L:RA->base 1276 | ldr CARG3, L:RA->base
1231 | mv_vmstate CARG2, INTERP 1277 | mv_vmstate CARG2, INTERP
1232 | ldr CARG4, L:RA->top 1278 | ldr CARG4, L:RA->top
1233 | st_vmstate CARG2
1234 | cmp CRET1, #LUA_YIELD 1279 | cmp CRET1, #LUA_YIELD
1235 | ldr BASE, L->base 1280 | ldr BASE, L->base
1281 | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
1282 | st_vmstate CARG2
1236 | bhi >8 1283 | bhi >8
1237 | subs RC, CARG4, CARG3 1284 | subs RC, CARG4, CARG3
1238 | ldr CARG1, L->maxstack 1285 | ldr CARG1, L->maxstack
@@ -1500,19 +1547,6 @@ static void build_subroutines(BuildCtx *ctx)
1500 | math_extern2 atan2 1547 | math_extern2 atan2
1501 | math_extern2 fmod 1548 | math_extern2 fmod
1502 | 1549 |
1503 |->ff_math_deg:
1504 |.if FPU
1505 | .ffunc_d math_rad
1506 | vldr d1, CFUNC:CARG3->upvalue[0]
1507 | vmul.f64 d0, d0, d1
1508 | b ->fff_resd
1509 |.else
1510 | .ffunc_n math_rad
1511 | ldrd CARG34, CFUNC:CARG3->upvalue[0]
1512 | bl extern __aeabi_dmul
1513 | b ->fff_restv
1514 |.endif
1515 |
1516 |.if HFABI 1550 |.if HFABI
1517 | .ffunc math_ldexp 1551 | .ffunc math_ldexp
1518 | ldr CARG4, [BASE, #4] 1552 | ldr CARG4, [BASE, #4]
@@ -1682,17 +1716,11 @@ static void build_subroutines(BuildCtx *ctx)
1682 |.endif 1716 |.endif
1683 |.endmacro 1717 |.endmacro
1684 | 1718 |
1685 | math_minmax math_min, gt, hi 1719 | math_minmax math_min, gt, pl
1686 | math_minmax math_max, lt, lo 1720 | math_minmax math_max, lt, le
1687 | 1721 |
1688 |//-- String library ----------------------------------------------------- 1722 |//-- String library -----------------------------------------------------
1689 | 1723 |
1690 |.ffunc_1 string_len
1691 | checkstr CARG2, ->fff_fallback
1692 | ldr CARG1, STR:CARG1->len
1693 | mvn CARG2, #~LJ_TISNUM
1694 | b ->fff_restv
1695 |
1696 |.ffunc string_byte // Only handle the 1-arg case here. 1724 |.ffunc string_byte // Only handle the 1-arg case here.
1697 | ldrd CARG12, [BASE] 1725 | ldrd CARG12, [BASE]
1698 | ldr PC, [BASE, FRAME_PC] 1726 | ldr PC, [BASE, FRAME_PC]
@@ -1725,6 +1753,7 @@ static void build_subroutines(BuildCtx *ctx)
1725 | mov CARG1, L 1753 | mov CARG1, L
1726 | str PC, SAVE_PC 1754 | str PC, SAVE_PC
1727 | bl extern lj_str_new // (lua_State *L, char *str, size_t l) 1755 | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
1756 |->fff_resstr:
1728 | // Returns GCstr *. 1757 | // Returns GCstr *.
1729 | ldr BASE, L->base 1758 | ldr BASE, L->base
1730 | mvn CARG2, #~LJ_TSTR 1759 | mvn CARG2, #~LJ_TSTR
@@ -1768,91 +1797,28 @@ static void build_subroutines(BuildCtx *ctx)
1768 | mvn CARG2, #~LJ_TSTR 1797 | mvn CARG2, #~LJ_TSTR
1769 | b ->fff_restv 1798 | b ->fff_restv
1770 | 1799 |
1771 |.ffunc string_rep // Only handle the 1-char case inline. 1800 |.macro ffstring_op, name
1772 | ffgccheck 1801 | .ffunc string_ .. name
1773 | ldrd CARG12, [BASE]
1774 | ldrd CARG34, [BASE, #8]
1775 | cmp NARGS8:RC, #16
1776 | bne ->fff_fallback // Exactly 2 arguments
1777 | checktp CARG2, LJ_TSTR
1778 | checktpeq CARG4, LJ_TISNUM
1779 | bne ->fff_fallback
1780 | subs CARG4, CARG3, #1
1781 | ldr CARG2, STR:CARG1->len
1782 | blt ->fff_emptystr // Count <= 0?
1783 | cmp CARG2, #1
1784 | blo ->fff_emptystr // Zero-length string?
1785 | bne ->fff_fallback // Fallback for > 1-char strings.
1786 | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
1787 | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
1788 | ldr CARG1, STR:CARG1[1]
1789 | cmp RB, CARG3
1790 | blo ->fff_fallback
1791 |1: // Fill buffer with char.
1792 | strb CARG1, [CARG2, CARG4]
1793 | subs CARG4, CARG4, #1
1794 | bge <1
1795 | b ->fff_newstr
1796 |
1797 |.ffunc string_reverse
1798 | ffgccheck 1802 | ffgccheck
1799 | ldrd CARG12, [BASE] 1803 | ldr CARG3, [BASE, #4]
1800 | cmp NARGS8:RC, #8 1804 | cmp NARGS8:RC, #8
1805 | ldr STR:CARG2, [BASE]
1801 | blo ->fff_fallback 1806 | blo ->fff_fallback
1802 | checkstr CARG2, ->fff_fallback 1807 | sub SBUF:CARG1, DISPATCH, #-DISPATCH_GL(tmpbuf)
1803 | ldr CARG3, STR:CARG1->len 1808 | checkstr CARG3, ->fff_fallback
1804 | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)] 1809 | ldr CARG4, SBUF:CARG1->b
1805 | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)] 1810 | str BASE, L->base
1806 | mov CARG4, CARG3 1811 | str PC, SAVE_PC
1807 | add CARG1, STR:CARG1, #sizeof(GCstr) 1812 | str L, SBUF:CARG1->L
1808 | cmp RB, CARG3 1813 | str CARG4, SBUF:CARG1->p
1809 | blo ->fff_fallback 1814 | bl extern lj_buf_putstr_ .. name
1810 |1: // Reverse string copy. 1815 | bl extern lj_buf_tostr
1811 | ldrb RB, [CARG1], #1 1816 | b ->fff_resstr
1812 | subs CARG4, CARG4, #1
1813 | blt ->fff_newstr
1814 | strb RB, [CARG2, CARG4]
1815 | b <1
1816 |
1817 |.macro ffstring_case, name, lo
1818 | .ffunc name
1819 | ffgccheck
1820 | ldrd CARG12, [BASE]
1821 | cmp NARGS8:RC, #8
1822 | blo ->fff_fallback
1823 | checkstr CARG2, ->fff_fallback
1824 | ldr CARG3, STR:CARG1->len
1825 | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
1826 | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
1827 | mov CARG4, #0
1828 | add CARG1, STR:CARG1, #sizeof(GCstr)
1829 | cmp RB, CARG3
1830 | blo ->fff_fallback
1831 |1: // ASCII case conversion.
1832 | ldrb RB, [CARG1, CARG4]
1833 | cmp CARG4, CARG3
1834 | bhs ->fff_newstr
1835 | sub RC, RB, #lo
1836 | cmp RC, #26
1837 | eorlo RB, RB, #0x20
1838 | strb RB, [CARG2, CARG4]
1839 | add CARG4, CARG4, #1
1840 | b <1
1841 |.endmacro 1817 |.endmacro
1842 | 1818 |
1843 |ffstring_case string_lower, 65 1819 |ffstring_op reverse
1844 |ffstring_case string_upper, 97 1820 |ffstring_op lower
1845 | 1821 |ffstring_op upper
1846 |//-- Table library ------------------------------------------------------
1847 |
1848 |.ffunc_1 table_getn
1849 | checktab CARG2, ->fff_fallback
1850 | .IOS mov RA, BASE
1851 | bl extern lj_tab_len // (GCtab *t)
1852 | // Returns uint32_t (but less than 2^31).
1853 | .IOS mov BASE, RA
1854 | mvn CARG2, #~LJ_TISNUM
1855 | b ->fff_restv
1856 | 1822 |
1857 |//-- Bit library -------------------------------------------------------- 1823 |//-- Bit library --------------------------------------------------------
1858 | 1824 |
@@ -2127,6 +2093,66 @@ static void build_subroutines(BuildCtx *ctx)
2127 | ldr INS, [PC, #-4] 2093 | ldr INS, [PC, #-4]
2128 | bx CRET1 2094 | bx CRET1
2129 | 2095 |
2096 |->cont_stitch: // Trace stitching.
2097 |.if JIT
2098 | // RA = resultptr, CARG4 = meta base
2099 | ldr RB, SAVE_MULTRES
2100 | ldr INS, [PC, #-4]
2101 | ldr TRACE:CARG3, [CARG4, #-24] // Save previous trace.
2102 | subs RB, RB, #8
2103 | decode_RA8 RC, INS // Call base.
2104 | beq >2
2105 |1: // Move results down.
2106 | ldrd CARG12, [RA]
2107 | add RA, RA, #8
2108 | subs RB, RB, #8
2109 | strd CARG12, [BASE, RC]
2110 | add RC, RC, #8
2111 | bne <1
2112 |2:
2113 | decode_RA8 RA, INS
2114 | decode_RB8 RB, INS
2115 | add RA, RA, RB
2116 |3:
2117 | cmp RA, RC
2118 | mvn CARG2, #~LJ_TNIL
2119 | bhi >9 // More results wanted?
2120 |
2121 | ldrh RA, TRACE:CARG3->traceno
2122 | ldrh RC, TRACE:CARG3->link
2123 | cmp RC, RA
2124 | beq ->cont_nop // Blacklisted.
2125 | cmp RC, #0
2126 | bne =>BC_JLOOP // Jump to stitched trace.
2127 |
2128 | // Stitch a new trace to the previous trace.
2129 | str RA, [DISPATCH, #DISPATCH_J(exitno)]
2130 | str L, [DISPATCH, #DISPATCH_J(L)]
2131 | str BASE, L->base
2132 | sub CARG1, DISPATCH, #-GG_DISP2J
2133 | mov CARG2, PC
2134 | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
2135 | ldr BASE, L->base
2136 | b ->cont_nop
2137 |
2138 |9: // Fill up results with nil.
2139 | strd CARG12, [BASE, RC]
2140 | add RC, RC, #8
2141 | b <3
2142 |.endif
2143 |
2144 |->vm_profhook: // Dispatch target for profiler hook.
2145#if LJ_HASPROFILE
2146 | mov CARG1, L
2147 | str BASE, L->base
2148 | mov CARG2, PC
2149 | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
2150 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2151 | ldr BASE, L->base
2152 | sub PC, PC, #4
2153 | b ->cont_nop
2154#endif
2155 |
2130 |//----------------------------------------------------------------------- 2156 |//-----------------------------------------------------------------------
2131 |//-- Trace exit handler ------------------------------------------------- 2157 |//-- Trace exit handler -------------------------------------------------
2132 |//----------------------------------------------------------------------- 2158 |//-----------------------------------------------------------------------
@@ -2151,14 +2177,14 @@ static void build_subroutines(BuildCtx *ctx)
2151 | add CARG1, CARG1, CARG2, asr #6 2177 | add CARG1, CARG1, CARG2, asr #6
2152 | ldr CARG2, [lr, #4] // Load exit stub group offset. 2178 | ldr CARG2, [lr, #4] // Load exit stub group offset.
2153 | sub CARG1, CARG1, lr 2179 | sub CARG1, CARG1, lr
2154 | ldr L, [DISPATCH, #DISPATCH_GL(jit_L)] 2180 | ldr L, [DISPATCH, #DISPATCH_GL(cur_L)]
2155 | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number. 2181 | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number.
2156 | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)] 2182 | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
2157 | str CARG1, [DISPATCH, #DISPATCH_J(exitno)] 2183 | str CARG1, [DISPATCH, #DISPATCH_J(exitno)]
2158 | mov CARG4, #0 2184 | mov CARG4, #0
2159 | str L, [DISPATCH, #DISPATCH_J(L)]
2160 | str BASE, L->base 2185 | str BASE, L->base
2161 | str CARG4, [DISPATCH, #DISPATCH_GL(jit_L)] 2186 | str L, [DISPATCH, #DISPATCH_J(L)]
2187 | str CARG4, [DISPATCH, #DISPATCH_GL(jit_base)]
2162 | sub CARG1, DISPATCH, #-GG_DISP2J 2188 | sub CARG1, DISPATCH, #-GG_DISP2J
2163 | mov CARG2, sp 2189 | mov CARG2, sp
2164 | bl extern lj_trace_exit // (jit_State *J, ExitState *ex) 2190 | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
@@ -2177,13 +2203,14 @@ static void build_subroutines(BuildCtx *ctx)
2177 | ldr L, SAVE_L 2203 | ldr L, SAVE_L
2178 |1: 2204 |1:
2179 | cmp CARG1, #0 2205 | cmp CARG1, #0
2180 | blt >3 // Check for error from exit. 2206 | blt >9 // Check for error from exit.
2181 | lsl RC, CARG1, #3 2207 | lsl RC, CARG1, #3
2182 | ldr LFUNC:CARG2, [BASE, FRAME_FUNC] 2208 | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
2183 | str RC, SAVE_MULTRES 2209 | str RC, SAVE_MULTRES
2184 | mov CARG3, #0 2210 | mov CARG3, #0
2211 | str BASE, L->base
2185 | ldr CARG2, LFUNC:CARG2->field_pc 2212 | ldr CARG2, LFUNC:CARG2->field_pc
2186 | str CARG3, [DISPATCH, #DISPATCH_GL(jit_L)] 2213 | str CARG3, [DISPATCH, #DISPATCH_GL(jit_base)]
2187 | mv_vmstate CARG4, INTERP 2214 | mv_vmstate CARG4, INTERP
2188 | ldr KBASE, [CARG2, #PC2PROTO(k)] 2215 | ldr KBASE, [CARG2, #PC2PROTO(k)]
2189 | // Modified copy of ins_next which handles function header dispatch, too. 2216 | // Modified copy of ins_next which handles function header dispatch, too.
@@ -2192,15 +2219,32 @@ static void build_subroutines(BuildCtx *ctx)
2192 | ldr INS, [PC], #4 2219 | ldr INS, [PC], #4
2193 | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8. 2220 | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
2194 | st_vmstate CARG4 2221 | st_vmstate CARG4
2222 | cmp OP, #BC_FUNCC+2 // Fast function?
2223 | bhs >4
2224 |2:
2195 | cmp OP, #BC_FUNCF // Function header? 2225 | cmp OP, #BC_FUNCF // Function header?
2196 | ldr OP, [DISPATCH, OP, lsl #2] 2226 | ldr OP, [DISPATCH, OP, lsl #2]
2197 | decode_RA8 RA, INS 2227 | decode_RA8 RA, INS
2198 | lsrlo RC, INS, #16 // No: Decode operands A*8 and D. 2228 | lsrlo RC, INS, #16 // No: Decode operands A*8 and D.
2199 | subhs RC, RC, #8 2229 | subhs RC, RC, #8
2200 | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8 2230 | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8
2231 | ldrhs CARG3, [BASE, FRAME_FUNC]
2201 | bx OP 2232 | bx OP
2202 | 2233 |
2203 |3: // Rethrow error from the right C frame. 2234 |4: // Check frame below fast function.
2235 | ldr CARG1, [BASE, FRAME_PC]
2236 | ands CARG2, CARG1, #FRAME_TYPE
2237 | bne <2 // Trace stitching continuation?
2238 | // Otherwise set KBASE for Lua function below fast function.
2239 | ldr CARG3, [CARG1, #-4]
2240 | decode_RA8 CARG1, CARG3
2241 | sub CARG2, BASE, CARG1
2242 | ldr LFUNC:CARG3, [CARG2, #-16]
2243 | ldr CARG3, LFUNC:CARG3->field_pc
2244 | ldr KBASE, [CARG3, #PC2PROTO(k)]
2245 | b <2
2246 |
2247 |9: // Rethrow error from the right C frame.
2204 | mov CARG1, L 2248 | mov CARG1, L
2205 | bl extern lj_err_run // (lua_State *L) 2249 | bl extern lj_err_run // (lua_State *L)
2206 |.endif 2250 |.endif
@@ -2832,6 +2876,25 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2832 | ins_next 2876 | ins_next
2833 break; 2877 break;
2834 2878
2879 case BC_ISTYPE:
2880 | // RA = src*8, RC = -type
2881 | ldrd CARG12, [BASE, RA]
2882 | ins_next1
2883 | cmn CARG2, RC
2884 | ins_next2
2885 | bne ->vmeta_istype
2886 | ins_next3
2887 break;
2888 case BC_ISNUM:
2889 | // RA = src*8, RC = -(TISNUM-1)
2890 | ldrd CARG12, [BASE, RA]
2891 | ins_next1
2892 | checktp CARG2, LJ_TISNUM
2893 | ins_next2
2894 | bhs ->vmeta_istype
2895 | ins_next3
2896 break;
2897
2835 /* -- Unary ops --------------------------------------------------------- */ 2898 /* -- Unary ops --------------------------------------------------------- */
2836 2899
2837 case BC_MOV: 2900 case BC_MOV:
@@ -3436,10 +3499,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3436 |->BC_TGETS_Z: 3499 |->BC_TGETS_Z:
3437 | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 3500 | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
3438 | ldr CARG3, TAB:CARG1->hmask 3501 | ldr CARG3, TAB:CARG1->hmask
3439 | ldr CARG4, STR:RC->hash 3502 | ldr CARG4, STR:RC->sid
3440 | ldr NODE:INS, TAB:CARG1->node 3503 | ldr NODE:INS, TAB:CARG1->node
3441 | mov TAB:RB, TAB:CARG1 3504 | mov TAB:RB, TAB:CARG1
3442 | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask 3505 | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
3443 | add CARG3, CARG3, CARG3, lsl #1 3506 | add CARG3, CARG3, CARG3, lsl #1
3444 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 3507 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
3445 |1: 3508 |1:
@@ -3502,6 +3565,24 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3502 | bne <1 // 'no __index' flag set: done. 3565 | bne <1 // 'no __index' flag set: done.
3503 | b ->vmeta_tgetb 3566 | b ->vmeta_tgetb
3504 break; 3567 break;
3568 case BC_TGETR:
3569 | decode_RB8 RB, INS
3570 | decode_RC8 RC, INS
3571 | // RA = dst*8, RB = table*8, RC = key*8
3572 | ldr TAB:CARG1, [BASE, RB]
3573 | ldr CARG2, [BASE, RC]
3574 | ldr CARG4, TAB:CARG1->array
3575 | ldr CARG3, TAB:CARG1->asize
3576 | add CARG4, CARG4, CARG2, lsl #3
3577 | cmp CARG2, CARG3 // In array part?
3578 | bhs ->vmeta_tgetr
3579 | ldrd CARG12, [CARG4]
3580 |->BC_TGETR_Z:
3581 | ins_next1
3582 | ins_next2
3583 | strd CARG12, [BASE, RA]
3584 | ins_next3
3585 break;
3505 3586
3506 case BC_TSETV: 3587 case BC_TSETV:
3507 | decode_RB8 RB, INS 3588 | decode_RB8 RB, INS
@@ -3565,10 +3646,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3565 |->BC_TSETS_Z: 3646 |->BC_TSETS_Z:
3566 | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8 3647 | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
3567 | ldr CARG3, TAB:CARG1->hmask 3648 | ldr CARG3, TAB:CARG1->hmask
3568 | ldr CARG4, STR:RC->hash 3649 | ldr CARG4, STR:RC->sid
3569 | ldr NODE:INS, TAB:CARG1->node 3650 | ldr NODE:INS, TAB:CARG1->node
3570 | mov TAB:RB, TAB:CARG1 3651 | mov TAB:RB, TAB:CARG1
3571 | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask 3652 | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
3572 | add CARG3, CARG3, CARG3, lsl #1 3653 | add CARG3, CARG3, CARG3, lsl #1
3573 | mov CARG4, #0 3654 | mov CARG4, #0
3574 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8 3655 | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
@@ -3672,6 +3753,32 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3672 | barrierback TAB:CARG1, INS, CARG3 3753 | barrierback TAB:CARG1, INS, CARG3
3673 | b <2 3754 | b <2
3674 break; 3755 break;
3756 case BC_TSETR:
3757 | decode_RB8 RB, INS
3758 | decode_RC8 RC, INS
3759 | // RA = src*8, RB = table*8, RC = key*8
3760 | ldr TAB:CARG2, [BASE, RB]
3761 | ldr CARG3, [BASE, RC]
3762 | ldrb INS, TAB:CARG2->marked
3763 | ldr CARG1, TAB:CARG2->array
3764 | ldr CARG4, TAB:CARG2->asize
3765 | tst INS, #LJ_GC_BLACK // isblack(table)
3766 | add CARG1, CARG1, CARG3, lsl #3
3767 | bne >7
3768 |2:
3769 | cmp CARG3, CARG4 // In array part?
3770 | bhs ->vmeta_tsetr
3771 |->BC_TSETR_Z:
3772 | ldrd CARG34, [BASE, RA]
3773 | ins_next1
3774 | ins_next2
3775 | strd CARG34, [CARG1]
3776 | ins_next3
3777 |
3778 |7: // Possible table write barrier for the value. Skip valiswhite check.
3779 | barrierback TAB:CARG2, INS, RB
3780 | b <2
3781 break;
3675 3782
3676 case BC_TSETM: 3783 case BC_TSETM:
3677 | // RA = base*8 (table at base-1), RC = num_const (start index) 3784 | // RA = base*8 (table at base-1), RC = num_const (start index)
@@ -4269,7 +4376,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4269 | st_vmstate CARG2 4376 | st_vmstate CARG2
4270 | ldr RA, TRACE:RC->mcode 4377 | ldr RA, TRACE:RC->mcode
4271 | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)] 4378 | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
4272 | str L, [DISPATCH, #DISPATCH_GL(jit_L)] 4379 | str L, [DISPATCH, #DISPATCH_GL(tmpbuf.L)]
4273 | bx RA 4380 | bx RA
4274 |.endif 4381 |.endif
4275 break; 4382 break;
@@ -4387,6 +4494,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4387 | ldr BASE, L->base 4494 | ldr BASE, L->base
4388 | mv_vmstate CARG3, INTERP 4495 | mv_vmstate CARG3, INTERP
4389 | ldr CRET2, L->top 4496 | ldr CRET2, L->top
4497 | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
4390 | lsl RC, CRET1, #3 4498 | lsl RC, CRET1, #3
4391 | st_vmstate CARG3 4499 | st_vmstate CARG3
4392 | ldr PC, [BASE, FRAME_PC] 4500 | ldr PC, [BASE, FRAME_PC]
diff --git a/src/vm_arm64.dasc b/src/vm_arm64.dasc
new file mode 100644
index 00000000..1dc4d78e
--- /dev/null
+++ b/src/vm_arm64.dasc
@@ -0,0 +1,3988 @@
1|// Low-level VM code for ARM64 CPUs.
2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4|
5|.arch arm64
6|.section code_op, code_sub
7|
8|.actionlist build_actionlist
9|.globals GLOB_
10|.globalnames globnames
11|.externnames extnames
12|
13|// Note: The ragged indentation of the instructions is intentional.
14|// The starting columns indicate data dependencies.
15|
16|//-----------------------------------------------------------------------
17|
18|// ARM64 registers and the AAPCS64 ABI 1.0 at a glance:
19|//
20|// x0-x17 temp, x19-x28 callee-saved, x29 fp, x30 lr
21|// x18 is reserved on most platforms. Don't use it, save it or restore it.
22|// x31 doesn't exist. Register number 31 either means xzr/wzr (zero) or sp,
23|// depending on the instruction.
24|// v0-v7 temp, v8-v15 callee-saved (only d8-d15 preserved), v16-v31 temp
25|//
26|// x0-x7/v0-v7 hold parameters and results.
27|
28|// Fixed register assignments for the interpreter.
29|
30|// The following must be C callee-save.
31|.define BASE, x19 // Base of current Lua stack frame.
32|.define KBASE, x20 // Constants of current Lua function.
33|.define PC, x21 // Next PC.
34|.define GLREG, x22 // Global state.
35|.define LREG, x23 // Register holding lua_State (also in SAVE_L).
36|.define TISNUM, x24 // Constant LJ_TISNUM << 47.
37|.define TISNUMhi, x25 // Constant LJ_TISNUM << 15.
38|.define TISNIL, x26 // Constant -1LL.
39|.define fp, x29 // Yes, we have to maintain a frame pointer.
40|
41|.define ST_INTERP, w26 // Constant -1.
42|
43|// The following temporaries are not saved across C calls, except for RA/RC.
44|.define RA, x27
45|.define RC, x28
46|.define RB, x17
47|.define RAw, w27
48|.define RCw, w28
49|.define RBw, w17
50|.define INS, x16
51|.define INSw, w16
52|.define ITYPE, x15
53|.define TMP0, x8
54|.define TMP1, x9
55|.define TMP2, x10
56|.define TMP3, x11
57|.define TMP0w, w8
58|.define TMP1w, w9
59|.define TMP2w, w10
60|.define TMP3w, w11
61|
62|// Calling conventions. Also used as temporaries.
63|.define CARG1, x0
64|.define CARG2, x1
65|.define CARG3, x2
66|.define CARG4, x3
67|.define CARG5, x4
68|.define CARG1w, w0
69|.define CARG2w, w1
70|.define CARG3w, w2
71|.define CARG4w, w3
72|.define CARG5w, w4
73|
74|.define FARG1, d0
75|.define FARG2, d1
76|
77|.define CRET1, x0
78|.define CRET1w, w0
79|
80|// Stack layout while in interpreter. Must match with lj_frame.h.
81|
82|.define CFRAME_SPACE, 208
83|//----- 16 byte aligned, <-- sp entering interpreter
84|// Unused [sp, #204] // 32 bit values
85|.define SAVE_NRES, [sp, #200]
86|.define SAVE_ERRF, [sp, #196]
87|.define SAVE_MULTRES, [sp, #192]
88|.define TMPD, [sp, #184] // 64 bit values
89|.define SAVE_L, [sp, #176]
90|.define SAVE_PC, [sp, #168]
91|.define SAVE_CFRAME, [sp, #160]
92|.define SAVE_FPR_, 96 // 96+8*8: 64 bit FPR saves
93|.define SAVE_GPR_, 16 // 16+10*8: 64 bit GPR saves
94|.define SAVE_LR, [sp, #8]
95|.define SAVE_FP, [sp]
96|//----- 16 byte aligned, <-- sp while in interpreter.
97|
98|.define TMPDofs, #184
99|
100|.macro save_, gpr1, gpr2, fpr1, fpr2
101| stp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8]
102| stp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8]
103|.endmacro
104|.macro rest_, gpr1, gpr2, fpr1, fpr2
105| ldp d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(fpr1-8)*8]
106| ldp x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(gpr1-19)*8]
107|.endmacro
108|
109|.macro saveregs
110| stp fp, lr, [sp, #-CFRAME_SPACE]!
111| add fp, sp, #0
112| stp x19, x20, [sp, # SAVE_GPR_]
113| save_ 21, 22, 8, 9
114| save_ 23, 24, 10, 11
115| save_ 25, 26, 12, 13
116| save_ 27, 28, 14, 15
117|.endmacro
118|.macro restoreregs
119| ldp x19, x20, [sp, # SAVE_GPR_]
120| rest_ 21, 22, 8, 9
121| rest_ 23, 24, 10, 11
122| rest_ 25, 26, 12, 13
123| rest_ 27, 28, 14, 15
124| ldp fp, lr, [sp], # CFRAME_SPACE
125|.endmacro
126|
127|// Type definitions. Some of these are only used for documentation.
128|.type L, lua_State, LREG
129|.type GL, global_State, GLREG
130|.type TVALUE, TValue
131|.type GCOBJ, GCobj
132|.type STR, GCstr
133|.type TAB, GCtab
134|.type LFUNC, GCfuncL
135|.type CFUNC, GCfuncC
136|.type PROTO, GCproto
137|.type UPVAL, GCupval
138|.type NODE, Node
139|.type NARGS8, int
140|.type TRACE, GCtrace
141|.type SBUF, SBuf
142|
143|//-----------------------------------------------------------------------
144|
145|// Trap for not-yet-implemented parts.
146|.macro NYI; brk; .endmacro
147|
148|//-----------------------------------------------------------------------
149|
150|// Access to frame relative to BASE.
151|.define FRAME_FUNC, #-16
152|.define FRAME_PC, #-8
153|
154|// Endian-specific defines.
155|.if ENDIAN_LE
156|.define LO, 0
157|.define OFS_RD, 2
158|.define OFS_RB, 3
159|.define OFS_RA, 1
160|.define OFS_OP, 0
161|.else
162|.define LO, 4
163|.define OFS_RD, 0
164|.define OFS_RB, 0
165|.define OFS_RA, 2
166|.define OFS_OP, 3
167|.endif
168|
169|.macro decode_RA, dst, ins; ubfx dst, ins, #8, #8; .endmacro
170|.macro decode_RB, dst, ins; ubfx dst, ins, #24, #8; .endmacro
171|.macro decode_RC, dst, ins; ubfx dst, ins, #16, #8; .endmacro
172|.macro decode_RD, dst, ins; ubfx dst, ins, #16, #16; .endmacro
173|.macro decode_RC8RD, dst, src; ubfiz dst, src, #3, #8; .endmacro
174|
175|// Instruction decode+dispatch.
176|.macro ins_NEXT
177| ldr INSw, [PC], #4
178| add TMP1, GL, INS, uxtb #3
179| decode_RA RA, INS
180| ldr TMP0, [TMP1, #GG_G2DISP]
181| decode_RD RC, INS
182| br TMP0
183|.endmacro
184|
185|// Instruction footer.
186|.if 1
187| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
188| .define ins_next, ins_NEXT
189| .define ins_next_, ins_NEXT
190|.else
191| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
192| // Affects only certain kinds of benchmarks (and only with -j off).
193| .macro ins_next
194| b ->ins_next
195| .endmacro
196| .macro ins_next_
197| ->ins_next:
198| ins_NEXT
199| .endmacro
200|.endif
201|
202|// Call decode and dispatch.
203|.macro ins_callt
204| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
205| ldr PC, LFUNC:CARG3->pc
206| ldr INSw, [PC], #4
207| add TMP1, GL, INS, uxtb #3
208| decode_RA RA, INS
209| ldr TMP0, [TMP1, #GG_G2DISP]
210| add RA, BASE, RA, lsl #3
211| br TMP0
212|.endmacro
213|
214|.macro ins_call
215| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
216| str PC, [BASE, FRAME_PC]
217| ins_callt
218|.endmacro
219|
220|//-----------------------------------------------------------------------
221|
222|// Macros to check the TValue type and extract the GCobj. Branch on failure.
223|.macro checktp, reg, tp, target
224| asr ITYPE, reg, #47
225| cmn ITYPE, #-tp
226| and reg, reg, #LJ_GCVMASK
227| bne target
228|.endmacro
229|.macro checktp, dst, reg, tp, target
230| asr ITYPE, reg, #47
231| cmn ITYPE, #-tp
232| and dst, reg, #LJ_GCVMASK
233| bne target
234|.endmacro
235|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
236|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
237|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
238|.macro checkint, reg, target
239| cmp TISNUMhi, reg, lsr #32
240| bne target
241|.endmacro
242|.macro checknum, reg, target
243| cmp TISNUMhi, reg, lsr #32
244| bls target
245|.endmacro
246|.macro checknumber, reg, target
247| cmp TISNUMhi, reg, lsr #32
248| blo target
249|.endmacro
250|
251|.macro mov_false, reg; movn reg, #0x8000, lsl #32; .endmacro
252|.macro mov_true, reg; movn reg, #0x0001, lsl #48; .endmacro
253|
254#define GL_J(field) (GG_G2J + (int)offsetof(jit_State, field))
255|
256#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
257|
258|.macro hotcheck, delta
259| lsr CARG1, PC, #1
260| and CARG1, CARG1, #126
261| add CARG1, CARG1, #GG_G2DISP+GG_DISP2HOT
262| ldrh CARG2w, [GL, CARG1]
263| subs CARG2, CARG2, #delta
264| strh CARG2w, [GL, CARG1]
265|.endmacro
266|
267|.macro hotloop
268| hotcheck HOTCOUNT_LOOP
269| blo ->vm_hotloop
270|.endmacro
271|
272|.macro hotcall
273| hotcheck HOTCOUNT_CALL
274| blo ->vm_hotcall
275|.endmacro
276|
277|// Set current VM state.
278|.macro mv_vmstate, reg, st; movn reg, #LJ_VMST_..st; .endmacro
279|.macro st_vmstate, reg; str reg, GL->vmstate; .endmacro
280|
281|// Move table write barrier back. Overwrites mark and tmp.
282|.macro barrierback, tab, mark, tmp
283| ldr tmp, GL->gc.grayagain
284| and mark, mark, #~LJ_GC_BLACK // black2gray(tab)
285| str tab, GL->gc.grayagain
286| strb mark, tab->marked
287| str tmp, tab->gclist
288|.endmacro
289|
290|//-----------------------------------------------------------------------
291
292#if !LJ_DUALNUM
293#error "Only dual-number mode supported for ARM64 target"
294#endif
295
296/* Generate subroutines used by opcodes and other parts of the VM. */
297/* The .code_sub section should be last to help static branch prediction. */
298static void build_subroutines(BuildCtx *ctx)
299{
300 |.code_sub
301 |
302 |//-----------------------------------------------------------------------
303 |//-- Return handling ----------------------------------------------------
304 |//-----------------------------------------------------------------------
305 |
306 |->vm_returnp:
307 | // See vm_return. Also: RB = previous base.
308 | tbz PC, #2, ->cont_dispatch // (PC & FRAME_P) == 0?
309 |
310 | // Return from pcall or xpcall fast func.
311 | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
312 | mov_true TMP0
313 | mov BASE, RB
314 | // Prepending may overwrite the pcall frame, so do it at the end.
315 | str TMP0, [RA, #-8]! // Prepend true to results.
316 |
317 |->vm_returnc:
318 | adds RC, RC, #8 // RC = (nresults+1)*8.
319 | mov CRET1, #LUA_YIELD
320 | beq ->vm_unwind_c_eh
321 | str RCw, SAVE_MULTRES
322 | ands CARG1, PC, #FRAME_TYPE
323 | beq ->BC_RET_Z // Handle regular return to Lua.
324 |
325 |->vm_return:
326 | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
327 | // CARG1 = PC & FRAME_TYPE
328 | and RB, PC, #~FRAME_TYPEP
329 | cmp CARG1, #FRAME_C
330 | sub RB, BASE, RB // RB = previous base.
331 | bne ->vm_returnp
332 |
333 | str RB, L->base
334 | ldrsw CARG2, SAVE_NRES // CARG2 = nresults+1.
335 | mv_vmstate TMP0w, C
336 | sub BASE, BASE, #16
337 | subs TMP2, RC, #8
338 | st_vmstate TMP0w
339 | beq >2
340 |1:
341 | subs TMP2, TMP2, #8
342 | ldr TMP0, [RA], #8
343 | str TMP0, [BASE], #8
344 | bne <1
345 |2:
346 | cmp RC, CARG2, lsl #3 // More/less results wanted?
347 | bne >6
348 |3:
349 | str BASE, L->top // Store new top.
350 |
351 |->vm_leave_cp:
352 | ldr RC, SAVE_CFRAME // Restore previous C frame.
353 | mov CRET1, #0 // Ok return status for vm_pcall.
354 | str RC, L->cframe
355 |
356 |->vm_leave_unw:
357 | restoreregs
358 | ret
359 |
360 |6:
361 | bgt >7 // Less results wanted?
362 | // More results wanted. Check stack size and fill up results with nil.
363 | ldr CARG3, L->maxstack
364 | cmp BASE, CARG3
365 | bhs >8
366 | str TISNIL, [BASE], #8
367 | add RC, RC, #8
368 | b <2
369 |
370 |7: // Less results wanted.
371 | cbz CARG2, <3 // LUA_MULTRET+1 case?
372 | sub CARG1, RC, CARG2, lsl #3
373 | sub BASE, BASE, CARG1 // Shrink top.
374 | b <3
375 |
376 |8: // Corner case: need to grow stack for filling up results.
377 | // This can happen if:
378 | // - A C function grows the stack (a lot).
379 | // - The GC shrinks the stack in between.
380 | // - A return back from a lua_call() with (high) nresults adjustment.
381 | str BASE, L->top // Save current top held in BASE (yes).
382 | mov CARG1, L
383 | bl extern lj_state_growstack // (lua_State *L, int n)
384 | ldr BASE, L->top // Need the (realloced) L->top in BASE.
385 | ldrsw CARG2, SAVE_NRES
386 | b <2
387 |
388 |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
389 | // (void *cframe, int errcode)
390 | mov sp, CARG1
391 | mov CRET1, CARG2
392 |->vm_unwind_c_eh: // Landing pad for external unwinder.
393 | ldr L, SAVE_L
394 | mv_vmstate TMP0w, C
395 | ldr GL, L->glref
396 | st_vmstate TMP0w
397 | b ->vm_leave_unw
398 |
399 |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
400 | // (void *cframe)
401 | and sp, CARG1, #CFRAME_RAWMASK
402 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
403 | ldr L, SAVE_L
404 | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
405 | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
406 | movn TISNIL, #0
407 | mov RC, #16 // 2 results: false + error message.
408 | ldr BASE, L->base
409 | ldr GL, L->glref // Setup pointer to global state.
410 | mov_false TMP0
411 | sub RA, BASE, #8 // Results start at BASE-8.
412 | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
413 | str TMP0, [BASE, #-8] // Prepend false to error message.
414 | st_vmstate ST_INTERP
415 | b ->vm_returnc
416 |
417 |//-----------------------------------------------------------------------
418 |//-- Grow stack for calls -----------------------------------------------
419 |//-----------------------------------------------------------------------
420 |
421 |->vm_growstack_c: // Grow stack for C function.
422 | // CARG1 = L
423 | mov CARG2, #LUA_MINSTACK
424 | b >2
425 |
426 |->vm_growstack_l: // Grow stack for Lua function.
427 | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
428 | add RC, BASE, RC
429 | sub RA, RA, BASE
430 | mov CARG1, L
431 | stp BASE, RC, L->base
432 | add PC, PC, #4 // Must point after first instruction.
433 | lsr CARG2, RA, #3
434 |2:
435 | // L->base = new base, L->top = top
436 | str PC, SAVE_PC
437 | bl extern lj_state_growstack // (lua_State *L, int n)
438 | ldp BASE, RC, L->base
439 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
440 | sub NARGS8:RC, RC, BASE
441 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
442 | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
443 | ins_callt // Just retry the call.
444 |
445 |//-----------------------------------------------------------------------
446 |//-- Entry points into the assembler VM ---------------------------------
447 |//-----------------------------------------------------------------------
448 |
449 |->vm_resume: // Setup C frame and resume thread.
450 | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
451 | saveregs
452 | mov L, CARG1
453 | ldr GL, L->glref // Setup pointer to global state.
454 | mov BASE, CARG2
455 | str L, SAVE_L
456 | mov PC, #FRAME_CP
457 | str wzr, SAVE_NRES
458 | add TMP0, sp, #CFRAME_RESUME
459 | ldrb TMP1w, L->status
460 | str wzr, SAVE_ERRF
461 | str L, SAVE_PC // Any value outside of bytecode is ok.
462 | str xzr, SAVE_CFRAME
463 | str TMP0, L->cframe
464 | cbz TMP1w, >3
465 |
466 | // Resume after yield (like a return).
467 | str L, GL->cur_L
468 | mov RA, BASE
469 | ldp BASE, CARG1, L->base
470 | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
471 | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
472 | ldr PC, [BASE, FRAME_PC]
473 | strb wzr, L->status
474 | movn TISNIL, #0
475 | sub RC, CARG1, BASE
476 | ands CARG1, PC, #FRAME_TYPE
477 | add RC, RC, #8
478 | st_vmstate ST_INTERP
479 | str RCw, SAVE_MULTRES
480 | beq ->BC_RET_Z
481 | b ->vm_return
482 |
483 |->vm_pcall: // Setup protected C frame and enter VM.
484 | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
485 | saveregs
486 | mov PC, #FRAME_CP
487 | str CARG4w, SAVE_ERRF
488 | b >1
489 |
490 |->vm_call: // Setup C frame and enter VM.
491 | // (lua_State *L, TValue *base, int nres1)
492 | saveregs
493 | mov PC, #FRAME_C
494 |
495 |1: // Entry point for vm_pcall above (PC = ftype).
496 | ldr RC, L:CARG1->cframe
497 | str CARG3w, SAVE_NRES
498 | mov L, CARG1
499 | str CARG1, SAVE_L
500 | ldr GL, L->glref // Setup pointer to global state.
501 | mov BASE, CARG2
502 | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
503 | str RC, SAVE_CFRAME
504 | str fp, L->cframe // Add our C frame to cframe chain.
505 |
506 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
507 | str L, GL->cur_L
508 | ldp RB, CARG1, L->base // RB = old base (for vmeta_call).
509 | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
510 | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
511 | add PC, PC, BASE
512 | movn TISNIL, #0
513 | sub PC, PC, RB // PC = frame delta + frame type
514 | sub NARGS8:RC, CARG1, BASE
515 | st_vmstate ST_INTERP
516 |
517 |->vm_call_dispatch:
518 | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
519 | ldr CARG3, [BASE, FRAME_FUNC]
520 | checkfunc CARG3, ->vmeta_call
521 |
522 |->vm_call_dispatch_f:
523 | ins_call
524 | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
525 |
526 |->vm_cpcall: // Setup protected C frame, call C.
527 | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
528 | saveregs
529 | mov L, CARG1
530 | ldr RA, L:CARG1->stack
531 | str CARG1, SAVE_L
532 | ldr GL, L->glref // Setup pointer to global state.
533 | ldr RB, L->top
534 | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
535 | ldr RC, L->cframe
536 | sub RA, RA, RB // Compute -savestack(L, L->top).
537 | str RAw, SAVE_NRES // Neg. delta means cframe w/o frame.
538 | str wzr, SAVE_ERRF // No error function.
539 | str RC, SAVE_CFRAME
540 | str fp, L->cframe // Add our C frame to cframe chain.
541 | str L, GL->cur_L
542 | blr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
543 | mov BASE, CRET1
544 | mov PC, #FRAME_CP
545 | cbnz BASE, <3 // Else continue with the call.
546 | b ->vm_leave_cp // No base? Just remove C frame.
547 |
548 |//-----------------------------------------------------------------------
549 |//-- Metamethod handling ------------------------------------------------
550 |//-----------------------------------------------------------------------
551 |
552 |//-- Continuation dispatch ----------------------------------------------
553 |
554 |->cont_dispatch:
555 | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
556 | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
557 | ldr CARG1, [BASE, #-32] // Get continuation.
558 | mov CARG4, BASE
559 | mov BASE, RB // Restore caller BASE.
560 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
561 |.if FFI
562 | cmp CARG1, #1
563 |.endif
564 | ldr PC, [CARG4, #-24] // Restore PC from [cont|PC].
565 | ldr CARG3, LFUNC:CARG3->pc
566 | add TMP0, RA, RC
567 | str TISNIL, [TMP0, #-8] // Ensure one valid arg.
568 |.if FFI
569 | bls >1
570 |.endif
571 | ldr KBASE, [CARG3, #PC2PROTO(k)]
572 | // BASE = base, RA = resultptr, CARG4 = meta base
573 | br CARG1
574 |
575 |.if FFI
576 |1:
577 | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
578 | // cont = 0: tailcall from C function.
579 | sub CARG4, CARG4, #32
580 | sub RC, CARG4, BASE
581 | b ->vm_call_tail
582 |.endif
583 |
584 |->cont_cat: // RA = resultptr, CARG4 = meta base
585 | ldr INSw, [PC, #-4]
586 | sub CARG2, CARG4, #32
587 | ldr TMP0, [RA]
588 | str BASE, L->base
589 | decode_RB RB, INS
590 | decode_RA RA, INS
591 | add TMP1, BASE, RB, lsl #3
592 | subs TMP1, CARG2, TMP1
593 | beq >1
594 | str TMP0, [CARG2]
595 | lsr CARG3, TMP1, #3
596 | b ->BC_CAT_Z
597 |
598 |1:
599 | str TMP0, [BASE, RA, lsl #3]
600 | b ->cont_nop
601 |
602 |//-- Table indexing metamethods -----------------------------------------
603 |
 |// Entry points for the slow path of TGETS/TGETB/TGETV. Each variant
 |// materializes (o, k) TValue pointers in CARG2/CARG3, then falls into the
 |// common call to lj_meta_tget at label 1.
604 |->vmeta_tgets1:
605 | movn CARG4, #~LJ_TSTR
606 | add CARG2, BASE, RB, lsl #3
607 | add CARG4, STR:RC, CARG4, lsl #47 // Tag string key as TValue.
608 | b >2
609 |
610 |->vmeta_tgets:
611 | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
612 | str CARG2, GL->tmptv // Table object parked in GL->tmptv.
613 | add CARG2, GL, #offsetof(global_State, tmptv)
614 |2:
615 | add CARG3, sp, TMPDofs // Key passed via stack temp TMPD.
616 | str CARG4, TMPD
617 | b >1
618 |
619 |->vmeta_tgetb: // RB = table, RC = index
620 | add RC, RC, TISNUM // Tag immediate index as integer TValue.
621 | add CARG2, BASE, RB, lsl #3
622 | add CARG3, sp, TMPDofs
623 | str RC, TMPD
624 | b >1
625 |
626 |->vmeta_tgetv: // RB = table, RC = key
627 | add CARG2, BASE, RB, lsl #3
628 | add CARG3, BASE, RC, lsl #3
629 |1:
630 | str BASE, L->base
631 | mov CARG1, L
632 | str PC, SAVE_PC
633 | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
634 | // Returns TValue * (finished) or NULL (metamethod).
635 | cbz CRET1, >3
636 | ldr TMP0, [CRET1]
637 | str TMP0, [BASE, RA, lsl #3] // Copy fetched value to dest slot.
638 | ins_next
639 |
640 |3: // Call __index metamethod.
641 | // BASE = base, L->top = new base, stack = cont/func/t/k
642 | sub TMP1, BASE, #FRAME_CONT
643 | ldr BASE, L->top
644 | mov NARGS8:RC, #16 // 2 args for func(t, k).
645 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
646 | str PC, [BASE, #-24] // [cont|PC]
647 | sub PC, BASE, TMP1 // Encode continuation frame delta.
648 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
649 | b ->vm_call_dispatch_f
650 |
 |// Slow path for BC_TGETR (raw integer get): key didn't hit the array part.
651 |->vmeta_tgetr:
652 | sxtw CARG2, TMP1w
653 | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
654 | // Returns cTValue * or NULL.
655 | mov TMP0, TISNIL // Default to nil if key absent.
656 | cbz CRET1, ->BC_TGETR_Z
657 | ldr TMP0, [CRET1]
658 | b ->BC_TGETR_Z
659 |
660 |//-----------------------------------------------------------------------
661 |
 |// Store-side slow paths, mirroring the vmeta_tget* entries above:
 |// build (o, k) TValue pointers, call lj_meta_tset, then either store the
 |// value directly or set up a __newindex metamethod call frame.
662 |->vmeta_tsets1:
663 | movn CARG4, #~LJ_TSTR
664 | add CARG2, BASE, RB, lsl #3
665 | add CARG4, STR:RC, CARG4, lsl #47 // Tag string key as TValue.
666 | b >2
667 |
668 |->vmeta_tsets:
669 | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
670 | str CARG2, GL->tmptv // Table object parked in GL->tmptv.
671 | add CARG2, GL, #offsetof(global_State, tmptv)
672 |2:
673 | add CARG3, sp, TMPDofs // Key passed via stack temp TMPD.
674 | str CARG4, TMPD
675 | b >1
676 |
677 |->vmeta_tsetb: // RB = table, RC = index
678 | add RC, RC, TISNUM // Tag immediate index as integer TValue.
679 | add CARG2, BASE, RB, lsl #3
680 | add CARG3, sp, TMPDofs
681 | str RC, TMPD
682 | b >1
683 |
684 |->vmeta_tsetv:
685 | add CARG2, BASE, RB, lsl #3
686 | add CARG3, BASE, RC, lsl #3
687 |1:
688 | str BASE, L->base
689 | mov CARG1, L
690 | str PC, SAVE_PC
691 | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
692 | // Returns TValue * (finished) or NULL (metamethod).
693 | ldr TMP0, [BASE, RA, lsl #3] // Load value to store.
694 | cbz CRET1, >3
695 | // NOBARRIER: lj_meta_tset ensures the table is not black.
696 | str TMP0, [CRET1]
697 | ins_next
698 |
699 |3: // Call __newindex metamethod.
700 | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
701 | sub TMP1, BASE, #FRAME_CONT
702 | ldr BASE, L->top
703 | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
704 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
705 | str TMP0, [BASE, #16] // Copy value to third argument.
706 | str PC, [BASE, #-24] // [cont|PC]
707 | sub PC, BASE, TMP1 // Encode continuation frame delta.
708 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
709 | b ->vm_call_dispatch_f
710 |
 |// Slow path for BC_TSETR (raw integer set): key missed the array part.
711 |->vmeta_tsetr:
712 | sxtw CARG3, TMP1w
713 | str BASE, L->base
714 | mov CARG1, L
715 | str PC, SAVE_PC
716 | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
717 | // Returns TValue *.
718 | b ->BC_TSETR_Z
719 |
720 |//-- Comparison metamethods ---------------------------------------------
721 |
 |// Slow path for ordered comparisons. lj_meta_comp either resolves the
 |// comparison (returns 0/1) or returns a new base for a metamethod call.
722 |->vmeta_comp:
723 | add CARG2, BASE, RA, lsl #3
724 | sub PC, PC, #4 // Rewind PC to the comparison ins.
725 | add CARG3, BASE, RC, lsl #3
726 | str BASE, L->base
727 | mov CARG1, L
728 | str PC, SAVE_PC
729 | uxtb CARG4w, INSw // op = opcode byte of the ins.
730 | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
731 | // Returns 0/1 or TValue * (metamethod).
732 |3:
733 | cmp CRET1, #1 // >1 means TValue * (metamethod base).
734 | bhi ->vmeta_binop
735 |4: // Take or skip the branch based on CRET1 (0 = skip, 1 = take).
736 | ldrh RBw, [PC, # OFS_RD]
737 | add PC, PC, #4
738 | add RB, PC, RB, lsl #2
739 | sub RB, RB, #0x20000 // Branch target (biased jump offset).
740 | csel PC, PC, RB, lo // lo (CRET1==0): fall through.
741 |->cont_nop:
742 | ins_next
743 |
 |// Generic continuation: copy single metamethod result to the dest slot
 |// decoded from the original instruction.
744 |->cont_ra: // RA = resultptr
745 | ldr INSw, [PC, #-4]
746 | ldr TMP0, [RA]
747 | decode_RA TMP1, INS
748 | str TMP0, [BASE, TMP1, lsl #3]
749 | b ->cont_nop
750 |
751 |->cont_condt: // RA = resultptr
752 | ldr TMP0, [RA]
753 | mov_true TMP1
754 | cmp TMP1, TMP0 // Branch if result is true.
755 | b <4
756 |
757 |->cont_condf: // RA = resultptr
758 | ldr TMP0, [RA]
759 | mov_false TMP1
760 | cmp TMP0, TMP1 // Branch if result is false.
761 | b <4
762 |
 |// Slow path for (in)equality: dispatch to __eq metamethod.
763 |->vmeta_equal:
764 | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
765 | and TAB:CARG3, CARG3, #LJ_GCVMASK
766 | sub PC, PC, #4
767 | str BASE, L->base
768 | mov CARG1, L
769 | str PC, SAVE_PC
770 | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
771 | // Returns 0/1 or TValue * (metamethod).
772 | b <3
773 |
 |// FFI cdata (in)equality via __eq; whole instruction passed to the C side.
774 |->vmeta_equal_cd:
775 |.if FFI
776 | sub PC, PC, #4
777 | str BASE, L->base
778 | mov CARG1, L
779 | mov CARG2, INS
780 | str PC, SAVE_PC
781 | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
782 | // Returns 0/1 or TValue * (metamethod).
783 | b <3
784 |.endif
785 |
 |// Slow path for BC_ISTYPE/BC_ISNUM: raises the type error.
786 |->vmeta_istype:
787 | sub PC, PC, #4
788 | str BASE, L->base
789 | mov CARG1, L
790 | mov CARG2, RA
791 | mov CARG3, RC
792 | str PC, SAVE_PC
793 | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
794 | b ->cont_nop
795 |
796 |//-- Arithmetic metamethods ---------------------------------------------
797 |
 |// Slow paths for arithmetic ops. Each variant sets CARG3/CARG4 to the
 |// operand TValue pointers (vn: reg/const, nv: const/reg, vv: reg/reg,
 |// unm: same operand twice), then joins the common lj_meta_arith call.
798 |->vmeta_arith_vn:
799 | add CARG3, BASE, RB, lsl #3
800 | add CARG4, KBASE, RC, lsl #3
801 | b >1
802 |
803 |->vmeta_arith_nv:
804 | add CARG4, BASE, RB, lsl #3
805 | add CARG3, KBASE, RC, lsl #3
806 | b >1
807 |
808 |->vmeta_unm:
809 | add CARG3, BASE, RC, lsl #3
810 | mov CARG4, CARG3 // Unary minus: both operands alias.
811 | b >1
812 |
813 |->vmeta_arith_vv:
814 | add CARG3, BASE, RB, lsl #3
815 | add CARG4, BASE, RC, lsl #3
816 |1:
817 | uxtb CARG5w, INSw // op = opcode byte.
818 | add CARG2, BASE, RA, lsl #3
819 | str BASE, L->base
820 | mov CARG1, L
821 | str PC, SAVE_PC
822 | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
823 | // Returns NULL (finished) or TValue * (metamethod).
824 | cbz CRET1, ->cont_nop
825 |
826 | // Call metamethod for binary op.
827 |->vmeta_binop:
828 | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
829 | sub TMP1, CRET1, BASE
830 | str PC, [CRET1, #-24] // [cont|PC]
831 | add PC, TMP1, #FRAME_CONT // Delta to old base, tagged as cont frame.
832 | mov BASE, CRET1
833 | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
834 | b ->vm_call_dispatch
835 |
 |// Slow path for BC_LEN: dispatch __len metamethod.
836 |->vmeta_len:
837 | add CARG2, BASE, RC, lsl #3
838#if LJ_52
839 | mov TAB:RC, TAB:CARG1 // Save table (ignored for other types).
840#endif
841 | str BASE, L->base
842 | mov CARG1, L
843 | str PC, SAVE_PC
844 | bl extern lj_meta_len // (lua_State *L, TValue *o)
845 | // Returns NULL (retry) or TValue * (metamethod base).
846#if LJ_52
847 | cbnz CRET1, ->vmeta_binop // Binop call for compatibility.
848 | mov TAB:CARG1, TAB:RC // Retry raw table length.
849 | b ->BC_LEN_Z
850#else
851 | b ->vmeta_binop // Binop call for compatibility.
852#endif
853 |
854 |//-- Call metamethod ----------------------------------------------------
855 |
 |// Called object is not a function: resolve __call, which shifts the
 |// arguments up by one slot and inserts the handler as the function.
856 |->vmeta_call: // Resolve and call __call metamethod.
857 | // RB = old base, BASE = new base, RC = nargs*8
858 | mov CARG1, L
859 | str RB, L->base // This is the callers base!
860 | sub CARG2, BASE, #16 // func slot of the pending call.
861 | str PC, SAVE_PC
862 | add CARG3, BASE, NARGS8:RC
863 | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
864 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
865 | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
866 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
867 | ins_call
868 |
 |// Same resolution for tailcalls (BC_CALLT); returns through BC_CALLT2_Z.
869 |->vmeta_callt: // Resolve __call for BC_CALLT.
870 | // BASE = old base, RA = new base, RC = nargs*8
871 | mov CARG1, L
872 | str BASE, L->base
873 | sub CARG2, RA, #16
874 | str PC, SAVE_PC
875 | add CARG3, RA, NARGS8:RC
876 | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
877 | ldr TMP1, [RA, FRAME_FUNC] // Guaranteed to be a function here.
878 | ldr PC, [BASE, FRAME_PC]
879 | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
880 | and LFUNC:CARG3, TMP1, #LJ_GCVMASK
881 | b ->BC_CALLT2_Z
882 |
883 |//-- Argument coercion for 'for' statement ------------------------------
884 |
 |// Coerce 'for' init/limit/step to numbers, then re-execute the FORI.
885 |->vmeta_for:
886 | mov CARG1, L
887 | str BASE, L->base
888 | mov CARG2, RA
889 | str PC, SAVE_PC
890 | bl extern lj_meta_for // (lua_State *L, TValue *base)
891 | ldr INSw, [PC, #-4] // Re-decode the (possibly patched) ins.
892 |.if JIT
893 | uxtb TMP0w, INSw
894 |.endif
895 | decode_RA RA, INS
896 | decode_RD RC, INS
897 |.if JIT
898 | cmp TMP0, #BC_JFORI
899 | beq =>BC_JFORI
900 |.endif
901 | b =>BC_FORI
902 |
903 |//-----------------------------------------------------------------------
904 |//-- Fast functions -----------------------------------------------------
905 |//-----------------------------------------------------------------------
906 |
 |// Helper macros defining fast-function entry points ->ff_<name>.
 |// The _1/_2 variants preload and arity-check 1 or 2 arguments;
 |// the _n/_nn variants additionally require them to be numbers
 |// (loading them into FARG1/FARG2 as well).
907 |.macro .ffunc, name
908 |->ff_ .. name:
909 |.endmacro
910 |
911 |.macro .ffunc_1, name
912 |->ff_ .. name:
913 | ldr CARG1, [BASE]
914 | cmp NARGS8:RC, #8
915 | blo ->fff_fallback // Need at least 1 argument.
916 |.endmacro
917 |
918 |.macro .ffunc_2, name
919 |->ff_ .. name:
920 | ldp CARG1, CARG2, [BASE]
921 | cmp NARGS8:RC, #16
922 | blo ->fff_fallback // Need at least 2 arguments.
923 |.endmacro
924 |
925 |.macro .ffunc_n, name
926 | .ffunc name
927 | ldr CARG1, [BASE]
928 | cmp NARGS8:RC, #8
929 | ldr FARG1, [BASE] // Same slot, reloaded as FP arg.
930 | blo ->fff_fallback
931 | checknum CARG1, ->fff_fallback
932 |.endmacro
933 |
934 |.macro .ffunc_nn, name
935 | .ffunc name
936 | ldp CARG1, CARG2, [BASE]
937 | cmp NARGS8:RC, #16
938 | ldp FARG1, FARG2, [BASE]
939 | blo ->fff_fallback
940 | checknum CARG1, ->fff_fallback
941 | checknum CARG2, ->fff_fallback
942 |.endmacro
943 |
944 |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
945 |.macro ffgccheck
946 | ldp CARG1, CARG2, GL->gc.total // Assumes threshold follows total.
947 | cmp CARG1, CARG2
948 | blt >1
949 | bl ->fff_gcstep // Over threshold: run a GC step.
950 |1:
951 |.endmacro
952 |
953 |//-- Base library: checks -----------------------------------------------
954 |
 |// assert(v, ...): falls back (to raise the error) if v is false/nil,
 |// otherwise returns all arguments unchanged (shifted down one slot).
955 |.ffunc_1 assert
956 | ldr PC, [BASE, FRAME_PC]
957 | mov_false TMP1
958 | cmp CARG1, TMP1
959 | bhs ->fff_fallback // false or nil: raise error.
960 | str CARG1, [BASE, #-16]
961 | sub RB, BASE, #8
962 | subs RA, NARGS8:RC, #8
963 | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
964 | cbz RA, ->fff_res // Done if exactly 1 argument.
965 |1: // Copy remaining args down to the result slots.
966 | ldr CARG1, [RB, #16]
967 | sub RA, RA, #8
968 | str CARG1, [RB], #8
969 | cbnz RA, <1
970 | b ->fff_res
971 |
 |// type(v): index the type-name upvalues of the fast function by the
 |// (clamped) inverted tag of the argument.
972 |.ffunc_1 type
973 | mov TMP0, #~LJ_TISNUM
974 | asr ITYPE, CARG1, #47
975 | cmn ITYPE, #~LJ_TISNUM
976 | csinv TMP1, TMP0, ITYPE, lo // Clamp numbers to one type index.
977 | add TMP1, TMP1, #offsetof(GCfuncC, upvalue)/8
978 | ldr CARG1, [CFUNC:CARG3, TMP1, lsl #3] // Fetch type-name string.
979 | b ->fff_restv
980 |
981 |//-- Base library: getters and setters ---------------------------------
982 |
 |// getmetatable(o): returns mt.__metatable if present, else the metatable.
 |// Inlines the hash lookup for the "__metatable" key.
983 |.ffunc_1 getmetatable
984 | asr ITYPE, CARG1, #47
985 | cmn ITYPE, #-LJ_TTAB
986 | ccmn ITYPE, #-LJ_TUDATA, #4, ne
987 | and TAB:CARG1, CARG1, #LJ_GCVMASK
988 | bne >6 // Not a table/udata: use base metatable.
989 |1: // Field metatable must be at same offset for GCtab and GCudata!
990 | ldr TAB:RB, TAB:CARG1->metatable
991 |2:
992 | mov CARG1, TISNIL
993 | ldr STR:RC, GL->gcroot[GCROOT_MMNAME+MM_metatable]
994 | cbz TAB:RB, ->fff_restv // No metatable: return nil.
995 | ldr TMP1w, TAB:RB->hmask
996 | ldr TMP2w, STR:RC->sid
997 | ldr NODE:CARG3, TAB:RB->node
998 | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
999 | add TMP1, TMP1, TMP1, lsl #1
1000 | movn CARG4, #~LJ_TSTR
1001 | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
1002 | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
1003 |3: // Rearranged logic, because we expect _not_ to find the key.
1004 | ldp CARG1, TMP0, NODE:CARG3->val
1005 | ldr NODE:CARG3, NODE:CARG3->next
1006 | cmp TMP0, CARG4
1007 | beq >5
1008 | cbnz NODE:CARG3, <3
1009 |4: // __metatable not found (or nil): return the metatable itself.
1010 | mov CARG1, RB // Use metatable as default result.
1011 | movk CARG1, #(LJ_TTAB>>1)&0xffff, lsl #48
1012 | b ->fff_restv
1013 |5:
1014 | cmp TMP0, TISNIL
1015 | bne ->fff_restv // Found non-nil __metatable value.
1016 | b <4
1017 |
1018 |6: // Other types: look up the per-type base metatable.
1019 | movn TMP0, #~LJ_TISNUM
1020 | cmp ITYPE, TMP0
1021 | csel ITYPE, ITYPE, TMP0, hs // Clamp numbers to one slot.
1022 | sub TMP1, GL, ITYPE, lsl #3
1023 | ldr TAB:RB, [TMP1, #offsetof(global_State, gcroot[GCROOT_BASEMT])-8]
1024 | b <2
1025 |
 |// setmetatable(t, mt): fast path only when t has no metatable yet and
 |// mt is a table; otherwise (incl. __metatable protection) fall back.
1026 |.ffunc_2 setmetatable
1027 | // Fast path: no mt for table yet and not clearing the mt.
1028 | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
1029 | ldr TAB:TMP0, TAB:TMP1->metatable
1030 | asr ITYPE, CARG2, #47
1031 | ldrb TMP2w, TAB:TMP1->marked
1032 | cmn ITYPE, #-LJ_TTAB
1033 | and TAB:CARG2, CARG2, #LJ_GCVMASK
1034 | ccmp TAB:TMP0, #0, #0, eq // Also require mt currently absent.
1035 | bne ->fff_fallback
1036 | str TAB:CARG2, TAB:TMP1->metatable
1037 | tbz TMP2w, #2, ->fff_restv // isblack(table)
1038 | barrierback TAB:TMP1, TMP2w, TMP0
1039 | b ->fff_restv
1040 |
 |// rawget(t, k): direct lookup via lj_tab_get, no metamethods.
1041 |.ffunc rawget
1042 | ldr CARG2, [BASE]
1043 | cmp NARGS8:RC, #16
1044 | blo ->fff_fallback
1045 | checktab CARG2, ->fff_fallback
1046 | mov CARG1, L
1047 | add CARG3, BASE, #8
1048 | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1049 | // Returns cTValue *.
1050 | ldr CARG1, [CRET1]
1051 | b ->fff_restv
1052 |
1053 |//-- Base library: conversions ------------------------------------------
1054 |
1055 |.ffunc tonumber
1056 | // Only handles the number case inline (without a base argument).
1057 | ldr CARG1, [BASE]
1058 | cmp NARGS8:RC, #8
1059 | bne ->fff_fallback // Exactly 1 argument required here.
1060 | checknumber CARG1, ->fff_fallback
1061 | b ->fff_restv // Numbers convert to themselves.
1062 |
1063 |.ffunc_1 tostring
1064 | // Only handles the string or number case inline.
1065 | asr ITYPE, CARG1, #47
1066 | cmn ITYPE, #-LJ_TSTR
1067 | // A __tostring method in the string base metatable is ignored.
1068 | beq ->fff_restv
1069 | // Handle numbers inline, unless a number base metatable is present.
1070 | ldr TMP1, GL->gcroot[GCROOT_BASEMT_NUM]
1071 | str BASE, L->base
1072 | cmn ITYPE, #-LJ_TISNUM
1073 | ccmp TMP1, #0, #0, ls
1074 | str PC, SAVE_PC // Redundant (but a defined value).
1075 | bne ->fff_fallback
1076 | ffgccheck // Result string allocates: check GC first.
1077 | mov CARG1, L
1078 | mov CARG2, BASE
1079 | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
1080 | // Returns GCstr *.
1081 | movn TMP1, #~LJ_TSTR
1082 | ldr BASE, L->base // Reload: GC step may have moved the stack.
1083 | add CARG1, CARG1, TMP1, lsl #47 // Tag result as string TValue.
1084 | b ->fff_restv
1085 |
1086 |//-- Base library: iterators -------------------------------------------
1087 |
 |// next(t [,k]): full traversal via lj_tab_next; returns nil at the end,
 |// else the next key/value pair.
1088 |.ffunc_1 next
1089 | checktp CARG2, CARG1, LJ_TTAB, ->fff_fallback
1090 | str TISNIL, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
1091 | ldr PC, [BASE, FRAME_PC]
1092 | stp BASE, BASE, L->base // Add frame since C call can throw.
1093 | mov CARG1, L
1094 | add CARG3, BASE, #8
1095 | str PC, SAVE_PC
1096 | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
1097 | // Returns 0 at end of traversal.
1098 | str TISNIL, [BASE, #-16] // Preset nil result.
1099 | cbz CRET1, ->fff_res1 // End of traversal: return nil.
1100 | ldp CARG1, CARG2, [BASE, #8] // Copy key and value to results.
1101 | mov RC, #(2+1)*8
1102 | stp CARG1, CARG2, [BASE, #-16]
1103 | b ->fff_res
1104 |
 |// pairs(t): returns next (from upvalue 0), t, nil.
1105 |.ffunc_1 pairs
1106 | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
1107#if LJ_52
1108 | ldr TAB:CARG2, TAB:TMP1->metatable
1109#endif
1110 | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
1111 | ldr PC, [BASE, FRAME_PC]
1112#if LJ_52
1113 | cbnz TAB:CARG2, ->fff_fallback // 5.2: __pairs may exist.
1114#endif
1115 | mov RC, #(3+1)*8
1116 | stp CARG1, TISNIL, [BASE, #-8]
1117 | str CFUNC:CARG4, [BASE, #-16]
1118 | b ->fff_res
1119 |
 |// ipairs iterator step: fetch t[i+1] from the array part if possible,
 |// else from the hash part via lj_tab_getinth. Ends on nil.
1120 |.ffunc_2 ipairs_aux
1121 | checktab CARG1, ->fff_fallback
1122 | checkint CARG2, ->fff_fallback
1123 | ldr TMP1w, TAB:CARG1->asize
1124 | ldr CARG3, TAB:CARG1->array
1125 | ldr TMP0w, TAB:CARG1->hmask
1126 | add CARG2w, CARG2w, #1 // i = i+1.
1127 | cmp CARG2w, TMP1w
1128 | ldr PC, [BASE, FRAME_PC]
1129 | add TMP2, CARG2, TISNUM // Tag new index as integer TValue.
1130 | mov RC, #(0+1)*8
1131 | str TMP2, [BASE, #-16]
1132 | bhs >2 // Not in array part?
1133 | ldr TMP0, [CARG3, CARG2, lsl #3]
1134 |1:
1135 | mov TMP1, #(2+1)*8
1136 | cmp TMP0, TISNIL
1137 | str TMP0, [BASE, #-8]
1138 | csel RC, RC, TMP1, eq // nil value ends the iteration.
1139 | b ->fff_res
1140 |2: // Check for empty hash part first. Otherwise call C function.
1141 | cbz TMP0w, ->fff_res
1142 | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
1143 | // Returns cTValue * or NULL.
1144 | cbz CRET1, ->fff_res
1145 | ldr TMP0, [CRET1]
1146 | b <1
1147 |
 |// ipairs(t): returns ipairs_aux (from upvalue 0), t, 0.
1148 |.ffunc_1 ipairs
1149 | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
1150#if LJ_52
1151 | ldr TAB:CARG2, TAB:TMP1->metatable
1152#endif
1153 | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
1154 | ldr PC, [BASE, FRAME_PC]
1155#if LJ_52
1156 | cbnz TAB:CARG2, ->fff_fallback // 5.2: __ipairs may exist.
1157#endif
1158 | mov RC, #(3+1)*8
1159 | stp CARG1, TISNUM, [BASE, #-8] // Initial index 0 (tagged int).
1160 | str CFUNC:CARG4, [BASE, #-16]
1161 | b ->fff_res
1162 |
1163 |//-- Base library: catch errors ----------------------------------------
1164 |
 |// pcall(f, ...): set up a PCALL frame and dispatch to f. The hook-active
 |// bit is folded into the frame type (FRAME_PCALL/FRAME_PCALLH).
1165 |.ffunc pcall
1166 | ldrb TMP0w, GL->hookmask
1167 | subs NARGS8:RC, NARGS8:RC, #8
1168 | blo ->fff_fallback // Need at least the function argument.
1169 | mov RB, BASE
1170 | add BASE, BASE, #16
1171 | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
1172 | add PC, TMP0, #16+FRAME_PCALL // Frame link + type (+1 if hook active).
1173 | beq ->vm_call_dispatch // No args to shift.
1174 |1:
1175 | add TMP2, BASE, NARGS8:RC
1176 |2: // Shift args down over the consumed slot(s), top to bottom.
1177 | ldr TMP0, [TMP2, #-16]
1178 | str TMP0, [TMP2, #-8]!
1179 | cmp TMP2, BASE
1180 | bne <2
1181 | b ->vm_call_dispatch
1182 |
 |// xpcall(f, h, ...): like pcall, but swaps f and the handler h so the
 |// traceback function sits below the frame.
1183 |.ffunc xpcall
1184 | ldp CARG1, CARG2, [BASE]
1185 | ldrb TMP0w, GL->hookmask
1186 | subs NARGS8:TMP1, NARGS8:RC, #16
1187 | blo ->fff_fallback // Need function + handler.
1188 | mov RB, BASE
1189 | asr ITYPE, CARG2, #47
1190 | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
1191 | cmn ITYPE, #-LJ_TFUNC
1192 | add PC, TMP0, #24+FRAME_PCALL
1193 | bne ->fff_fallback // Traceback must be a function.
1194 | mov NARGS8:RC, NARGS8:TMP1
1195 | add BASE, BASE, #24
1196 | stp CARG2, CARG1, [RB] // Swap function and traceback.
1197 | cbz NARGS8:RC, ->vm_call_dispatch
1198 | b <1
1199 |
1200 |//-- Coroutine library --------------------------------------------------
1201 |
 |// Shared implementation of coroutine.resume (resume=1) and the
 |// coroutine.wrap closure (resume=0). Validates the target coroutine's
 |// state, copies arguments over, runs ->vm_resume, then copies results
 |// back (prepending true/false for resume) or propagates the error.
1202 |.macro coroutine_resume_wrap, resume
1203 |.if resume
1204 |.ffunc_1 coroutine_resume
1205 | checktp CARG1, LJ_TTHREAD, ->fff_fallback
1206 |.else
1207 |.ffunc coroutine_wrap_aux
1208 | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr // Coroutine from upvalue.
1209 | and L:CARG1, CARG1, #LJ_GCVMASK
1210 |.endif
1211 | ldr PC, [BASE, FRAME_PC]
1212 | str BASE, L->base
1213 | ldp RB, CARG2, L:CARG1->base
1214 | ldrb TMP1w, L:CARG1->status
1215 | add TMP0, CARG2, TMP1
1216 | str PC, SAVE_PC
1217 | cmp TMP0, RB // Dead or not-yet-started-without-frame?
1218 | beq ->fff_fallback
1219 | cmp TMP1, #LUA_YIELD
1220 | add TMP0, CARG2, #8
1221 | csel CARG2, CARG2, TMP0, hs // Keep func slot unless suspended.
1222 | ldr CARG4, L:CARG1->maxstack
1223 | add CARG3, CARG2, NARGS8:RC
1224 | ldr RB, L:CARG1->cframe
1225 | ccmp CARG3, CARG4, #2, ls // Stack overflow on target?
1226 | ccmp RB, #0, #2, ls // Still has a C frame?
1227 | bhi ->fff_fallback
1228 |.if resume
1229 | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
1230 | add BASE, BASE, #8
1231 | sub NARGS8:RC, NARGS8:RC, #8
1232 |.endif
1233 | str CARG3, L:CARG1->top
1234 | str BASE, L->top
1235 | cbz NARGS8:RC, >3
1236 |2: // Move args to coroutine.
1237 | ldr TMP0, [BASE, RB]
1238 | cmp RB, NARGS8:RC
1239 | str TMP0, [CARG2, RB]
1240 | add RB, RB, #8
1241 | bne <2
1242 |3:
1243 | mov CARG3, #0
1244 | mov L:RA, L:CARG1
1245 | mov CARG4, #0
1246 | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
1247 | // Returns thread status.
1248 |4:
1249 | ldp CARG3, CARG4, L:RA->base
1250 | cmp CRET1, #LUA_YIELD
1251 | ldr BASE, L->base
1252 | str L, GL->cur_L
1253 | st_vmstate ST_INTERP
1254 | bhi >8 // Status > LUA_YIELD: error.
1255 | sub RC, CARG4, CARG3 // nresults*8 on the coroutine stack.
1256 | ldr CARG1, L->maxstack
1257 | add CARG2, BASE, RC
1258 | cbz RC, >6 // No results?
1259 | cmp CARG2, CARG1
1260 | mov RB, #0
1261 | bhi >9 // Need to grow stack?
1262 |
1263 | sub CARG4, RC, #8
1264 | str CARG3, L:RA->top // Clear coroutine stack.
1265 |5: // Move results from coroutine.
1266 | ldr TMP0, [CARG3, RB]
1267 | cmp RB, CARG4
1268 | str TMP0, [BASE, RB]
1269 | add RB, RB, #8
1270 | bne <5
1271 |6:
1272 |.if resume
1273 | mov_true TMP1
1274 | add RC, RC, #16
1275 |7:
1276 | str TMP1, [BASE, #-8] // Prepend true/false to results.
1277 | sub RA, BASE, #8
1278 |.else
1279 | mov RA, BASE
1280 | add RC, RC, #8
1281 |.endif
1282 | ands CARG1, PC, #FRAME_TYPE
1283 | str PC, SAVE_PC
1284 | str RCw, SAVE_MULTRES
1285 | beq ->BC_RET_Z
1286 | b ->vm_return
1287 |
1288 |8: // Coroutine returned with error (at co->top-1).
1289 |.if resume
1290 | ldr TMP0, [CARG4, #-8]!
1291 | mov_false TMP1 // resume: return false, errmsg.
1292 | mov RC, #(2+1)*8
1293 | str CARG4, L:RA->top // Remove error from coroutine stack.
1294 | str TMP0, [BASE] // Copy error message.
1295 | b <7
1296 |.else
1297 | mov CARG1, L
1298 | mov CARG2, L:RA
1299 | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
1300 | // Never returns.
1301 |.endif
1302 |
1303 |9: // Handle stack expansion on return from yield.
1304 | mov CARG1, L
1305 | lsr CARG2, RC, #3
1306 | bl extern lj_state_growstack // (lua_State *L, int n)
1307 | mov CRET1, #0 // Retry result copy as status OK.
1308 | b <4
1309 |.endmacro
1310 |
1311 | coroutine_resume_wrap 1 // coroutine.resume
1312 | coroutine_resume_wrap 0 // coroutine.wrap
1313 |
 |// coroutine.yield: only valid when resumed from a C frame flagged as
 |// resumable (low bit of cframe); unwinds back to the resumer.
1314 |.ffunc coroutine_yield
1315 | ldr TMP0, L->cframe
1316 | add TMP1, BASE, NARGS8:RC
1317 | mov CRET1, #LUA_YIELD
1318 | stp BASE, TMP1, L->base
1319 | tbz TMP0, #0, ->fff_fallback // Not a resumable C frame.
1320 | str xzr, L->cframe
1321 | strb CRET1w, L->status
1322 | b ->vm_leave_unw
1323 |
1324 |//-- Math library -------------------------------------------------------
1325 |
 |// math.floor/ceil: integers pass through unchanged; doubles are rounded
 |// with the given FP rounding instruction (frintm/frintp).
1326 |.macro math_round, func, round
1327 | .ffunc math_ .. func
1328 | ldr CARG1, [BASE]
1329 | cmp NARGS8:RC, #8
1330 | ldr d0, [BASE]
1331 | blo ->fff_fallback
1332 | cmp TISNUMhi, CARG1, lsr #32 // Integer tag?
1333 | beq ->fff_restv // Integers round to themselves.
1334 | blo ->fff_fallback // Not a number at all.
1335 | round d0, d0
1336 | b ->fff_resn
1337 |.endmacro
1338 |
1339 | math_round floor, frintm
1340 | math_round ceil, frintp
1341 |
 |// math.abs: doubles clear the sign bit; integers negate if negative,
 |// with INT_MIN overflowing to the double 2^31.
1342 |.ffunc_1 math_abs
1343 | checknumber CARG1, ->fff_fallback
1344 | and CARG1, CARG1, #U64x(7fffffff,ffffffff) // Clear sign bit.
1345 | bne ->fff_restv // Was a double: done.
1346 | eor CARG2w, CARG1w, CARG1w, asr #31
1347 | movz CARG3, #0x41e0, lsl #48 // 2^31.
1348 | subs CARG1w, CARG2w, CARG1w, asr #31 // Branchless abs of int.
1349 | add CARG1, CARG1, TISNUM
1350 | csel CARG1, CARG1, CARG3, pl // INT_MIN overflow -> 2^31 double.
1351 | // Fallthrough.
1352 |
 |// Common fast-function returns: fff_restv stores one TValue result,
 |// fff_res returns RC/8-1 results starting at BASE-16, filling up with
 |// nil if the caller expects more.
1353 |->fff_restv:
1354 | // CARG1 = TValue result.
1355 | ldr PC, [BASE, FRAME_PC]
1356 | str CARG1, [BASE, #-16]
1357 |->fff_res1:
1358 | // PC = return.
1359 | mov RC, #(1+1)*8
1360 |->fff_res:
1361 | // RC = (nresults+1)*8, PC = return.
1362 | ands CARG1, PC, #FRAME_TYPE
1363 | str RCw, SAVE_MULTRES
1364 | sub RA, BASE, #16
1365 | bne ->vm_return // Non-Lua frame: generic return.
1366 | ldr INSw, [PC, #-4]
1367 | decode_RB RB, INS
1368 |5:
1369 | cmp RC, RB, lsl #3 // More results expected?
1370 | blo >6
1371 | decode_RA TMP1, INS
1372 | // Adjust BASE. KBASE is assumed to be set for the calling frame.
1373 | sub BASE, RA, TMP1, lsl #3
1374 | ins_next
1375 |
1376 |6: // Fill up results with nil.
1377 | add TMP1, RA, RC
1378 | add RC, RC, #8
1379 | str TISNIL, [TMP1, #-8]
1380 | b <5
1381 |
 |// Wrappers dispatching math functions to the C library: _n takes one
 |// double in d0, _nn takes two in d0/d1; both return the double result.
1382 |.macro math_extern, func
1383 | .ffunc_n math_ .. func
1384 | bl extern func
1385 | b ->fff_resn
1386 |.endmacro
1387 |
1388 |.macro math_extern2, func
1389 | .ffunc_nn math_ .. func
1390 | bl extern func
1391 | b ->fff_resn
1392 |.endmacro
1393 |
 |// math.sqrt inlined; fff_resn is the common double-result return.
1394 |.ffunc_n math_sqrt
1395 | fsqrt d0, d0
1396 |->fff_resn:
1397 | ldr PC, [BASE, FRAME_PC]
1398 | str d0, [BASE, #-16]
1399 | b ->fff_res1
1400 |
 |// math.log handled separately: a 2nd (base) argument needs the fallback.
1401 |.ffunc math_log
1402 | ldr CARG1, [BASE]
1403 | cmp NARGS8:RC, #8
1404 | ldr FARG1, [BASE]
1405 | bne ->fff_fallback // Need exactly 1 argument.
1406 | checknum CARG1, ->fff_fallback
1407 | bl extern log
1408 | b ->fff_resn
1409 |
1410 | math_extern log10
1411 | math_extern exp
1412 | math_extern sin
1413 | math_extern cos
1414 | math_extern tan
1415 | math_extern asin
1416 | math_extern acos
1417 | math_extern atan
1418 | math_extern sinh
1419 | math_extern cosh
1420 | math_extern tanh
1421 | math_extern2 pow
1422 | math_extern2 atan2
1423 | math_extern2 fmod
1424 |
 |// math.ldexp(x, n): x in FARG1, integer exponent widened to CARG1.
1425 |.ffunc_2 math_ldexp
1426 | ldr FARG1, [BASE]
1427 | checknum CARG1, ->fff_fallback
1428 | checkint CARG2, ->fff_fallback
1429 | sxtw CARG1, CARG2w
1430 | bl extern ldexp // (double x, int exp)
1431 | b ->fff_resn
1432 |
 |// math.frexp: returns mantissa (double) and exponent (int via TMPD).
1433 |.ffunc_n math_frexp
1434 | add CARG1, sp, TMPDofs
1435 | bl extern frexp
1436 | ldr CARG2w, TMPD
1437 | ldr PC, [BASE, FRAME_PC]
1438 | str d0, [BASE, #-16]
1439 | mov RC, #(2+1)*8
1440 | add CARG2, CARG2, TISNUM // Tag exponent as integer TValue.
1441 | str CARG2, [BASE, #-8]
1442 | b ->fff_res
1443 |
 |// math.modf: integral part written directly to the first result slot.
1444 |.ffunc_n math_modf
1445 | sub CARG1, BASE, #16
1446 | ldr PC, [BASE, FRAME_PC]
1447 | bl extern modf
1448 | mov RC, #(2+1)*8
1449 | str d0, [BASE, #-8] // Fractional part as second result.
1450 | b ->fff_res
1451 |
 |// math.min/math.max over all arguments. Stays in the integer loop while
 |// all values are integers; converts and switches to the FP loop on the
 |// first double. cond/fcond select the losing operand (gt/pl for min,
 |// lt/le for max).
1452 |.macro math_minmax, name, cond, fcond
1453 | .ffunc_1 name
1454 | add RB, BASE, RC // RB = argument end.
1455 | add RA, BASE, #8 // RA = next argument slot.
1456 | checkint CARG1, >4
1457 |1: // Handle integers.
1458 | ldr CARG2, [RA]
1459 | cmp RA, RB
1460 | bhs ->fff_restv // All args consumed: return accumulator.
1461 | checkint CARG2, >3
1462 | cmp CARG1w, CARG2w
1463 | add RA, RA, #8
1464 | csel CARG1, CARG2, CARG1, cond
1465 | b <1
1466 |3: // Convert intermediate result to number and continue below.
1467 | scvtf d0, CARG1w
1468 | blo ->fff_fallback // Next arg wasn't a number either.
1469 | ldr d1, [RA]
1470 | b >6
1471 |
1472 |4: // First arg is not an integer.
1473 | ldr d0, [BASE]
1474 | blo ->fff_fallback // Not a number at all.
1475 |5: // Handle numbers.
1476 | ldr CARG2, [RA]
1477 | ldr d1, [RA]
1478 | cmp RA, RB
1479 | bhs ->fff_resn // All args consumed: return d0.
1480 | checknum CARG2, >7
1481 |6:
1482 | fcmp d0, d1
1483 | add RA, RA, #8
1484 | fcsel d0, d1, d0, fcond
1485 | b <5
1486 |7: // Convert integer to number and continue above.
1487 | scvtf d1, CARG2w
1488 | blo ->fff_fallback
1489 | b <6
1490 |.endmacro
1491 |
1492 | math_minmax math_min, gt, pl
1493 | math_minmax math_max, lt, le
1494 |
1495 |//-- String library -----------------------------------------------------
1496 |
 |// string.byte(s): 1-arg case only; returns byte 1 or nothing for "".
1497 |.ffunc string_byte // Only handle the 1-arg case here.
1498 | ldp PC, CARG1, [BASE, FRAME_PC]
1499 | cmp NARGS8:RC, #8
1500 | asr ITYPE, CARG1, #47
1501 | ccmn ITYPE, #-LJ_TSTR, #0, eq // 1 arg AND it is a string?
1502 | and STR:CARG1, CARG1, #LJ_GCVMASK
1503 | bne ->fff_fallback
1504 | ldrb TMP0w, STR:CARG1[1] // Access is always ok (NUL at end).
1505 | ldr CARG3w, STR:CARG1->len
1506 | add TMP0, TMP0, TISNUM // Tag byte as integer TValue.
1507 | str TMP0, [BASE, #-16]
1508 | mov RC, #(0+1)*8
1509 | cbz CARG3, ->fff_res // Empty string: return no results.
1510 | b ->fff_res1
1511 |
 |// string.char(c): 1-arg case; builds the 1-byte string in place from
 |// the low byte of the boxed integer on the stack.
1512 |.ffunc string_char // Only handle the 1-arg case here.
1513 | ffgccheck // String interning allocates.
1514 | ldp PC, CARG1, [BASE, FRAME_PC]
1515 | cmp CARG1w, #255
1516 | ccmp NARGS8:RC, #8, #0, ls // Need exactly 1 argument.
1517 | bne ->fff_fallback
1518 | checkint CARG1, ->fff_fallback
1519 | mov CARG3, #1
1520 | // Point to the char inside the integer in the stack slot.
1521 |.if ENDIAN_LE
1522 | mov CARG2, BASE
1523 |.else
1524 | add CARG2, BASE, #7
1525 |.endif
 |// Common tails: intern a (str,len) buffer, then return the tagged GCstr.
1526 |->fff_newstr:
1527 | // CARG2 = str, CARG3 = len.
1528 | str BASE, L->base
1529 | mov CARG1, L
1530 | str PC, SAVE_PC
1531 | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
1532 |->fff_resstr:
1533 | // Returns GCstr *.
1534 | ldr BASE, L->base // Reload: allocation may have moved the stack.
1535 | movn TMP1, #~LJ_TSTR
1536 | add CARG1, CARG1, TMP1, lsl #47 // Tag result as string TValue.
1537 | b ->fff_restv
1538 |
 |// string.sub(s, i [,j]): clamps negative/out-of-range indices per the
 |// Lua semantics, then interns the substring (or returns "" if empty).
1539 |.ffunc string_sub
1540 | ffgccheck
1541 | ldr CARG1, [BASE]
1542 | ldr CARG3, [BASE, #16]
1543 | cmp NARGS8:RC, #16
1544 | movn RB, #0 // Default end = -1.
1545 | beq >1
1546 | blo ->fff_fallback
1547 | checkint CARG3, ->fff_fallback
1548 | sxtw RB, CARG3w
1549 |1:
1550 | ldr CARG2, [BASE, #8]
1551 | checkstr CARG1, ->fff_fallback
1552 | ldr TMP1w, STR:CARG1->len
1553 | checkint CARG2, ->fff_fallback
1554 | sxtw CARG2, CARG2w
1555 | // CARG1 = str, TMP1 = str->len, CARG2 = start, RB = end
1556 | add TMP2, RB, TMP1
1557 | cmp RB, #0
1558 | add TMP0, CARG2, TMP1
1559 | csinc RB, RB, TMP2, ge // if (end < 0) end += len+1
1560 | cmp CARG2, #0
1561 | csinc CARG2, CARG2, TMP0, ge // if (start < 0) start += len+1
1562 | cmp RB, #0
1563 | csel RB, RB, xzr, ge // if (end < 0) end = 0
1564 | cmp CARG2, #1
1565 | csinc CARG2, CARG2, xzr, ge // if (start < 1) start = 1
1566 | cmp RB, TMP1
1567 | csel RB, RB, TMP1, le // if (end > len) end = len
1568 | add CARG1, STR:CARG1, #sizeof(GCstr)-1 // Data starts after header.
1569 | subs CARG3, RB, CARG2 // len = end - start
1570 | add CARG2, CARG1, CARG2
1571 | add CARG3, CARG3, #1 // len += 1
1572 | bge ->fff_newstr
1573 | add STR:CARG1, GL, #offsetof(global_State, strempty) // Empty result.
1574 | movn TMP1, #~LJ_TSTR
1575 | add CARG1, CARG1, TMP1, lsl #47
1576 | b ->fff_restv
1577 |
 |// string.reverse/lower/upper: run the lj_buf_putstr_* transform into
 |// the global temp buffer, then intern the buffer contents.
1578 |.macro ffstring_op, name
1579 | .ffunc string_ .. name
1580 | ffgccheck
1581 | ldr CARG2, [BASE]
1582 | cmp NARGS8:RC, #8
1583 | asr ITYPE, CARG2, #47
1584 | ccmn ITYPE, #-LJ_TSTR, #0, hs
1585 | and STR:CARG2, CARG2, #LJ_GCVMASK
1586 | bne ->fff_fallback
1587 | ldr TMP0, GL->tmpbuf.b
1588 | add SBUF:CARG1, GL, #offsetof(global_State, tmpbuf)
1589 | str BASE, L->base
1590 | str PC, SAVE_PC
1591 | str L, GL->tmpbuf.L
1592 | str TMP0, GL->tmpbuf.p // Reset buffer write pointer.
1593 | bl extern lj_buf_putstr_ .. name
1594 | bl extern lj_buf_tostr
1595 | b ->fff_resstr
1596 |.endmacro
1597 |
1598 |ffstring_op reverse
1599 |ffstring_op lower
1600 |ffstring_op upper
1601 |
1602 |//-- Bit library --------------------------------------------------------
1603 |
1604 |// FP number to bit conversion for soft-float. Clobbers CARG1-CARG3
 |// Manual double -> int32 truncation used as the fallback when a bit-op
 |// argument is a double (jumped to with lr pointing back into the op).
1605 |->vm_tobit_fb:
1606 | bls ->fff_fallback // Not a number at all.
1607 | add CARG2, CARG1, CARG1 // Shift out sign bit.
1608 | mov CARG3, #1076
1609 | sub CARG3, CARG3, CARG2, lsr #53 // Shift count from exponent.
1610 | cmp CARG3, #53
1611 | bhi >1 // Magnitude < 1: result is 0.
1612 | and CARG2, CARG2, #U64x(001fffff,ffffffff)
1613 | orr CARG2, CARG2, #U64x(00200000,00000000) // Add implicit 1 bit.
1614 | cmp CARG1, #0
1615 | lsr CARG2, CARG2, CARG3
1616 | cneg CARG1w, CARG2w, mi // Apply sign.
1617 | br lr
1618 |1:
1619 | mov CARG1w, #0
1620 | br lr
1621 |
 |// One-arg bit op entry: coerce the argument to int32 in CARG1w.
1622 |.macro .ffunc_bit, name
1623 | .ffunc_1 bit_..name
1624 | adr lr, >1 // Return here from vm_tobit_fb.
1625 | checkint CARG1, ->vm_tobit_fb
1626 |1:
1627 |.endmacro
1628 |
 |// Variadic bitwise fold (band/bor/bxor): accumulate in TMP0w over all
 |// arguments, coercing each via vm_tobit_fb as needed.
1629 |.macro .ffunc_bit_op, name, ins
1630 | .ffunc_bit name
1631 | mov RA, #8
1632 | mov TMP0w, CARG1w
1633 | adr lr, >2
1634 |1:
1635 | ldr CARG1, [BASE, RA]
1636 | cmp RA, NARGS8:RC
1637 | add RA, RA, #8
1638 | bge >9 // All args consumed: return result.
1639 | checkint CARG1, ->vm_tobit_fb
1640 |2:
1641 | ins TMP0w, TMP0w, CARG1w
1642 | b <1
1643 |.endmacro
1644 |
1645 |.ffunc_bit_op band, and
1646 |.ffunc_bit_op bor, orr
1647 |.ffunc_bit_op bxor, eor
1648 |
1649 |.ffunc_bit tobit
1650 | mov TMP0w, CARG1w
1651 |9: // Label reused by .ffunc_bit_op users.
1652 | add CARG1, TMP0, TISNUM // Tag result as integer TValue.
1653 | b ->fff_restv
1654 |
1655 |.ffunc_bit bswap
1656 | rev TMP0w, CARG1w
1657 | add CARG1, TMP0, TISNUM
1658 | b ->fff_restv
1659 |
1660 |.ffunc_bit bnot
1661 | mvn TMP0w, CARG1w
1662 | add CARG1, TMP0, TISNUM
1663 | b ->fff_restv
1664 |
 |// Shifts/rotates: coerce value and shift count, then apply ins.
 |// shmod == 1 negates the count (rol implemented as ror by -n).
1665 |.macro .ffunc_bit_sh, name, ins, shmod
1666 | .ffunc bit_..name
1667 | ldp TMP0, CARG1, [BASE]
1668 | cmp NARGS8:RC, #16
1669 | blo ->fff_fallback
1670 | adr lr, >1
1671 | checkint CARG1, ->vm_tobit_fb // Coerce shift count first.
1672 |1:
1673 |.if shmod == 0
1674 | mov TMP1, CARG1
1675 |.else
1676 | neg TMP1, CARG1
1677 |.endif
1678 | mov CARG1, TMP0
1679 | adr lr, >2
1680 | checkint CARG1, ->vm_tobit_fb // Then coerce the value.
1681 |2:
1682 | ins TMP0w, CARG1w, TMP1w
1683 | add CARG1, TMP0, TISNUM
1684 | b ->fff_restv
1685 |.endmacro
1686 |
1687 |.ffunc_bit_sh lshift, lsl, 0
1688 |.ffunc_bit_sh rshift, lsr, 0
1689 |.ffunc_bit_sh arshift, asr, 0
1690 |.ffunc_bit_sh rol, ror, 1
1691 |.ffunc_bit_sh ror, ror, 0
1692 |
1693 |//-----------------------------------------------------------------------
1694 |
 |// Generic fast-function fallback: grow the stack if needed, then call
 |// the C implementation. Its return value selects: results ready (>0),
 |// retry fast path (0), or tailcall pending (-1).
1695 |->fff_fallback: // Call fast function fallback handler.
1696 | // BASE = new base, RC = nargs*8
1697 | ldp CFUNC:CARG3, PC, [BASE, FRAME_FUNC] // Fallback may overwrite PC.
1698 | ldr TMP2, L->maxstack
1699 | add TMP1, BASE, NARGS8:RC
1700 | stp BASE, TMP1, L->base
1701 | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
1702 | add TMP1, TMP1, #8*LUA_MINSTACK // Ensure C handler has headroom.
1703 | ldr CARG3, CFUNC:CARG3->f
1704 | str PC, SAVE_PC // Redundant (but a defined value).
1705 | cmp TMP1, TMP2
1706 | mov CARG1, L
1707 | bhi >5 // Need to grow stack.
1708 | blr CARG3 // (lua_State *L)
1709 | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
1710 | ldr BASE, L->base
1711 | cmp CRET1w, #0
1712 | lsl RC, CRET1, #3
1713 | sub RA, BASE, #16
1714 | bgt ->fff_res // Returned nresults+1?
1715 |1: // Returned 0 or -1: retry fast path.
1716 | ldr CARG1, L->top
1717 | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
1718 | sub NARGS8:RC, CARG1, BASE
1719 | bne ->vm_call_tail // Returned -1?
1720 | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
1721 | ins_callt // Returned 0: retry fast path.
1722 |
1723 |// Reconstruct previous base for vmeta_call during tailcall.
1724 |->vm_call_tail:
1725 | ands TMP0, PC, #FRAME_TYPE
1726 | and TMP1, PC, #~FRAME_TYPEP
1727 | bne >3 // Non-Lua frame: delta is in PC itself.
1728 | ldrb RAw, [PC, #-4+OFS_RA] // Lua frame: delta from caller's RA.
1729 | lsl RA, RA, #3
1730 | add TMP1, RA, #16
1731 |3:
1732 | sub RB, BASE, TMP1
1733 | b ->vm_call_dispatch // Resolve again for tailcall.
1734 |
1735 |5: // Grow stack for fallback handler.
1736 | mov CARG2, #LUA_MINSTACK
1737 | bl extern lj_state_growstack // (lua_State *L, int n)
1738 | ldr BASE, L->base
1739 | cmp CARG1, CARG1 // Set zero-flag to force retry.
1740 | b <1
1741 |
 |// Out-of-line GC step used by ffgccheck; preserves the caller's lr in RA
 |// and recomputes BASE/NARGS8:RC/CARG3, which the GC may have changed.
1742 |->fff_gcstep: // Call GC step function.
1743 | // BASE = new base, RC = nargs*8
1744 | add CARG2, BASE, NARGS8:RC // Calculate L->top.
1745 | mov RA, lr
1746 | stp BASE, CARG2, L->base
1747 | str PC, SAVE_PC // Redundant (but a defined value).
1748 | mov CARG1, L
1749 | bl extern lj_gc_step // (lua_State *L)
1750 | ldp BASE, CARG2, L->base
1751 | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
1752 | mov lr, RA // Help return address predictor.
1753 | sub NARGS8:RC, CARG2, BASE // Calculate nargs*8.
1754 | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
1755 | ret
1756 |
1757 |//-----------------------------------------------------------------------
1758 |//-- Special dispatch targets -------------------------------------------
1759 |//-----------------------------------------------------------------------
1760 |
/* vm_record/vm_rethook/vm_inshook/cont_hook share the local labels 1..5,
** so they must stay together in this order.  Without JIT, ->vm_record's
** body is empty and the label falls through to ->vm_rethook. */
1761 |->vm_record: // Dispatch target for recording phase.
1762 |.if JIT
1763 | ldrb CARG1w, GL->hookmask
1764 | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
1765 | bne >5
1766 | // Decrement the hookcount for consistency, but always do the call.
1767 | ldr CARG2w, GL->hookcount
1768 | tst CARG1, #HOOK_ACTIVE
1769 | bne >1
1770 | sub CARG2w, CARG2w, #1
1771 | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
1772 | beq >1
1773 | str CARG2w, GL->hookcount
1774 | b >1
1775 |.endif
1776 |
1777 |->vm_rethook: // Dispatch target for return hooks.
1778 | ldrb TMP2w, GL->hookmask
1779 | tbz TMP2w, #HOOK_ACTIVE_SHIFT, >1 // Hook already active?
1780 |5: // Re-dispatch to static ins.
1781 | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
1782 | br TMP0
1783 |
1784 |->vm_inshook: // Dispatch target for instr/line hooks.
1785 | ldrb TMP2w, GL->hookmask
1786 | ldr TMP3w, GL->hookcount
1787 | tbnz TMP2w, #HOOK_ACTIVE_SHIFT, <5 // Hook already active?
1788 | tst TMP2w, #LUA_MASKLINE|LUA_MASKCOUNT
1789 | beq <5
1790 | sub TMP3w, TMP3w, #1
1791 | str TMP3w, GL->hookcount
1792 | cbz TMP3w, >1
1793 | tbz TMP2w, #LUA_HOOKLINE, <5
1794 |1:
1795 | mov CARG1, L
1796 | str BASE, L->base
1797 | mov CARG2, PC
1798 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
1799 | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
1800 |3:
1801 | ldr BASE, L->base
1802 |4: // Re-dispatch to static ins.
/* Re-decode the instruction at PC-4 and jump to its *static* handler. */
1803 | ldr INSw, [PC, #-4]
1804 | add TMP1, GL, INS, uxtb #3
1805 | decode_RA RA, INS
1806 | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
1807 | decode_RD RC, INS
1808 | br TMP0
1809 |
1810 |->cont_hook: // Continue from hook yield.
1811 | ldr CARG1, [CARG4, #-40]
1812 | add PC, PC, #4
1813 | str CARG1w, SAVE_MULTRES // Restore MULTRES for *M ins.
1814 | b <4
1815 |
1816 |->vm_hotloop: // Hot loop counter underflow.
1817 |.if JIT
1818 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
1819 | add CARG1, GL, #GG_G2DISP+GG_DISP2J
1820 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
1821 | str PC, SAVE_PC
1822 | ldr CARG3, LFUNC:CARG3->pc
1823 | mov CARG2, PC
1824 | str L, [GL, #GL_J(L)]
/* Set L->top from the prototype's frame size before starting the trace. */
1825 | ldrb CARG3w, [CARG3, #PC2PROTO(framesize)]
1826 | str BASE, L->base
1827 | add CARG3, BASE, CARG3, lsl #3
1828 | str CARG3, L->top
1829 | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
1830 | b <3
1831 |.endif
1832 |
/* With JIT, vm_callhook branches into vm_hotcall's shared label 1; without
** JIT it simply falls through to the common tail below. */
1833 |->vm_callhook: // Dispatch target for call hooks.
1834 | mov CARG2, PC
1835 |.if JIT
1836 | b >1
1837 |.endif
1838 |
1839 |->vm_hotcall: // Hot call counter underflow.
1840 |.if JIT
1841 | orr CARG2, PC, #1 // Hot-call marker bit for lj_dispatch_call.
1842 |1:
1843 |.endif
1844 | add TMP1, BASE, NARGS8:RC
1845 | str PC, SAVE_PC
1846 | mov CARG1, L
/* RA is saved as an offset from BASE, since BASE may move in the callee. */
1847 | sub RA, RA, BASE
1848 | stp BASE, TMP1, L->base
1849 | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
1850 | // Returns ASMFunction.
1851 | ldp BASE, TMP1, L->base
1852 | str xzr, SAVE_PC // Invalidate for subsequent line hook.
1853 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
1854 | add RA, BASE, RA
1855 | sub NARGS8:RC, TMP1, BASE
1856 | ldr INSw, [PC, #-4]
1857 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
1858 | br CRET1
1859 |
1860 |->cont_stitch: // Trace stitching.
1861 |.if JIT
1862 | // RA = resultptr, CARG4 = meta base
1863 | ldr RBw, SAVE_MULTRES
1864 | ldr INSw, [PC, #-4]
1865 | ldr TRACE:CARG3, [CARG4, #-40] // Save previous trace.
1866 | subs RB, RB, #8
1867 | decode_RA RC, INS // Call base.
1868 | and CARG3, CARG3, #LJ_GCVMASK
1869 | beq >2
/* Copy RB/8 results from RA down to BASE[RC..], advancing both. */
1870 |1: // Move results down.
1871 | ldr CARG1, [RA]
1872 | add RA, RA, #8
1873 | subs RB, RB, #8
1874 | str CARG1, [BASE, RC, lsl #3]
1875 | add RC, RC, #1
1876 | bne <1
1877 |2:
1878 | decode_RA RA, INS
1879 | decode_RB RB, INS
1880 | add RA, RA, RB
1881 |3:
1882 | cmp RA, RC
1883 | bhi >9 // More results wanted?
1884 |
/* link == traceno means the trace is blacklisted; link != 0 means a
** stitched trace already exists and we jump straight into it. */
1885 | ldrh RAw, TRACE:CARG3->traceno
1886 | ldrh RCw, TRACE:CARG3->link
1887 | cmp RCw, RAw
1888 | beq ->cont_nop // Blacklisted.
1889 | cmp RCw, #0
1890 | bne =>BC_JLOOP // Jump to stitched trace.
1891 |
1892 | // Stitch a new trace to the previous trace.
1893 | mov CARG1, #GL_J(exitno)
1894 | str RAw, [GL, CARG1]
1895 | mov CARG1, #GL_J(L)
1896 | str L, [GL, CARG1]
1897 | str BASE, L->base
1898 | add CARG1, GL, #GG_G2J
1899 | mov CARG2, PC
1900 | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
1901 | ldr BASE, L->base
1902 | b ->cont_nop
1903 |
1904 |9: // Fill up results with nil.
1905 | str TISNIL, [BASE, RC, lsl #3]
1906 | add RC, RC, #1
1907 | b <3
1908 |.endif
1909 |
1910 |->vm_profhook: // Dispatch target for profiler hook.
/* Body only emitted when profiling support is compiled in. */
1911#if LJ_HASPROFILE
1912 | mov CARG1, L
1913 | str BASE, L->base
1914 | mov CARG2, PC
1915 | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
1916 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
/* Back up PC by one instruction so the interrupted ins re-executes. */
1917 | ldr BASE, L->base
1918 | sub PC, PC, #4
1919 | b ->cont_nop
1920#endif
1921 |
1922 |//-----------------------------------------------------------------------
1923 |//-- Trace exit handler -------------------------------------------------
1924 |//-----------------------------------------------------------------------
1925 |
/* Save FPR pair d<a>,d<b> in the low half of the exit frame and GPR pair
** x<a>,x<b> in the high half (frame laid out as 32 FPRs then 32 GPRs). */
1926 |.macro savex_, a, b
1927 | stp d..a, d..b, [sp, #a*8]
1928 | stp x..a, x..b, [sp, #32*8+a*8]
1929 |.endmacro
1930 |
1931 |->vm_exit_handler:
1932 |.if JIT
/* Spill the entire register state (32 FPRs + 32 GPRs) into an ExitState
** frame on the stack, then hand it to lj_trace_exit. */
1933 | sub sp, sp, #(64*8)
1934 | savex_, 0, 1
1935 | savex_, 2, 3
1936 | savex_, 4, 5
1937 | savex_, 6, 7
1938 | savex_, 8, 9
1939 | savex_, 10, 11
1940 | savex_, 12, 13
1941 | savex_, 14, 15
1942 | savex_, 16, 17
1943 | savex_, 18, 19
1944 | savex_, 20, 21
1945 | savex_, 22, 23
1946 | savex_, 24, 25
1947 | savex_, 26, 27
1948 | savex_, 28, 29
1949 | stp d30, d31, [sp, #30*8] // x30 (lr) is handled specially below.
1950 | ldr CARG1, [sp, #64*8] // Load original value of lr.
1951 | add CARG3, sp, #64*8 // Recompute original value of sp.
1952 | mv_vmstate CARG4w, EXIT
1953 | stp xzr, CARG3, [sp, #62*8] // Store 0/sp in RID_LR/RID_SP.
/* Exit number = ((orig_lr - lr) >> 2) - 2, i.e. the index of the exit
** stub this handler was called from. */
1954 | sub CARG1, CARG1, lr
1955 | ldr L, GL->cur_L
1956 | lsr CARG1, CARG1, #2
1957 | ldr BASE, GL->jit_base
1958 | sub CARG1, CARG1, #2
1959 | ldr CARG2w, [lr] // Load trace number.
1960 | st_vmstate CARG4w
1961 |.if ENDIAN_BE
1962 | rev32 CARG2, CARG2
1963 |.endif
1964 | str BASE, L->base
1965 | ubfx CARG2w, CARG2w, #5, #16
1966 | str CARG1w, [GL, #GL_J(exitno)]
1967 | str CARG2w, [GL, #GL_J(parent)]
1968 | str L, [GL, #GL_J(L)]
1969 | str xzr, GL->jit_base
1970 | add CARG1, GL, #GG_G2J
1971 | mov CARG2, sp
1972 | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
1973 | // Returns MULTRES (unscaled) or negated error code.
1974 | ldr CARG2, L->cframe
1975 | ldr BASE, L->base
1976 | and sp, CARG2, #CFRAME_RAWMASK
1977 | ldr PC, SAVE_PC // Get SAVE_PC.
1978 | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
1979 | b >1
1980 |.endif
1981 |
1982 |->vm_exit_interp:
1983 | // CARG1 = MULTRES or negated error code, BASE, PC and GL set.
1984 |.if JIT
1985 | ldr L, SAVE_L
1986 |1:
1987 | cmp CARG1w, #0
1988 | blt >9 // Check for error from exit.
1989 | lsl RC, CARG1, #3
1990 | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
/* Re-materialize the constant type registers the trace may have clobbered. */
1991 | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
1992 | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
1993 | movn TISNIL, #0
1994 | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
1995 | str RCw, SAVE_MULTRES
1996 | str BASE, L->base
1997 | ldr CARG2, LFUNC:CARG2->pc
1998 | str xzr, GL->jit_base
1999 | mv_vmstate CARG4w, INTERP
2000 | ldr KBASE, [CARG2, #PC2PROTO(k)]
2001 | // Modified copy of ins_next which handles function header dispatch, too.
2002 | ldrb RBw, [PC, # OFS_OP]
2003 | ldr INSw, [PC], #4
2004 | st_vmstate CARG4w
2005 | cmp RBw, #BC_FUNCC+2 // Fast function?
2006 | add TMP1, GL, INS, uxtb #3
2007 | bhs >4
2008 |2:
2009 | cmp RBw, #BC_FUNCF // Function header?
2010 | add TMP0, GL, RB, uxtb #3
2011 | ldr RB, [TMP0, #GG_G2DISP]
2012 | decode_RA RA, INS
2013 | lsr TMP0, INS, #16
2014 | csel RC, TMP0, RC, lo
2015 | blo >5
2016 | ldr CARG3, [BASE, FRAME_FUNC]
2017 | sub RC, RC, #8
2018 | add RA, BASE, RA, lsl #3 // Yes: RA = BASE+framesize*8, RC = nargs*8
2019 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
2020 |5:
2021 | br RB
2022 |
2023 |4: // Check frame below fast function.
2024 | ldr CARG1, [BASE, FRAME_PC]
2025 | ands CARG2, CARG1, #FRAME_TYPE
2026 | bne <2 // Trace stitching continuation?
2027 | // Otherwise set KBASE for Lua function below fast function.
/* Walk one frame down via the caller's RA operand to find its LFUNC. */
2028 | ldr CARG3w, [CARG1, #-4]
2029 | decode_RA CARG1, CARG3
2030 | sub CARG2, BASE, CARG1, lsl #3
2031 | ldr LFUNC:CARG3, [CARG2, #-32]
2032 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
2033 | ldr CARG3, LFUNC:CARG3->pc
2034 | ldr KBASE, [CARG3, #PC2PROTO(k)]
2035 | b <2
2036 |
2037 |9: // Rethrow error from the right C frame.
2038 | mov CARG1, L
2039 | bl extern lj_err_run // (lua_State *L)
2040 |.endif
2041 |
2042 |//-----------------------------------------------------------------------
2043 |//-- Math helper functions ----------------------------------------------
2044 |//-----------------------------------------------------------------------
2045 |
2046 | // int lj_vm_modi(int dividend, int divisor);
/* Branch-free integer modulo: take absolute values (eor/sub with asr #31),
** compute the unsigned remainder with udiv+msub, then the ccmp/csel/cneg
** sequence adjusts sign so the result follows floored-modulo semantics
** (result takes the sign of the divisor when nonzero). */
2047 |->vm_modi:
2048 | eor CARG4w, CARG1w, CARG2w
2049 | cmp CARG4w, #0 // Remember if signs differ.
2050 | eor CARG3w, CARG1w, CARG1w, asr #31
2051 | eor CARG4w, CARG2w, CARG2w, asr #31
2052 | sub CARG3w, CARG3w, CARG1w, asr #31 // CARG3w = abs(dividend).
2053 | sub CARG4w, CARG4w, CARG2w, asr #31 // CARG4w = abs(divisor).
2054 | udiv CARG1w, CARG3w, CARG4w
2055 | msub CARG1w, CARG1w, CARG4w, CARG3w // Unsigned remainder.
2056 | ccmp CARG1w, #0, #4, mi
2057 | sub CARG3w, CARG1w, CARG4w
2058 | csel CARG1w, CARG1w, CARG3w, eq // Offset by divisor if signs differ.
2059 | eor CARG3w, CARG1w, CARG2w
2060 | cmp CARG3w, #0
2061 | cneg CARG1w, CARG1w, mi // Final sign fixup.
2062 | ret
2063 |
2064 |//-----------------------------------------------------------------------
2065 |//-- Miscellaneous functions --------------------------------------------
2066 |//-----------------------------------------------------------------------
2067 |
2068 |//-----------------------------------------------------------------------
2069 |//-- FFI helper functions -----------------------------------------------
2070 |//-----------------------------------------------------------------------
2071 |
2072 |// Handler for callback functions.
2073 |// Saveregs already performed. Callback slot number in [sp], g in r12.
2074 |->vm_ffi_callback:
2075 |.if FFI
2076 |.type CTSTATE, CTState, PC
2077 | saveregs
2078 | ldr CTSTATE, GL:x10->ctype_state
2079 | mov GL, x10
2080 | add x10, sp, # CFRAME_SPACE
/* Spill all incoming AAPCS64 argument registers (x0-x7, d0-d7) plus the
** callback slot and incoming stack pointer into the CTState, so
** lj_ccallback_enter can unmarshal the C arguments. */
2081 | str w9, CTSTATE->cb.slot
2082 | stp x0, x1, CTSTATE->cb.gpr[0]
2083 | stp d0, d1, CTSTATE->cb.fpr[0]
2084 | stp x2, x3, CTSTATE->cb.gpr[2]
2085 | stp d2, d3, CTSTATE->cb.fpr[2]
2086 | stp x4, x5, CTSTATE->cb.gpr[4]
2087 | stp d4, d5, CTSTATE->cb.fpr[4]
2088 | stp x6, x7, CTSTATE->cb.gpr[6]
2089 | stp d6, d7, CTSTATE->cb.fpr[6]
2090 | str x10, CTSTATE->cb.stack
2091 | mov CARG1, CTSTATE
2092 | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
2093 | mov CARG2, sp
2094 | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
2095 | // Returns lua_State *.
2096 | ldp BASE, RC, L:CRET1->base
2097 | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
2098 | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
2099 | movn TISNIL, #0
2100 | mov L, CRET1
2101 | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
2102 | sub RC, RC, BASE
2103 | st_vmstate ST_INTERP
2104 | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
2105 | ins_callt
2106 |.endif
2107 |
2108 |->cont_ffi_callback: // Return from FFI callback.
2109 |.if FFI
2110 | ldr CTSTATE, GL->ctype_state
2111 | stp BASE, CARG4, L->base
2112 | str L, CTSTATE->L
2113 | mov CARG1, CTSTATE
2114 | mov CARG2, RA
2115 | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
/* Reload the C return registers marshalled by lj_ccallback_leave. */
2116 | ldp x0, x1, CTSTATE->cb.gpr[0]
2117 | ldp d0, d1, CTSTATE->cb.fpr[0]
2118 | b ->vm_leave_unw
2119 |.endif
2120 |
2121 |->vm_ffi_call: // Call C function via FFI.
2122 | // Caveat: needs special frame unwinding, see below.
2123 |.if FFI
2124 | .type CCSTATE, CCallState, x19
/* Frame-pointer-based frame: sp is lowered by spadj for outgoing stack
** args and restored from fp after the call. */
2125 | stp fp, lr, [sp, #-32]!
2126 | add fp, sp, #0
2127 | str CCSTATE, [sp, #16] // x19 is callee-saved; preserve it.
2128 | mov CCSTATE, x0
2129 | ldr TMP0w, CCSTATE:x0->spadj
2130 | ldrb TMP1w, CCSTATE->nsp
2131 | add TMP2, CCSTATE, #offsetof(CCallState, stack)
2132 | subs TMP1, TMP1, #1
2133 | ldr TMP3, CCSTATE->func
2134 | sub sp, fp, TMP0
2135 | bmi >2 // No stack slots to copy.
2136 |1: // Copy stack slots
2137 | ldr TMP0, [TMP2, TMP1, lsl #3]
2138 | str TMP0, [sp, TMP1, lsl #3]
2139 | subs TMP1, TMP1, #1
2140 | bpl <1
2141 |2:
/* Load all AAPCS64 argument registers from the CCallState, then call. */
2142 | ldp x0, x1, CCSTATE->gpr[0]
2143 | ldp d0, d1, CCSTATE->fpr[0]
2144 | ldp x2, x3, CCSTATE->gpr[2]
2145 | ldp d2, d3, CCSTATE->fpr[2]
2146 | ldp x4, x5, CCSTATE->gpr[4]
2147 | ldp d4, d5, CCSTATE->fpr[4]
2148 | ldp x6, x7, CCSTATE->gpr[6]
2149 | ldp d6, d7, CCSTATE->fpr[6]
2150 | ldr x8, CCSTATE->retp // Indirect result location register.
2151 | blr TMP3
2152 | mov sp, fp
2153 | stp x0, x1, CCSTATE->gpr[0] // Store back the return registers.
2154 | stp d0, d1, CCSTATE->fpr[0]
2155 | stp d2, d3, CCSTATE->fpr[2]
2156 | ldr CCSTATE, [sp, #16]
2157 | ldp fp, lr, [sp], #32
2158 | ret
2159 |.endif
2160 |// Note: vm_ffi_call must be the last function in this object file!
2161 |
2162 |//-----------------------------------------------------------------------
2163}
2164
2165/* Generate the code for a single instruction. */
2166static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2167{
2168 int vk = 0;
2169 |=>defop:
2170
2171 switch (op) {
2172
2173 /* -- Comparison ops ---------------------------------------------------- */
2174
2175 /* Remember: all ops branch for a true comparison, fall through otherwise. */
2176
  case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
    | // RA = src1, RC = src2, JMP with RC = target
    | ldr CARG1, [BASE, RA, lsl #3]
    | ldrh RBw, [PC, # OFS_RD]
    | ldr CARG2, [BASE, RC, lsl #3]
    | add PC, PC, #4
    | add RB, PC, RB, lsl #2
    | sub RB, RB, #0x20000
    /* Fast path: both operands are integers; compare as 32-bit ints. */
    | checkint CARG1, >3
    | checkint CARG2, >4
    | cmp CARG1w, CARG2w
    if (op == BC_ISLT) {
      | csel PC, RB, PC, lt
    } else if (op == BC_ISGE) {
      | csel PC, RB, PC, ge
    } else if (op == BC_ISLE) {
      | csel PC, RB, PC, le
    } else {
      | csel PC, RB, PC, gt
    }
    |1:
    | ins_next
    |
    |3: // RA not int.
    | ldr FARG1, [BASE, RA, lsl #3]
    | blo ->vmeta_comp
    | ldr FARG2, [BASE, RC, lsl #3]
    | cmp TISNUMhi, CARG2, lsr #32
    | bhi >5
    | bne ->vmeta_comp
    | // RA number, RC int.
    | scvtf FARG2, CARG2w
    | b >5
    |
    |4: // RA int, RC not int
    | ldr FARG2, [BASE, RC, lsl #3]
    | blo ->vmeta_comp
    | // RA int, RC number.
    | scvtf FARG1, CARG1w
    |
    |5: // RA number, RC number
    | fcmp FARG1, FARG2
    | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
    if (op == BC_ISLT) {
      | csel PC, RB, PC, lo
    } else if (op == BC_ISGE) {
      | csel PC, RB, PC, hs
    } else if (op == BC_ISLE) {
      | csel PC, RB, PC, ls
    } else {
      | csel PC, RB, PC, hi
    }
    | b <1
    break;
2231
  case BC_ISEQV: case BC_ISNEV:
    /* vk = 1 for the EQ variant, 0 for NE; selects branch polarity below. */
    vk = op == BC_ISEQV;
    | // RA = src1, RC = src2, JMP with RC = target
    | ldr CARG1, [BASE, RA, lsl #3]
    | add RC, BASE, RC, lsl #3
    | ldrh RBw, [PC, # OFS_RD]
    | ldr CARG3, [RC]
    | add PC, PC, #4
    | add RB, PC, RB, lsl #2
    | sub RB, RB, #0x20000
    | asr ITYPE, CARG3, #47
    | cmn ITYPE, #-LJ_TISNUM
    if (vk) {
      | bls ->BC_ISEQN_Z
    } else {
      | bls ->BC_ISNEN_Z
    }
    | // RC is not a number.
    | asr TMP0, CARG1, #47
    |.if FFI
    | // Check if RC or RA is a cdata.
    | cmn ITYPE, #-LJ_TCDATA
    | ccmn TMP0, #-LJ_TCDATA, #4, ne
    | beq ->vmeta_equal_cd
    |.endif
    | cmp CARG1, CARG3
    | bne >2
    | // Tag and value are equal.
    if (vk) {
      |->BC_ISEQV_Z:
      | mov PC, RB // Perform branch.
    }
    |1:
    | ins_next
    |
    |2: // Check if the tags are the same and it's a table or userdata.
    | cmp ITYPE, TMP0
    | ccmn ITYPE, #-LJ_TISTABUD, #2, eq
    if (vk) {
      | bhi <1
    } else {
      | bhi ->BC_ISEQV_Z // Reuse code from opposite instruction.
    }
    | // Different tables or userdatas. Need to check __eq metamethod.
    | // Field metatable must be at same offset for GCtab and GCudata!
    | and TAB:CARG2, CARG1, #LJ_GCVMASK
    | ldr TAB:TMP2, TAB:CARG2->metatable
    if (vk) {
      | cbz TAB:TMP2, <1 // No metatable?
      | ldrb TMP1w, TAB:TMP2->nomm
      | mov CARG4, #0 // ne = 0
      | tbnz TMP1w, #MM_eq, <1 // 'no __eq' flag set: done.
    } else {
      | cbz TAB:TMP2, ->BC_ISEQV_Z // No metatable?
      | ldrb TMP1w, TAB:TMP2->nomm
      | mov CARG4, #1 // ne = 1.
      | tbnz TMP1w, #MM_eq, ->BC_ISEQV_Z // 'no __eq' flag set: done.
    }
    | b ->vmeta_equal
    break;
2292
  case BC_ISEQS: case BC_ISNES:
    vk = op == BC_ISEQS;
    | // RA = src, RC = str_const (~), JMP with RC = target
    | ldr CARG1, [BASE, RA, lsl #3]
    | mvn RC, RC
    | ldrh RBw, [PC, # OFS_RD]
    | ldr CARG2, [KBASE, RC, lsl #3]
    | add PC, PC, #4
    /* Build the full tagged TValue for the string constant, then compare
    ** whole 64-bit words (tag + GC pointer identity). */
    | movn TMP0, #~LJ_TSTR
    |.if FFI
    | asr ITYPE, CARG1, #47
    |.endif
    | add RB, PC, RB, lsl #2
    | add CARG2, CARG2, TMP0, lsl #47
    | sub RB, RB, #0x20000
    |.if FFI
    | cmn ITYPE, #-LJ_TCDATA
    | beq ->vmeta_equal_cd
    |.endif
    | cmp CARG1, CARG2
    if (vk) {
      | csel PC, RB, PC, eq
    } else {
      | csel PC, RB, PC, ne
    }
    | ins_next
    break;
2320
  case BC_ISEQN: case BC_ISNEN:
    vk = op == BC_ISEQN;
    | // RA = src, RC = num_const (~), JMP with RC = target
    | ldr CARG1, [BASE, RA, lsl #3]
    | add RC, KBASE, RC, lsl #3
    | ldrh RBw, [PC, # OFS_RD]
    | ldr CARG3, [RC]
    | add PC, PC, #4
    | add RB, PC, RB, lsl #2
    | sub RB, RB, #0x20000
    /* These labels are also entered from BC_ISEQV/BC_ISNEV above. */
    if (vk) {
      |->BC_ISEQN_Z:
    } else {
      |->BC_ISNEN_Z:
    }
    | checkint CARG1, >4
    | checkint CARG3, >6
    | cmp CARG1w, CARG3w
    |1:
    if (vk) {
      | csel PC, RB, PC, eq
      |2:
    } else {
      |2:
      | csel PC, RB, PC, ne
    }
    |3:
    | ins_next
    |
    |4: // RA not int.
    |.if FFI
    | blo >7
    |.else
    | blo <2
    |.endif
    | ldr FARG1, [BASE, RA, lsl #3]
    | ldr FARG2, [RC]
    | cmp TISNUMhi, CARG3, lsr #32
    | bne >5
    | // RA number, RC int.
    | scvtf FARG2, CARG3w
    |5:
    | // RA number, RC number.
    | fcmp FARG1, FARG2
    | b <1
    |
    |6: // RA int, RC number
    | ldr FARG2, [RC]
    | scvtf FARG1, CARG1w
    | fcmp FARG1, FARG2
    | b <1
    |
    |.if FFI
    |7:
    | asr ITYPE, CARG1, #47
    | cmn ITYPE, #-LJ_TCDATA
    | bne <2
    | b ->vmeta_equal_cd
    |.endif
    break;
2381
  case BC_ISEQP: case BC_ISNEP:
    vk = op == BC_ISEQP;
    | // RA = src, RC = primitive_type (~), JMP with RC = target
    | ldr TMP0, [BASE, RA, lsl #3]
    | ldrh RBw, [PC, # OFS_RD]
    | add PC, PC, #4
    /* RC+1 turns the one's-complement type operand into a value whose
    ** negation matches the itype extracted by asr #47 below. */
    | add RC, RC, #1
    | add RB, PC, RB, lsl #2
    |.if FFI
    | asr ITYPE, TMP0, #47
    | cmn ITYPE, #-LJ_TCDATA
    | beq ->vmeta_equal_cd
    | cmn RC, ITYPE
    |.else
    | cmn RC, TMP0, asr #47
    |.endif
    | sub RB, RB, #0x20000
    if (vk) {
      | csel PC, RB, PC, eq
    } else {
      | csel PC, RB, PC, ne
    }
    | ins_next
    break;
2406
2407 /* -- Unary test and copy ops ------------------------------------------- */
2408
  case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
    | // RA = dst or unused, RC = src, JMP with RC = target
    | ldrh RBw, [PC, # OFS_RD]
    | ldr TMP0, [BASE, RC, lsl #3]
    | add PC, PC, #4
    /* Truthiness test: compare the tagged value against the "false" TValue;
    ** lo selects the truthy side, hs the falsy side. */
    | mov_false TMP1
    | add RB, PC, RB, lsl #2
    | cmp TMP0, TMP1
    | sub RB, RB, #0x20000
    if (op == BC_ISTC || op == BC_IST) {
      if (op == BC_ISTC) {
        | csel RA, RA, RC, lo
      }
      | csel PC, RB, PC, lo
    } else {
      if (op == BC_ISFC) {
        | csel RA, RA, RC, hs
      }
      | csel PC, RB, PC, hs
    }
    if (op == BC_ISTC || op == BC_ISFC) {
      | str TMP0, [BASE, RA, lsl #3]
    }
    | ins_next
    break;
2434
  case BC_ISTYPE:
    | // RA = src, RC = -type
    /* cmn RC, itype == cmp itype, -RC: trap to vmeta_istype on mismatch. */
    | ldr TMP0, [BASE, RA, lsl #3]
    | cmn RC, TMP0, asr #47
    | bne ->vmeta_istype
    | ins_next
    break;
  case BC_ISNUM:
    | // RA = src, RC = -(TISNUM-1)
    | ldr TMP0, [BASE, RA]
    | checknum TMP0, ->vmeta_istype
    | ins_next
    break;
2448
2449 /* -- Unary ops --------------------------------------------------------- */
2450
  case BC_MOV:
    | // RA = dst, RC = src
    | ldr TMP0, [BASE, RC, lsl #3]
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_NOT:
    | // RA = dst, RC = src
    /* Branchless: result is "false" if src is truthy (lo), else "true". */
    | ldr TMP0, [BASE, RC, lsl #3]
    | mov_false TMP1
    | mov_true TMP2
    | cmp TMP0, TMP1
    | csel TMP0, TMP1, TMP2, lo
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_UNM:
    | // RA = dst, RC = src
    | ldr TMP0, [BASE, RC, lsl #3]
    | asr ITYPE, TMP0, #47
    | cmn ITYPE, #-LJ_TISNUM
    | bhi ->vmeta_unm
    | eor TMP0, TMP0, #U64x(80000000,00000000) // Flip sign bit (FP negate).
    | bne >5
    /* Integer path: negating INT_MIN overflows (vs set) and the result is
    ** replaced by the double constant 2^31 instead. */
    | negs TMP0w, TMP0w
    | movz CARG3, #0x41e0, lsl #48 // 2^31.
    | add TMP0, TMP0, TISNUM
    | csel TMP0, TMP0, CARG3, vc
    |5:
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_LEN:
    | // RA = dst, RC = src
    | ldr CARG1, [BASE, RC, lsl #3]
    | asr ITYPE, CARG1, #47
    | cmn ITYPE, #-LJ_TSTR
    | and CARG1, CARG1, #LJ_GCVMASK
    | bne >2
    | ldr CARG1w, STR:CARG1->len
    |1:
    /* Tag the 32-bit length as an integer TValue. */
    | add CARG1, CARG1, TISNUM
    | str CARG1, [BASE, RA, lsl #3]
    | ins_next
    |
    |2:
    | cmn ITYPE, #-LJ_TTAB
    | bne ->vmeta_len
    /* Lua 5.2 compat: a table __len metamethod overrides raw length. */
#if LJ_52
    | ldr TAB:CARG2, TAB:CARG1->metatable
    | cbnz TAB:CARG2, >9
    |3:
#endif
    |->BC_LEN_Z:
    | bl extern lj_tab_len // (GCtab *t)
    | // Returns uint32_t (but less than 2^31).
    | b <1
    |
#if LJ_52
    |9:
    | ldrb TMP1w, TAB:CARG2->nomm
    | tbnz TMP1w, #MM_len, <3 // 'no __len' flag set: done.
    | b ->vmeta_len
#endif
    break;
2516
2517 /* -- Binary ops -------------------------------------------------------- */
2518
  /* Shared helpers for the binary arithmetic opcodes.  The `||` lines are
  ** C statements executed while the macro expands, so `vk` (operand-order
  ** variant: 0 = VN, 1 = NV, 2 = VV) selects which asm lines get emitted. */
  |.macro ins_arithcheck_int, target
  | checkint CARG1, target
  | checkint CARG2, target
  |.endmacro
  |
  |.macro ins_arithcheck_num, target
  | checknum CARG1, target
  | checknum CARG2, target
  |.endmacro
  |
  |.macro ins_arithcheck_nzdiv, target
  | cbz CARG2w, target
  |.endmacro
  |
  |.macro ins_arithhead
  ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
  ||if (vk == 1) {
  | and RC, RC, #255
  | decode_RB RB, INS
  ||} else {
  | decode_RB RB, INS
  | and RC, RC, #255
  ||}
  |.endmacro
  |
  |.macro ins_arithload, reg1, reg2
  | // RA = dst, RB = src1, RC = src2 | num_const
  ||switch (vk) {
  ||case 0:
  | ldr reg1, [BASE, RB, lsl #3]
  | ldr reg2, [KBASE, RC, lsl #3]
  || break;
  ||case 1:
  | ldr reg1, [KBASE, RC, lsl #3]
  | ldr reg2, [BASE, RB, lsl #3]
  || break;
  ||default:
  | ldr reg1, [BASE, RB, lsl #3]
  | ldr reg2, [BASE, RC, lsl #3]
  || break;
  ||}
  |.endmacro
  |
  |.macro ins_arithfallback, ins
  ||switch (vk) {
  ||case 0:
  | ins ->vmeta_arith_vn
  || break;
  ||case 1:
  | ins ->vmeta_arith_nv
  || break;
  ||default:
  | ins ->vmeta_arith_vv
  || break;
  ||}
  |.endmacro
  |
  /* FP modulo: a - floor(a/b)*b, via fdiv + frintm (round toward -inf). */
  |.macro ins_arithmod, res, reg1, reg2
  | fdiv d2, reg1, reg2
  | frintm d2, d2
  | fmsub res, d2, reg2, reg1
  |.endmacro
  |
  |.macro ins_arithdn, intins, fpins
  | ins_arithhead
  | ins_arithload CARG1, CARG2
  | ins_arithcheck_int >5
  |.if "intins" == "smull"
  /* 32x32->64 multiply; overflow detected by comparing against the
  ** sign-extended low half (cmp ... sxtw). */
  | smull CARG1, CARG1w, CARG2w
  | cmp CARG1, CARG1, sxtw
  | mov CARG1w, CARG1w
  | ins_arithfallback bne
  |.elif "intins" == "ins_arithmodi"
  | ins_arithfallback ins_arithcheck_nzdiv
  | bl ->vm_modi
  |.else
  | intins CARG1w, CARG1w, CARG2w
  | ins_arithfallback bvs
  |.endif
  | add CARG1, CARG1, TISNUM
  | str CARG1, [BASE, RA, lsl #3]
  |4:
  | ins_next
  |
  |5: // FP variant.
  | ins_arithload FARG1, FARG2
  | ins_arithfallback ins_arithcheck_num
  | fpins FARG1, FARG1, FARG2
  | str FARG1, [BASE, RA, lsl #3]
  | b <4
  |.endmacro
  |
  |.macro ins_arithfp, fpins
  | ins_arithhead
  | ins_arithload CARG1, CARG2
  | ins_arithload FARG1, FARG2
  | ins_arithfallback ins_arithcheck_num
  |.if "fpins" == "fpow"
  | bl extern pow
  |.else
  | fpins FARG1, FARG1, FARG2
  |.endif
  | str FARG1, [BASE, RA, lsl #3]
  | ins_next
  |.endmacro
2624
  /* Each case covers all three operand-order variants (VN/NV/VV); the
  ** macros above dispatch on vk derived from op. */
  case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
    | ins_arithdn adds, fadd
    break;
  case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
    | ins_arithdn subs, fsub
    break;
  case BC_MULVN: case BC_MULNV: case BC_MULVV:
    | ins_arithdn smull, fmul
    break;
  case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
    | ins_arithfp fdiv
    break;
  case BC_MODVN: case BC_MODNV: case BC_MODVV:
    | ins_arithdn ins_arithmodi, ins_arithmod
    break;
  case BC_POW:
    | // NYI: (partial) integer arithmetic.
    | ins_arithfp fpow
    break;
2644
  case BC_CAT:
    | decode_RB RB, INS
    | and RC, RC, #255
    | // RA = dst, RB = src_start, RC = src_end
    | str BASE, L->base
    | sub CARG3, RC, RB
    | add CARG2, BASE, RC, lsl #3
    |->BC_CAT_Z:
    | // RA = dst, CARG2 = top-1, CARG3 = left
    | mov CARG1, L
    | str PC, SAVE_PC
    | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
    | // Returns NULL (finished) or TValue * (metamethod).
    /* RB is re-decoded from the bytecode since the C call clobbered it. */
    | ldrb RBw, [PC, #-4+OFS_RB]
    | ldr BASE, L->base
    | cbnz CRET1, ->vmeta_binop
    | ldr TMP0, [BASE, RB, lsl #3]
    | str TMP0, [BASE, RA, lsl #3] // Copy result to RA.
    | ins_next
    break;
2665
2666 /* -- Constant ops ------------------------------------------------------ */
2667
  case BC_KSTR:
    | // RA = dst, RC = str_const (~)
    /* Constant operands are one's-complement indices; mvn recovers them. */
    | mvn RC, RC
    | ldr TMP0, [KBASE, RC, lsl #3]
    | movn TMP1, #~LJ_TSTR
    | add TMP0, TMP0, TMP1, lsl #47
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_KCDATA:
    |.if FFI
    | // RA = dst, RC = cdata_const (~)
    | mvn RC, RC
    | ldr TMP0, [KBASE, RC, lsl #3]
    | movn TMP1, #~LJ_TCDATA
    | add TMP0, TMP0, TMP1, lsl #47
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    |.endif
    break;
  case BC_KSHORT:
    | // RA = dst, RC = int16_literal
    | sxth RCw, RCw
    | add TMP0, RC, TISNUM // Tag the sign-extended literal as an integer.
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_KNUM:
    | // RA = dst, RC = num_const
    | ldr TMP0, [KBASE, RC, lsl #3]
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_KPRI:
    | // RA = dst, RC = primitive_type (~)
    | mvn TMP0, RC, lsl #47
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_KNIL:
    | // RA = base, RC = end
    | add RA, BASE, RA, lsl #3
    | add RC, BASE, RC, lsl #3
    | str TISNIL, [RA], #8
    |1:
    | cmp RA, RC
    | str TISNIL, [RA], #8
    | blt <1
    | ins_next_
    break;
2718
2719 /* -- Upvalue and function ops ------------------------------------------ */
2720
  case BC_UGET:
    | // RA = dst, RC = uvnum
    /* uvptr[] is an array of UPVAL pointers directly inside GCfuncL, so the
    ** upvalue index is biased by offsetof(GCfuncL, uvptr)/8. */
    | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
    | add RC, RC, #offsetof(GCfuncL, uvptr)/8
    | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
    | ldr UPVAL:CARG2, [LFUNC:CARG2, RC, lsl #3]
    | ldr CARG2, UPVAL:CARG2->v
    | ldr TMP0, [CARG2]
    | str TMP0, [BASE, RA, lsl #3]
    | ins_next
    break;
  case BC_USETV:
    | // RA = uvnum, RC = src
    | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
    | add RA, RA, #offsetof(GCfuncL, uvptr)/8
    | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
    | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
    | ldr CARG3, [BASE, RC, lsl #3]
    | ldr CARG2, UPVAL:CARG1->v
    | ldrb TMP2w, UPVAL:CARG1->marked
    | ldrb TMP0w, UPVAL:CARG1->closed
    | asr ITYPE, CARG3, #47
    | str CARG3, [CARG2]
    /* Write barrier needed only if the upvalue is black, closed, and the
    ** stored value is a GC object (conditional-compare chain below). */
    | add ITYPE, ITYPE, #-LJ_TISGCV
    | tst TMP2w, #LJ_GC_BLACK // isblack(uv)
    | ccmp TMP0w, #0, #4, ne // && uv->closed
    | ccmn ITYPE, #-(LJ_TNUMX - LJ_TISGCV), #0, ne // && tvisgcv(v)
    | bhi >2
    |1:
    | ins_next
    |
    |2: // Check if new value is white.
    | and GCOBJ:CARG3, CARG3, #LJ_GCVMASK
    | ldrb TMP1w, GCOBJ:CARG3->gch.marked
    | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
    | beq <1
    | // Crossed a write barrier. Move the barrier forward.
    | mov CARG1, GL
    | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
    | b <1
    break;
  case BC_USETS:
    | // RA = uvnum, RC = str_const (~)
    | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
    | add RA, RA, #offsetof(GCfuncL, uvptr)/8
    | mvn RC, RC
    | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
    | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
    | ldr STR:CARG3, [KBASE, RC, lsl #3]
    | movn TMP0, #~LJ_TSTR
    | ldr CARG2, UPVAL:CARG1->v
    | ldrb TMP2w, UPVAL:CARG1->marked
    | add TMP0, STR:CARG3, TMP0, lsl #47
    | ldrb TMP1w, STR:CARG3->marked
    | str TMP0, [CARG2]
    | tbnz TMP2w, #2, >2 // isblack(uv)
    |1:
    | ins_next
    |
    |2: // Check if string is white and ensure upvalue is closed.
    | ldrb TMP0w, UPVAL:CARG1->closed
    | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
    | ccmp TMP0w, #0, #4, ne
    | beq <1
    | // Crossed a write barrier. Move the barrier forward.
    | mov CARG1, GL
    | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
    | b <1
    break;
  case BC_USETN:
    | // RA = uvnum, RC = num_const
    /* Numbers are not GC objects: no write barrier needed. */
    | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
    | add RA, RA, #offsetof(GCfuncL, uvptr)/8
    | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
    | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
    | ldr TMP0, [KBASE, RC, lsl #3]
    | ldr CARG2, UPVAL:CARG2->v
    | str TMP0, [CARG2]
    | ins_next
    break;
  case BC_USETP:
    | // RA = uvnum, RC = primitive_type (~)
    | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
    | add RA, RA, #offsetof(GCfuncL, uvptr)/8
    | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
    | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
    | mvn TMP0, RC, lsl #47
    | ldr CARG2, UPVAL:CARG2->v
    | str TMP0, [CARG2]
    | ins_next
    break;
2812
  case BC_UCLO:
    | // RA = level, RC = target
    /* Skip the C call entirely when there are no open upvalues. */
    | ldr CARG3, L->openupval
    | add RC, PC, RC, lsl #2
    | str BASE, L->base
    | sub PC, RC, #0x20000
    | cbz CARG3, >1
    | mov CARG1, L
    | add CARG2, BASE, RA, lsl #3
    | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
    | ldr BASE, L->base
    |1:
    | ins_next
    break;
2827
  case BC_FNEW:
    | // RA = dst, RC = proto_const (~) (holding function prototype)
    | mvn RC, RC
    | str BASE, L->base
    | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
    | str PC, SAVE_PC
    | ldr CARG2, [KBASE, RC, lsl #3]
    | mov CARG1, L
    | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
    | // (lua_State *L, GCproto *pt, GCfuncL *parent)
    | bl extern lj_func_newL_gc
    | // Returns GCfuncL *.
    | ldr BASE, L->base
    /* Tag the returned GC pointer as a function TValue. */
    | movn TMP0, #~LJ_TFUNC
    | add CRET1, CRET1, TMP0, lsl #47
    | str CRET1, [BASE, RA, lsl #3]
    | ins_next
    break;
2846
2847 /* -- Table ops --------------------------------------------------------- */
2848
2849 case BC_TNEW:
2850 case BC_TDUP:
2851 | // RA = dst, RC = (hbits|asize) | tab_const (~)
2852 | ldp CARG3, CARG4, GL->gc.total // Assumes threshold follows total.
2853 | str BASE, L->base
2854 | str PC, SAVE_PC
2855 | mov CARG1, L
2856 | cmp CARG3, CARG4
2857 | bhs >5
2858 |1:
2859 if (op == BC_TNEW) {
2860 | and CARG2, RC, #0x7ff
2861 | lsr CARG3, RC, #11
2862 | cmp CARG2, #0x7ff
2863 | mov TMP0, #0x801
2864 | csel CARG2, CARG2, TMP0, ne
2865 | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
2866 | // Returns GCtab *.
2867 } else {
2868 | mvn RC, RC
2869 | ldr CARG2, [KBASE, RC, lsl #3]
2870 | bl extern lj_tab_dup // (lua_State *L, Table *kt)
2871 | // Returns GCtab *.
2872 }
2873 | ldr BASE, L->base
2874 | movk CRET1, #(LJ_TTAB>>1)&0xffff, lsl #48
2875 | str CRET1, [BASE, RA, lsl #3]
2876 | ins_next
2877 |
2878 |5:
2879 | bl extern lj_gc_step_fixtop // (lua_State *L)
2880 | mov CARG1, L
2881 | b <1
2882 break;
2883
2884 case BC_GGET:
2885 | // RA = dst, RC = str_const (~)
2886 case BC_GSET:
2887 | // RA = dst, RC = str_const (~)
2888 | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
2889 | mvn RC, RC
2890 | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
2891 | ldr TAB:CARG2, LFUNC:CARG1->env
2892 | ldr STR:RC, [KBASE, RC, lsl #3]
2893 if (op == BC_GGET) {
2894 | b ->BC_TGETS_Z
2895 } else {
2896 | b ->BC_TSETS_Z
2897 }
2898 break;
2899
2900 case BC_TGETV:
2901 | decode_RB RB, INS
2902 | and RC, RC, #255
2903 | // RA = dst, RB = table, RC = key
2904 | ldr CARG2, [BASE, RB, lsl #3]
2905 | ldr TMP1, [BASE, RC, lsl #3]
2906 | checktab CARG2, ->vmeta_tgetv
2907 | checkint TMP1, >9 // Integer key?
2908 | ldr CARG3, TAB:CARG2->array
2909 | ldr CARG1w, TAB:CARG2->asize
2910 | add CARG3, CARG3, TMP1, uxtw #3
2911 | cmp TMP1w, CARG1w // In array part?
2912 | bhs ->vmeta_tgetv
2913 | ldr TMP0, [CARG3]
2914 | cmp TMP0, TISNIL
2915 | beq >5
2916 |1:
2917 | str TMP0, [BASE, RA, lsl #3]
2918 | ins_next
2919 |
2920 |5: // Check for __index if table value is nil.
2921 | ldr TAB:CARG1, TAB:CARG2->metatable
2922 | cbz TAB:CARG1, <1 // No metatable: done.
2923 | ldrb TMP1w, TAB:CARG1->nomm
2924 | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
2925 | b ->vmeta_tgetv
2926 |
2927 |9:
2928 | asr ITYPE, TMP1, #47
2929 | cmn ITYPE, #-LJ_TSTR // String key?
2930 | bne ->vmeta_tgetv
2931 | and STR:RC, TMP1, #LJ_GCVMASK
2932 | b ->BC_TGETS_Z
2933 break;
2934 case BC_TGETS:
2935 | decode_RB RB, INS
2936 | and RC, RC, #255
2937 | // RA = dst, RB = table, RC = str_const (~)
2938 | ldr CARG2, [BASE, RB, lsl #3]
2939 | mvn RC, RC
2940 | ldr STR:RC, [KBASE, RC, lsl #3]
2941 | checktab CARG2, ->vmeta_tgets1
2942 |->BC_TGETS_Z:
2943 | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = dst
2944 | ldr TMP1w, TAB:CARG2->hmask
2945 | ldr TMP2w, STR:RC->sid
2946 | ldr NODE:CARG3, TAB:CARG2->node
2947 | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
2948 | add TMP1, TMP1, TMP1, lsl #1
2949 | movn CARG4, #~LJ_TSTR
2950 | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
2951 | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
2952 |1:
2953 | ldp TMP0, CARG1, NODE:CARG3->val
2954 | ldr NODE:CARG3, NODE:CARG3->next
2955 | cmp CARG1, CARG4
2956 | bne >4
2957 | cmp TMP0, TISNIL
2958 | beq >5
2959 |3:
2960 | str TMP0, [BASE, RA, lsl #3]
2961 | ins_next
2962 |
2963 |4: // Follow hash chain.
2964 | cbnz NODE:CARG3, <1
2965 | // End of hash chain: key not found, nil result.
2966 | mov TMP0, TISNIL
2967 |
2968 |5: // Check for __index if table value is nil.
2969 | ldr TAB:CARG1, TAB:CARG2->metatable
2970 | cbz TAB:CARG1, <3 // No metatable: done.
2971 | ldrb TMP1w, TAB:CARG1->nomm
2972 | tbnz TMP1w, #MM_index, <3 // 'no __index' flag set: done.
2973 | b ->vmeta_tgets
2974 break;
2975 case BC_TGETB:
2976 | decode_RB RB, INS
2977 | and RC, RC, #255
2978 | // RA = dst, RB = table, RC = index
2979 | ldr CARG2, [BASE, RB, lsl #3]
2980 | checktab CARG2, ->vmeta_tgetb
2981 | ldr CARG3, TAB:CARG2->array
2982 | ldr CARG1w, TAB:CARG2->asize
2983 | add CARG3, CARG3, RC, lsl #3
2984 | cmp RCw, CARG1w // In array part?
2985 | bhs ->vmeta_tgetb
2986 | ldr TMP0, [CARG3]
2987 | cmp TMP0, TISNIL
2988 | beq >5
2989 |1:
2990 | str TMP0, [BASE, RA, lsl #3]
2991 | ins_next
2992 |
2993 |5: // Check for __index if table value is nil.
2994 | ldr TAB:CARG1, TAB:CARG2->metatable
2995 | cbz TAB:CARG1, <1 // No metatable: done.
2996 | ldrb TMP1w, TAB:CARG1->nomm
2997 | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
2998 | b ->vmeta_tgetb
2999 break;
3000 case BC_TGETR:
3001 | decode_RB RB, INS
3002 | and RC, RC, #255
3003 | // RA = dst, RB = table, RC = key
3004 | ldr CARG1, [BASE, RB, lsl #3]
3005 | ldr TMP1, [BASE, RC, lsl #3]
3006 | and TAB:CARG1, CARG1, #LJ_GCVMASK
3007 | ldr CARG3, TAB:CARG1->array
3008 | ldr TMP2w, TAB:CARG1->asize
3009 | add CARG3, CARG3, TMP1w, uxtw #3
3010 | cmp TMP1w, TMP2w // In array part?
3011 | bhs ->vmeta_tgetr
3012 | ldr TMP0, [CARG3]
3013 |->BC_TGETR_Z:
3014 | str TMP0, [BASE, RA, lsl #3]
3015 | ins_next
3016 break;
3017
3018 case BC_TSETV:
3019 | decode_RB RB, INS
3020 | and RC, RC, #255
3021 | // RA = src, RB = table, RC = key
3022 | ldr CARG2, [BASE, RB, lsl #3]
3023 | ldr TMP1, [BASE, RC, lsl #3]
3024 | checktab CARG2, ->vmeta_tsetv
3025 | checkint TMP1, >9 // Integer key?
3026 | ldr CARG3, TAB:CARG2->array
3027 | ldr CARG1w, TAB:CARG2->asize
3028 | add CARG3, CARG3, TMP1, uxtw #3
3029 | cmp TMP1w, CARG1w // In array part?
3030 | bhs ->vmeta_tsetv
3031 | ldr TMP1, [CARG3]
3032 | ldr TMP0, [BASE, RA, lsl #3]
3033 | ldrb TMP2w, TAB:CARG2->marked
3034 | cmp TMP1, TISNIL // Previous value is nil?
3035 | beq >5
3036 |1:
3037 | str TMP0, [CARG3]
3038 | tbnz TMP2w, #2, >7 // isblack(table)
3039 |2:
3040 | ins_next
3041 |
3042 |5: // Check for __newindex if previous value is nil.
3043 | ldr TAB:CARG1, TAB:CARG2->metatable
3044 | cbz TAB:CARG1, <1 // No metatable: done.
3045 | ldrb TMP1w, TAB:CARG1->nomm
3046 | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
3047 | b ->vmeta_tsetv
3048 |
3049 |7: // Possible table write barrier for the value. Skip valiswhite check.
3050 | barrierback TAB:CARG2, TMP2w, TMP1
3051 | b <2
3052 |
3053 |9:
3054 | asr ITYPE, TMP1, #47
3055 | cmn ITYPE, #-LJ_TSTR // String key?
3056 | bne ->vmeta_tsetv
3057 | and STR:RC, TMP1, #LJ_GCVMASK
3058 | b ->BC_TSETS_Z
3059 break;
3060 case BC_TSETS:
3061 | decode_RB RB, INS
3062 | and RC, RC, #255
3063 | // RA = dst, RB = table, RC = str_const (~)
3064 | ldr CARG2, [BASE, RB, lsl #3]
3065 | mvn RC, RC
3066 | ldr STR:RC, [KBASE, RC, lsl #3]
3067 | checktab CARG2, ->vmeta_tsets1
3068 |->BC_TSETS_Z:
3069 | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = src
3070 | ldr TMP1w, TAB:CARG2->hmask
3071 | ldr TMP2w, STR:RC->sid
3072 | ldr NODE:CARG3, TAB:CARG2->node
3073 | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
3074 | add TMP1, TMP1, TMP1, lsl #1
3075 | movn CARG4, #~LJ_TSTR
3076 | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
3077 | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
3078 | strb wzr, TAB:CARG2->nomm // Clear metamethod cache.
3079 |1:
3080 | ldp TMP1, CARG1, NODE:CARG3->val
3081 | ldr NODE:TMP3, NODE:CARG3->next
3082 | ldrb TMP2w, TAB:CARG2->marked
3083 | cmp CARG1, CARG4
3084 | bne >5
3085 | ldr TMP0, [BASE, RA, lsl #3]
3086 | cmp TMP1, TISNIL // Previous value is nil?
3087 | beq >4
3088 |2:
3089 | str TMP0, NODE:CARG3->val
3090 | tbnz TMP2w, #2, >7 // isblack(table)
3091 |3:
3092 | ins_next
3093 |
3094 |4: // Check for __newindex if previous value is nil.
3095 | ldr TAB:CARG1, TAB:CARG2->metatable
3096 | cbz TAB:CARG1, <2 // No metatable: done.
3097 | ldrb TMP1w, TAB:CARG1->nomm
3098 | tbnz TMP1w, #MM_newindex, <2 // 'no __newindex' flag set: done.
3099 | b ->vmeta_tsets
3100 |
3101 |5: // Follow hash chain.
3102 | mov NODE:CARG3, NODE:TMP3
3103 | cbnz NODE:TMP3, <1
3104 | // End of hash chain: key not found, add a new one.
3105 |
3106 | // But check for __newindex first.
3107 | ldr TAB:CARG1, TAB:CARG2->metatable
3108 | cbz TAB:CARG1, >6 // No metatable: continue.
3109 | ldrb TMP1w, TAB:CARG1->nomm
3110 | // 'no __newindex' flag NOT set: check.
3111 | tbz TMP1w, #MM_newindex, ->vmeta_tsets
3112 |6:
3113 | movn TMP1, #~LJ_TSTR
3114 | str PC, SAVE_PC
3115 | add TMP0, STR:RC, TMP1, lsl #47
3116 | str BASE, L->base
3117 | mov CARG1, L
3118 | str TMP0, TMPD
3119 | add CARG3, sp, TMPDofs
3120 | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
3121 | // Returns TValue *.
3122 | ldr BASE, L->base
3123 | ldr TMP0, [BASE, RA, lsl #3]
3124 | str TMP0, [CRET1]
3125 | b <3 // No 2nd write barrier needed.
3126 |
3127 |7: // Possible table write barrier for the value. Skip valiswhite check.
3128 | barrierback TAB:CARG2, TMP2w, TMP1
3129 | b <3
3130 break;
3131 case BC_TSETB:
3132 | decode_RB RB, INS
3133 | and RC, RC, #255
3134 | // RA = src, RB = table, RC = index
3135 | ldr CARG2, [BASE, RB, lsl #3]
3136 | checktab CARG2, ->vmeta_tsetb
3137 | ldr CARG3, TAB:CARG2->array
3138 | ldr CARG1w, TAB:CARG2->asize
3139 | add CARG3, CARG3, RC, lsl #3
3140 | cmp RCw, CARG1w // In array part?
3141 | bhs ->vmeta_tsetb
3142 | ldr TMP1, [CARG3]
3143 | ldr TMP0, [BASE, RA, lsl #3]
3144 | ldrb TMP2w, TAB:CARG2->marked
3145 | cmp TMP1, TISNIL // Previous value is nil?
3146 | beq >5
3147 |1:
3148 | str TMP0, [CARG3]
3149 | tbnz TMP2w, #2, >7 // isblack(table)
3150 |2:
3151 | ins_next
3152 |
3153 |5: // Check for __newindex if previous value is nil.
3154 | ldr TAB:CARG1, TAB:CARG2->metatable
3155 | cbz TAB:CARG1, <1 // No metatable: done.
3156 | ldrb TMP1w, TAB:CARG1->nomm
3157 | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
3158 | b ->vmeta_tsetb
3159 |
3160 |7: // Possible table write barrier for the value. Skip valiswhite check.
3161 | barrierback TAB:CARG2, TMP2w, TMP1
3162 | b <2
3163 break;
3164 case BC_TSETR:
3165 | decode_RB RB, INS
3166 | and RC, RC, #255
3167 | // RA = src, RB = table, RC = key
3168 | ldr CARG2, [BASE, RB, lsl #3]
3169 | ldr TMP1, [BASE, RC, lsl #3]
3170 | and TAB:CARG2, CARG2, #LJ_GCVMASK
3171 | ldr CARG1, TAB:CARG2->array
3172 | ldrb TMP2w, TAB:CARG2->marked
3173 | ldr CARG4w, TAB:CARG2->asize
3174 | add CARG1, CARG1, TMP1, uxtw #3
3175 | tbnz TMP2w, #2, >7 // isblack(table)
3176 |2:
3177 | cmp TMP1w, CARG4w // In array part?
3178 | bhs ->vmeta_tsetr
3179 |->BC_TSETR_Z:
3180 | ldr TMP0, [BASE, RA, lsl #3]
3181 | str TMP0, [CARG1]
3182 | ins_next
3183 |
3184 |7: // Possible table write barrier for the value. Skip valiswhite check.
3185 | barrierback TAB:CARG2, TMP2w, TMP0
3186 | b <2
3187 break;
3188
3189 case BC_TSETM:
3190 | // RA = base (table at base-1), RC = num_const (start index)
3191 | add RA, BASE, RA, lsl #3
3192 |1:
3193 | ldr RBw, SAVE_MULTRES
3194 | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
3195 | ldr TMP1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
3196 | sub RB, RB, #8
3197 | cbz RB, >4 // Nothing to copy?
3198 | and TAB:CARG2, CARG2, #LJ_GCVMASK
3199 | ldr CARG1w, TAB:CARG2->asize
3200 | add CARG3w, TMP1w, RBw, lsr #3
3201 | ldr CARG4, TAB:CARG2->array
3202 | cmp CARG3, CARG1
3203 | add RB, RA, RB
3204 | bhi >5
3205 | add TMP1, CARG4, TMP1w, uxtw #3
3206 | ldrb TMP2w, TAB:CARG2->marked
3207 |3: // Copy result slots to table.
3208 | ldr TMP0, [RA], #8
3209 | str TMP0, [TMP1], #8
3210 | cmp RA, RB
3211 | blo <3
3212 | tbnz TMP2w, #2, >7 // isblack(table)
3213 |4:
3214 | ins_next
3215 |
3216 |5: // Need to resize array part.
3217 | str BASE, L->base
3218 | mov CARG1, L
3219 | str PC, SAVE_PC
3220 | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
3221 | // Must not reallocate the stack.
3222 | b <1
3223 |
3224 |7: // Possible table write barrier for any value. Skip valiswhite check.
3225 | barrierback TAB:CARG2, TMP2w, TMP1
3226 | b <4
3227 break;
3228
3229 /* -- Calls and vararg handling ----------------------------------------- */
3230
3231 case BC_CALLM:
3232 | // RA = base, (RB = nresults+1,) RC = extra_nargs
3233 | ldr TMP0w, SAVE_MULTRES
3234 | decode_RC8RD NARGS8:RC, RC
3235 | add NARGS8:RC, NARGS8:RC, TMP0
3236 | b ->BC_CALL_Z
3237 break;
3238 case BC_CALL:
3239 | decode_RC8RD NARGS8:RC, RC
3240 | // RA = base, (RB = nresults+1,) RC = (nargs+1)*8
3241 |->BC_CALL_Z:
3242 | mov RB, BASE // Save old BASE for vmeta_call.
3243 | add BASE, BASE, RA, lsl #3
3244 | ldr CARG3, [BASE]
3245 | sub NARGS8:RC, NARGS8:RC, #8
3246 | add BASE, BASE, #16
3247 | checkfunc CARG3, ->vmeta_call
3248 | ins_call
3249 break;
3250
3251 case BC_CALLMT:
3252 | // RA = base, (RB = 0,) RC = extra_nargs
3253 | ldr TMP0w, SAVE_MULTRES
3254 | add NARGS8:RC, TMP0, RC, lsl #3
3255 | b ->BC_CALLT1_Z
3256 break;
3257 case BC_CALLT:
3258 | lsl NARGS8:RC, RC, #3
3259 | // RA = base, (RB = 0,) RC = (nargs+1)*8
3260 |->BC_CALLT1_Z:
3261 | add RA, BASE, RA, lsl #3
3262 | ldr TMP1, [RA]
3263 | sub NARGS8:RC, NARGS8:RC, #8
3264 | add RA, RA, #16
3265 | checktp CARG3, TMP1, LJ_TFUNC, ->vmeta_callt
3266 | ldr PC, [BASE, FRAME_PC]
3267 |->BC_CALLT2_Z:
3268 | mov RB, #0
3269 | ldrb TMP2w, LFUNC:CARG3->ffid
3270 | tst PC, #FRAME_TYPE
3271 | bne >7
3272 |1:
3273 | str TMP1, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
3274 | cbz NARGS8:RC, >3
3275 |2:
3276 | ldr TMP0, [RA, RB]
3277 | add TMP1, RB, #8
3278 | cmp TMP1, NARGS8:RC
3279 | str TMP0, [BASE, RB]
3280 | mov RB, TMP1
3281 | bne <2
3282 |3:
3283 | cmp TMP2, #1 // (> FF_C) Calling a fast function?
3284 | bhi >5
3285 |4:
3286 | ins_callt
3287 |
3288 |5: // Tailcall to a fast function with a Lua frame below.
3289 | ldrb RAw, [PC, #-4+OFS_RA]
3290 | sub CARG1, BASE, RA, lsl #3
3291 | ldr LFUNC:CARG1, [CARG1, #-32]
3292 | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
3293 | ldr CARG1, LFUNC:CARG1->pc
3294 | ldr KBASE, [CARG1, #PC2PROTO(k)]
3295 | b <4
3296 |
3297 |7: // Tailcall from a vararg function.
3298 | eor PC, PC, #FRAME_VARG
3299 | tst PC, #FRAME_TYPEP // Vararg frame below?
3300 | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
3301 | bne <1
3302 | sub BASE, BASE, PC
3303 | ldr PC, [BASE, FRAME_PC]
3304 | tst PC, #FRAME_TYPE
3305 | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
3306 | b <1
3307 break;
3308
3309 case BC_ITERC:
3310 | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
3311 | add RA, BASE, RA, lsl #3
3312 | ldr CARG3, [RA, #-24]
3313 | mov RB, BASE // Save old BASE for vmeta_call.
3314 | ldp CARG1, CARG2, [RA, #-16]
3315 | add BASE, RA, #16
3316 | mov NARGS8:RC, #16 // Iterators get 2 arguments.
3317 | str CARG3, [RA] // Copy callable.
3318 | stp CARG1, CARG2, [RA, #16] // Copy state and control var.
3319 | checkfunc CARG3, ->vmeta_call
3320 | ins_call
3321 break;
3322
3323 case BC_ITERN:
3324 | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
3325 |.if JIT
3326 | // NYI: add hotloop, record BC_ITERN.
3327 |.endif
3328 | add RA, BASE, RA, lsl #3
3329 | ldr TAB:RB, [RA, #-16]
3330 | ldrh TMP3w, [PC, # OFS_RD]
3331 | ldr CARG1w, [RA, #-8+LO] // Get index from control var.
3332 | add PC, PC, #4
3333 | add TMP3, PC, TMP3, lsl #2
3334 | and TAB:RB, RB, #LJ_GCVMASK
3335 | sub TMP3, TMP3, #0x20000
3336 | ldr TMP1w, TAB:RB->asize
3337 | ldr CARG2, TAB:RB->array
3338 |1: // Traverse array part.
3339 | subs RC, CARG1, TMP1
3340 | add CARG3, CARG2, CARG1, lsl #3
3341 | bhs >5 // Index points after array part?
3342 | ldr TMP0, [CARG3]
3343 | cmp TMP0, TISNIL
3344 | cinc CARG1, CARG1, eq // Skip holes in array part.
3345 | beq <1
3346 | add CARG1, CARG1, TISNUM
3347 | stp CARG1, TMP0, [RA]
3348 | add CARG1, CARG1, #1
3349 |3:
3350 | str CARG1w, [RA, #-8+LO] // Update control var.
3351 | mov PC, TMP3
3352 |4:
3353 | ins_next
3354 |
3355 |5: // Traverse hash part.
3356 | ldr TMP2w, TAB:RB->hmask
3357 | ldr NODE:RB, TAB:RB->node
3358 |6:
3359 | add CARG1, RC, RC, lsl #1
3360 | cmp RC, TMP2 // End of iteration? Branch to ITERN+1.
3361 | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
3362 | bhi <4
3363 | ldp TMP0, CARG1, NODE:CARG3->val
3364 | cmp TMP0, TISNIL
3365 | add RC, RC, #1
3366 | beq <6 // Skip holes in hash part.
3367 | stp CARG1, TMP0, [RA]
3368 | add CARG1, RC, TMP1
3369 | b <3
3370 break;
3371
3372 case BC_ISNEXT:
3373 | // RA = base, RC = target (points to ITERN)
3374 | add RA, BASE, RA, lsl #3
3375 | ldr CFUNC:CARG1, [RA, #-24]
3376 | add RC, PC, RC, lsl #2
3377 | ldp TAB:CARG3, CARG4, [RA, #-16]
3378 | sub RC, RC, #0x20000
3379 | checkfunc CFUNC:CARG1, >5
3380 | asr TMP0, TAB:CARG3, #47
3381 | ldrb TMP1w, CFUNC:CARG1->ffid
3382 | cmn TMP0, #-LJ_TTAB
3383 | ccmp CARG4, TISNIL, #0, eq
3384 | ccmp TMP1w, #FF_next_N, #0, eq
3385 | bne >5
3386 | mov TMP0w, #0xfffe7fff
3387 | lsl TMP0, TMP0, #32
3388 | str TMP0, [RA, #-8] // Initialize control var.
3389 |1:
3390 | mov PC, RC
3391 | ins_next
3392 |
3393 |5: // Despecialize bytecode if any of the checks fail.
3394 | mov TMP0, #BC_JMP
3395 | mov TMP1, #BC_ITERC
3396 | strb TMP0w, [PC, #-4+OFS_OP]
3397 | strb TMP1w, [RC, # OFS_OP]
3398 | b <1
3399 break;
3400
3401 case BC_VARG:
3402 | decode_RB RB, INS
3403 | and RC, RC, #255
3404 | // RA = base, RB = (nresults+1), RC = numparams
3405 | ldr TMP1, [BASE, FRAME_PC]
3406 | add RC, BASE, RC, lsl #3
3407 | add RA, BASE, RA, lsl #3
3408 | add RC, RC, #FRAME_VARG
3409 | add TMP2, RA, RB, lsl #3
3410 | sub RC, RC, TMP1 // RC = vbase
3411 | // Note: RC may now be even _above_ BASE if nargs was < numparams.
3412 | sub TMP3, BASE, #16 // TMP3 = vtop
3413 | cbz RB, >5
3414 | sub TMP2, TMP2, #16
3415 |1: // Copy vararg slots to destination slots.
3416 | cmp RC, TMP3
3417 | ldr TMP0, [RC], #8
3418 | csel TMP0, TMP0, TISNIL, lo
3419 | cmp RA, TMP2
3420 | str TMP0, [RA], #8
3421 | blo <1
3422 |2:
3423 | ins_next
3424 |
3425 |5: // Copy all varargs.
3426 | ldr TMP0, L->maxstack
3427 | subs TMP2, TMP3, RC
3428 | csel RB, xzr, TMP2, le // MULTRES = (max(vtop-vbase,0)+1)*8
3429 | add RB, RB, #8
3430 | add TMP1, RA, TMP2
3431 | str RBw, SAVE_MULTRES
3432 | ble <2 // Nothing to copy.
3433 | cmp TMP1, TMP0
3434 | bhi >7
3435 |6:
3436 | ldr TMP0, [RC], #8
3437 | str TMP0, [RA], #8
3438 | cmp RC, TMP3
3439 | blo <6
3440 | b <2
3441 |
3442 |7: // Grow stack for varargs.
3443 | lsr CARG2, TMP2, #3
3444 | stp BASE, RA, L->base
3445 | mov CARG1, L
3446 | sub RC, RC, BASE // Need delta, because BASE may change.
3447 | str PC, SAVE_PC
3448 | bl extern lj_state_growstack // (lua_State *L, int n)
3449 | ldp BASE, RA, L->base
3450 | add RC, BASE, RC
3451 | sub TMP3, BASE, #16
3452 | b <6
3453 break;
3454
3455 /* -- Returns ----------------------------------------------------------- */
3456
3457 case BC_RETM:
3458 | // RA = results, RC = extra results
3459 | ldr TMP0w, SAVE_MULTRES
3460 | ldr PC, [BASE, FRAME_PC]
3461 | add RA, BASE, RA, lsl #3
3462 | add RC, TMP0, RC, lsl #3
3463 | b ->BC_RETM_Z
3464 break;
3465
3466 case BC_RET:
3467 | // RA = results, RC = nresults+1
3468 | ldr PC, [BASE, FRAME_PC]
3469 | lsl RC, RC, #3
3470 | add RA, BASE, RA, lsl #3
3471 |->BC_RETM_Z:
3472 | str RCw, SAVE_MULTRES
3473 |1:
3474 | ands CARG1, PC, #FRAME_TYPE
3475 | eor CARG2, PC, #FRAME_VARG
3476 | bne ->BC_RETV2_Z
3477 |
3478 |->BC_RET_Z:
3479 | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
3480 | ldr INSw, [PC, #-4]
3481 | subs TMP1, RC, #8
3482 | sub CARG3, BASE, #16
3483 | beq >3
3484 |2:
3485 | ldr TMP0, [RA], #8
3486 | add BASE, BASE, #8
3487 | sub TMP1, TMP1, #8
3488 | str TMP0, [BASE, #-24]
3489 | cbnz TMP1, <2
3490 |3:
3491 | decode_RA RA, INS
3492 | sub CARG4, CARG3, RA, lsl #3
3493 | decode_RB RB, INS
3494 | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
3495 |5:
3496 | cmp RC, RB, lsl #3 // More results expected?
3497 | blo >6
3498 | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
3499 | mov BASE, CARG4
3500 | ldr CARG2, LFUNC:CARG1->pc
3501 | ldr KBASE, [CARG2, #PC2PROTO(k)]
3502 | ins_next
3503 |
3504 |6: // Fill up results with nil.
3505 | add BASE, BASE, #8
3506 | add RC, RC, #8
3507 | str TISNIL, [BASE, #-24]
3508 | b <5
3509 |
3510 |->BC_RETV1_Z: // Non-standard return case.
3511 | add RA, BASE, RA, lsl #3
3512 |->BC_RETV2_Z:
3513 | tst CARG2, #FRAME_TYPEP
3514 | bne ->vm_return
3515 | // Return from vararg function: relocate BASE down.
3516 | sub BASE, BASE, CARG2
3517 | ldr PC, [BASE, FRAME_PC]
3518 | b <1
3519 break;
3520
3521 case BC_RET0: case BC_RET1:
3522 | // RA = results, RC = nresults+1
3523 | ldr PC, [BASE, FRAME_PC]
3524 | lsl RC, RC, #3
3525 | str RCw, SAVE_MULTRES
3526 | ands CARG1, PC, #FRAME_TYPE
3527 | eor CARG2, PC, #FRAME_VARG
3528 | bne ->BC_RETV1_Z
3529 | ldr INSw, [PC, #-4]
3530 if (op == BC_RET1) {
3531 | ldr TMP0, [BASE, RA, lsl #3]
3532 }
3533 | sub CARG4, BASE, #16
3534 | decode_RA RA, INS
3535 | sub BASE, CARG4, RA, lsl #3
3536 if (op == BC_RET1) {
3537 | str TMP0, [CARG4], #8
3538 }
3539 | decode_RB RB, INS
3540 | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
3541 |5:
3542 | cmp RC, RB, lsl #3
3543 | blo >6
3544 | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
3545 | ldr CARG2, LFUNC:CARG1->pc
3546 | ldr KBASE, [CARG2, #PC2PROTO(k)]
3547 | ins_next
3548 |
3549 |6: // Fill up results with nil.
3550 | add RC, RC, #8
3551 | str TISNIL, [CARG4], #8
3552 | b <5
3553 break;
3554
3555 /* -- Loops and branches ------------------------------------------------ */
3556
3557 |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
3558 |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
3559 |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
3560 |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
3561
3562 case BC_FORL:
3563 |.if JIT
3564 | hotloop
3565 |.endif
3566 | // Fall through. Assumes BC_IFORL follows.
3567 break;
3568
3569 case BC_JFORI:
3570 case BC_JFORL:
3571#if !LJ_HASJIT
3572 break;
3573#endif
3574 case BC_FORI:
3575 case BC_IFORL:
3576 | // RA = base, RC = target (after end of loop or start of loop)
3577 vk = (op == BC_IFORL || op == BC_JFORL);
3578 | add RA, BASE, RA, lsl #3
3579 | ldp CARG1, CARG2, FOR_IDX // CARG1 = IDX, CARG2 = STOP
3580 | ldr CARG3, FOR_STEP // CARG3 = STEP
3581 if (op != BC_JFORL) {
3582 | add RC, PC, RC, lsl #2
3583 | sub RC, RC, #0x20000
3584 }
3585 | checkint CARG1, >5
3586 if (!vk) {
3587 | checkint CARG2, ->vmeta_for
3588 | checkint CARG3, ->vmeta_for
3589 | tbnz CARG3w, #31, >4
3590 | cmp CARG1w, CARG2w
3591 } else {
3592 | adds CARG1w, CARG1w, CARG3w
3593 | bvs >2
3594 | add TMP0, CARG1, TISNUM
3595 | tbnz CARG3w, #31, >4
3596 | cmp CARG1w, CARG2w
3597 }
3598 |1:
3599 if (op == BC_FORI) {
3600 | csel PC, RC, PC, gt
3601 } else if (op == BC_JFORI) {
3602 | mov PC, RC
3603 | ldrh RCw, [RC, #-4+OFS_RD]
3604 } else if (op == BC_IFORL) {
3605 | csel PC, RC, PC, le
3606 }
3607 if (vk) {
3608 | str TMP0, FOR_IDX
3609 | str TMP0, FOR_EXT
3610 } else {
3611 | str CARG1, FOR_EXT
3612 }
3613 if (op == BC_JFORI || op == BC_JFORL) {
3614 | ble =>BC_JLOOP
3615 }
3616 |2:
3617 | ins_next
3618 |
3619 |4: // Invert check for negative step.
3620 | cmp CARG2w, CARG1w
3621 | b <1
3622 |
3623 |5: // FP loop.
3624 | ldp d0, d1, FOR_IDX
3625 | blo ->vmeta_for
3626 if (!vk) {
3627 | checknum CARG2, ->vmeta_for
3628 | checknum CARG3, ->vmeta_for
3629 | str d0, FOR_EXT
3630 } else {
3631 | ldr d2, FOR_STEP
3632 | fadd d0, d0, d2
3633 }
3634 | tbnz CARG3, #63, >7
3635 | fcmp d0, d1
3636 |6:
3637 if (vk) {
3638 | str d0, FOR_IDX
3639 | str d0, FOR_EXT
3640 }
3641 if (op == BC_FORI) {
3642 | csel PC, RC, PC, hi
3643 } else if (op == BC_JFORI) {
3644 | ldrh RCw, [RC, #-4+OFS_RD]
3645 | bls =>BC_JLOOP
3646 } else if (op == BC_IFORL) {
3647 | csel PC, RC, PC, ls
3648 } else {
3649 | bls =>BC_JLOOP
3650 }
3651 | b <2
3652 |
3653 |7: // Invert check for negative step.
3654 | fcmp d1, d0
3655 | b <6
3656 break;
3657
3658 case BC_ITERL:
3659 |.if JIT
3660 | hotloop
3661 |.endif
3662 | // Fall through. Assumes BC_IITERL follows.
3663 break;
3664
3665 case BC_JITERL:
3666#if !LJ_HASJIT
3667 break;
3668#endif
3669 case BC_IITERL:
3670 | // RA = base, RC = target
3671 | ldr CARG1, [BASE, RA, lsl #3]
3672 | add TMP1, BASE, RA, lsl #3
3673 | cmp CARG1, TISNIL
3674 | beq >1 // Stop if iterator returned nil.
3675 if (op == BC_JITERL) {
3676 | str CARG1, [TMP1, #-8]
3677 | b =>BC_JLOOP
3678 } else {
3679 | add TMP0, PC, RC, lsl #2 // Otherwise save control var + branch.
3680 | sub PC, TMP0, #0x20000
3681 | str CARG1, [TMP1, #-8]
3682 }
3683 |1:
3684 | ins_next
3685 break;
3686
3687 case BC_LOOP:
3688 | // RA = base, RC = target (loop extent)
3689 | // Note: RA/RC is only used by trace recorder to determine scope/extent
3690 | // This opcode does NOT jump, it's only purpose is to detect a hot loop.
3691 |.if JIT
3692 | hotloop
3693 |.endif
3694 | // Fall through. Assumes BC_ILOOP follows.
3695 break;
3696
3697 case BC_ILOOP:
3698 | // RA = base, RC = target (loop extent)
3699 | ins_next
3700 break;
3701
3702 case BC_JLOOP:
3703 |.if JIT
3704 | // RA = base (ignored), RC = traceno
3705 | ldr CARG1, [GL, #GL_J(trace)]
3706 | mov CARG2w, #0 // Traces on ARM64 don't store the trace #, so use 0.
3707 | ldr TRACE:RC, [CARG1, RC, lsl #3]
3708 | st_vmstate CARG2w
3709 | ldr RA, TRACE:RC->mcode
3710 | str BASE, GL->jit_base
3711 | str L, GL->tmpbuf.L
3712 | sub sp, sp, #16 // See SPS_FIXED. Avoids sp adjust in every root trace.
3713 | br RA
3714 |.endif
3715 break;
3716
3717 case BC_JMP:
3718 | // RA = base (only used by trace recorder), RC = target
3719 | add RC, PC, RC, lsl #2
3720 | sub PC, RC, #0x20000
3721 | ins_next
3722 break;
3723
3724 /* -- Function headers -------------------------------------------------- */
3725
3726 case BC_FUNCF:
3727 |.if JIT
3728 | hotcall
3729 |.endif
3730 case BC_FUNCV: /* NYI: compiled vararg functions. */
3731 | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
3732 break;
3733
3734 case BC_JFUNCF:
3735#if !LJ_HASJIT
3736 break;
3737#endif
3738 case BC_IFUNCF:
3739 | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
3740 | ldr CARG1, L->maxstack
3741 | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
3742 | ldr KBASE, [PC, #-4+PC2PROTO(k)]
3743 | cmp RA, CARG1
3744 | bhi ->vm_growstack_l
3745 |2:
3746 | cmp NARGS8:RC, TMP1, lsl #3 // Check for missing parameters.
3747 | blo >3
3748 if (op == BC_JFUNCF) {
3749 | decode_RD RC, INS
3750 | b =>BC_JLOOP
3751 } else {
3752 | ins_next
3753 }
3754 |
3755 |3: // Clear missing parameters.
3756 | str TISNIL, [BASE, NARGS8:RC]
3757 | add NARGS8:RC, NARGS8:RC, #8
3758 | b <2
3759 break;
3760
3761 case BC_JFUNCV:
3762#if !LJ_HASJIT
3763 break;
3764#endif
3765 | NYI // NYI: compiled vararg functions
3766 break; /* NYI: compiled vararg functions. */
3767
3768 case BC_IFUNCV:
3769 | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
3770 | ldr CARG1, L->maxstack
3771 | movn TMP0, #~LJ_TFUNC
3772 | add TMP2, BASE, RC
3773 | add LFUNC:CARG3, CARG3, TMP0, lsl #47
3774 | add RA, RA, RC
3775 | add TMP0, RC, #16+FRAME_VARG
3776 | str LFUNC:CARG3, [TMP2], #8 // Store (tagged) copy of LFUNC.
3777 | ldr KBASE, [PC, #-4+PC2PROTO(k)]
3778 | cmp RA, CARG1
3779 | str TMP0, [TMP2], #8 // Store delta + FRAME_VARG.
3780 | bhs ->vm_growstack_l
3781 | sub RC, TMP2, #16
3782 | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
3783 | mov RA, BASE
3784 | mov BASE, TMP2
3785 | cbz TMP1, >2
3786 |1:
3787 | cmp RA, RC // Less args than parameters?
3788 | bhs >3
3789 | ldr TMP0, [RA]
3790 | sub TMP1, TMP1, #1
3791 | str TISNIL, [RA], #8 // Clear old fixarg slot (help the GC).
3792 | str TMP0, [TMP2], #8
3793 | cbnz TMP1, <1
3794 |2:
3795 | ins_next
3796 |
3797 |3:
3798 | sub TMP1, TMP1, #1
3799 | str TISNIL, [TMP2], #8
3800 | cbz TMP1, <2
3801 | b <3
3802 break;
3803
3804 case BC_FUNCC:
3805 case BC_FUNCCW:
3806 | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
3807 if (op == BC_FUNCC) {
3808 | ldr CARG4, CFUNC:CARG3->f
3809 } else {
3810 | ldr CARG4, GL->wrapf
3811 }
3812 | add CARG2, RA, NARGS8:RC
3813 | ldr CARG1, L->maxstack
3814 | add RC, BASE, NARGS8:RC
3815 | cmp CARG2, CARG1
3816 | stp BASE, RC, L->base
3817 if (op == BC_FUNCCW) {
3818 | ldr CARG2, CFUNC:CARG3->f
3819 }
3820 | mv_vmstate TMP0w, C
3821 | mov CARG1, L
3822 | bhi ->vm_growstack_c // Need to grow stack.
3823 | st_vmstate TMP0w
3824 | blr CARG4 // (lua_State *L [, lua_CFunction f])
3825 | // Returns nresults.
3826 | ldp BASE, TMP1, L->base
3827 | str L, GL->cur_L
3828 | sbfiz RC, CRET1, #3, #32
3829 | st_vmstate ST_INTERP
3830 | ldr PC, [BASE, FRAME_PC]
3831 | sub RA, TMP1, RC // RA = L->top - nresults*8
3832 | b ->vm_returnc
3833 break;
3834
3835 /* ---------------------------------------------------------------------- */
3836
3837 default:
3838 fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
3839 exit(2);
3840 break;
3841 }
3842}
3843
3844static int build_backend(BuildCtx *ctx)
3845{
3846 int op;
3847
  /* Reserve one dynamic PC label per bytecode, so `=>BC_*` jump targets
  ** used by the instruction handlers can be resolved by DynASM.
  */
3848 dasm_growpc(Dst, BC__MAX);
3849
  /* Emit the shared VM subroutines (vm_* entry points, fast functions)
  ** before the per-opcode handlers.
  */
3850 build_subroutines(ctx);
3851
  /* Switch to the opcode code section and emit one handler per bytecode. */
3852 |.code_op
3853 for (op = 0; op < BC__MAX; op++)
3854 build_ins(ctx, (BCOp)op, op);
3855
  /* Return the number of emitted opcode handlers. */
3856 return BC__MAX;
3857}
3858
3859/* Emit pseudo frame-info for all assembler functions. */
3860static void emit_asm_debug(BuildCtx *ctx)
3861{
  /* fcofs = code offset where lj_vm_ffi_call begins: the main FDEs cover
  ** [0, fcofs) and, under LJ_HASFFI, a separate FDE covers the rest.
  ** cf = frame size in 8-byte slots, used for saved-register CFA offsets.
  */
3862 int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
3863 int i, cf = CFRAME_SIZE >> 3;
3864 switch (ctx->mode) {
3865 case BUILD_elfasm:
  /* --- .debug_frame: CFI for debuggers. CIE0 sets return-reg = lr (30)
  ** and CFA = sp (31) + 0. NOTE(review): exact bytes are DWARF CFI
  ** opcodes; do not reformat the strings.
  */
3866 fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
3867 fprintf(ctx->fp,
3868 ".Lframe0:\n"
3869 "\t.long .LECIE0-.LSCIE0\n"
3870 ".LSCIE0:\n"
3871 "\t.long 0xffffffff\n"
3872 "\t.byte 0x1\n"
3873 "\t.string \"\"\n"
3874 "\t.uleb128 0x1\n"
3875 "\t.sleb128 -8\n"
3876 "\t.byte 30\n" /* Return address is in lr. */
3877 "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */
3878 "\t.align 3\n"
3879 ".LECIE0:\n\n");
  /* FDE0: interpreter code from .Lbegin, length fcofs; CFA offset is the
  ** full CFRAME_SIZE, fp/lr saved at slots cf/cf-1.
  */
3880 fprintf(ctx->fp,
3881 ".LSFDE0:\n"
3882 "\t.long .LEFDE0-.LASFDE0\n"
3883 ".LASFDE0:\n"
3884 "\t.long .Lframe0\n"
3885 "\t.quad .Lbegin\n"
3886 "\t.quad %d\n"
3887 "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
3888 "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */
3889 "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */
3890 fcofs, CFRAME_SIZE, cf, cf-1);
  /* Callee-saved GPRs and FPRs the interpreter frame preserves. */
3891 for (i = 19; i <= 28; i++) /* offset x19-x28 */
3892 fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17);
3893 for (i = 8; i <= 15; i++) /* offset d8-d15 */
3894 fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
3895 64+i, cf-i-4);
3896 fprintf(ctx->fp,
3897 "\t.align 3\n"
3898 ".LEFDE0:\n\n");
3899#if LJ_HASFFI
  /* FDE1: lj_vm_ffi_call uses its own small fixed frame (CFA offset 32,
  ** fp/lr/x19 saved near the top), hence a separate descriptor.
  */
3900 fprintf(ctx->fp,
3901 ".LSFDE1:\n"
3902 "\t.long .LEFDE1-.LASFDE1\n"
3903 ".LASFDE1:\n"
3904 "\t.long .Lframe0\n"
3905 "\t.quad lj_vm_ffi_call\n"
3906 "\t.quad %d\n"
3907 "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */
3908 "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */
3909 "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */
3910 "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */
3911 "\t.align 3\n"
3912 ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
3913#endif
  /* --- .eh_frame: same frame info for runtime unwinding. CIE1 uses
  ** augmentation "zPR": personality routine lj_err_unwind_dwarf with
  ** pcrel|sdata4 pointer encoding.
  */
3914 fprintf(ctx->fp, "\t.section .eh_frame,\"a\",%%progbits\n");
3915 fprintf(ctx->fp,
3916 ".Lframe1:\n"
3917 "\t.long .LECIE1-.LSCIE1\n"
3918 ".LSCIE1:\n"
3919 "\t.long 0\n"
3920 "\t.byte 0x1\n"
3921 "\t.string \"zPR\"\n"
3922 "\t.uleb128 0x1\n"
3923 "\t.sleb128 -8\n"
3924 "\t.byte 30\n" /* Return address is in lr. */
3925 "\t.uleb128 6\n" /* augmentation length */
3926 "\t.byte 0x1b\n" /* pcrel|sdata4 */
3927 "\t.long lj_err_unwind_dwarf-.\n"
3928 "\t.byte 0x1b\n" /* pcrel|sdata4 */
3929 "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */
3930 "\t.align 3\n"
3931 ".LECIE1:\n\n");
  /* FDE2: .eh_frame twin of FDE0 — same interpreter frame layout. */
3932 fprintf(ctx->fp,
3933 ".LSFDE2:\n"
3934 "\t.long .LEFDE2-.LASFDE2\n"
3935 ".LASFDE2:\n"
3936 "\t.long .LASFDE2-.Lframe1\n"
3937 "\t.long .Lbegin-.\n"
3938 "\t.long %d\n"
3939 "\t.uleb128 0\n" /* augmentation length */
3940 "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
3941 "\t.byte 0x9d\n\t.uleb128 %d\n" /* offset fp */
3942 "\t.byte 0x9e\n\t.uleb128 %d\n", /* offset lr */
3943 fcofs, CFRAME_SIZE, cf, cf-1);
3944 for (i = 19; i <= 28; i++) /* offset x19-x28 */
3945 fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, cf-i+17);
3946 for (i = 8; i <= 15; i++) /* offset d8-d15 */
3947 fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
3948 64+i, cf-i-4);
3949 fprintf(ctx->fp,
3950 "\t.align 3\n"
3951 ".LEFDE2:\n\n");
3952#if LJ_HASFFI
  /* Minimal "zR" CIE (no personality) + FDE3 for lj_vm_ffi_call in
  ** .eh_frame, mirroring the .debug_frame FDE1 layout above.
  */
3953 fprintf(ctx->fp,
3954 ".Lframe2:\n"
3955 "\t.long .LECIE2-.LSCIE2\n"
3956 ".LSCIE2:\n"
3957 "\t.long 0\n"
3958 "\t.byte 0x1\n"
3959 "\t.string \"zR\"\n"
3960 "\t.uleb128 0x1\n"
3961 "\t.sleb128 -8\n"
3962 "\t.byte 30\n" /* Return address is in lr. */
3963 "\t.uleb128 1\n" /* augmentation length */
3964 "\t.byte 0x1b\n" /* pcrel|sdata4 */
3965 "\t.byte 0xc\n\t.uleb128 31\n\t.uleb128 0\n" /* def_cfa sp */
3966 "\t.align 3\n"
3967 ".LECIE2:\n\n");
3968 fprintf(ctx->fp,
3969 ".LSFDE3:\n"
3970 "\t.long .LEFDE3-.LASFDE3\n"
3971 ".LASFDE3:\n"
3972 "\t.long .LASFDE3-.Lframe2\n"
3973 "\t.long lj_vm_ffi_call-.\n"
3974 "\t.long %d\n"
3975 "\t.uleb128 0\n" /* augmentation length */
3976 "\t.byte 0xe\n\t.uleb128 32\n" /* def_cfa_offset */
3977 "\t.byte 0x9d\n\t.uleb128 4\n" /* offset fp */
3978 "\t.byte 0x9e\n\t.uleb128 3\n" /* offset lr */
3979 "\t.byte 0x93\n\t.uleb128 2\n" /* offset x19 */
3980 "\t.align 3\n"
3981 ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
3982#endif
3983 break;
  /* Non-ELF output modes get no unwind info from this generator. */
3984 default:
3985 break;
3986 }
3987}
3988
diff --git a/src/vm_mips.dasc b/src/vm_mips.dasc
index 6bbad37b..1cb94785 100644
--- a/src/vm_mips.dasc
+++ b/src/vm_mips.dasc
@@ -1,6 +1,9 @@
1|// Low-level VM code for MIPS CPUs. 1|// Low-level VM code for MIPS CPUs.
2|// Bytecode interpreter, fast functions and helper functions. 2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h 3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4|//
5|// MIPS soft-float support contributed by Djordje Kovacevic and
6|// Stefan Pejic from RT-RK.com, sponsored by Cisco Systems, Inc.
4| 7|
5|.arch mips 8|.arch mips
6|.section code_op, code_sub 9|.section code_op, code_sub
@@ -18,6 +21,12 @@
18|// Fixed register assignments for the interpreter. 21|// Fixed register assignments for the interpreter.
19|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra 22|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
20| 23|
24|.macro .FPU, a, b
25|.if FPU
26| a, b
27|.endif
28|.endmacro
29|
21|// The following must be C callee-save (but BASE is often refetched). 30|// The following must be C callee-save (but BASE is often refetched).
22|.define BASE, r16 // Base of current Lua stack frame. 31|.define BASE, r16 // Base of current Lua stack frame.
23|.define KBASE, r17 // Constants of current Lua function. 32|.define KBASE, r17 // Constants of current Lua function.
@@ -25,13 +34,15 @@
25|.define DISPATCH, r19 // Opcode dispatch table. 34|.define DISPATCH, r19 // Opcode dispatch table.
26|.define LREG, r20 // Register holding lua_State (also in SAVE_L). 35|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
27|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8. 36|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
28|// NYI: r22 currently unused.
29| 37|
30|.define JGL, r30 // On-trace: global_State + 32768. 38|.define JGL, r30 // On-trace: global_State + 32768.
31| 39|
32|// Constants for type-comparisons, stores and conversions. C callee-save. 40|// Constants for type-comparisons, stores and conversions. C callee-save.
41|.define TISNUM, r22
33|.define TISNIL, r30 42|.define TISNIL, r30
43|.if FPU
34|.define TOBIT, f30 // 2^52 + 2^51. 44|.define TOBIT, f30 // 2^52 + 2^51.
45|.endif
35| 46|
36|// The following temporaries are not saved across C calls, except for RA. 47|// The following temporaries are not saved across C calls, except for RA.
37|.define RA, r23 // Callee-save. 48|.define RA, r23 // Callee-save.
@@ -46,7 +57,7 @@
46|.define TMP2, r14 57|.define TMP2, r14
47|.define TMP3, r15 58|.define TMP3, r15
48| 59|
49|// Calling conventions. 60|// MIPS o32 calling convention.
50|.define CFUNCADDR, r25 61|.define CFUNCADDR, r25
51|.define CARG1, r4 62|.define CARG1, r4
52|.define CARG2, r5 63|.define CARG2, r5
@@ -56,13 +67,33 @@
56|.define CRET1, r2 67|.define CRET1, r2
57|.define CRET2, r3 68|.define CRET2, r3
58| 69|
70|.if ENDIAN_LE
71|.define SFRETLO, CRET1
72|.define SFRETHI, CRET2
73|.define SFARG1LO, CARG1
74|.define SFARG1HI, CARG2
75|.define SFARG2LO, CARG3
76|.define SFARG2HI, CARG4
77|.else
78|.define SFRETLO, CRET2
79|.define SFRETHI, CRET1
80|.define SFARG1LO, CARG2
81|.define SFARG1HI, CARG1
82|.define SFARG2LO, CARG4
83|.define SFARG2HI, CARG3
84|.endif
85|
86|.if FPU
59|.define FARG1, f12 87|.define FARG1, f12
60|.define FARG2, f14 88|.define FARG2, f14
61| 89|
62|.define FRET1, f0 90|.define FRET1, f0
63|.define FRET2, f2 91|.define FRET2, f2
92|.endif
64| 93|
65|// Stack layout while in interpreter. Must match with lj_frame.h. 94|// Stack layout while in interpreter. Must match with lj_frame.h.
95|.if FPU // MIPS32 hard-float.
96|
66|.define CFRAME_SPACE, 112 // Delta for sp. 97|.define CFRAME_SPACE, 112 // Delta for sp.
67| 98|
68|.define SAVE_ERRF, 124(sp) // 32 bit C frame info. 99|.define SAVE_ERRF, 124(sp) // 32 bit C frame info.
@@ -72,6 +103,20 @@
72|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter. 103|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
73|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves. 104|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves.
74|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves. 105|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves.
106|
107|.else // MIPS32 soft-float
108|
109|.define CFRAME_SPACE, 64 // Delta for sp.
110|
111|.define SAVE_ERRF, 76(sp) // 32 bit C frame info.
112|.define SAVE_NRES, 72(sp)
113|.define SAVE_CFRAME, 68(sp)
114|.define SAVE_L, 64(sp)
115|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
116|.define SAVE_GPR_, 24 // .. 24+10*4: 32 bit GPR saves.
117|
118|.endif
119|
75|.define SAVE_PC, 20(sp) 120|.define SAVE_PC, 20(sp)
76|.define ARG5, 16(sp) 121|.define ARG5, 16(sp)
77|.define CSAVE_4, 12(sp) 122|.define CSAVE_4, 12(sp)
@@ -83,43 +128,45 @@
83|.define ARG5_OFS, 16 128|.define ARG5_OFS, 16
84|.define SAVE_MULTRES, ARG5 129|.define SAVE_MULTRES, ARG5
85| 130|
131|//-----------------------------------------------------------------------
132|
86|.macro saveregs 133|.macro saveregs
87| addiu sp, sp, -CFRAME_SPACE 134| addiu sp, sp, -CFRAME_SPACE
88| sw ra, SAVE_GPR_+9*4(sp) 135| sw ra, SAVE_GPR_+9*4(sp)
89| sw r30, SAVE_GPR_+8*4(sp) 136| sw r30, SAVE_GPR_+8*4(sp)
90| sdc1 f30, SAVE_FPR_+5*8(sp) 137| .FPU sdc1 f30, SAVE_FPR_+5*8(sp)
91| sw r23, SAVE_GPR_+7*4(sp) 138| sw r23, SAVE_GPR_+7*4(sp)
92| sw r22, SAVE_GPR_+6*4(sp) 139| sw r22, SAVE_GPR_+6*4(sp)
93| sdc1 f28, SAVE_FPR_+4*8(sp) 140| .FPU sdc1 f28, SAVE_FPR_+4*8(sp)
94| sw r21, SAVE_GPR_+5*4(sp) 141| sw r21, SAVE_GPR_+5*4(sp)
95| sw r20, SAVE_GPR_+4*4(sp) 142| sw r20, SAVE_GPR_+4*4(sp)
96| sdc1 f26, SAVE_FPR_+3*8(sp) 143| .FPU sdc1 f26, SAVE_FPR_+3*8(sp)
97| sw r19, SAVE_GPR_+3*4(sp) 144| sw r19, SAVE_GPR_+3*4(sp)
98| sw r18, SAVE_GPR_+2*4(sp) 145| sw r18, SAVE_GPR_+2*4(sp)
99| sdc1 f24, SAVE_FPR_+2*8(sp) 146| .FPU sdc1 f24, SAVE_FPR_+2*8(sp)
100| sw r17, SAVE_GPR_+1*4(sp) 147| sw r17, SAVE_GPR_+1*4(sp)
101| sw r16, SAVE_GPR_+0*4(sp) 148| sw r16, SAVE_GPR_+0*4(sp)
102| sdc1 f22, SAVE_FPR_+1*8(sp) 149| .FPU sdc1 f22, SAVE_FPR_+1*8(sp)
103| sdc1 f20, SAVE_FPR_+0*8(sp) 150| .FPU sdc1 f20, SAVE_FPR_+0*8(sp)
104|.endmacro 151|.endmacro
105| 152|
106|.macro restoreregs_ret 153|.macro restoreregs_ret
107| lw ra, SAVE_GPR_+9*4(sp) 154| lw ra, SAVE_GPR_+9*4(sp)
108| lw r30, SAVE_GPR_+8*4(sp) 155| lw r30, SAVE_GPR_+8*4(sp)
109| ldc1 f30, SAVE_FPR_+5*8(sp) 156| .FPU ldc1 f30, SAVE_FPR_+5*8(sp)
110| lw r23, SAVE_GPR_+7*4(sp) 157| lw r23, SAVE_GPR_+7*4(sp)
111| lw r22, SAVE_GPR_+6*4(sp) 158| lw r22, SAVE_GPR_+6*4(sp)
112| ldc1 f28, SAVE_FPR_+4*8(sp) 159| .FPU ldc1 f28, SAVE_FPR_+4*8(sp)
113| lw r21, SAVE_GPR_+5*4(sp) 160| lw r21, SAVE_GPR_+5*4(sp)
114| lw r20, SAVE_GPR_+4*4(sp) 161| lw r20, SAVE_GPR_+4*4(sp)
115| ldc1 f26, SAVE_FPR_+3*8(sp) 162| .FPU ldc1 f26, SAVE_FPR_+3*8(sp)
116| lw r19, SAVE_GPR_+3*4(sp) 163| lw r19, SAVE_GPR_+3*4(sp)
117| lw r18, SAVE_GPR_+2*4(sp) 164| lw r18, SAVE_GPR_+2*4(sp)
118| ldc1 f24, SAVE_FPR_+2*8(sp) 165| .FPU ldc1 f24, SAVE_FPR_+2*8(sp)
119| lw r17, SAVE_GPR_+1*4(sp) 166| lw r17, SAVE_GPR_+1*4(sp)
120| lw r16, SAVE_GPR_+0*4(sp) 167| lw r16, SAVE_GPR_+0*4(sp)
121| ldc1 f22, SAVE_FPR_+1*8(sp) 168| .FPU ldc1 f22, SAVE_FPR_+1*8(sp)
122| ldc1 f20, SAVE_FPR_+0*8(sp) 169| .FPU ldc1 f20, SAVE_FPR_+0*8(sp)
123| jr ra 170| jr ra
124| addiu sp, sp, CFRAME_SPACE 171| addiu sp, sp, CFRAME_SPACE
125|.endmacro 172|.endmacro
@@ -138,6 +185,7 @@
138|.type NODE, Node 185|.type NODE, Node
139|.type NARGS8, int 186|.type NARGS8, int
140|.type TRACE, GCtrace 187|.type TRACE, GCtrace
188|.type SBUF, SBuf
141| 189|
142|//----------------------------------------------------------------------- 190|//-----------------------------------------------------------------------
143| 191|
@@ -152,13 +200,23 @@
152|//----------------------------------------------------------------------- 200|//-----------------------------------------------------------------------
153| 201|
154|// Endian-specific defines. 202|// Endian-specific defines.
155|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8) 203|.if ENDIAN_LE
156|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4) 204|.define FRAME_PC, -4
157|.define HI, LJ_ENDIAN_SELECT(4,0) 205|.define FRAME_FUNC, -8
158|.define LO, LJ_ENDIAN_SELECT(0,4) 206|.define HI, 4
159|.define OFS_RD, LJ_ENDIAN_SELECT(2,0) 207|.define LO, 0
160|.define OFS_RA, LJ_ENDIAN_SELECT(1,2) 208|.define OFS_RD, 2
161|.define OFS_OP, LJ_ENDIAN_SELECT(0,3) 209|.define OFS_RA, 1
210|.define OFS_OP, 0
211|.else
212|.define FRAME_PC, -8
213|.define FRAME_FUNC, -4
214|.define HI, 0
215|.define LO, 4
216|.define OFS_RD, 0
217|.define OFS_RA, 2
218|.define OFS_OP, 3
219|.endif
162| 220|
163|// Instruction decode. 221|// Instruction decode.
164|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro 222|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
@@ -353,9 +411,11 @@ static void build_subroutines(BuildCtx *ctx)
353 |. sll TMP2, TMP2, 3 411 |. sll TMP2, TMP2, 3
354 |1: 412 |1:
355 | addiu TMP1, TMP1, -8 413 | addiu TMP1, TMP1, -8
356 | ldc1 f0, 0(RA) 414 | lw SFRETHI, HI(RA)
415 | lw SFRETLO, LO(RA)
357 | addiu RA, RA, 8 416 | addiu RA, RA, 8
358 | sdc1 f0, 0(BASE) 417 | sw SFRETHI, HI(BASE)
418 | sw SFRETLO, LO(BASE)
359 | bnez TMP1, <1 419 | bnez TMP1, <1
360 |. addiu BASE, BASE, 8 420 |. addiu BASE, BASE, 8
361 | 421 |
@@ -424,15 +484,16 @@ static void build_subroutines(BuildCtx *ctx)
424 | and sp, CARG1, AT 484 | and sp, CARG1, AT
425 |->vm_unwind_ff_eh: // Landing pad for external unwinder. 485 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
426 | lw L, SAVE_L 486 | lw L, SAVE_L
427 | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 487 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
488 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
428 | li TISNIL, LJ_TNIL 489 | li TISNIL, LJ_TNIL
429 | lw BASE, L->base 490 | lw BASE, L->base
430 | lw DISPATCH, L->glref // Setup pointer to dispatch table. 491 | lw DISPATCH, L->glref // Setup pointer to dispatch table.
431 | mtc1 TMP3, TOBIT 492 | .FPU mtc1 TMP3, TOBIT
432 | li TMP1, LJ_TFALSE 493 | li TMP1, LJ_TFALSE
433 | li_vmstate INTERP 494 | li_vmstate INTERP
434 | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame. 495 | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame.
435 | cvt.d.s TOBIT, TOBIT 496 | .FPU cvt.d.s TOBIT, TOBIT
436 | addiu RA, BASE, -8 // Results start at BASE-8. 497 | addiu RA, BASE, -8 // Results start at BASE-8.
437 | addiu DISPATCH, DISPATCH, GG_G2DISP 498 | addiu DISPATCH, DISPATCH, GG_G2DISP
438 | sw TMP1, HI(RA) // Prepend false to error message. 499 | sw TMP1, HI(RA) // Prepend false to error message.
@@ -486,21 +547,23 @@ static void build_subroutines(BuildCtx *ctx)
486 | addiu DISPATCH, DISPATCH, GG_G2DISP 547 | addiu DISPATCH, DISPATCH, GG_G2DISP
487 | sw r0, SAVE_NRES 548 | sw r0, SAVE_NRES
488 | sw r0, SAVE_ERRF 549 | sw r0, SAVE_ERRF
489 | sw TMP0, L->cframe 550 | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
490 | sw r0, SAVE_CFRAME 551 | sw r0, SAVE_CFRAME
491 | beqz TMP1, >3 552 | beqz TMP1, >3
492 |. sw CARG1, SAVE_PC // Any value outside of bytecode is ok. 553 |. sw TMP0, L->cframe
493 | 554 |
494 | // Resume after yield (like a return). 555 | // Resume after yield (like a return).
556 | sw L, DISPATCH_GL(cur_L)(DISPATCH)
495 | move RA, BASE 557 | move RA, BASE
496 | lw BASE, L->base 558 | lw BASE, L->base
559 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
497 | lw TMP1, L->top 560 | lw TMP1, L->top
498 | lw PC, FRAME_PC(BASE) 561 | lw PC, FRAME_PC(BASE)
499 | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 562 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
500 | subu RD, TMP1, BASE 563 | subu RD, TMP1, BASE
501 | mtc1 TMP3, TOBIT 564 | .FPU mtc1 TMP3, TOBIT
502 | sb r0, L->status 565 | sb r0, L->status
503 | cvt.d.s TOBIT, TOBIT 566 | .FPU cvt.d.s TOBIT, TOBIT
504 | li_vmstate INTERP 567 | li_vmstate INTERP
505 | addiu RD, RD, 8 568 | addiu RD, RD, 8
506 | st_vmstate 569 | st_vmstate
@@ -525,25 +588,27 @@ static void build_subroutines(BuildCtx *ctx)
525 | 588 |
526 |1: // Entry point for vm_pcall above (PC = ftype). 589 |1: // Entry point for vm_pcall above (PC = ftype).
527 | lw TMP1, L:CARG1->cframe 590 | lw TMP1, L:CARG1->cframe
528 | sw CARG3, SAVE_NRES
529 | move L, CARG1 591 | move L, CARG1
530 | sw CARG1, SAVE_L 592 | sw CARG3, SAVE_NRES
531 | move BASE, CARG2
532 | sw sp, L->cframe // Add our C frame to cframe chain.
533 | lw DISPATCH, L->glref // Setup pointer to dispatch table. 593 | lw DISPATCH, L->glref // Setup pointer to dispatch table.
594 | sw CARG1, SAVE_L
595 | move BASE, CARG2
596 | addiu DISPATCH, DISPATCH, GG_G2DISP
534 | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. 597 | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
535 | sw TMP1, SAVE_CFRAME 598 | sw TMP1, SAVE_CFRAME
536 | addiu DISPATCH, DISPATCH, GG_G2DISP 599 | sw sp, L->cframe // Add our C frame to cframe chain.
537 | 600 |
538 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). 601 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
602 | sw L, DISPATCH_GL(cur_L)(DISPATCH)
539 | lw TMP2, L->base // TMP2 = old base (used in vmeta_call). 603 | lw TMP2, L->base // TMP2 = old base (used in vmeta_call).
540 | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 604 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
605 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
541 | lw TMP1, L->top 606 | lw TMP1, L->top
542 | mtc1 TMP3, TOBIT 607 | .FPU mtc1 TMP3, TOBIT
543 | addu PC, PC, BASE 608 | addu PC, PC, BASE
544 | subu NARGS8:RC, TMP1, BASE 609 | subu NARGS8:RC, TMP1, BASE
545 | subu PC, PC, TMP2 // PC = frame delta + frame type 610 | subu PC, PC, TMP2 // PC = frame delta + frame type
546 | cvt.d.s TOBIT, TOBIT 611 | .FPU cvt.d.s TOBIT, TOBIT
547 | li_vmstate INTERP 612 | li_vmstate INTERP
548 | li TISNIL, LJ_TNIL 613 | li TISNIL, LJ_TNIL
549 | st_vmstate 614 | st_vmstate
@@ -566,20 +631,21 @@ static void build_subroutines(BuildCtx *ctx)
566 | lw TMP0, L:CARG1->stack 631 | lw TMP0, L:CARG1->stack
567 | sw CARG1, SAVE_L 632 | sw CARG1, SAVE_L
568 | lw TMP1, L->top 633 | lw TMP1, L->top
634 | lw DISPATCH, L->glref // Setup pointer to dispatch table.
569 | sw CARG1, SAVE_PC // Any value outside of bytecode is ok. 635 | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
570 | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). 636 | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
571 | lw TMP1, L->cframe 637 | lw TMP1, L->cframe
572 | sw sp, L->cframe // Add our C frame to cframe chain. 638 | addiu DISPATCH, DISPATCH, GG_G2DISP
573 | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. 639 | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
574 | sw r0, SAVE_ERRF // No error function. 640 | sw r0, SAVE_ERRF // No error function.
575 | move CFUNCADDR, CARG4 641 | sw TMP1, SAVE_CFRAME
642 | sw sp, L->cframe // Add our C frame to cframe chain.
643 | sw L, DISPATCH_GL(cur_L)(DISPATCH)
576 | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud) 644 | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
577 |. sw TMP1, SAVE_CFRAME 645 |. move CFUNCADDR, CARG4
578 | move BASE, CRET1 646 | move BASE, CRET1
579 | lw DISPATCH, L->glref // Setup pointer to dispatch table.
580 | li PC, FRAME_CP
581 | bnez CRET1, <3 // Else continue with the call. 647 | bnez CRET1, <3 // Else continue with the call.
582 |. addiu DISPATCH, DISPATCH, GG_G2DISP 648 |. li PC, FRAME_CP
583 | b ->vm_leave_cp // No base? Just remove C frame. 649 | b ->vm_leave_cp // No base? Just remove C frame.
584 |. nop 650 |. nop
585 | 651 |
@@ -624,7 +690,8 @@ static void build_subroutines(BuildCtx *ctx)
624 |->cont_cat: // RA = resultptr, RB = meta base 690 |->cont_cat: // RA = resultptr, RB = meta base
625 | lw INS, -4(PC) 691 | lw INS, -4(PC)
626 | addiu CARG2, RB, -16 692 | addiu CARG2, RB, -16
627 | ldc1 f0, 0(RA) 693 | lw SFRETHI, HI(RA)
694 | lw SFRETLO, LO(RA)
628 | decode_RB8a MULTRES, INS 695 | decode_RB8a MULTRES, INS
629 | decode_RA8a RA, INS 696 | decode_RA8a RA, INS
630 | decode_RB8b MULTRES 697 | decode_RB8b MULTRES
@@ -632,11 +699,13 @@ static void build_subroutines(BuildCtx *ctx)
632 | addu TMP1, BASE, MULTRES 699 | addu TMP1, BASE, MULTRES
633 | sw BASE, L->base 700 | sw BASE, L->base
634 | subu CARG3, CARG2, TMP1 701 | subu CARG3, CARG2, TMP1
702 | sw SFRETHI, HI(CARG2)
635 | bne TMP1, CARG2, ->BC_CAT_Z 703 | bne TMP1, CARG2, ->BC_CAT_Z
636 |. sdc1 f0, 0(CARG2) 704 |. sw SFRETLO, LO(CARG2)
637 | addu RA, BASE, RA 705 | addu RA, BASE, RA
706 | sw SFRETHI, HI(RA)
638 | b ->cont_nop 707 | b ->cont_nop
639 |. sdc1 f0, 0(RA) 708 |. sw SFRETLO, LO(RA)
640 | 709 |
641 |//-- Table indexing metamethods ----------------------------------------- 710 |//-- Table indexing metamethods -----------------------------------------
642 | 711 |
@@ -659,10 +728,9 @@ static void build_subroutines(BuildCtx *ctx)
659 |. sw TMP1, HI(CARG3) 728 |. sw TMP1, HI(CARG3)
660 | 729 |
661 |->vmeta_tgetb: // TMP0 = index 730 |->vmeta_tgetb: // TMP0 = index
662 | mtc1 TMP0, f0
663 | cvt.d.w f0, f0
664 | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) 731 | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
665 | sdc1 f0, 0(CARG3) 732 | sw TMP0, LO(CARG3)
733 | sw TISNUM, HI(CARG3)
666 | 734 |
667 |->vmeta_tgetv: 735 |->vmeta_tgetv:
668 |1: 736 |1:
@@ -674,9 +742,11 @@ static void build_subroutines(BuildCtx *ctx)
674 | // Returns TValue * (finished) or NULL (metamethod). 742 | // Returns TValue * (finished) or NULL (metamethod).
675 | beqz CRET1, >3 743 | beqz CRET1, >3
676 |. addiu TMP1, BASE, -FRAME_CONT 744 |. addiu TMP1, BASE, -FRAME_CONT
677 | ldc1 f0, 0(CRET1) 745 | lw SFARG1HI, HI(CRET1)
746 | lw SFARG2HI, LO(CRET1)
678 | ins_next1 747 | ins_next1
679 | sdc1 f0, 0(RA) 748 | sw SFARG1HI, HI(RA)
749 | sw SFARG2HI, LO(RA)
680 | ins_next2 750 | ins_next2
681 | 751 |
682 |3: // Call __index metamethod. 752 |3: // Call __index metamethod.
@@ -688,6 +758,17 @@ static void build_subroutines(BuildCtx *ctx)
688 | b ->vm_call_dispatch_f 758 | b ->vm_call_dispatch_f
689 |. li NARGS8:RC, 16 // 2 args for func(t, k). 759 |. li NARGS8:RC, 16 // 2 args for func(t, k).
690 | 760 |
761 |->vmeta_tgetr:
762 | load_got lj_tab_getinth
763 | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
764 |. nop
765 | // Returns cTValue * or NULL.
766 | beqz CRET1, ->BC_TGETR_Z
767 |. move SFARG2HI, TISNIL
768 | lw SFARG2HI, HI(CRET1)
769 | b ->BC_TGETR_Z
770 |. lw SFARG2LO, LO(CRET1)
771 |
691 |//----------------------------------------------------------------------- 772 |//-----------------------------------------------------------------------
692 | 773 |
693 |->vmeta_tsets1: 774 |->vmeta_tsets1:
@@ -709,10 +790,9 @@ static void build_subroutines(BuildCtx *ctx)
709 |. sw TMP1, HI(CARG3) 790 |. sw TMP1, HI(CARG3)
710 | 791 |
711 |->vmeta_tsetb: // TMP0 = index 792 |->vmeta_tsetb: // TMP0 = index
712 | mtc1 TMP0, f0
713 | cvt.d.w f0, f0
714 | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) 793 | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
715 | sdc1 f0, 0(CARG3) 794 | sw TMP0, LO(CARG3)
795 | sw TISNUM, HI(CARG3)
716 | 796 |
717 |->vmeta_tsetv: 797 |->vmeta_tsetv:
718 |1: 798 |1:
@@ -722,11 +802,13 @@ static void build_subroutines(BuildCtx *ctx)
722 | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) 802 | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
723 |. move CARG1, L 803 |. move CARG1, L
724 | // Returns TValue * (finished) or NULL (metamethod). 804 | // Returns TValue * (finished) or NULL (metamethod).
805 | lw SFARG1HI, HI(RA)
725 | beqz CRET1, >3 806 | beqz CRET1, >3
726 |. ldc1 f0, 0(RA) 807 |. lw SFARG1LO, LO(RA)
727 | // NOBARRIER: lj_meta_tset ensures the table is not black. 808 | // NOBARRIER: lj_meta_tset ensures the table is not black.
728 | ins_next1 809 | ins_next1
729 | sdc1 f0, 0(CRET1) 810 | sw SFARG1HI, HI(CRET1)
811 | sw SFARG1LO, LO(CRET1)
730 | ins_next2 812 | ins_next2
731 | 813 |
732 |3: // Call __newindex metamethod. 814 |3: // Call __newindex metamethod.
@@ -736,14 +818,27 @@ static void build_subroutines(BuildCtx *ctx)
736 | sw PC, -16+HI(BASE) // [cont|PC] 818 | sw PC, -16+HI(BASE) // [cont|PC]
737 | subu PC, BASE, TMP1 819 | subu PC, BASE, TMP1
738 | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. 820 | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
739 | sdc1 f0, 16(BASE) // Copy value to third argument. 821 | sw SFARG1HI, 16+HI(BASE) // Copy value to third argument.
822 | sw SFARG1LO, 16+LO(BASE)
740 | b ->vm_call_dispatch_f 823 | b ->vm_call_dispatch_f
741 |. li NARGS8:RC, 24 // 3 args for func(t, k, v) 824 |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
742 | 825 |
826 |->vmeta_tsetr:
827 | load_got lj_tab_setinth
828 | sw BASE, L->base
829 | sw PC, SAVE_PC
830 | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
831 |. move CARG1, L
832 | // Returns TValue *.
833 | b ->BC_TSETR_Z
834 |. nop
835 |
743 |//-- Comparison metamethods --------------------------------------------- 836 |//-- Comparison metamethods ---------------------------------------------
744 | 837 |
745 |->vmeta_comp: 838 |->vmeta_comp:
746 | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT. 839 | // RA/RD point to o1/o2.
840 | move CARG2, RA
841 | move CARG3, RD
747 | load_got lj_meta_comp 842 | load_got lj_meta_comp
748 | addiu PC, PC, -4 843 | addiu PC, PC, -4
749 | sw BASE, L->base 844 | sw BASE, L->base
@@ -769,11 +864,13 @@ static void build_subroutines(BuildCtx *ctx)
769 | 864 |
770 |->cont_ra: // RA = resultptr 865 |->cont_ra: // RA = resultptr
771 | lbu TMP1, -4+OFS_RA(PC) 866 | lbu TMP1, -4+OFS_RA(PC)
772 | ldc1 f0, 0(RA) 867 | lw SFRETHI, HI(RA)
868 | lw SFRETLO, LO(RA)
773 | sll TMP1, TMP1, 3 869 | sll TMP1, TMP1, 3
774 | addu TMP1, BASE, TMP1 870 | addu TMP1, BASE, TMP1
871 | sw SFRETHI, HI(TMP1)
775 | b ->cont_nop 872 | b ->cont_nop
776 |. sdc1 f0, 0(TMP1) 873 |. sw SFRETLO, LO(TMP1)
777 | 874 |
778 |->cont_condt: // RA = resultptr 875 |->cont_condt: // RA = resultptr
779 | lw TMP0, HI(RA) 876 | lw TMP0, HI(RA)
@@ -788,8 +885,11 @@ static void build_subroutines(BuildCtx *ctx)
788 |. addiu TMP2, AT, -1 // Branch if result is false. 885 |. addiu TMP2, AT, -1 // Branch if result is false.
789 | 886 |
790 |->vmeta_equal: 887 |->vmeta_equal:
791 | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV. 888 | // SFARG1LO/SFARG2LO point to o1/o2. TMP0 is set to 0/1.
792 | load_got lj_meta_equal 889 | load_got lj_meta_equal
890 | move CARG2, SFARG1LO
891 | move CARG3, SFARG2LO
892 | move CARG4, TMP0
793 | addiu PC, PC, -4 893 | addiu PC, PC, -4
794 | sw BASE, L->base 894 | sw BASE, L->base
795 | sw PC, SAVE_PC 895 | sw PC, SAVE_PC
@@ -813,17 +913,31 @@ static void build_subroutines(BuildCtx *ctx)
813 |. nop 913 |. nop
814 |.endif 914 |.endif
815 | 915 |
916 |->vmeta_istype:
917 | load_got lj_meta_istype
918 | addiu PC, PC, -4
919 | sw BASE, L->base
920 | srl CARG2, RA, 3
921 | srl CARG3, RD, 3
922 | sw PC, SAVE_PC
923 | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
924 |. move CARG1, L
925 | b ->cont_nop
926 |. nop
927 |
816 |//-- Arithmetic metamethods --------------------------------------------- 928 |//-- Arithmetic metamethods ---------------------------------------------
817 | 929 |
818 |->vmeta_unm: 930 |->vmeta_unm:
819 | move CARG4, CARG3 931 | move RC, RB
820 | 932 |
821 |->vmeta_arith: 933 |->vmeta_arith:
822 | load_got lj_meta_arith 934 | load_got lj_meta_arith
823 | decode_OP1 TMP0, INS 935 | decode_OP1 TMP0, INS
824 | sw BASE, L->base 936 | sw BASE, L->base
825 | sw PC, SAVE_PC
826 | move CARG2, RA 937 | move CARG2, RA
938 | sw PC, SAVE_PC
939 | move CARG3, RB
940 | move CARG4, RC
827 | sw TMP0, ARG5 941 | sw TMP0, ARG5
828 | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op) 942 | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
829 |. move CARG1, L 943 |. move CARG1, L
@@ -931,40 +1045,52 @@ static void build_subroutines(BuildCtx *ctx)
931 | 1045 |
932 |.macro .ffunc_1, name 1046 |.macro .ffunc_1, name
933 |->ff_ .. name: 1047 |->ff_ .. name:
1048 | lw SFARG1HI, HI(BASE)
934 | beqz NARGS8:RC, ->fff_fallback 1049 | beqz NARGS8:RC, ->fff_fallback
935 |. lw CARG3, HI(BASE) 1050 |. lw SFARG1LO, LO(BASE)
936 | lw CARG1, LO(BASE)
937 |.endmacro 1051 |.endmacro
938 | 1052 |
939 |.macro .ffunc_2, name 1053 |.macro .ffunc_2, name
940 |->ff_ .. name: 1054 |->ff_ .. name:
941 | sltiu AT, NARGS8:RC, 16 1055 | sltiu AT, NARGS8:RC, 16
942 | lw CARG3, HI(BASE) 1056 | lw SFARG1HI, HI(BASE)
943 | bnez AT, ->fff_fallback 1057 | bnez AT, ->fff_fallback
944 |. lw CARG4, 8+HI(BASE) 1058 |. lw SFARG2HI, 8+HI(BASE)
945 | lw CARG1, LO(BASE) 1059 | lw SFARG1LO, LO(BASE)
946 | lw CARG2, 8+LO(BASE) 1060 | lw SFARG2LO, 8+LO(BASE)
947 |.endmacro 1061 |.endmacro
948 | 1062 |
949 |.macro .ffunc_n, name // Caveat: has delay slot! 1063 |.macro .ffunc_n, name // Caveat: has delay slot!
950 |->ff_ .. name: 1064 |->ff_ .. name:
951 | lw CARG3, HI(BASE) 1065 | lw SFARG1HI, HI(BASE)
1066 |.if FPU
1067 | ldc1 FARG1, 0(BASE)
1068 |.else
1069 | lw SFARG1LO, LO(BASE)
1070 |.endif
952 | beqz NARGS8:RC, ->fff_fallback 1071 | beqz NARGS8:RC, ->fff_fallback
953 |. ldc1 FARG1, 0(BASE) 1072 |. sltiu AT, SFARG1HI, LJ_TISNUM
954 | sltiu AT, CARG3, LJ_TISNUM
955 | beqz AT, ->fff_fallback 1073 | beqz AT, ->fff_fallback
956 |.endmacro 1074 |.endmacro
957 | 1075 |
958 |.macro .ffunc_nn, name // Caveat: has delay slot! 1076 |.macro .ffunc_nn, name // Caveat: has delay slot!
959 |->ff_ .. name: 1077 |->ff_ .. name:
960 | sltiu AT, NARGS8:RC, 16 1078 | sltiu AT, NARGS8:RC, 16
961 | lw CARG3, HI(BASE) 1079 | lw SFARG1HI, HI(BASE)
962 | bnez AT, ->fff_fallback 1080 | bnez AT, ->fff_fallback
963 |. lw CARG4, 8+HI(BASE) 1081 |. lw SFARG2HI, 8+HI(BASE)
964 | ldc1 FARG1, 0(BASE) 1082 | sltiu TMP0, SFARG1HI, LJ_TISNUM
965 | ldc1 FARG2, 8(BASE) 1083 |.if FPU
966 | sltiu TMP0, CARG3, LJ_TISNUM 1084 | ldc1 FARG1, 0(BASE)
967 | sltiu TMP1, CARG4, LJ_TISNUM 1085 |.else
1086 | lw SFARG1LO, LO(BASE)
1087 |.endif
1088 | sltiu TMP1, SFARG2HI, LJ_TISNUM
1089 |.if FPU
1090 | ldc1 FARG2, 8(BASE)
1091 |.else
1092 | lw SFARG2LO, 8+LO(BASE)
1093 |.endif
968 | and TMP0, TMP0, TMP1 1094 | and TMP0, TMP0, TMP1
969 | beqz TMP0, ->fff_fallback 1095 | beqz TMP0, ->fff_fallback
970 |.endmacro 1096 |.endmacro
@@ -980,53 +1106,55 @@ static void build_subroutines(BuildCtx *ctx)
980 |//-- Base library: checks ----------------------------------------------- 1106 |//-- Base library: checks -----------------------------------------------
981 | 1107 |
982 |.ffunc_1 assert 1108 |.ffunc_1 assert
983 | sltiu AT, CARG3, LJ_TISTRUECOND 1109 | sltiu AT, SFARG1HI, LJ_TISTRUECOND
984 | beqz AT, ->fff_fallback 1110 | beqz AT, ->fff_fallback
985 |. addiu RA, BASE, -8 1111 |. addiu RA, BASE, -8
986 | lw PC, FRAME_PC(BASE) 1112 | lw PC, FRAME_PC(BASE)
987 | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8. 1113 | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
988 | addu TMP2, RA, NARGS8:RC 1114 | addu TMP2, RA, NARGS8:RC
989 | sw CARG3, HI(RA) 1115 | sw SFARG1HI, HI(RA)
990 | addiu TMP1, BASE, 8 1116 | addiu TMP1, BASE, 8
991 | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument. 1117 | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
992 |. sw CARG1, LO(RA) 1118 |. sw SFARG1LO, LO(RA)
993 |1: 1119 |1:
994 | ldc1 f0, 0(TMP1) 1120 | lw SFRETHI, HI(TMP1)
995 | sdc1 f0, -8(TMP1) 1121 | lw SFRETLO, LO(TMP1)
1122 | sw SFRETHI, -8+HI(TMP1)
1123 | sw SFRETLO, -8+LO(TMP1)
996 | bne TMP1, TMP2, <1 1124 | bne TMP1, TMP2, <1
997 |. addiu TMP1, TMP1, 8 1125 |. addiu TMP1, TMP1, 8
998 | b ->fff_res 1126 | b ->fff_res
999 |. nop 1127 |. nop
1000 | 1128 |
1001 |.ffunc type 1129 |.ffunc type
1002 | lw CARG3, HI(BASE) 1130 | lw SFARG1HI, HI(BASE)
1003 | li TMP1, LJ_TISNUM
1004 | beqz NARGS8:RC, ->fff_fallback 1131 | beqz NARGS8:RC, ->fff_fallback
1005 |. sltiu TMP0, CARG3, LJ_TISNUM 1132 |. sltiu TMP0, SFARG1HI, LJ_TISNUM
1006 | movz TMP1, CARG3, TMP0 1133 | movn SFARG1HI, TISNUM, TMP0
1007 | not TMP1, TMP1 1134 | not TMP1, SFARG1HI
1008 | sll TMP1, TMP1, 3 1135 | sll TMP1, TMP1, 3
1009 | addu TMP1, CFUNC:RB, TMP1 1136 | addu TMP1, CFUNC:RB, TMP1
1010 | b ->fff_resn 1137 | lw SFARG1HI, CFUNC:TMP1->upvalue[0].u32.hi
1011 |. ldc1 FRET1, CFUNC:TMP1->upvalue 1138 | b ->fff_restv
1139 |. lw SFARG1LO, CFUNC:TMP1->upvalue[0].u32.lo
1012 | 1140 |
1013 |//-- Base library: getters and setters --------------------------------- 1141 |//-- Base library: getters and setters ---------------------------------
1014 | 1142 |
1015 |.ffunc_1 getmetatable 1143 |.ffunc_1 getmetatable
1016 | li AT, LJ_TTAB 1144 | li AT, LJ_TTAB
1017 | bne CARG3, AT, >6 1145 | bne SFARG1HI, AT, >6
1018 |. li AT, LJ_TUDATA 1146 |. li AT, LJ_TUDATA
1019 |1: // Field metatable must be at same offset for GCtab and GCudata! 1147 |1: // Field metatable must be at same offset for GCtab and GCudata!
1020 | lw TAB:CARG1, TAB:CARG1->metatable 1148 | lw TAB:SFARG1LO, TAB:SFARG1LO->metatable
1021 |2: 1149 |2:
1022 | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH) 1150 | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
1023 | beqz TAB:CARG1, ->fff_restv 1151 | beqz TAB:SFARG1LO, ->fff_restv
1024 |. li CARG3, LJ_TNIL 1152 |. li SFARG1HI, LJ_TNIL
1025 | lw TMP0, TAB:CARG1->hmask 1153 | lw TMP0, TAB:SFARG1LO->hmask
1026 | li CARG3, LJ_TTAB // Use metatable as default result. 1154 | li SFARG1HI, LJ_TTAB // Use metatable as default result.
1027 | lw TMP1, STR:RC->hash 1155 | lw TMP1, STR:RC->sid
1028 | lw NODE:TMP2, TAB:CARG1->node 1156 | lw NODE:TMP2, TAB:SFARG1LO->node
1029 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 1157 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
1030 | sll TMP0, TMP1, 5 1158 | sll TMP0, TMP1, 5
1031 | sll TMP1, TMP1, 3 1159 | sll TMP1, TMP1, 3
1032 | subu TMP1, TMP0, TMP1 1160 | subu TMP1, TMP0, TMP1
@@ -1037,7 +1165,7 @@ static void build_subroutines(BuildCtx *ctx)
1037 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) 1165 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
1038 | lw NODE:TMP3, NODE:TMP2->next 1166 | lw NODE:TMP3, NODE:TMP2->next
1039 | bne CARG4, AT, >4 1167 | bne CARG4, AT, >4
1040 |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) 1168 |. lw CARG3, offsetof(Node, val)+HI(NODE:TMP2)
1041 | beq TMP0, STR:RC, >5 1169 | beq TMP0, STR:RC, >5
1042 |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2) 1170 |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2)
1043 |4: 1171 |4:
@@ -1046,36 +1174,35 @@ static void build_subroutines(BuildCtx *ctx)
1046 | b <3 1174 | b <3
1047 |. nop 1175 |. nop
1048 |5: 1176 |5:
1049 | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value. 1177 | beq CARG3, TISNIL, ->fff_restv // Ditto for nil value.
1050 |. nop 1178 |. nop
1051 | move CARG3, CARG2 // Return value of mt.__metatable. 1179 | move SFARG1HI, CARG3 // Return value of mt.__metatable.
1052 | b ->fff_restv 1180 | b ->fff_restv
1053 |. move CARG1, TMP1 1181 |. move SFARG1LO, TMP1
1054 | 1182 |
1055 |6: 1183 |6:
1056 | beq CARG3, AT, <1 1184 | beq SFARG1HI, AT, <1
1057 |. sltiu TMP0, CARG3, LJ_TISNUM 1185 |. sltu AT, TISNUM, SFARG1HI
1058 | li TMP1, LJ_TISNUM 1186 | movz SFARG1HI, TISNUM, AT
1059 | movz TMP1, CARG3, TMP0 1187 | not TMP1, SFARG1HI
1060 | not TMP1, TMP1
1061 | sll TMP1, TMP1, 2 1188 | sll TMP1, TMP1, 2
1062 | addu TMP1, DISPATCH, TMP1 1189 | addu TMP1, DISPATCH, TMP1
1063 | b <2 1190 | b <2
1064 |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1) 1191 |. lw TAB:SFARG1LO, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1)
1065 | 1192 |
1066 |.ffunc_2 setmetatable 1193 |.ffunc_2 setmetatable
1067 | // Fast path: no mt for table yet and not clearing the mt. 1194 | // Fast path: no mt for table yet and not clearing the mt.
1068 | li AT, LJ_TTAB 1195 | li AT, LJ_TTAB
1069 | bne CARG3, AT, ->fff_fallback 1196 | bne SFARG1HI, AT, ->fff_fallback
1070 |. addiu CARG4, CARG4, -LJ_TTAB 1197 |. addiu SFARG2HI, SFARG2HI, -LJ_TTAB
1071 | lw TAB:TMP1, TAB:CARG1->metatable 1198 | lw TAB:TMP1, TAB:SFARG1LO->metatable
1072 | lbu TMP3, TAB:CARG1->marked 1199 | lbu TMP3, TAB:SFARG1LO->marked
1073 | or AT, CARG4, TAB:TMP1 1200 | or AT, SFARG2HI, TAB:TMP1
1074 | bnez AT, ->fff_fallback 1201 | bnez AT, ->fff_fallback
1075 |. andi AT, TMP3, LJ_GC_BLACK // isblack(table) 1202 |. andi AT, TMP3, LJ_GC_BLACK // isblack(table)
1076 | beqz AT, ->fff_restv 1203 | beqz AT, ->fff_restv
1077 |. sw TAB:CARG2, TAB:CARG1->metatable 1204 |. sw TAB:SFARG2LO, TAB:SFARG1LO->metatable
1078 | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv 1205 | barrierback TAB:SFARG1LO, TMP3, TMP0, ->fff_restv
1079 | 1206 |
1080 |.ffunc rawget 1207 |.ffunc rawget
1081 | lw CARG4, HI(BASE) 1208 | lw CARG4, HI(BASE)
@@ -1089,44 +1216,44 @@ static void build_subroutines(BuildCtx *ctx)
1089 | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) 1216 | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1090 |. move CARG1, L 1217 |. move CARG1, L
1091 | // Returns cTValue *. 1218 | // Returns cTValue *.
1092 | b ->fff_resn 1219 | lw SFARG1HI, HI(CRET1)
1093 |. ldc1 FRET1, 0(CRET1) 1220 | b ->fff_restv
1221 |. lw SFARG1LO, LO(CRET1)
1094 | 1222 |
1095 |//-- Base library: conversions ------------------------------------------ 1223 |//-- Base library: conversions ------------------------------------------
1096 | 1224 |
1097 |.ffunc tonumber 1225 |.ffunc tonumber
1098 | // Only handles the number case inline (without a base argument). 1226 | // Only handles the number case inline (without a base argument).
1099 | lw CARG1, HI(BASE) 1227 | lw CARG1, HI(BASE)
1100 | xori AT, NARGS8:RC, 8 1228 | xori AT, NARGS8:RC, 8 // Exactly one number argument.
1101 | sltiu CARG1, CARG1, LJ_TISNUM 1229 | sltu TMP0, TISNUM, CARG1
1102 | movn CARG1, r0, AT 1230 | or AT, AT, TMP0
1103 | beqz CARG1, ->fff_fallback // Exactly one number argument. 1231 | bnez AT, ->fff_fallback
1104 |. ldc1 FRET1, 0(BASE) 1232 |. lw SFARG1HI, HI(BASE)
1105 | b ->fff_resn 1233 | b ->fff_restv
1106 |. nop 1234 |. lw SFARG1LO, LO(BASE)
1107 | 1235 |
1108 |.ffunc_1 tostring 1236 |.ffunc_1 tostring
1109 | // Only handles the string or number case inline. 1237 | // Only handles the string or number case inline.
1110 | li AT, LJ_TSTR 1238 | li AT, LJ_TSTR
1111 | // A __tostring method in the string base metatable is ignored. 1239 | // A __tostring method in the string base metatable is ignored.
1112 | beq CARG3, AT, ->fff_restv // String key? 1240 | beq SFARG1HI, AT, ->fff_restv // String key?
1113 | // Handle numbers inline, unless a number base metatable is present. 1241 | // Handle numbers inline, unless a number base metatable is present.
1114 |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH) 1242 |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
1115 | sltiu TMP0, CARG3, LJ_TISNUM 1243 | sltu TMP0, TISNUM, SFARG1HI
1116 | sltiu TMP1, TMP1, 1 1244 | or TMP0, TMP0, TMP1
1117 | and TMP0, TMP0, TMP1 1245 | bnez TMP0, ->fff_fallback
1118 | beqz TMP0, ->fff_fallback
1119 |. sw BASE, L->base // Add frame since C call can throw. 1246 |. sw BASE, L->base // Add frame since C call can throw.
1120 | ffgccheck 1247 | ffgccheck
1121 |. sw PC, SAVE_PC // Redundant (but a defined value). 1248 |. sw PC, SAVE_PC // Redundant (but a defined value).
1122 | load_got lj_str_fromnum 1249 | load_got lj_strfmt_number
1123 | move CARG1, L 1250 | move CARG1, L
1124 | call_intern lj_str_fromnum // (lua_State *L, lua_Number *np) 1251 | call_intern lj_strfmt_number // (lua_State *L, cTValue *o)
1125 |. move CARG2, BASE 1252 |. move CARG2, BASE
1126 | // Returns GCstr *. 1253 | // Returns GCstr *.
1127 | li CARG3, LJ_TSTR 1254 | li SFARG1HI, LJ_TSTR
1128 | b ->fff_restv 1255 | b ->fff_restv
1129 |. move CARG1, CRET1 1256 |. move SFARG1LO, CRET1
1130 | 1257 |
1131 |//-- Base library: iterators ------------------------------------------- 1258 |//-- Base library: iterators -------------------------------------------
1132 | 1259 |
@@ -1148,31 +1275,38 @@ static void build_subroutines(BuildCtx *ctx)
1148 |. move CARG1, L 1275 |. move CARG1, L
1149 | // Returns 0 at end of traversal. 1276 | // Returns 0 at end of traversal.
1150 | beqz CRET1, ->fff_restv // End of traversal: return nil. 1277 | beqz CRET1, ->fff_restv // End of traversal: return nil.
1151 |. li CARG3, LJ_TNIL 1278 |. li SFARG1HI, LJ_TNIL
1152 | ldc1 f0, 8(BASE) // Copy key and value to results. 1279 | lw TMP0, 8+HI(BASE)
1280 | lw TMP1, 8+LO(BASE)
1153 | addiu RA, BASE, -8 1281 | addiu RA, BASE, -8
1154 | ldc1 f2, 16(BASE) 1282 | lw TMP2, 16+HI(BASE)
1155 | li RD, (2+1)*8 1283 | lw TMP3, 16+LO(BASE)
1156 | sdc1 f0, 0(RA) 1284 | sw TMP0, HI(RA)
1285 | sw TMP1, LO(RA)
1286 | sw TMP2, 8+HI(RA)
1287 | sw TMP3, 8+LO(RA)
1157 | b ->fff_res 1288 | b ->fff_res
1158 |. sdc1 f2, 8(RA) 1289 |. li RD, (2+1)*8
1159 | 1290 |
1160 |.ffunc_1 pairs 1291 |.ffunc_1 pairs
1161 | li AT, LJ_TTAB 1292 | li AT, LJ_TTAB
1162 | bne CARG3, AT, ->fff_fallback 1293 | bne SFARG1HI, AT, ->fff_fallback
1163 |. lw PC, FRAME_PC(BASE) 1294 |. lw PC, FRAME_PC(BASE)
1164#if LJ_52 1295#if LJ_52
1165 | lw TAB:TMP2, TAB:CARG1->metatable 1296 | lw TAB:TMP2, TAB:SFARG1LO->metatable
1166 | ldc1 f0, CFUNC:RB->upvalue[0] 1297 | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
1298 | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
1167 | bnez TAB:TMP2, ->fff_fallback 1299 | bnez TAB:TMP2, ->fff_fallback
1168#else 1300#else
1169 | ldc1 f0, CFUNC:RB->upvalue[0] 1301 | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
1302 | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
1170#endif 1303#endif
1171 |. addiu RA, BASE, -8 1304 |. addiu RA, BASE, -8
1172 | sw TISNIL, 8+HI(BASE) 1305 | sw TISNIL, 8+HI(BASE)
1173 | li RD, (3+1)*8 1306 | sw TMP0, HI(RA)
1307 | sw TMP1, LO(RA)
1174 | b ->fff_res 1308 | b ->fff_res
1175 |. sdc1 f0, 0(RA) 1309 |. li RD, (3+1)*8
1176 | 1310 |
1177 |.ffunc ipairs_aux 1311 |.ffunc ipairs_aux
1178 | sltiu AT, NARGS8:RC, 16 1312 | sltiu AT, NARGS8:RC, 16
@@ -1180,35 +1314,32 @@ static void build_subroutines(BuildCtx *ctx)
1180 | lw TAB:CARG1, LO(BASE) 1314 | lw TAB:CARG1, LO(BASE)
1181 | lw CARG4, 8+HI(BASE) 1315 | lw CARG4, 8+HI(BASE)
1182 | bnez AT, ->fff_fallback 1316 | bnez AT, ->fff_fallback
1183 |. ldc1 FARG2, 8(BASE) 1317 |. addiu CARG3, CARG3, -LJ_TTAB
1184 | addiu CARG3, CARG3, -LJ_TTAB 1318 | xor CARG4, CARG4, TISNUM
1185 | sltiu AT, CARG4, LJ_TISNUM 1319 | and AT, CARG3, CARG4
1186 | li TMP0, 1 1320 | bnez AT, ->fff_fallback
1187 | movn AT, r0, CARG3
1188 | mtc1 TMP0, FARG1
1189 | beqz AT, ->fff_fallback
1190 |. lw PC, FRAME_PC(BASE) 1321 |. lw PC, FRAME_PC(BASE)
1191 | cvt.w.d FRET1, FARG2 1322 | lw TMP2, 8+LO(BASE)
1192 | cvt.d.w FARG1, FARG1
1193 | lw TMP0, TAB:CARG1->asize 1323 | lw TMP0, TAB:CARG1->asize
1194 | lw TMP1, TAB:CARG1->array 1324 | lw TMP1, TAB:CARG1->array
1195 | mfc1 TMP2, FRET1
1196 | addiu RA, BASE, -8
1197 | add.d FARG2, FARG2, FARG1
1198 | addiu TMP2, TMP2, 1 1325 | addiu TMP2, TMP2, 1
1326 | sw TISNUM, -8+HI(BASE)
1199 | sltu AT, TMP2, TMP0 1327 | sltu AT, TMP2, TMP0
1328 | sw TMP2, -8+LO(BASE)
1329 | beqz AT, >2 // Not in array part?
1330 |. addiu RA, BASE, -8
1200 | sll TMP3, TMP2, 3 1331 | sll TMP3, TMP2, 3
1201 | addu TMP3, TMP1, TMP3 1332 | addu TMP3, TMP1, TMP3
1202 | beqz AT, >2 // Not in array part? 1333 | lw TMP1, HI(TMP3)
1203 |. sdc1 FARG2, 0(RA) 1334 | lw TMP2, LO(TMP3)
1204 | lw TMP2, HI(TMP3)
1205 | ldc1 f0, 0(TMP3)
1206 |1: 1335 |1:
1207 | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results. 1336 | beq TMP1, TISNIL, ->fff_res // End of iteration, return 0 results.
1208 |. li RD, (0+1)*8 1337 |. li RD, (0+1)*8
1209 | li RD, (2+1)*8 1338 | sw TMP1, 8+HI(RA)
1339 | sw TMP2, 8+LO(RA)
1210 | b ->fff_res 1340 | b ->fff_res
1211 |. sdc1 f0, 8(RA) 1341 |. li RD, (2+1)*8
1342 |
1212 |2: // Check for empty hash part first. Otherwise call C function. 1343 |2: // Check for empty hash part first. Otherwise call C function.
1213 | lw TMP0, TAB:CARG1->hmask 1344 | lw TMP0, TAB:CARG1->hmask
1214 | load_got lj_tab_getinth 1345 | load_got lj_tab_getinth
@@ -1219,27 +1350,30 @@ static void build_subroutines(BuildCtx *ctx)
1219 | // Returns cTValue * or NULL. 1350 | // Returns cTValue * or NULL.
1220 | beqz CRET1, ->fff_res 1351 | beqz CRET1, ->fff_res
1221 |. li RD, (0+1)*8 1352 |. li RD, (0+1)*8
1222 | lw TMP2, HI(CRET1) 1353 | lw TMP1, HI(CRET1)
1223 | b <1 1354 | b <1
1224 |. ldc1 f0, 0(CRET1) 1355 |. lw TMP2, LO(CRET1)
1225 | 1356 |
1226 |.ffunc_1 ipairs 1357 |.ffunc_1 ipairs
1227 | li AT, LJ_TTAB 1358 | li AT, LJ_TTAB
1228 | bne CARG3, AT, ->fff_fallback 1359 | bne SFARG1HI, AT, ->fff_fallback
1229 |. lw PC, FRAME_PC(BASE) 1360 |. lw PC, FRAME_PC(BASE)
1230#if LJ_52 1361#if LJ_52
1231 | lw TAB:TMP2, TAB:CARG1->metatable 1362 | lw TAB:TMP2, TAB:SFARG1LO->metatable
1232 | ldc1 f0, CFUNC:RB->upvalue[0] 1363 | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
1364 | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
1233 | bnez TAB:TMP2, ->fff_fallback 1365 | bnez TAB:TMP2, ->fff_fallback
1234#else 1366#else
1235 | ldc1 f0, CFUNC:RB->upvalue[0] 1367 | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
1368 | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
1236#endif 1369#endif
1237 |. addiu RA, BASE, -8 1370 |. addiu RA, BASE, -8
1238 | sw r0, 8+HI(BASE) 1371 | sw TISNUM, 8+HI(BASE)
1239 | sw r0, 8+LO(BASE) 1372 | sw r0, 8+LO(BASE)
1240 | li RD, (3+1)*8 1373 | sw TMP0, HI(RA)
1374 | sw TMP1, LO(RA)
1241 | b ->fff_res 1375 | b ->fff_res
1242 |. sdc1 f0, 0(RA) 1376 |. li RD, (3+1)*8
1243 | 1377 |
1244 |//-- Base library: catch errors ---------------------------------------- 1378 |//-- Base library: catch errors ----------------------------------------
1245 | 1379 |
@@ -1259,8 +1393,9 @@ static void build_subroutines(BuildCtx *ctx)
1259 | sltiu AT, NARGS8:RC, 16 1393 | sltiu AT, NARGS8:RC, 16
1260 | lw CARG4, 8+HI(BASE) 1394 | lw CARG4, 8+HI(BASE)
1261 | bnez AT, ->fff_fallback 1395 | bnez AT, ->fff_fallback
1262 |. ldc1 FARG2, 8(BASE) 1396 |. lw CARG3, 8+LO(BASE)
1263 | ldc1 FARG1, 0(BASE) 1397 | lw CARG1, LO(BASE)
1398 | lw CARG2, HI(BASE)
1264 | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH) 1399 | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
1265 | li AT, LJ_TFUNC 1400 | li AT, LJ_TFUNC
1266 | move TMP2, BASE 1401 | move TMP2, BASE
@@ -1268,9 +1403,11 @@ static void build_subroutines(BuildCtx *ctx)
1268 | addiu BASE, BASE, 16 1403 | addiu BASE, BASE, 16
1269 | // Remember active hook before pcall. 1404 | // Remember active hook before pcall.
1270 | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT 1405 | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
1271 | sdc1 FARG2, 0(TMP2) // Swap function and traceback. 1406 | sw CARG3, LO(TMP2) // Swap function and traceback.
1407 | sw CARG4, HI(TMP2)
1272 | andi TMP3, TMP3, 1 1408 | andi TMP3, TMP3, 1
1273 | sdc1 FARG1, 8(TMP2) 1409 | sw CARG1, 8+LO(TMP2)
1410 | sw CARG2, 8+HI(TMP2)
1274 | addiu PC, TMP3, 16+FRAME_PCALL 1411 | addiu PC, TMP3, 16+FRAME_PCALL
1275 | b ->vm_call_dispatch 1412 | b ->vm_call_dispatch
1276 |. addiu NARGS8:RC, NARGS8:RC, -16 1413 |. addiu NARGS8:RC, NARGS8:RC, -16
@@ -1279,7 +1416,10 @@ static void build_subroutines(BuildCtx *ctx)
1279 | 1416 |
1280 |.macro coroutine_resume_wrap, resume 1417 |.macro coroutine_resume_wrap, resume
1281 |.if resume 1418 |.if resume
1282 |.ffunc_1 coroutine_resume 1419 |.ffunc coroutine_resume
1420 | lw CARG3, HI(BASE)
1421 | beqz NARGS8:RC, ->fff_fallback
1422 |. lw CARG1, LO(BASE)
1283 | li AT, LJ_TTHREAD 1423 | li AT, LJ_TTHREAD
1284 | bne CARG3, AT, ->fff_fallback 1424 | bne CARG3, AT, ->fff_fallback
1285 |.else 1425 |.else
@@ -1314,11 +1454,13 @@ static void build_subroutines(BuildCtx *ctx)
1314 | move CARG3, CARG2 1454 | move CARG3, CARG2
1315 | sw BASE, L->top 1455 | sw BASE, L->top
1316 |2: // Move args to coroutine. 1456 |2: // Move args to coroutine.
1317 | ldc1 f0, 0(BASE) 1457 | lw SFRETHI, HI(BASE)
1458 | lw SFRETLO, LO(BASE)
1318 | sltu AT, BASE, TMP1 1459 | sltu AT, BASE, TMP1
1319 | beqz AT, >3 1460 | beqz AT, >3
1320 |. addiu BASE, BASE, 8 1461 |. addiu BASE, BASE, 8
1321 | sdc1 f0, 0(CARG3) 1462 | sw SFRETHI, HI(CARG3)
1463 | sw SFRETLO, LO(CARG3)
1322 | b <2 1464 | b <2
1323 |. addiu CARG3, CARG3, 8 1465 |. addiu CARG3, CARG3, 8
1324 |3: 1466 |3:
@@ -1331,6 +1473,7 @@ static void build_subroutines(BuildCtx *ctx)
1331 | lw TMP3, L:RA->top 1473 | lw TMP3, L:RA->top
1332 | li_vmstate INTERP 1474 | li_vmstate INTERP
1333 | lw BASE, L->base 1475 | lw BASE, L->base
1476 | sw L, DISPATCH_GL(cur_L)(DISPATCH)
1334 | st_vmstate 1477 | st_vmstate
1335 | beqz AT, >8 1478 | beqz AT, >8
1336 |. subu RD, TMP3, TMP2 1479 |. subu RD, TMP3, TMP2
@@ -1343,10 +1486,12 @@ static void build_subroutines(BuildCtx *ctx)
1343 | sw TMP2, L:RA->top // Clear coroutine stack. 1486 | sw TMP2, L:RA->top // Clear coroutine stack.
1344 | move TMP1, BASE 1487 | move TMP1, BASE
1345 |5: // Move results from coroutine. 1488 |5: // Move results from coroutine.
1346 | ldc1 f0, 0(TMP2) 1489 | lw SFRETHI, HI(TMP2)
1490 | lw SFRETLO, LO(TMP2)
1347 | addiu TMP2, TMP2, 8 1491 | addiu TMP2, TMP2, 8
1348 | sltu AT, TMP2, TMP3 1492 | sltu AT, TMP2, TMP3
1349 | sdc1 f0, 0(TMP1) 1493 | sw SFRETHI, HI(TMP1)
1494 | sw SFRETLO, LO(TMP1)
1350 | bnez AT, <5 1495 | bnez AT, <5
1351 |. addiu TMP1, TMP1, 8 1496 |. addiu TMP1, TMP1, 8
1352 |6: 1497 |6:
@@ -1371,12 +1516,14 @@ static void build_subroutines(BuildCtx *ctx)
1371 |.if resume 1516 |.if resume
1372 | addiu TMP3, TMP3, -8 1517 | addiu TMP3, TMP3, -8
1373 | li TMP1, LJ_TFALSE 1518 | li TMP1, LJ_TFALSE
1374 | ldc1 f0, 0(TMP3) 1519 | lw SFRETHI, HI(TMP3)
1520 | lw SFRETLO, LO(TMP3)
1375 | sw TMP3, L:RA->top // Remove error from coroutine stack. 1521 | sw TMP3, L:RA->top // Remove error from coroutine stack.
1376 | li RD, (2+1)*8 1522 | li RD, (2+1)*8
1377 | sw TMP1, -8+HI(BASE) // Prepend false to results. 1523 | sw TMP1, -8+HI(BASE) // Prepend false to results.
1378 | addiu RA, BASE, -8 1524 | addiu RA, BASE, -8
1379 | sdc1 f0, 0(BASE) // Copy error message. 1525 | sw SFRETHI, HI(BASE) // Copy error message.
1526 | sw SFRETLO, LO(BASE)
1380 | b <7 1527 | b <7
1381 |. andi TMP0, PC, FRAME_TYPE 1528 |. andi TMP0, PC, FRAME_TYPE
1382 |.else 1529 |.else
@@ -1412,20 +1559,29 @@ static void build_subroutines(BuildCtx *ctx)
1412 | 1559 |
1413 |//-- Math library ------------------------------------------------------- 1560 |//-- Math library -------------------------------------------------------
1414 | 1561 |
1415 |.ffunc_n math_abs 1562 |.ffunc_1 math_abs
1416 |. abs.d FRET1, FARG1 1563 | bne SFARG1HI, TISNUM, >1
1417 |->fff_resn: 1564 |. sra TMP0, SFARG1LO, 31
1418 | lw PC, FRAME_PC(BASE) 1565 | xor TMP1, SFARG1LO, TMP0
1419 | addiu RA, BASE, -8 1566 | subu SFARG1LO, TMP1, TMP0
1420 | b ->fff_res1 1567 | bgez SFARG1LO, ->fff_restv
1421 |. sdc1 FRET1, -8(BASE) 1568 |. nop
1569 | lui SFARG1HI, 0x41e0 // 2^31 as a double.
1570 | b ->fff_restv
1571 |. li SFARG1LO, 0
1572 |1:
1573 | sltiu AT, SFARG1HI, LJ_TISNUM
1574 | beqz AT, ->fff_fallback
1575 |. sll SFARG1HI, SFARG1HI, 1
1576 | srl SFARG1HI, SFARG1HI, 1
1577 |// fallthrough
1422 | 1578 |
1423 |->fff_restv: 1579 |->fff_restv:
1424 | // CARG3/CARG1 = TValue result. 1580 | // SFARG1LO/SFARG1HI = TValue result.
1425 | lw PC, FRAME_PC(BASE) 1581 | lw PC, FRAME_PC(BASE)
1426 | sw CARG3, -8+HI(BASE) 1582 | sw SFARG1HI, -8+HI(BASE)
1427 | addiu RA, BASE, -8 1583 | addiu RA, BASE, -8
1428 | sw CARG1, -8+LO(BASE) 1584 | sw SFARG1LO, -8+LO(BASE)
1429 |->fff_res1: 1585 |->fff_res1:
1430 | // RA = results, PC = return. 1586 | // RA = results, PC = return.
1431 | li RD, (1+1)*8 1587 | li RD, (1+1)*8
@@ -1454,15 +1610,19 @@ static void build_subroutines(BuildCtx *ctx)
1454 |. sw TISNIL, -8+HI(TMP1) 1610 |. sw TISNIL, -8+HI(TMP1)
1455 | 1611 |
1456 |.macro math_extern, func 1612 |.macro math_extern, func
1457 |->ff_math_ .. func: 1613 | .ffunc math_ .. func
1458 | lw CARG3, HI(BASE) 1614 | lw SFARG1HI, HI(BASE)
1459 | beqz NARGS8:RC, ->fff_fallback 1615 | beqz NARGS8:RC, ->fff_fallback
1460 |. load_got func 1616 |. load_got func
1461 | sltiu AT, CARG3, LJ_TISNUM 1617 | sltiu AT, SFARG1HI, LJ_TISNUM
1462 | beqz AT, ->fff_fallback 1618 | beqz AT, ->fff_fallback
1463 |. nop 1619 |.if FPU
1464 | call_extern
1465 |. ldc1 FARG1, 0(BASE) 1620 |. ldc1 FARG1, 0(BASE)
1621 |.else
1622 |. lw SFARG1LO, LO(BASE)
1623 |.endif
1624 | call_extern
1625 |. nop
1466 | b ->fff_resn 1626 | b ->fff_resn
1467 |. nop 1627 |. nop
1468 |.endmacro 1628 |.endmacro
@@ -1476,10 +1636,22 @@ static void build_subroutines(BuildCtx *ctx)
1476 |. nop 1636 |. nop
1477 |.endmacro 1637 |.endmacro
1478 | 1638 |
1639 |// TODO: Return integer type if result is integer (own sf implementation).
1479 |.macro math_round, func 1640 |.macro math_round, func
1480 | .ffunc_n math_ .. func 1641 |->ff_math_ .. func:
1481 |. nop 1642 | lw SFARG1HI, HI(BASE)
1643 | beqz NARGS8:RC, ->fff_fallback
1644 |. lw SFARG1LO, LO(BASE)
1645 | beq SFARG1HI, TISNUM, ->fff_restv
1646 |. sltu AT, SFARG1HI, TISNUM
1647 | beqz AT, ->fff_fallback
1648 |.if FPU
1649 |. ldc1 FARG1, 0(BASE)
1482 | bal ->vm_ .. func 1650 | bal ->vm_ .. func
1651 |.else
1652 |. load_got func
1653 | call_extern
1654 |.endif
1483 |. nop 1655 |. nop
1484 | b ->fff_resn 1656 | b ->fff_resn
1485 |. nop 1657 |. nop
@@ -1489,15 +1661,19 @@ static void build_subroutines(BuildCtx *ctx)
1489 | math_round ceil 1661 | math_round ceil
1490 | 1662 |
1491 |.ffunc math_log 1663 |.ffunc math_log
1492 | lw CARG3, HI(BASE)
1493 | li AT, 8 1664 | li AT, 8
1494 | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. 1665 | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
1495 |. load_got log 1666 |. lw SFARG1HI, HI(BASE)
1496 | sltiu AT, CARG3, LJ_TISNUM 1667 | sltiu AT, SFARG1HI, LJ_TISNUM
1497 | beqz AT, ->fff_fallback 1668 | beqz AT, ->fff_fallback
1498 |. nop 1669 |. load_got log
1670 |.if FPU
1499 | call_extern 1671 | call_extern
1500 |. ldc1 FARG1, 0(BASE) 1672 |. ldc1 FARG1, 0(BASE)
1673 |.else
1674 | call_extern
1675 |. lw SFARG1LO, LO(BASE)
1676 |.endif
1501 | b ->fff_resn 1677 | b ->fff_resn
1502 |. nop 1678 |. nop
1503 | 1679 |
@@ -1516,23 +1692,43 @@ static void build_subroutines(BuildCtx *ctx)
1516 | math_extern2 atan2 1692 | math_extern2 atan2
1517 | math_extern2 fmod 1693 | math_extern2 fmod
1518 | 1694 |
1695 |.if FPU
1519 |.ffunc_n math_sqrt 1696 |.ffunc_n math_sqrt
1520 |. sqrt.d FRET1, FARG1 1697 |. sqrt.d FRET1, FARG1
1521 | b ->fff_resn 1698 |// fallthrough to ->fff_resn
1522 |. nop 1699 |.else
1700 | math_extern sqrt
1701 |.endif
1523 | 1702 |
1524 |->ff_math_deg: 1703 |->fff_resn:
1525 |.ffunc_n math_rad 1704 | lw PC, FRAME_PC(BASE)
1526 |. ldc1 FARG2, CFUNC:RB->upvalue[0] 1705 | addiu RA, BASE, -8
1527 | b ->fff_resn 1706 |.if FPU
1528 |. mul.d FRET1, FARG1, FARG2 1707 | b ->fff_res1
1708 |. sdc1 FRET1, -8(BASE)
1709 |.else
1710 | sw SFRETHI, -8+HI(BASE)
1711 | b ->fff_res1
1712 |. sw SFRETLO, -8+LO(BASE)
1713 |.endif
1529 | 1714 |
1530 |.ffunc_nn math_ldexp 1715 |
1531 | cvt.w.d FARG2, FARG2 1716 |.ffunc math_ldexp
1717 | sltiu AT, NARGS8:RC, 16
1718 | lw SFARG1HI, HI(BASE)
1719 | bnez AT, ->fff_fallback
1720 |. lw CARG4, 8+HI(BASE)
1721 | bne CARG4, TISNUM, ->fff_fallback
1532 | load_got ldexp 1722 | load_got ldexp
1533 | mfc1 CARG3, FARG2 1723 |. sltu AT, SFARG1HI, TISNUM
1724 | beqz AT, ->fff_fallback
1725 |.if FPU
1726 |. ldc1 FARG1, 0(BASE)
1727 |.else
1728 |. lw SFARG1LO, LO(BASE)
1729 |.endif
1534 | call_extern 1730 | call_extern
1535 |. nop 1731 |. lw CARG3, 8+LO(BASE)
1536 | b ->fff_resn 1732 | b ->fff_resn
1537 |. nop 1733 |. nop
1538 | 1734 |
@@ -1543,10 +1739,17 @@ static void build_subroutines(BuildCtx *ctx)
1543 |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv) 1739 |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
1544 | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH) 1740 | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
1545 | addiu RA, BASE, -8 1741 | addiu RA, BASE, -8
1742 |.if FPU
1546 | mtc1 TMP1, FARG2 1743 | mtc1 TMP1, FARG2
1547 | sdc1 FRET1, 0(RA) 1744 | sdc1 FRET1, 0(RA)
1548 | cvt.d.w FARG2, FARG2 1745 | cvt.d.w FARG2, FARG2
1549 | sdc1 FARG2, 8(RA) 1746 | sdc1 FARG2, 8(RA)
1747 |.else
1748 | sw SFRETLO, LO(RA)
1749 | sw SFRETHI, HI(RA)
1750 | sw TMP1, 8+LO(RA)
1751 | sw TISNUM, 8+HI(RA)
1752 |.endif
1550 | b ->fff_res 1753 | b ->fff_res
1551 |. li RD, (2+1)*8 1754 |. li RD, (2+1)*8
1552 | 1755 |
@@ -1556,49 +1759,109 @@ static void build_subroutines(BuildCtx *ctx)
1556 | call_extern 1759 | call_extern
1557 |. addiu CARG3, BASE, -8 1760 |. addiu CARG3, BASE, -8
1558 | addiu RA, BASE, -8 1761 | addiu RA, BASE, -8
1762 |.if FPU
1559 | sdc1 FRET1, 0(BASE) 1763 | sdc1 FRET1, 0(BASE)
1764 |.else
1765 | sw SFRETLO, LO(BASE)
1766 | sw SFRETHI, HI(BASE)
1767 |.endif
1560 | b ->fff_res 1768 | b ->fff_res
1561 |. li RD, (2+1)*8 1769 |. li RD, (2+1)*8
1562 | 1770 |
1563 |.macro math_minmax, name, ismax 1771 |.macro math_minmax, name, intins, ismax
1564 |->ff_ .. name: 1772 | .ffunc_1 name
1565 | lw CARG3, HI(BASE) 1773 | addu TMP3, BASE, NARGS8:RC
1566 | beqz NARGS8:RC, ->fff_fallback 1774 | bne SFARG1HI, TISNUM, >5
1567 |. ldc1 FRET1, 0(BASE) 1775 |. addiu TMP2, BASE, 8
1568 | sltiu AT, CARG3, LJ_TISNUM 1776 |1: // Handle integers.
1777 |. lw SFARG2HI, HI(TMP2)
1778 | beq TMP2, TMP3, ->fff_restv
1779 |. lw SFARG2LO, LO(TMP2)
1780 | bne SFARG2HI, TISNUM, >3
1781 |. slt AT, SFARG1LO, SFARG2LO
1782 | intins SFARG1LO, SFARG2LO, AT
1783 | b <1
1784 |. addiu TMP2, TMP2, 8
1785 |
1786 |3: // Convert intermediate result to number and continue with number loop.
1787 | sltiu AT, SFARG2HI, LJ_TISNUM
1569 | beqz AT, ->fff_fallback 1788 | beqz AT, ->fff_fallback
1570 |. addu TMP2, BASE, NARGS8:RC 1789 |.if FPU
1571 | addiu TMP1, BASE, 8 1790 |. mtc1 SFARG1LO, FRET1
1572 | beq TMP1, TMP2, ->fff_resn 1791 | cvt.d.w FRET1, FRET1
1573 |1: 1792 | b >7
1574 |. lw CARG3, HI(TMP1) 1793 |. ldc1 FARG1, 0(TMP2)
1575 | ldc1 FARG1, 0(TMP1) 1794 |.else
1576 | addiu TMP1, TMP1, 8 1795 |. nop
1577 | sltiu AT, CARG3, LJ_TISNUM 1796 | bal ->vm_sfi2d_1
1797 |. nop
1798 | b >7
1799 |. nop
1800 |.endif
1801 |
1802 |5:
1803 |. sltiu AT, SFARG1HI, LJ_TISNUM
1578 | beqz AT, ->fff_fallback 1804 | beqz AT, ->fff_fallback
1805 |.if FPU
1806 |. ldc1 FRET1, 0(BASE)
1807 |.endif
1808 |
1809 |6: // Handle numbers.
1810 |. lw SFARG2HI, HI(TMP2)
1811 |.if FPU
1812 | beq TMP2, TMP3, ->fff_resn
1813 |.else
1814 | beq TMP2, TMP3, ->fff_restv
1815 |.endif
1816 |. sltiu AT, SFARG2HI, LJ_TISNUM
1817 | beqz AT, >8
1818 |.if FPU
1819 |. ldc1 FARG1, 0(TMP2)
1820 |.else
1821 |. lw SFARG2LO, LO(TMP2)
1822 |.endif
1823 |7:
1824 |.if FPU
1579 |.if ismax 1825 |.if ismax
1580 |. c.olt.d FARG1, FRET1 1826 | c.olt.d FARG1, FRET1
1581 |.else 1827 |.else
1582 |. c.olt.d FRET1, FARG1 1828 | c.olt.d FRET1, FARG1
1829 |.endif
1830 | movf.d FRET1, FARG1
1831 |.else
1832 |.if ismax
1833 | bal ->vm_sfcmpogt
1834 |.else
1835 | bal ->vm_sfcmpolt
1583 |.endif 1836 |.endif
1584 | bne TMP1, TMP2, <1
1585 |. movf.d FRET1, FARG1
1586 | b ->fff_resn
1587 |. nop 1837 |. nop
1838 | movz SFARG1LO, SFARG2LO, CRET1
1839 | movz SFARG1HI, SFARG2HI, CRET1
1840 |.endif
1841 | b <6
1842 |. addiu TMP2, TMP2, 8
1843 |
1844 |8: // Convert integer to number and continue with number loop.
1845 | bne SFARG2HI, TISNUM, ->fff_fallback
1846 |.if FPU
1847 |. lwc1 FARG1, LO(TMP2)
1848 | b <7
1849 |. cvt.d.w FARG1, FARG1
1850 |.else
1851 |. nop
1852 | bal ->vm_sfi2d_2
1853 |. nop
1854 | b <7
1855 |. nop
1856 |.endif
1857 |
1588 |.endmacro 1858 |.endmacro
1589 | 1859 |
1590 | math_minmax math_min, 0 1860 | math_minmax math_min, movz, 0
1591 | math_minmax math_max, 1 1861 | math_minmax math_max, movn, 1
1592 | 1862 |
1593 |//-- String library ----------------------------------------------------- 1863 |//-- String library -----------------------------------------------------
1594 | 1864 |
1595 |.ffunc_1 string_len
1596 | li AT, LJ_TSTR
1597 | bne CARG3, AT, ->fff_fallback
1598 |. nop
1599 | b ->fff_resi
1600 |. lw CRET1, STR:CARG1->len
1601 |
1602 |.ffunc string_byte // Only handle the 1-arg case here. 1865 |.ffunc string_byte // Only handle the 1-arg case here.
1603 | lw CARG3, HI(BASE) 1866 | lw CARG3, HI(BASE)
1604 | lw STR:CARG1, LO(BASE) 1867 | lw STR:CARG1, LO(BASE)
@@ -1608,33 +1871,31 @@ static void build_subroutines(BuildCtx *ctx)
1608 | bnez AT, ->fff_fallback // Need exactly 1 string argument. 1871 | bnez AT, ->fff_fallback // Need exactly 1 string argument.
1609 |. nop 1872 |. nop
1610 | lw TMP0, STR:CARG1->len 1873 | lw TMP0, STR:CARG1->len
1611 | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
1612 | addiu RA, BASE, -8 1874 | addiu RA, BASE, -8
1875 | lw PC, FRAME_PC(BASE)
1613 | sltu RD, r0, TMP0 1876 | sltu RD, r0, TMP0
1614 | mtc1 TMP1, f0 1877 | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
1615 | addiu RD, RD, 1 1878 | addiu RD, RD, 1
1616 | cvt.d.w f0, f0
1617 | lw PC, FRAME_PC(BASE)
1618 | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8 1879 | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
1880 | sw TISNUM, HI(RA)
1619 | b ->fff_res 1881 | b ->fff_res
1620 |. sdc1 f0, 0(RA) 1882 |. sw TMP1, LO(RA)
1621 | 1883 |
1622 |.ffunc string_char // Only handle the 1-arg case here. 1884 |.ffunc string_char // Only handle the 1-arg case here.
1623 | ffgccheck 1885 | ffgccheck
1624 |. nop 1886 |. nop
1625 | lw CARG3, HI(BASE) 1887 | lw CARG3, HI(BASE)
1626 | ldc1 FARG1, 0(BASE) 1888 | lw CARG1, LO(BASE)
1627 | li AT, 8 1889 | li TMP1, 255
1628 | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument. 1890 | xori AT, NARGS8:RC, 8 // Exactly 1 argument.
1629 |. sltiu AT, CARG3, LJ_TISNUM 1891 | xor TMP0, CARG3, TISNUM // Integer.
1630 | beqz AT, ->fff_fallback 1892 | sltu TMP1, TMP1, CARG1 // !(255 < n).
1893 | or AT, AT, TMP0
1894 | or AT, AT, TMP1
1895 | bnez AT, ->fff_fallback
1631 |. li CARG3, 1 1896 |. li CARG3, 1
1632 | cvt.w.d FARG1, FARG1
1633 | addiu CARG2, sp, ARG5_OFS 1897 | addiu CARG2, sp, ARG5_OFS
1634 | sltiu AT, TMP0, 256 1898 | sb CARG1, ARG5
1635 | mfc1 TMP0, FARG1
1636 | beqz AT, ->fff_fallback
1637 |. sw TMP0, ARG5
1638 |->fff_newstr: 1899 |->fff_newstr:
1639 | load_got lj_str_new 1900 | load_got lj_str_new
1640 | sw BASE, L->base 1901 | sw BASE, L->base
@@ -1643,35 +1904,30 @@ static void build_subroutines(BuildCtx *ctx)
1643 |. move CARG1, L 1904 |. move CARG1, L
1644 | // Returns GCstr *. 1905 | // Returns GCstr *.
1645 | lw BASE, L->base 1906 | lw BASE, L->base
1646 | move CARG1, CRET1 1907 |->fff_resstr:
1908 | move SFARG1LO, CRET1
1647 | b ->fff_restv 1909 | b ->fff_restv
1648 |. li CARG3, LJ_TSTR 1910 |. li SFARG1HI, LJ_TSTR
1649 | 1911 |
1650 |.ffunc string_sub 1912 |.ffunc string_sub
1651 | ffgccheck 1913 | ffgccheck
1652 |. nop 1914 |. nop
1653 | addiu AT, NARGS8:RC, -16 1915 | addiu AT, NARGS8:RC, -16
1654 | lw CARG3, 16+HI(BASE) 1916 | lw CARG3, 16+HI(BASE)
1655 | ldc1 f0, 16(BASE)
1656 | lw TMP0, HI(BASE) 1917 | lw TMP0, HI(BASE)
1657 | lw STR:CARG1, LO(BASE) 1918 | lw STR:CARG1, LO(BASE)
1658 | bltz AT, ->fff_fallback 1919 | bltz AT, ->fff_fallback
1659 | lw CARG2, 8+HI(BASE) 1920 |. lw CARG2, 8+HI(BASE)
1660 | ldc1 f2, 8(BASE)
1661 | beqz AT, >1 1921 | beqz AT, >1
1662 |. li CARG4, -1 1922 |. li CARG4, -1
1663 | cvt.w.d f0, f0 1923 | bne CARG3, TISNUM, ->fff_fallback
1664 | sltiu AT, CARG3, LJ_TISNUM 1924 |. lw CARG4, 16+LO(BASE)
1665 | beqz AT, ->fff_fallback
1666 |. mfc1 CARG4, f0
1667 |1: 1925 |1:
1668 | sltiu AT, CARG2, LJ_TISNUM 1926 | bne CARG2, TISNUM, ->fff_fallback
1669 | beqz AT, ->fff_fallback
1670 |. li AT, LJ_TSTR 1927 |. li AT, LJ_TSTR
1671 | cvt.w.d f2, f2
1672 | bne TMP0, AT, ->fff_fallback 1928 | bne TMP0, AT, ->fff_fallback
1673 |. lw CARG2, STR:CARG1->len 1929 |. lw CARG3, 8+LO(BASE)
1674 | mfc1 CARG3, f2 1930 | lw CARG2, STR:CARG1->len
1675 | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end 1931 | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
1676 | slt AT, CARG4, r0 1932 | slt AT, CARG4, r0
1677 | addiu TMP0, CARG2, 1 1933 | addiu TMP0, CARG2, 1
@@ -1693,139 +1949,130 @@ static void build_subroutines(BuildCtx *ctx)
1693 | bgez CARG3, ->fff_newstr 1949 | bgez CARG3, ->fff_newstr
1694 |. addiu CARG3, CARG3, 1 // len++ 1950 |. addiu CARG3, CARG3, 1 // len++
1695 |->fff_emptystr: // Return empty string. 1951 |->fff_emptystr: // Return empty string.
1696 | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty) 1952 | addiu STR:SFARG1LO, DISPATCH, DISPATCH_GL(strempty)
1697 | b ->fff_restv 1953 | b ->fff_restv
1698 |. li CARG3, LJ_TSTR 1954 |. li SFARG1HI, LJ_TSTR
1699 |
1700 |.ffunc string_rep // Only handle the 1-char case inline.
1701 | ffgccheck
1702 |. nop
1703 | lw TMP0, HI(BASE)
1704 | addiu AT, NARGS8:RC, -16 // Exactly 2 arguments.
1705 | lw CARG4, 8+HI(BASE)
1706 | lw STR:CARG1, LO(BASE)
1707 | addiu TMP0, TMP0, -LJ_TSTR
1708 | ldc1 f0, 8(BASE)
1709 | or AT, AT, TMP0
1710 | bnez AT, ->fff_fallback
1711 |. sltiu AT, CARG4, LJ_TISNUM
1712 | cvt.w.d f0, f0
1713 | beqz AT, ->fff_fallback
1714 |. lw TMP0, STR:CARG1->len
1715 | mfc1 CARG3, f0
1716 | lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
1717 | li AT, 1
1718 | blez CARG3, ->fff_emptystr // Count <= 0?
1719 |. sltu AT, AT, TMP0
1720 | beqz TMP0, ->fff_emptystr // Zero length string?
1721 |. sltu TMP0, TMP1, CARG3
1722 | or AT, AT, TMP0
1723 | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
1724 | bnez AT, ->fff_fallback // Fallback for > 1-char strings.
1725 |. lbu TMP0, STR:CARG1[1]
1726 | addu TMP2, CARG2, CARG3
1727 |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
1728 | addiu TMP2, TMP2, -1
1729 | sltu AT, CARG2, TMP2
1730 | bnez AT, <1
1731 |. sb TMP0, 0(TMP2)
1732 | b ->fff_newstr
1733 |. nop
1734 | 1955 |
1735 |.ffunc string_reverse 1956 |.macro ffstring_op, name
1957 | .ffunc string_ .. name
1736 | ffgccheck 1958 | ffgccheck
1737 |. nop 1959 |. nop
1738 | lw CARG3, HI(BASE) 1960 | lw CARG3, HI(BASE)
1739 | lw STR:CARG1, LO(BASE) 1961 | lw STR:CARG2, LO(BASE)
1740 | beqz NARGS8:RC, ->fff_fallback 1962 | beqz NARGS8:RC, ->fff_fallback
1741 |. li AT, LJ_TSTR 1963 |. li AT, LJ_TSTR
1742 | bne CARG3, AT, ->fff_fallback 1964 | bne CARG3, AT, ->fff_fallback
1743 |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH) 1965 |. addiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf)
1744 | lw CARG3, STR:CARG1->len 1966 | load_got lj_buf_putstr_ .. name
1745 | addiu CARG1, STR:CARG1, #STR 1967 | lw TMP0, SBUF:CARG1->b
1746 | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH) 1968 | sw L, SBUF:CARG1->L
1747 | sltu AT, TMP1, CARG3 1969 | sw BASE, L->base
1748 | bnez AT, ->fff_fallback 1970 | sw TMP0, SBUF:CARG1->p
1749 |. addu TMP3, CARG1, CARG3 1971 | call_intern extern lj_buf_putstr_ .. name
1750 | addu CARG4, CARG2, CARG3 1972 |. sw PC, SAVE_PC
1751 |1: // Reverse string copy. 1973 | load_got lj_buf_tostr
1752 | lbu TMP1, 0(CARG1) 1974 | call_intern lj_buf_tostr
1753 | sltu AT, CARG1, TMP3 1975 |. move SBUF:CARG1, SBUF:CRET1
1754 | beqz AT, ->fff_newstr 1976 | b ->fff_resstr
1755 |. addiu CARG1, CARG1, 1 1977 |. lw BASE, L->base
1756 | addiu CARG4, CARG4, -1
1757 | b <1
1758 | sb TMP1, 0(CARG4)
1759 |
1760 |.macro ffstring_case, name, lo
1761 | .ffunc name
1762 | ffgccheck
1763 |. nop
1764 | lw CARG3, HI(BASE)
1765 | lw STR:CARG1, LO(BASE)
1766 | beqz NARGS8:RC, ->fff_fallback
1767 |. li AT, LJ_TSTR
1768 | bne CARG3, AT, ->fff_fallback
1769 |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
1770 | lw CARG3, STR:CARG1->len
1771 | addiu CARG1, STR:CARG1, #STR
1772 | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
1773 | sltu AT, TMP1, CARG3
1774 | bnez AT, ->fff_fallback
1775 |. addu TMP3, CARG1, CARG3
1776 | move CARG4, CARG2
1777 |1: // ASCII case conversion.
1778 | lbu TMP1, 0(CARG1)
1779 | sltu AT, CARG1, TMP3
1780 | beqz AT, ->fff_newstr
1781 |. addiu TMP0, TMP1, -lo
1782 | xori TMP2, TMP1, 0x20
1783 | sltiu AT, TMP0, 26
1784 | movn TMP1, TMP2, AT
1785 | addiu CARG1, CARG1, 1
1786 | sb TMP1, 0(CARG4)
1787 | b <1
1788 |. addiu CARG4, CARG4, 1
1789 |.endmacro 1978 |.endmacro
1790 | 1979 |
1791 |ffstring_case string_lower, 65 1980 |ffstring_op reverse
1792 |ffstring_case string_upper, 97 1981 |ffstring_op lower
1982 |ffstring_op upper
1793 | 1983 |
1794 |//-- Table library ------------------------------------------------------ 1984 |//-- Bit library --------------------------------------------------------
1795 | 1985 |
1796 |.ffunc_1 table_getn 1986 |->vm_tobit_fb:
1797 | li AT, LJ_TTAB 1987 | beqz TMP1, ->fff_fallback
1798 | bne CARG3, AT, ->fff_fallback 1988 |.if FPU
1799 |. load_got lj_tab_len 1989 |. ldc1 FARG1, 0(BASE)
1800 | call_intern lj_tab_len // (GCtab *t) 1990 | add.d FARG1, FARG1, TOBIT
1801 |. nop 1991 | jr ra
1802 | // Returns uint32_t (but less than 2^31). 1992 |. mfc1 CRET1, FARG1
1803 | b ->fff_resi 1993 |.else
1994 |// FP number to bit conversion for soft-float.
1995 |->vm_tobit:
1996 | sll TMP0, SFARG1HI, 1
1997 | lui AT, 0x0020
1998 | addu TMP0, TMP0, AT
1999 | slt AT, TMP0, r0
2000 | movz SFARG1LO, r0, AT
2001 | beqz AT, >2
2002 |. li TMP1, 0x3e0
2003 | not TMP1, TMP1
2004 | sra TMP0, TMP0, 21
2005 | subu TMP0, TMP1, TMP0
2006 | slt AT, TMP0, r0
2007 | bnez AT, >1
2008 |. sll TMP1, SFARG1HI, 11
2009 | lui AT, 0x8000
2010 | or TMP1, TMP1, AT
2011 | srl AT, SFARG1LO, 21
2012 | or TMP1, TMP1, AT
2013 | slt AT, SFARG1HI, r0
2014 | beqz AT, >2
2015 |. srlv SFARG1LO, TMP1, TMP0
2016 | subu SFARG1LO, r0, SFARG1LO
2017 |2:
2018 | jr ra
2019 |. move CRET1, SFARG1LO
2020 |1:
2021 | addiu TMP0, TMP0, 21
2022 | srlv TMP1, SFARG1LO, TMP0
2023 | li AT, 20
2024 | subu TMP0, AT, TMP0
2025 | sll SFARG1LO, SFARG1HI, 12
2026 | sllv AT, SFARG1LO, TMP0
2027 | or SFARG1LO, TMP1, AT
2028 | slt AT, SFARG1HI, r0
2029 | beqz AT, <2
1804 |. nop 2030 |. nop
1805 | 2031 | jr ra
1806 |//-- Bit library -------------------------------------------------------- 2032 |. subu CRET1, r0, SFARG1LO
2033 |.endif
1807 | 2034 |
1808 |.macro .ffunc_bit, name 2035 |.macro .ffunc_bit, name
1809 | .ffunc_n bit_..name 2036 | .ffunc_1 bit_..name
1810 |. add.d FARG1, FARG1, TOBIT 2037 | beq SFARG1HI, TISNUM, >6
1811 | mfc1 CRET1, FARG1 2038 |. move CRET1, SFARG1LO
2039 | bal ->vm_tobit_fb
2040 |. sltu TMP1, SFARG1HI, TISNUM
2041 |6:
1812 |.endmacro 2042 |.endmacro
1813 | 2043 |
1814 |.macro .ffunc_bit_op, name, ins 2044 |.macro .ffunc_bit_op, name, ins
1815 | .ffunc_bit name 2045 | .ffunc_bit name
1816 | addiu TMP1, BASE, 8 2046 | addiu TMP2, BASE, 8
1817 | addu TMP2, BASE, NARGS8:RC 2047 | addu TMP3, BASE, NARGS8:RC
1818 |1: 2048 |1:
1819 | lw CARG4, HI(TMP1) 2049 | lw SFARG1HI, HI(TMP2)
1820 | beq TMP1, TMP2, ->fff_resi 2050 | beq TMP2, TMP3, ->fff_resi
1821 |. ldc1 FARG1, 0(TMP1) 2051 |. lw SFARG1LO, LO(TMP2)
1822 | sltiu AT, CARG4, LJ_TISNUM 2052 |.if FPU
1823 | beqz AT, ->fff_fallback 2053 | bne SFARG1HI, TISNUM, >2
1824 | add.d FARG1, FARG1, TOBIT 2054 |. addiu TMP2, TMP2, 8
1825 | mfc1 CARG2, FARG1
1826 | ins CRET1, CRET1, CARG2
1827 | b <1 2055 | b <1
1828 |. addiu TMP1, TMP1, 8 2056 |. ins CRET1, CRET1, SFARG1LO
2057 |2:
2058 | ldc1 FARG1, -8(TMP2)
2059 | sltu TMP1, SFARG1HI, TISNUM
2060 | beqz TMP1, ->fff_fallback
2061 |. add.d FARG1, FARG1, TOBIT
2062 | mfc1 SFARG1LO, FARG1
2063 | b <1
2064 |. ins CRET1, CRET1, SFARG1LO
2065 |.else
2066 | beq SFARG1HI, TISNUM, >2
2067 |. move CRET2, CRET1
2068 | bal ->vm_tobit_fb
2069 |. sltu TMP1, SFARG1HI, TISNUM
2070 | move SFARG1LO, CRET2
2071 |2:
2072 | ins CRET1, CRET1, SFARG1LO
2073 | b <1
2074 |. addiu TMP2, TMP2, 8
2075 |.endif
1829 |.endmacro 2076 |.endmacro
1830 | 2077 |
1831 |.ffunc_bit_op band, and 2078 |.ffunc_bit_op band, and
@@ -1849,24 +2096,28 @@ static void build_subroutines(BuildCtx *ctx)
1849 |. not CRET1, CRET1 2096 |. not CRET1, CRET1
1850 | 2097 |
1851 |.macro .ffunc_bit_sh, name, ins, shmod 2098 |.macro .ffunc_bit_sh, name, ins, shmod
1852 | .ffunc_nn bit_..name 2099 | .ffunc_2 bit_..name
1853 |. add.d FARG1, FARG1, TOBIT 2100 | beq SFARG1HI, TISNUM, >1
1854 | add.d FARG2, FARG2, TOBIT 2101 |. nop
1855 | mfc1 CARG1, FARG1 2102 | bal ->vm_tobit_fb
1856 | mfc1 CARG2, FARG2 2103 |. sltu TMP1, SFARG1HI, TISNUM
2104 | move SFARG1LO, CRET1
2105 |1:
2106 | bne SFARG2HI, TISNUM, ->fff_fallback
2107 |. nop
1857 |.if shmod == 1 2108 |.if shmod == 1
1858 | li AT, 32 2109 | li AT, 32
1859 | subu TMP0, AT, CARG2 2110 | subu TMP0, AT, SFARG2LO
1860 | sllv CARG2, CARG1, CARG2 2111 | sllv SFARG2LO, SFARG1LO, SFARG2LO
1861 | srlv CARG1, CARG1, TMP0 2112 | srlv SFARG1LO, SFARG1LO, TMP0
1862 |.elif shmod == 2 2113 |.elif shmod == 2
1863 | li AT, 32 2114 | li AT, 32
1864 | subu TMP0, AT, CARG2 2115 | subu TMP0, AT, SFARG2LO
1865 | srlv CARG2, CARG1, CARG2 2116 | srlv SFARG2LO, SFARG1LO, SFARG2LO
1866 | sllv CARG1, CARG1, TMP0 2117 | sllv SFARG1LO, SFARG1LO, TMP0
1867 |.endif 2118 |.endif
1868 | b ->fff_resi 2119 | b ->fff_resi
1869 |. ins CRET1, CARG1, CARG2 2120 |. ins CRET1, SFARG1LO, SFARG2LO
1870 |.endmacro 2121 |.endmacro
1871 | 2122 |
1872 |.ffunc_bit_sh lshift, sllv, 0 2123 |.ffunc_bit_sh lshift, sllv, 0
@@ -1878,9 +2129,11 @@ static void build_subroutines(BuildCtx *ctx)
1878 | 2129 |
1879 |.ffunc_bit tobit 2130 |.ffunc_bit tobit
1880 |->fff_resi: 2131 |->fff_resi:
1881 | mtc1 CRET1, FRET1 2132 | lw PC, FRAME_PC(BASE)
1882 | b ->fff_resn 2133 | addiu RA, BASE, -8
1883 |. cvt.d.w FRET1, FRET1 2134 | sw TISNUM, -8+HI(BASE)
2135 | b ->fff_res1
2136 |. sw CRET1, -8+LO(BASE)
1884 | 2137 |
1885 |//----------------------------------------------------------------------- 2138 |//-----------------------------------------------------------------------
1886 | 2139 |
@@ -2067,19 +2320,96 @@ static void build_subroutines(BuildCtx *ctx)
2067 | jr CRET1 2320 | jr CRET1
2068 |. lw INS, -4(PC) 2321 |. lw INS, -4(PC)
2069 | 2322 |
2323 |->cont_stitch: // Trace stitching.
2324 |.if JIT
2325 | // RA = resultptr, RB = meta base
2326 | lw INS, -4(PC)
2327 | lw TMP2, -24+LO(RB) // Save previous trace.
2328 | decode_RA8a RC, INS
2329 | addiu AT, MULTRES, -8
2330 | decode_RA8b RC
2331 | beqz AT, >2
2332 |. addu RC, BASE, RC // Call base.
2333 |1: // Move results down.
2334 | lw SFRETHI, HI(RA)
2335 | lw SFRETLO, LO(RA)
2336 | addiu AT, AT, -8
2337 | addiu RA, RA, 8
2338 | sw SFRETHI, HI(RC)
2339 | sw SFRETLO, LO(RC)
2340 | bnez AT, <1
2341 |. addiu RC, RC, 8
2342 |2:
2343 | decode_RA8a RA, INS
2344 | decode_RB8a RB, INS
2345 | decode_RA8b RA
2346 | decode_RB8b RB
2347 | addu RA, RA, RB
2348 | addu RA, BASE, RA
2349 |3:
2350 | sltu AT, RC, RA
2351 | bnez AT, >9 // More results wanted?
2352 |. nop
2353 |
2354 | lhu TMP3, TRACE:TMP2->traceno
2355 | lhu RD, TRACE:TMP2->link
2356 | beq RD, TMP3, ->cont_nop // Blacklisted.
2357 |. load_got lj_dispatch_stitch
2358 | bnez RD, =>BC_JLOOP // Jump to stitched trace.
2359 |. sll RD, RD, 3
2360 |
2361 | // Stitch a new trace to the previous trace.
2362 | sw TMP3, DISPATCH_J(exitno)(DISPATCH)
2363 | sw L, DISPATCH_J(L)(DISPATCH)
2364 | sw BASE, L->base
2365 | addiu CARG1, DISPATCH, GG_DISP2J
2366 | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
2367 |. move CARG2, PC
2368 | b ->cont_nop
2369 |. lw BASE, L->base
2370 |
2371 |9:
2372 | sw TISNIL, HI(RC)
2373 | b <3
2374 |. addiu RC, RC, 8
2375 |.endif
2376 |
2377 |->vm_profhook: // Dispatch target for profiler hook.
2378#if LJ_HASPROFILE
2379 | load_got lj_dispatch_profile
2380 | sw MULTRES, SAVE_MULTRES
2381 | move CARG2, PC
2382 | sw BASE, L->base
2383 | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
2384 |. move CARG1, L
2385 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2386 | addiu PC, PC, -4
2387 | b ->cont_nop
2388 |. lw BASE, L->base
2389#endif
2390 |
2070 |//----------------------------------------------------------------------- 2391 |//-----------------------------------------------------------------------
2071 |//-- Trace exit handler ------------------------------------------------- 2392 |//-- Trace exit handler -------------------------------------------------
2072 |//----------------------------------------------------------------------- 2393 |//-----------------------------------------------------------------------
2073 | 2394 |
2074 |.macro savex_, a, b 2395 |.macro savex_, a, b
2396 |.if FPU
2075 | sdc1 f..a, 16+a*8(sp) 2397 | sdc1 f..a, 16+a*8(sp)
2076 | sw r..a, 16+32*8+a*4(sp) 2398 | sw r..a, 16+32*8+a*4(sp)
2077 | sw r..b, 16+32*8+b*4(sp) 2399 | sw r..b, 16+32*8+b*4(sp)
2400 |.else
2401 | sw r..a, 16+a*4(sp)
2402 | sw r..b, 16+b*4(sp)
2403 |.endif
2078 |.endmacro 2404 |.endmacro
2079 | 2405 |
2080 |->vm_exit_handler: 2406 |->vm_exit_handler:
2081 |.if JIT 2407 |.if JIT
2408 |.if FPU
2082 | addiu sp, sp, -(16+32*8+32*4) 2409 | addiu sp, sp, -(16+32*8+32*4)
2410 |.else
2411 | addiu sp, sp, -(16+32*4)
2412 |.endif
2083 | savex_ 0, 1 2413 | savex_ 0, 1
2084 | savex_ 2, 3 2414 | savex_ 2, 3
2085 | savex_ 4, 5 2415 | savex_ 4, 5
@@ -2094,25 +2424,34 @@ static void build_subroutines(BuildCtx *ctx)
2094 | savex_ 22, 23 2424 | savex_ 22, 23
2095 | savex_ 24, 25 2425 | savex_ 24, 25
2096 | savex_ 26, 27 2426 | savex_ 26, 27
2427 |.if FPU
2097 | sdc1 f28, 16+28*8(sp) 2428 | sdc1 f28, 16+28*8(sp)
2098 | sw r28, 16+32*8+28*4(sp)
2099 | sdc1 f30, 16+30*8(sp) 2429 | sdc1 f30, 16+30*8(sp)
2430 | sw r28, 16+32*8+28*4(sp)
2100 | sw r30, 16+32*8+30*4(sp) 2431 | sw r30, 16+32*8+30*4(sp)
2101 | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP. 2432 | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP.
2433 | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
2434 | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP
2435 |.else
2436 | sw r28, 16+28*4(sp)
2437 | sw r30, 16+30*4(sp)
2438 | sw r0, 16+31*4(sp) // Clear RID_TMP.
2439 | addiu TMP2, sp, 16+32*4 // Recompute original value of sp.
2440 | sw TMP2, 16+29*4(sp) // Store sp in RID_SP
2441 |.endif
2102 | li_vmstate EXIT 2442 | li_vmstate EXIT
2103 | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
2104 | addiu DISPATCH, JGL, -GG_DISP2G-32768 2443 | addiu DISPATCH, JGL, -GG_DISP2G-32768
2105 | lw TMP1, 0(TMP2) // Load exit number. 2444 | lw TMP1, 0(TMP2) // Load exit number.
2106 | st_vmstate 2445 | st_vmstate
2107 | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP. 2446 | lw L, DISPATCH_GL(cur_L)(DISPATCH)
2108 | lw L, DISPATCH_GL(jit_L)(DISPATCH) 2447 | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
2109 | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
2110 | load_got lj_trace_exit 2448 | load_got lj_trace_exit
2111 | sw L, DISPATCH_J(L)(DISPATCH) 2449 | sw L, DISPATCH_J(L)(DISPATCH)
2112 | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number. 2450 | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
2451 | sw BASE, L->base
2113 | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number. 2452 | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
2114 | addiu CARG1, DISPATCH, GG_DISP2J 2453 | addiu CARG1, DISPATCH, GG_DISP2J
2115 | sw BASE, L->base 2454 | sw r0, DISPATCH_GL(jit_base)(DISPATCH)
2116 | call_intern lj_trace_exit // (jit_State *J, ExitState *ex) 2455 | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
2117 |. addiu CARG2, sp, 16 2456 |. addiu CARG2, sp, 16
2118 | // Returns MULTRES (unscaled) or negated error code. 2457 | // Returns MULTRES (unscaled) or negated error code.
@@ -2128,19 +2467,21 @@ static void build_subroutines(BuildCtx *ctx)
2128 |.if JIT 2467 |.if JIT
2129 | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set. 2468 | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
2130 | lw L, SAVE_L 2469 | lw L, SAVE_L
2131 | addiu DISPATCH, JGL, -GG_DISP2G-32768 2470 | addiu DISPATCH, JGL, -GG_DISP2G-32768
2471 | sw BASE, L->base
2132 |1: 2472 |1:
2133 | bltz CRET1, >3 // Check for error from exit. 2473 | bltz CRET1, >9 // Check for error from exit.
2134 |. lw LFUNC:TMP1, FRAME_FUNC(BASE) 2474 |. lw LFUNC:RB, FRAME_FUNC(BASE)
2135 | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 2475 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2136 | sll MULTRES, CRET1, 3 2476 | sll MULTRES, CRET1, 3
2137 | li TISNIL, LJ_TNIL 2477 | li TISNIL, LJ_TNIL
2478 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
2138 | sw MULTRES, SAVE_MULTRES 2479 | sw MULTRES, SAVE_MULTRES
2139 | mtc1 TMP3, TOBIT 2480 | .FPU mtc1 TMP3, TOBIT
2140 | lw TMP1, LFUNC:TMP1->pc 2481 | lw TMP1, LFUNC:RB->pc
2141 | sw r0, DISPATCH_GL(jit_L)(DISPATCH) 2482 | sw r0, DISPATCH_GL(jit_base)(DISPATCH)
2142 | lw KBASE, PC2PROTO(k)(TMP1) 2483 | lw KBASE, PC2PROTO(k)(TMP1)
2143 | cvt.d.s TOBIT, TOBIT 2484 | .FPU cvt.d.s TOBIT, TOBIT
2144 | // Modified copy of ins_next which handles function header dispatch, too. 2485 | // Modified copy of ins_next which handles function header dispatch, too.
2145 | lw INS, 0(PC) 2486 | lw INS, 0(PC)
2146 | addiu PC, PC, 4 2487 | addiu PC, PC, 4
@@ -2148,7 +2489,7 @@ static void build_subroutines(BuildCtx *ctx)
2148 | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH) 2489 | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
2149 | decode_OP4a TMP1, INS 2490 | decode_OP4a TMP1, INS
2150 | decode_OP4b TMP1 2491 | decode_OP4b TMP1
2151 | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header? 2492 | sltiu TMP2, TMP1, BC_FUNCF*4
2152 | addu TMP0, DISPATCH, TMP1 2493 | addu TMP0, DISPATCH, TMP1
2153 | decode_RD8a RD, INS 2494 | decode_RD8a RD, INS
2154 | lw AT, 0(TMP0) 2495 | lw AT, 0(TMP0)
@@ -2158,11 +2499,27 @@ static void build_subroutines(BuildCtx *ctx)
2158 | jr AT 2499 | jr AT
2159 |. decode_RD8b RD 2500 |. decode_RD8b RD
2160 |2: 2501 |2:
2502 | sltiu TMP2, TMP1, (BC_FUNCC+2)*4 // Fast function?
2503 | bnez TMP2, >3
2504 |. lw TMP1, FRAME_PC(BASE)
2505 | // Check frame below fast function.
2506 | andi TMP0, TMP1, FRAME_TYPE
2507 | bnez TMP0, >3 // Trace stitching continuation?
2508 |. nop
2509 | // Otherwise set KBASE for Lua function below fast function.
2510 | lw TMP2, -4(TMP1)
2511 | decode_RA8a TMP0, TMP2
2512 | decode_RA8b TMP0
2513 | subu TMP1, BASE, TMP0
2514 | lw LFUNC:TMP2, -8+FRAME_FUNC(TMP1)
2515 | lw TMP1, LFUNC:TMP2->pc
2516 | lw KBASE, PC2PROTO(k)(TMP1)
2517 |3:
2161 | addiu RC, MULTRES, -8 2518 | addiu RC, MULTRES, -8
2162 | jr AT 2519 | jr AT
2163 |. addu RA, RA, BASE 2520 |. addu RA, RA, BASE
2164 | 2521 |
2165 |3: // Rethrow error from the right C frame. 2522 |9: // Rethrow error from the right C frame.
2166 | load_got lj_err_run 2523 | load_got lj_err_run
2167 | call_intern lj_err_run // (lua_State *L) 2524 | call_intern lj_err_run // (lua_State *L)
2168 |. move CARG1, L 2525 |. move CARG1, L
@@ -2172,8 +2529,9 @@ static void build_subroutines(BuildCtx *ctx)
2172 |//-- Math helper functions ---------------------------------------------- 2529 |//-- Math helper functions ----------------------------------------------
2173 |//----------------------------------------------------------------------- 2530 |//-----------------------------------------------------------------------
2174 | 2531 |
2532 |// Hard-float round to integer.
2175 |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1. 2533 |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
2176 |.macro vm_round, func 2534 |.macro vm_round_hf, func
2177 | lui TMP0, 0x4330 // Hiword of 2^52 (double). 2535 | lui TMP0, 0x4330 // Hiword of 2^52 (double).
2178 | mtc1 r0, f4 2536 | mtc1 r0, f4
2179 | mtc1 TMP0, f5 2537 | mtc1 TMP0, f5
@@ -2215,6 +2573,12 @@ static void build_subroutines(BuildCtx *ctx)
2215 |. mov.d FRET1, FARG1 2573 |. mov.d FRET1, FARG1
2216 |.endmacro 2574 |.endmacro
2217 | 2575 |
2576 |.macro vm_round, func
2577 |.if FPU
2578 | vm_round_hf, func
2579 |.endif
2580 |.endmacro
2581 |
2218 |->vm_floor: 2582 |->vm_floor:
2219 | vm_round floor 2583 | vm_round floor
2220 |->vm_ceil: 2584 |->vm_ceil:
@@ -2224,6 +2588,215 @@ static void build_subroutines(BuildCtx *ctx)
2224 | vm_round trunc 2588 | vm_round trunc
2225 |.endif 2589 |.endif
2226 | 2590 |
2591 |// Soft-float integer to number conversion.
2592 |.macro sfi2d, AHI, ALO
2593 |.if not FPU
2594 | beqz ALO, >9 // Handle zero first.
2595 |. sra TMP0, ALO, 31
2596 | xor TMP1, ALO, TMP0
2597 | subu TMP1, TMP1, TMP0 // Absolute value in TMP1.
2598 | clz AHI, TMP1
2599 | andi TMP0, TMP0, 0x800 // Mask sign bit.
2600 | li AT, 0x3ff+31-1
2601 | sllv TMP1, TMP1, AHI // Align mantissa left with leading 1.
2602 | subu AHI, AT, AHI // Exponent - 1 in AHI.
2603 | sll ALO, TMP1, 21
2604 | or AHI, AHI, TMP0 // Sign | Exponent.
2605 | srl TMP1, TMP1, 11
2606 | sll AHI, AHI, 20 // Align left.
2607 | jr ra
2608 |. addu AHI, AHI, TMP1 // Add mantissa, increment exponent.
2609 |9:
2610 | jr ra
2611 |. li AHI, 0
2612 |.endif
2613 |.endmacro
2614 |
2615 |// Input SFARG1LO. Output: SFARG1*. Temporaries: AT, TMP0, TMP1.
2616 |->vm_sfi2d_1:
2617 | sfi2d SFARG1HI, SFARG1LO
2618 |
2619 |// Input SFARG2LO. Output: SFARG2*. Temporaries: AT, TMP0, TMP1.
2620 |->vm_sfi2d_2:
2621 | sfi2d SFARG2HI, SFARG2LO
2622 |
2623 |// Soft-float comparison. Equivalent to c.eq.d.
2624 |// Input: SFARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1.
2625 |->vm_sfcmpeq:
2626 |.if not FPU
2627 | sll AT, SFARG1HI, 1
2628 | sll TMP0, SFARG2HI, 1
2629 | or CRET1, SFARG1LO, SFARG2LO
2630 | or TMP1, AT, TMP0
2631 | or TMP1, TMP1, CRET1
2632 | beqz TMP1, >8 // Both args +-0: return 1.
2633 |. sltu CRET1, r0, SFARG1LO
2634 | lui TMP1, 0xffe0
2635 | addu AT, AT, CRET1
2636 | sltu CRET1, r0, SFARG2LO
2637 | sltu AT, TMP1, AT
2638 | addu TMP0, TMP0, CRET1
2639 | sltu TMP0, TMP1, TMP0
2640 | or TMP1, AT, TMP0
2641 | bnez TMP1, >9 // Either arg is NaN: return 0;
2642 |. xor TMP0, SFARG1HI, SFARG2HI
2643 | xor TMP1, SFARG1LO, SFARG2LO
2644 | or AT, TMP0, TMP1
2645 | jr ra
2646 |. sltiu CRET1, AT, 1 // Same values: return 1.
2647 |8:
2648 | jr ra
2649 |. li CRET1, 1
2650 |9:
2651 | jr ra
2652 |. li CRET1, 0
2653 |.endif
2654 |
2655 |// Soft-float comparison. Equivalent to c.ult.d and c.olt.d.
2656 |// Input: SFARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1, CRET2.
2657 |->vm_sfcmpult:
2658 |.if not FPU
2659 | b >1
2660 |. li CRET2, 1
2661 |.endif
2662 |
2663 |->vm_sfcmpolt:
2664 |.if not FPU
2665 | li CRET2, 0
2666 |1:
2667 | sll AT, SFARG1HI, 1
2668 | sll TMP0, SFARG2HI, 1
2669 | or CRET1, SFARG1LO, SFARG2LO
2670 | or TMP1, AT, TMP0
2671 | or TMP1, TMP1, CRET1
2672 | beqz TMP1, >8 // Both args +-0: return 0.
2673 |. sltu CRET1, r0, SFARG1LO
2674 | lui TMP1, 0xffe0
2675 | addu AT, AT, CRET1
2676 | sltu CRET1, r0, SFARG2LO
2677 | sltu AT, TMP1, AT
2678 | addu TMP0, TMP0, CRET1
2679 | sltu TMP0, TMP1, TMP0
2680 | or TMP1, AT, TMP0
2681 | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
2682 |. and AT, SFARG1HI, SFARG2HI
2683 | bltz AT, >5 // Both args negative?
2684 |. nop
2685 | beq SFARG1HI, SFARG2HI, >8
2686 |. sltu CRET1, SFARG1LO, SFARG2LO
2687 | jr ra
2688 |. slt CRET1, SFARG1HI, SFARG2HI
2689 |5: // Swap conditions if both operands are negative.
2690 | beq SFARG1HI, SFARG2HI, >8
2691 |. sltu CRET1, SFARG2LO, SFARG1LO
2692 | jr ra
2693 |. slt CRET1, SFARG2HI, SFARG1HI
2694 |8:
2695 | jr ra
2696 |. nop
2697 |9:
2698 | jr ra
2699 |. move CRET1, CRET2
2700 |.endif
2701 |
2702 |->vm_sfcmpogt:
2703 |.if not FPU
2704 | sll AT, SFARG2HI, 1
2705 | sll TMP0, SFARG1HI, 1
2706 | or CRET1, SFARG2LO, SFARG1LO
2707 | or TMP1, AT, TMP0
2708 | or TMP1, TMP1, CRET1
2709 | beqz TMP1, >8 // Both args +-0: return 0.
2710 |. sltu CRET1, r0, SFARG2LO
2711 | lui TMP1, 0xffe0
2712 | addu AT, AT, CRET1
2713 | sltu CRET1, r0, SFARG1LO
2714 | sltu AT, TMP1, AT
2715 | addu TMP0, TMP0, CRET1
2716 | sltu TMP0, TMP1, TMP0
2717 | or TMP1, AT, TMP0
2718 | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
2719 |. and AT, SFARG2HI, SFARG1HI
2720 | bltz AT, >5 // Both args negative?
2721 |. nop
2722 | beq SFARG2HI, SFARG1HI, >8
2723 |. sltu CRET1, SFARG2LO, SFARG1LO
2724 | jr ra
2725 |. slt CRET1, SFARG2HI, SFARG1HI
2726 |5: // Swap conditions if both operands are negative.
2727 | beq SFARG2HI, SFARG1HI, >8
2728 |. sltu CRET1, SFARG1LO, SFARG2LO
2729 | jr ra
2730 |. slt CRET1, SFARG1HI, SFARG2HI
2731 |8:
2732 | jr ra
2733 |. nop
2734 |9:
2735 | jr ra
2736 |. li CRET1, 0
2737 |.endif
2738 |
2739 |// Soft-float comparison. Equivalent to c.ole.d a, b or c.ole.d b, a.
2740 |// Input: SFARG*, TMP3. Output: CRET1. Temporaries: AT, TMP0, TMP1.
2741 |->vm_sfcmpolex:
2742 |.if not FPU
2743 | sll AT, SFARG1HI, 1
2744 | sll TMP0, SFARG2HI, 1
2745 | or CRET1, SFARG1LO, SFARG2LO
2746 | or TMP1, AT, TMP0
2747 | or TMP1, TMP1, CRET1
2748 | beqz TMP1, >8 // Both args +-0: return 1.
2749 |. sltu CRET1, r0, SFARG1LO
2750 | lui TMP1, 0xffe0
2751 | addu AT, AT, CRET1
2752 | sltu CRET1, r0, SFARG2LO
2753 | sltu AT, TMP1, AT
2754 | addu TMP0, TMP0, CRET1
2755 | sltu TMP0, TMP1, TMP0
2756 | or TMP1, AT, TMP0
2757 | bnez TMP1, >9 // Either arg is NaN: return 0;
2758 |. and AT, SFARG1HI, SFARG2HI
2759 | xor AT, AT, TMP3
2760 | bltz AT, >5 // Both args negative?
2761 |. nop
2762 | beq SFARG1HI, SFARG2HI, >6
2763 |. sltu CRET1, SFARG2LO, SFARG1LO
2764 | jr ra
2765 |. slt CRET1, SFARG2HI, SFARG1HI
2766 |5: // Swap conditions if both operands are negative.
2767 | beq SFARG1HI, SFARG2HI, >6
2768 |. sltu CRET1, SFARG1LO, SFARG2LO
2769 | slt CRET1, SFARG1HI, SFARG2HI
2770 |6:
2771 | jr ra
2772 |. nop
2773 |8:
2774 | jr ra
2775 |. li CRET1, 1
2776 |9:
2777 | jr ra
2778 |. li CRET1, 0
2779 |.endif
2780 |
2781 |.macro sfmin_max, name, fpcall
2782 |->vm_sf .. name:
2783 |.if JIT and not FPU
2784 | move TMP2, ra
2785 | bal ->fpcall
2786 |. nop
2787 | move TMP0, CRET1
2788 | move SFRETHI, SFARG1HI
2789 | move SFRETLO, SFARG1LO
2790 | move ra, TMP2
2791 | movz SFRETHI, SFARG2HI, TMP0
2792 | jr ra
2793 |. movz SFRETLO, SFARG2LO, TMP0
2794 |.endif
2795 |.endmacro
2796 |
2797 | sfmin_max min, vm_sfcmpolt
2798 | sfmin_max max, vm_sfcmpogt
2799 |
2227 |//----------------------------------------------------------------------- 2800 |//-----------------------------------------------------------------------
2228 |//-- Miscellaneous functions -------------------------------------------- 2801 |//-- Miscellaneous functions --------------------------------------------
2229 |//----------------------------------------------------------------------- 2802 |//-----------------------------------------------------------------------
@@ -2243,10 +2816,10 @@ static void build_subroutines(BuildCtx *ctx)
2243 | sw r1, CTSTATE->cb.slot 2816 | sw r1, CTSTATE->cb.slot
2244 | sw CARG1, CTSTATE->cb.gpr[0] 2817 | sw CARG1, CTSTATE->cb.gpr[0]
2245 | sw CARG2, CTSTATE->cb.gpr[1] 2818 | sw CARG2, CTSTATE->cb.gpr[1]
2246 | sdc1 FARG1, CTSTATE->cb.fpr[0] 2819 | .FPU sdc1 FARG1, CTSTATE->cb.fpr[0]
2247 | sw CARG3, CTSTATE->cb.gpr[2] 2820 | sw CARG3, CTSTATE->cb.gpr[2]
2248 | sw CARG4, CTSTATE->cb.gpr[3] 2821 | sw CARG4, CTSTATE->cb.gpr[3]
2249 | sdc1 FARG2, CTSTATE->cb.fpr[1] 2822 | .FPU sdc1 FARG2, CTSTATE->cb.fpr[1]
2250 | addiu TMP0, sp, CFRAME_SPACE+16 2823 | addiu TMP0, sp, CFRAME_SPACE+16
2251 | sw TMP0, CTSTATE->cb.stack 2824 | sw TMP0, CTSTATE->cb.stack
2252 | sw r0, SAVE_PC // Any value outside of bytecode is ok. 2825 | sw r0, SAVE_PC // Any value outside of bytecode is ok.
@@ -2256,15 +2829,16 @@ static void build_subroutines(BuildCtx *ctx)
2256 | // Returns lua_State *. 2829 | // Returns lua_State *.
2257 | lw BASE, L:CRET1->base 2830 | lw BASE, L:CRET1->base
2258 | lw RC, L:CRET1->top 2831 | lw RC, L:CRET1->top
2832 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
2259 | move L, CRET1 2833 | move L, CRET1
2260 | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 2834 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2261 | lw LFUNC:RB, FRAME_FUNC(BASE) 2835 | lw LFUNC:RB, FRAME_FUNC(BASE)
2262 | mtc1 TMP3, TOBIT 2836 | .FPU mtc1 TMP3, TOBIT
2263 | li_vmstate INTERP 2837 | li_vmstate INTERP
2264 | li TISNIL, LJ_TNIL 2838 | li TISNIL, LJ_TNIL
2265 | subu RC, RC, BASE 2839 | subu RC, RC, BASE
2266 | st_vmstate 2840 | st_vmstate
2267 | cvt.d.s TOBIT, TOBIT 2841 | .FPU cvt.d.s TOBIT, TOBIT
2268 | ins_callt 2842 | ins_callt
2269 |.endif 2843 |.endif
2270 | 2844 |
@@ -2278,11 +2852,11 @@ static void build_subroutines(BuildCtx *ctx)
2278 | move CARG2, RA 2852 | move CARG2, RA
2279 | call_intern lj_ccallback_leave // (CTState *cts, TValue *o) 2853 | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
2280 |. move CARG1, CTSTATE 2854 |. move CARG1, CTSTATE
2855 | .FPU ldc1 FRET1, CTSTATE->cb.fpr[0]
2281 | lw CRET1, CTSTATE->cb.gpr[0] 2856 | lw CRET1, CTSTATE->cb.gpr[0]
2282 | ldc1 FRET1, CTSTATE->cb.fpr[0] 2857 | .FPU ldc1 FRET2, CTSTATE->cb.fpr[1]
2283 | lw CRET2, CTSTATE->cb.gpr[1]
2284 | b ->vm_leave_unw 2858 | b ->vm_leave_unw
2285 |. ldc1 FRET2, CTSTATE->cb.fpr[1] 2859 |. lw CRET2, CTSTATE->cb.gpr[1]
2286 |.endif 2860 |.endif
2287 | 2861 |
2288 |->vm_ffi_call: // Call C function via FFI. 2862 |->vm_ffi_call: // Call C function via FFI.
@@ -2314,8 +2888,8 @@ static void build_subroutines(BuildCtx *ctx)
2314 | lw CARG2, CCSTATE->gpr[1] 2888 | lw CARG2, CCSTATE->gpr[1]
2315 | lw CARG3, CCSTATE->gpr[2] 2889 | lw CARG3, CCSTATE->gpr[2]
2316 | lw CARG4, CCSTATE->gpr[3] 2890 | lw CARG4, CCSTATE->gpr[3]
2317 | ldc1 FARG1, CCSTATE->fpr[0] 2891 | .FPU ldc1 FARG1, CCSTATE->fpr[0]
2318 | ldc1 FARG2, CCSTATE->fpr[1] 2892 | .FPU ldc1 FARG2, CCSTATE->fpr[1]
2319 | jalr CFUNCADDR 2893 | jalr CFUNCADDR
2320 |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1. 2894 |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
2321 | lw CCSTATE:TMP1, -12(r16) 2895 | lw CCSTATE:TMP1, -12(r16)
@@ -2323,8 +2897,13 @@ static void build_subroutines(BuildCtx *ctx)
2323 | lw ra, -4(r16) 2897 | lw ra, -4(r16)
2324 | sw CRET1, CCSTATE:TMP1->gpr[0] 2898 | sw CRET1, CCSTATE:TMP1->gpr[0]
2325 | sw CRET2, CCSTATE:TMP1->gpr[1] 2899 | sw CRET2, CCSTATE:TMP1->gpr[1]
2900 |.if FPU
2326 | sdc1 FRET1, CCSTATE:TMP1->fpr[0] 2901 | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
2327 | sdc1 FRET2, CCSTATE:TMP1->fpr[1] 2902 | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
2903 |.else
2904 | sw CARG1, CCSTATE:TMP1->gpr[2] // Soft-float: complex double .im part.
2905 | sw CARG2, CCSTATE:TMP1->gpr[3]
2906 |.endif
2328 | move sp, r16 2907 | move sp, r16
2329 | jr ra 2908 | jr ra
2330 |. move r16, TMP2 2909 |. move r16, TMP2
@@ -2348,82 +2927,143 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2348 2927
2349 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: 2928 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
2350 | // RA = src1*8, RD = src2*8, JMP with RD = target 2929 | // RA = src1*8, RD = src2*8, JMP with RD = target
2351 | addu CARG2, BASE, RA 2930 |.macro bc_comp, FRA, FRD, RAHI, RALO, RDHI, RDLO, movop, fmovop, fcomp, sfcomp
2352 | addu CARG3, BASE, RD 2931 | addu RA, BASE, RA
2353 | lw TMP0, HI(CARG2) 2932 | addu RD, BASE, RD
2354 | lw TMP1, HI(CARG3) 2933 | lw RAHI, HI(RA)
2355 | ldc1 f0, 0(CARG2) 2934 | lw RDHI, HI(RD)
2356 | ldc1 f2, 0(CARG3)
2357 | sltiu TMP0, TMP0, LJ_TISNUM
2358 | sltiu TMP1, TMP1, LJ_TISNUM
2359 | lhu TMP2, OFS_RD(PC) 2935 | lhu TMP2, OFS_RD(PC)
2360 | and TMP0, TMP0, TMP1
2361 | addiu PC, PC, 4 2936 | addiu PC, PC, 4
2362 | beqz TMP0, ->vmeta_comp 2937 | bne RAHI, TISNUM, >2
2363 |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535) 2938 |. lw RALO, LO(RA)
2364 | decode_RD4b TMP2 2939 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
2365 | addu TMP2, TMP2, TMP1 2940 | lw RDLO, LO(RD)
2366 if (op == BC_ISLT || op == BC_ISGE) { 2941 | bne RDHI, TISNUM, >5
2367 | c.olt.d f0, f2 2942 |. decode_RD4b TMP2
2368 } else { 2943 | slt AT, SFARG1LO, SFARG2LO
2369 | c.ole.d f0, f2 2944 | addu TMP2, TMP2, TMP3
2370 } 2945 | movop TMP2, r0, AT
2371 if (op == BC_ISLT || op == BC_ISLE) {
2372 | movf TMP2, r0
2373 } else {
2374 | movt TMP2, r0
2375 }
2376 | addu PC, PC, TMP2
2377 |1: 2946 |1:
2947 | addu PC, PC, TMP2
2378 | ins_next 2948 | ins_next
2949 |
2950 |2: // RA is not an integer.
2951 | sltiu AT, RAHI, LJ_TISNUM
2952 | beqz AT, ->vmeta_comp
2953 |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
2954 | sltiu AT, RDHI, LJ_TISNUM
2955 |.if FPU
2956 | ldc1 FRA, 0(RA)
2957 | ldc1 FRD, 0(RD)
2958 |.else
2959 | lw RDLO, LO(RD)
2960 |.endif
2961 | beqz AT, >4
2962 |. decode_RD4b TMP2
2963 |3: // RA and RD are both numbers.
2964 |.if FPU
2965 | fcomp f20, f22
2966 | addu TMP2, TMP2, TMP3
2967 | b <1
2968 |. fmovop TMP2, r0
2969 |.else
2970 | bal sfcomp
2971 |. addu TMP2, TMP2, TMP3
2972 | b <1
2973 |. movop TMP2, r0, CRET1
2974 |.endif
2975 |
2976 |4: // RA is a number, RD is not a number.
2977 | bne RDHI, TISNUM, ->vmeta_comp
2978 | // RA is a number, RD is an integer. Convert RD to a number.
2979 |.if FPU
2980 |. lwc1 FRD, LO(RD)
2981 | b <3
2982 |. cvt.d.w FRD, FRD
2983 |.else
2984 |. nop
2985 |.if "RDHI" == "SFARG1HI"
2986 | bal ->vm_sfi2d_1
2987 |.else
2988 | bal ->vm_sfi2d_2
2989 |.endif
2990 |. nop
2991 | b <3
2992 |. nop
2993 |.endif
2994 |
2995 |5: // RA is an integer, RD is not an integer
2996 | sltiu AT, RDHI, LJ_TISNUM
2997 | beqz AT, ->vmeta_comp
2998 | // RA is an integer, RD is a number. Convert RA to a number.
2999 |.if FPU
3000 |. mtc1 RALO, FRA
3001 | ldc1 FRD, 0(RD)
3002 | b <3
3003 | cvt.d.w FRA, FRA
3004 |.else
3005 |. nop
3006 |.if "RAHI" == "SFARG1HI"
3007 | bal ->vm_sfi2d_1
3008 |.else
3009 | bal ->vm_sfi2d_2
3010 |.endif
3011 |. nop
3012 | b <3
3013 |. nop
3014 |.endif
3015 |.endmacro
3016 |
3017 if (op == BC_ISLT) {
3018 | bc_comp f20, f22, SFARG1HI, SFARG1LO, SFARG2HI, SFARG2LO, movz, movf, c.olt.d, ->vm_sfcmpolt
3019 } else if (op == BC_ISGE) {
3020 | bc_comp f20, f22, SFARG1HI, SFARG1LO, SFARG2HI, SFARG2LO, movn, movt, c.olt.d, ->vm_sfcmpolt
3021 } else if (op == BC_ISLE) {
3022 | bc_comp f22, f20, SFARG2HI, SFARG2LO, SFARG1HI, SFARG1LO, movn, movt, c.ult.d, ->vm_sfcmpult
3023 } else {
3024 | bc_comp f22, f20, SFARG2HI, SFARG2LO, SFARG1HI, SFARG1LO, movz, movf, c.ult.d, ->vm_sfcmpult
3025 }
2379 break; 3026 break;
2380 3027
2381 case BC_ISEQV: case BC_ISNEV: 3028 case BC_ISEQV: case BC_ISNEV:
2382 vk = op == BC_ISEQV; 3029 vk = op == BC_ISEQV;
2383 | // RA = src1*8, RD = src2*8, JMP with RD = target 3030 | // RA = src1*8, RD = src2*8, JMP with RD = target
2384 | addu RA, BASE, RA 3031 | addu RA, BASE, RA
2385 | addiu PC, PC, 4 3032 | addiu PC, PC, 4
2386 | lw TMP0, HI(RA)
2387 | ldc1 f0, 0(RA)
2388 | addu RD, BASE, RD 3033 | addu RD, BASE, RD
3034 | lw SFARG1HI, HI(RA)
2389 | lhu TMP2, -4+OFS_RD(PC) 3035 | lhu TMP2, -4+OFS_RD(PC)
2390 | lw TMP1, HI(RD) 3036 | lw SFARG2HI, HI(RD)
2391 | ldc1 f2, 0(RD)
2392 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) 3037 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
2393 | sltiu AT, TMP0, LJ_TISNUM 3038 | sltu AT, TISNUM, SFARG1HI
2394 | sltiu CARG1, TMP1, LJ_TISNUM 3039 | sltu TMP0, TISNUM, SFARG2HI
2395 | decode_RD4b TMP2 3040 | or AT, AT, TMP0
2396 | and AT, AT, CARG1
2397 | beqz AT, >5
2398 |. addu TMP2, TMP2, TMP3
2399 | c.eq.d f0, f2
2400 if (vk) { 3041 if (vk) {
2401 | movf TMP2, r0 3042 | beqz AT, ->BC_ISEQN_Z
2402 } else { 3043 } else {
2403 | movt TMP2, r0 3044 | beqz AT, ->BC_ISNEN_Z
2404 } 3045 }
2405 |1: 3046 |. decode_RD4b TMP2
2406 | addu PC, PC, TMP2 3047 | // Either or both types are not numbers.
2407 | ins_next 3048 | lw SFARG1LO, LO(RA)
2408 |5: // Either or both types are not numbers. 3049 | lw SFARG2LO, LO(RD)
2409 | lw CARG2, LO(RA) 3050 | addu TMP2, TMP2, TMP3
2410 | lw CARG3, LO(RD)
2411 |.if FFI 3051 |.if FFI
2412 | li TMP3, LJ_TCDATA 3052 | li TMP3, LJ_TCDATA
2413 | beq TMP0, TMP3, ->vmeta_equal_cd 3053 | beq SFARG1HI, TMP3, ->vmeta_equal_cd
2414 |.endif 3054 |.endif
2415 |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive? 3055 |. sltiu AT, SFARG1HI, LJ_TISPRI // Not a primitive?
2416 |.if FFI 3056 |.if FFI
2417 | beq TMP1, TMP3, ->vmeta_equal_cd 3057 | beq SFARG2HI, TMP3, ->vmeta_equal_cd
2418 |.endif 3058 |.endif
2419 |. xor TMP3, CARG2, CARG3 // Same tv? 3059 |. xor TMP3, SFARG1LO, SFARG2LO // Same tv?
2420 | xor TMP1, TMP1, TMP0 // Same type? 3060 | xor SFARG2HI, SFARG2HI, SFARG1HI // Same type?
2421 | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata? 3061 | sltiu TMP0, SFARG1HI, LJ_TISTABUD+1 // Table or userdata?
2422 | movz TMP3, r0, AT // Ignore tv if primitive. 3062 | movz TMP3, r0, AT // Ignore tv if primitive.
2423 | movn CARG1, r0, TMP1 // Tab/ud and same type? 3063 | movn TMP0, r0, SFARG2HI // Tab/ud and same type?
2424 | or AT, TMP1, TMP3 // Same type && (pri||same tv). 3064 | or AT, SFARG2HI, TMP3 // Same type && (pri||same tv).
2425 | movz CARG1, r0, AT 3065 | movz TMP0, r0, AT
2426 | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv. 3066 | beqz TMP0, >1 // Done if not tab/ud or not same type or same tv.
2427 if (vk) { 3067 if (vk) {
2428 |. movn TMP2, r0, AT 3068 |. movn TMP2, r0, AT
2429 } else { 3069 } else {
@@ -2431,15 +3071,18 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2431 } 3071 }
2432 | // Different tables or userdatas. Need to check __eq metamethod. 3072 | // Different tables or userdatas. Need to check __eq metamethod.
2433 | // Field metatable must be at same offset for GCtab and GCudata! 3073 | // Field metatable must be at same offset for GCtab and GCudata!
2434 | lw TAB:TMP1, TAB:CARG2->metatable 3074 | lw TAB:TMP1, TAB:SFARG1LO->metatable
2435 | beqz TAB:TMP1, <1 // No metatable? 3075 | beqz TAB:TMP1, >1 // No metatable?
2436 |. nop 3076 |. nop
2437 | lbu TMP1, TAB:TMP1->nomm 3077 | lbu TMP1, TAB:TMP1->nomm
2438 | andi TMP1, TMP1, 1<<MM_eq 3078 | andi TMP1, TMP1, 1<<MM_eq
2439 | bnez TMP1, <1 // Or 'no __eq' flag set? 3079 | bnez TMP1, >1 // Or 'no __eq' flag set?
2440 |. nop 3080 |. nop
2441 | b ->vmeta_equal // Handle __eq metamethod. 3081 | b ->vmeta_equal // Handle __eq metamethod.
2442 |. li CARG4, 1-vk // ne = 0 or 1. 3082 |. li TMP0, 1-vk // ne = 0 or 1.
3083 |1:
3084 | addu PC, PC, TMP2
3085 | ins_next
2443 break; 3086 break;
2444 3087
2445 case BC_ISEQS: case BC_ISNES: 3088 case BC_ISEQS: case BC_ISNES:
@@ -2476,38 +3119,124 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2476 vk = op == BC_ISEQN; 3119 vk = op == BC_ISEQN;
2477 | // RA = src*8, RD = num_const*8, JMP with RD = target 3120 | // RA = src*8, RD = num_const*8, JMP with RD = target
2478 | addu RA, BASE, RA 3121 | addu RA, BASE, RA
2479 | addiu PC, PC, 4 3122 | addu RD, KBASE, RD
2480 | lw TMP0, HI(RA) 3123 | lw SFARG1HI, HI(RA)
2481 | ldc1 f0, 0(RA) 3124 | lw SFARG2HI, HI(RD)
2482 | addu RD, KBASE, RD 3125 | lhu TMP2, OFS_RD(PC)
2483 | lhu TMP2, -4+OFS_RD(PC) 3126 | addiu PC, PC, 4
2484 | ldc1 f2, 0(RD)
2485 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) 3127 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
2486 | sltiu AT, TMP0, LJ_TISNUM
2487 | decode_RD4b TMP2 3128 | decode_RD4b TMP2
2488 |.if FFI
2489 | beqz AT, >5
2490 |.else
2491 | beqz AT, >1
2492 |.endif
2493 |. addu TMP2, TMP2, TMP3
2494 | c.eq.d f0, f2
2495 if (vk) { 3129 if (vk) {
2496 | movf TMP2, r0 3130 |->BC_ISEQN_Z:
2497 | addu PC, PC, TMP2 3131 } else {
3132 |->BC_ISNEN_Z:
3133 }
3134 | bne SFARG1HI, TISNUM, >3
3135 |. lw SFARG1LO, LO(RA)
3136 | lw SFARG2LO, LO(RD)
3137 | addu TMP2, TMP2, TMP3
3138 | bne SFARG2HI, TISNUM, >6
3139 |. xor AT, SFARG1LO, SFARG2LO
3140 if (vk) {
3141 | movn TMP2, r0, AT
2498 |1: 3142 |1:
3143 | addu PC, PC, TMP2
3144 |2:
2499 } else { 3145 } else {
2500 | movt TMP2, r0 3146 | movz TMP2, r0, AT
2501 |1: 3147 |1:
3148 |2:
2502 | addu PC, PC, TMP2 3149 | addu PC, PC, TMP2
2503 } 3150 }
2504 | ins_next 3151 | ins_next
3152 |
3153 |3: // RA is not an integer.
3154 | sltiu AT, SFARG1HI, LJ_TISNUM
2505 |.if FFI 3155 |.if FFI
2506 |5: 3156 | beqz AT, >8
2507 | li AT, LJ_TCDATA 3157 |.else
2508 | beq TMP0, AT, ->vmeta_equal_cd 3158 | beqz AT, <2
3159 |.endif
3160 |. addu TMP2, TMP2, TMP3
3161 | sltiu AT, SFARG2HI, LJ_TISNUM
3162 |.if FPU
3163 | ldc1 f20, 0(RA)
3164 | ldc1 f22, 0(RD)
3165 |.endif
3166 | beqz AT, >5
3167 |. lw SFARG2LO, LO(RD)
3168 |4: // RA and RD are both numbers.
3169 |.if FPU
3170 | c.eq.d f20, f22
3171 | b <1
3172 if (vk) {
3173 |. movf TMP2, r0
3174 } else {
3175 |. movt TMP2, r0
3176 }
3177 |.else
3178 | bal ->vm_sfcmpeq
2509 |. nop 3179 |. nop
2510 | b <1 3180 | b <1
3181 if (vk) {
3182 |. movz TMP2, r0, CRET1
3183 } else {
3184 |. movn TMP2, r0, CRET1
3185 }
3186 |.endif
3187 |
3188 |5: // RA is a number, RD is not a number.
3189 |.if FFI
3190 | bne SFARG2HI, TISNUM, >9
3191 |.else
3192 | bne SFARG2HI, TISNUM, <2
3193 |.endif
3194 | // RA is a number, RD is an integer. Convert RD to a number.
3195 |.if FPU
3196 |. lwc1 f22, LO(RD)
3197 | b <4
3198 |. cvt.d.w f22, f22
3199 |.else
3200 |. nop
3201 | bal ->vm_sfi2d_2
3202 |. nop
3203 | b <4
3204 |. nop
3205 |.endif
3206 |
3207 |6: // RA is an integer, RD is not an integer
3208 | sltiu AT, SFARG2HI, LJ_TISNUM
3209 |.if FFI
3210 | beqz AT, >9
3211 |.else
3212 | beqz AT, <2
3213 |.endif
3214 | // RA is an integer, RD is a number. Convert RA to a number.
3215 |.if FPU
3216 |. mtc1 SFARG1LO, f20
3217 | ldc1 f22, 0(RD)
3218 | b <4
3219 | cvt.d.w f20, f20
3220 |.else
3221 |. nop
3222 | bal ->vm_sfi2d_1
3223 |. nop
3224 | b <4
3225 |. nop
3226 |.endif
3227 |
3228 |.if FFI
3229 |8:
3230 | li AT, LJ_TCDATA
3231 | bne SFARG1HI, AT, <2
3232 |. nop
3233 | b ->vmeta_equal_cd
3234 |. nop
3235 |9:
3236 | li AT, LJ_TCDATA
3237 | bne SFARG2HI, AT, <2
3238 |. nop
3239 | b ->vmeta_equal_cd
2511 |. nop 3240 |. nop
2512 |.endif 3241 |.endif
2513 break; 3242 break;
@@ -2559,7 +3288,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2559 | addu PC, PC, TMP2 3288 | addu PC, PC, TMP2
2560 } else { 3289 } else {
2561 | sltiu TMP0, TMP0, LJ_TISTRUECOND 3290 | sltiu TMP0, TMP0, LJ_TISTRUECOND
2562 | ldc1 f0, 0(RD) 3291 | lw SFRETHI, HI(RD)
3292 | lw SFRETLO, LO(RD)
2563 if (op == BC_ISTC) { 3293 if (op == BC_ISTC) {
2564 | beqz TMP0, >1 3294 | beqz TMP0, >1
2565 } else { 3295 } else {
@@ -2569,22 +3299,45 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2569 | decode_RD4b TMP2 3299 | decode_RD4b TMP2
2570 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) 3300 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
2571 | addu TMP2, TMP2, TMP3 3301 | addu TMP2, TMP2, TMP3
2572 | sdc1 f0, 0(RA) 3302 | sw SFRETHI, HI(RA)
3303 | sw SFRETLO, LO(RA)
2573 | addu PC, PC, TMP2 3304 | addu PC, PC, TMP2
2574 |1: 3305 |1:
2575 } 3306 }
2576 | ins_next 3307 | ins_next
2577 break; 3308 break;
2578 3309
3310 case BC_ISTYPE:
3311 | // RA = src*8, RD = -type*8
3312 | addu TMP2, BASE, RA
3313 | srl TMP1, RD, 3
3314 | lw TMP0, HI(TMP2)
3315 | ins_next1
3316 | addu AT, TMP0, TMP1
3317 | bnez AT, ->vmeta_istype
3318 |. ins_next2
3319 break;
3320 case BC_ISNUM:
3321 | // RA = src*8, RD = -(TISNUM-1)*8
3322 | addu TMP2, BASE, RA
3323 | lw TMP0, HI(TMP2)
3324 | ins_next1
3325 | sltiu AT, TMP0, LJ_TISNUM
3326 | beqz AT, ->vmeta_istype
3327 |. ins_next2
3328 break;
3329
2579 /* -- Unary ops --------------------------------------------------------- */ 3330 /* -- Unary ops --------------------------------------------------------- */
2580 3331
2581 case BC_MOV: 3332 case BC_MOV:
2582 | // RA = dst*8, RD = src*8 3333 | // RA = dst*8, RD = src*8
2583 | addu RD, BASE, RD 3334 | addu RD, BASE, RD
2584 | addu RA, BASE, RA 3335 | addu RA, BASE, RA
2585 | ldc1 f0, 0(RD) 3336 | lw SFRETHI, HI(RD)
3337 | lw SFRETLO, LO(RD)
2586 | ins_next1 3338 | ins_next1
2587 | sdc1 f0, 0(RA) 3339 | sw SFRETHI, HI(RA)
3340 | sw SFRETLO, LO(RA)
2588 | ins_next2 3341 | ins_next2
2589 break; 3342 break;
2590 case BC_NOT: 3343 case BC_NOT:
@@ -2601,16 +3354,25 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2601 break; 3354 break;
2602 case BC_UNM: 3355 case BC_UNM:
2603 | // RA = dst*8, RD = src*8 3356 | // RA = dst*8, RD = src*8
2604 | addu CARG3, BASE, RD 3357 | addu RB, BASE, RD
3358 | lw SFARG1HI, HI(RB)
2605 | addu RA, BASE, RA 3359 | addu RA, BASE, RA
2606 | lw TMP0, HI(CARG3) 3360 | bne SFARG1HI, TISNUM, >2
2607 | ldc1 f0, 0(CARG3) 3361 |. lw SFARG1LO, LO(RB)
2608 | sltiu AT, TMP0, LJ_TISNUM 3362 | lui TMP1, 0x8000
2609 | beqz AT, ->vmeta_unm 3363 | beq SFARG1LO, TMP1, ->vmeta_unm // Meta handler deals with -2^31.
2610 |. neg.d f0, f0 3364 |. negu SFARG1LO, SFARG1LO
3365 |1:
2611 | ins_next1 3366 | ins_next1
2612 | sdc1 f0, 0(RA) 3367 | sw SFARG1HI, HI(RA)
3368 | sw SFARG1LO, LO(RA)
2613 | ins_next2 3369 | ins_next2
3370 |2:
3371 | sltiu AT, SFARG1HI, LJ_TISNUM
3372 | beqz AT, ->vmeta_unm
3373 |. lui TMP1, 0x8000
3374 | b <1
3375 |. xor SFARG1HI, SFARG1HI, TMP1
2614 break; 3376 break;
2615 case BC_LEN: 3377 case BC_LEN:
2616 | // RA = dst*8, RD = src*8 3378 | // RA = dst*8, RD = src*8
@@ -2621,12 +3383,11 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2621 | li AT, LJ_TSTR 3383 | li AT, LJ_TSTR
2622 | bne TMP0, AT, >2 3384 | bne TMP0, AT, >2
2623 |. li AT, LJ_TTAB 3385 |. li AT, LJ_TTAB
2624 | lw CRET1, STR:CARG1->len 3386 | lw CRET1, STR:CARG1->len
2625 |1: 3387 |1:
2626 | mtc1 CRET1, f0
2627 | cvt.d.w f0, f0
2628 | ins_next1 3388 | ins_next1
2629 | sdc1 f0, 0(RA) 3389 | sw TISNUM, HI(RA)
3390 | sw CRET1, LO(RA)
2630 | ins_next2 3391 | ins_next2
2631 |2: 3392 |2:
2632 | bne TMP0, AT, ->vmeta_len 3393 | bne TMP0, AT, ->vmeta_len
@@ -2657,104 +3418,232 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2657 3418
2658 /* -- Binary ops -------------------------------------------------------- */ 3419 /* -- Binary ops -------------------------------------------------------- */
2659 3420
2660 |.macro ins_arithpre 3421 |.macro fpmod, a, b, c
3422 | bal ->vm_floor // floor(b/c)
3423 |. div.d FARG1, b, c
3424 | mul.d a, FRET1, c
3425 | sub.d a, b, a // b - floor(b/c)*c
3426 |.endmacro
3427
3428 |.macro sfpmod
3429 | addiu sp, sp, -16
3430 |
3431 | load_got __divdf3
3432 | sw SFARG1HI, HI(sp)
3433 | sw SFARG1LO, LO(sp)
3434 | sw SFARG2HI, 8+HI(sp)
3435 | call_extern
3436 |. sw SFARG2LO, 8+LO(sp)
3437 |
3438 | load_got floor
3439 | move SFARG1HI, SFRETHI
3440 | call_extern
3441 |. move SFARG1LO, SFRETLO
3442 |
3443 | load_got __muldf3
3444 | move SFARG1HI, SFRETHI
3445 | move SFARG1LO, SFRETLO
3446 | lw SFARG2HI, 8+HI(sp)
3447 | call_extern
3448 |. lw SFARG2LO, 8+LO(sp)
3449 |
3450 | load_got __subdf3
3451 | lw SFARG1HI, HI(sp)
3452 | lw SFARG1LO, LO(sp)
3453 | move SFARG2HI, SFRETHI
3454 | call_extern
3455 |. move SFARG2LO, SFRETLO
3456 |
3457 | addiu sp, sp, 16
3458 |.endmacro
3459
3460 |.macro ins_arithpre, label
2661 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); 3461 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
2662 | decode_RB8a RB, INS
2663 | decode_RB8b RB
2664 | decode_RDtoRC8 RC, RD
2665 | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 3462 | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
2666 ||switch (vk) { 3463 ||switch (vk) {
2667 ||case 0: 3464 ||case 0:
2668 | addu CARG3, BASE, RB 3465 | decode_RB8a RB, INS
2669 | addu CARG4, KBASE, RC 3466 | decode_RB8b RB
2670 | lw TMP1, HI(CARG3) 3467 | decode_RDtoRC8 RC, RD
2671 | ldc1 f20, 0(CARG3) 3468 | // RA = dst*8, RB = src1*8, RC = num_const*8
2672 | ldc1 f22, 0(CARG4) 3469 | addu RB, BASE, RB
2673 | sltiu AT, TMP1, LJ_TISNUM 3470 |.if "label" ~= "none"
3471 | b label
3472 |.endif
3473 |. addu RC, KBASE, RC
2674 || break; 3474 || break;
2675 ||case 1: 3475 ||case 1:
2676 | addu CARG4, BASE, RB 3476 | decode_RB8a RC, INS
2677 | addu CARG3, KBASE, RC 3477 | decode_RB8b RC
2678 | lw TMP1, HI(CARG4) 3478 | decode_RDtoRC8 RB, RD
2679 | ldc1 f22, 0(CARG4) 3479 | // RA = dst*8, RB = num_const*8, RC = src1*8
2680 | ldc1 f20, 0(CARG3) 3480 | addu RC, BASE, RC
2681 | sltiu AT, TMP1, LJ_TISNUM 3481 |.if "label" ~= "none"
3482 | b label
3483 |.endif
3484 |. addu RB, KBASE, RB
2682 || break; 3485 || break;
2683 ||default: 3486 ||default:
2684 | addu CARG3, BASE, RB 3487 | decode_RB8a RB, INS
2685 | addu CARG4, BASE, RC 3488 | decode_RB8b RB
2686 | lw TMP1, HI(CARG3) 3489 | decode_RDtoRC8 RC, RD
2687 | lw TMP2, HI(CARG4) 3490 | // RA = dst*8, RB = src1*8, RC = src2*8
2688 | ldc1 f20, 0(CARG3) 3491 | addu RB, BASE, RB
2689 | ldc1 f22, 0(CARG4) 3492 |.if "label" ~= "none"
2690 | sltiu AT, TMP1, LJ_TISNUM 3493 | b label
2691 | sltiu TMP0, TMP2, LJ_TISNUM 3494 |.endif
2692 | and AT, AT, TMP0 3495 |. addu RC, BASE, RC
2693 || break; 3496 || break;
2694 ||} 3497 ||}
2695 | beqz AT, ->vmeta_arith
2696 |. addu RA, BASE, RA
2697 |.endmacro 3498 |.endmacro
2698 | 3499 |
2699 |.macro fpmod, a, b, c 3500 |.macro ins_arith, intins, fpins, fpcall, label
2700 |->BC_MODVN_Z: 3501 | ins_arithpre none
2701 | bal ->vm_floor // floor(b/c)
2702 |. div.d FARG1, b, c
2703 | mul.d a, FRET1, c
2704 | sub.d a, b, a // b - floor(b/c)*c
2705 |.endmacro
2706 | 3502 |
2707 |.macro ins_arith, ins 3503 |.if "label" ~= "none"
2708 | ins_arithpre 3504 |label:
2709 |.if "ins" == "fpmod_" 3505 |.endif
2710 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 3506 |
2711 |. nop 3507 | lw SFARG1HI, HI(RB)
3508 | lw SFARG2HI, HI(RC)
3509 |
3510 |.if "intins" ~= "div"
3511 |
3512 | // Check for two integers.
3513 | lw SFARG1LO, LO(RB)
3514 | bne SFARG1HI, TISNUM, >5
3515 |. lw SFARG2LO, LO(RC)
3516 | bne SFARG2HI, TISNUM, >5
3517 |
3518 |.if "intins" == "addu"
3519 |. intins CRET1, SFARG1LO, SFARG2LO
3520 | xor TMP1, CRET1, SFARG1LO // ((y^a) & (y^b)) < 0: overflow.
3521 | xor TMP2, CRET1, SFARG2LO
3522 | and TMP1, TMP1, TMP2
3523 | bltz TMP1, ->vmeta_arith
3524 |. addu RA, BASE, RA
3525 |.elif "intins" == "subu"
3526 |. intins CRET1, SFARG1LO, SFARG2LO
3527 | xor TMP1, CRET1, SFARG1LO // ((y^a) & (a^b)) < 0: overflow.
3528 | xor TMP2, SFARG1LO, SFARG2LO
3529 | and TMP1, TMP1, TMP2
3530 | bltz TMP1, ->vmeta_arith
3531 |. addu RA, BASE, RA
3532 |.elif "intins" == "mult"
3533 |. intins SFARG1LO, SFARG2LO
3534 | mflo CRET1
3535 | mfhi TMP2
3536 | sra TMP1, CRET1, 31
3537 | bne TMP1, TMP2, ->vmeta_arith
3538 |. addu RA, BASE, RA
2712 |.else 3539 |.else
2713 | ins f0, f20, f22 3540 |. load_got lj_vm_modi
3541 | beqz SFARG2LO, ->vmeta_arith
3542 |. addu RA, BASE, RA
3543 |.if ENDIAN_BE
3544 | move CARG1, SFARG1LO
3545 |.endif
3546 | call_extern
3547 |. move CARG2, SFARG2LO
3548 |.endif
3549 |
2714 | ins_next1 3550 | ins_next1
2715 | sdc1 f0, 0(RA) 3551 | sw TISNUM, HI(RA)
3552 | sw CRET1, LO(RA)
3553 |3:
2716 | ins_next2 3554 | ins_next2
3555 |
3556 |.elif not FPU
3557 |
3558 | lw SFARG1LO, LO(RB)
3559 | lw SFARG2LO, LO(RC)
3560 |
2717 |.endif 3561 |.endif
3562 |
3563 |5: // Check for two numbers.
3564 | .FPU ldc1 f20, 0(RB)
3565 | sltiu AT, SFARG1HI, LJ_TISNUM
3566 | sltiu TMP0, SFARG2HI, LJ_TISNUM
3567 | .FPU ldc1 f22, 0(RC)
3568 | and AT, AT, TMP0
3569 | beqz AT, ->vmeta_arith
3570 |. addu RA, BASE, RA
3571 |
3572 |.if FPU
3573 | fpins FRET1, f20, f22
3574 |.elif "fpcall" == "sfpmod"
3575 | sfpmod
3576 |.else
3577 | load_got fpcall
3578 | call_extern
3579 |. nop
3580 |.endif
3581 |
3582 | ins_next1
3583 |.if not FPU
3584 | sw SFRETHI, HI(RA)
3585 |.endif
3586 |.if "intins" ~= "div"
3587 | b <3
3588 |.endif
3589 |.if FPU
3590 |. sdc1 FRET1, 0(RA)
3591 |.else
3592 |. sw SFRETLO, LO(RA)
3593 |.endif
3594 |.if "intins" == "div"
3595 | ins_next2
3596 |.endif
3597 |
2718 |.endmacro 3598 |.endmacro
2719 3599
2720 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: 3600 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
2721 | ins_arith add.d 3601 | ins_arith addu, add.d, __adddf3, none
2722 break; 3602 break;
2723 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: 3603 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
2724 | ins_arith sub.d 3604 | ins_arith subu, sub.d, __subdf3, none
2725 break; 3605 break;
2726 case BC_MULVN: case BC_MULNV: case BC_MULVV: 3606 case BC_MULVN: case BC_MULNV: case BC_MULVV:
2727 | ins_arith mul.d 3607 | ins_arith mult, mul.d, __muldf3, none
2728 break; 3608 break;
2729 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: 3609 case BC_DIVVN:
2730 | ins_arith div.d 3610 | ins_arith div, div.d, __divdf3, ->BC_DIVVN_Z
3611 break;
3612 case BC_DIVNV: case BC_DIVVV:
3613 | ins_arithpre ->BC_DIVVN_Z
2731 break; 3614 break;
2732 case BC_MODVN: 3615 case BC_MODVN:
2733 | ins_arith fpmod 3616 | ins_arith modi, fpmod, sfpmod, ->BC_MODVN_Z
2734 break; 3617 break;
2735 case BC_MODNV: case BC_MODVV: 3618 case BC_MODNV: case BC_MODVV:
2736 | ins_arith fpmod_ 3619 | ins_arithpre ->BC_MODVN_Z
2737 break; 3620 break;
2738 case BC_POW: 3621 case BC_POW:
2739 | decode_RB8a RB, INS 3622 | ins_arithpre none
2740 | decode_RB8b RB 3623 | lw SFARG1HI, HI(RB)
2741 | decode_RDtoRC8 RC, RD 3624 | lw SFARG2HI, HI(RC)
2742 | addu CARG3, BASE, RB 3625 | sltiu AT, SFARG1HI, LJ_TISNUM
2743 | addu CARG4, BASE, RC 3626 | sltiu TMP0, SFARG2HI, LJ_TISNUM
2744 | lw TMP1, HI(CARG3)
2745 | lw TMP2, HI(CARG4)
2746 | ldc1 FARG1, 0(CARG3)
2747 | ldc1 FARG2, 0(CARG4)
2748 | sltiu AT, TMP1, LJ_TISNUM
2749 | sltiu TMP0, TMP2, LJ_TISNUM
2750 | and AT, AT, TMP0 3627 | and AT, AT, TMP0
2751 | load_got pow 3628 | load_got pow
2752 | beqz AT, ->vmeta_arith 3629 | beqz AT, ->vmeta_arith
2753 |. addu RA, BASE, RA 3630 |. addu RA, BASE, RA
3631 |.if FPU
3632 | ldc1 FARG1, 0(RB)
3633 | ldc1 FARG2, 0(RC)
3634 |.else
3635 | lw SFARG1LO, LO(RB)
3636 | lw SFARG2LO, LO(RC)
3637 |.endif
2754 | call_extern 3638 | call_extern
2755 |. nop 3639 |. nop
2756 | ins_next1 3640 | ins_next1
3641 |.if FPU
2757 | sdc1 FRET1, 0(RA) 3642 | sdc1 FRET1, 0(RA)
3643 |.else
3644 | sw SFRETHI, HI(RA)
3645 | sw SFRETLO, LO(RA)
3646 |.endif
2758 | ins_next2 3647 | ins_next2
2759 break; 3648 break;
2760 3649
@@ -2777,10 +3666,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2777 | bnez CRET1, ->vmeta_binop 3666 | bnez CRET1, ->vmeta_binop
2778 |. lw BASE, L->base 3667 |. lw BASE, L->base
2779 | addu RB, BASE, MULTRES 3668 | addu RB, BASE, MULTRES
2780 | ldc1 f0, 0(RB) 3669 | lw SFRETHI, HI(RB)
3670 | lw SFRETLO, LO(RB)
2781 | addu RA, BASE, RA 3671 | addu RA, BASE, RA
2782 | ins_next1 3672 | ins_next1
2783 | sdc1 f0, 0(RA) // Copy result from RB to RA. 3673 | sw SFRETHI, HI(RA)
3674 | sw SFRETLO, LO(RA)
2784 | ins_next2 3675 | ins_next2
2785 break; 3676 break;
2786 3677
@@ -2815,20 +3706,21 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2815 case BC_KSHORT: 3706 case BC_KSHORT:
2816 | // RA = dst*8, RD = int16_literal*8 3707 | // RA = dst*8, RD = int16_literal*8
2817 | sra RD, INS, 16 3708 | sra RD, INS, 16
2818 | mtc1 RD, f0
2819 | addu RA, BASE, RA 3709 | addu RA, BASE, RA
2820 | cvt.d.w f0, f0
2821 | ins_next1 3710 | ins_next1
2822 | sdc1 f0, 0(RA) 3711 | sw TISNUM, HI(RA)
3712 | sw RD, LO(RA)
2823 | ins_next2 3713 | ins_next2
2824 break; 3714 break;
2825 case BC_KNUM: 3715 case BC_KNUM:
2826 | // RA = dst*8, RD = num_const*8 3716 | // RA = dst*8, RD = num_const*8
2827 | addu RD, KBASE, RD 3717 | addu RD, KBASE, RD
2828 | addu RA, BASE, RA 3718 | addu RA, BASE, RA
2829 | ldc1 f0, 0(RD) 3719 | lw SFRETHI, HI(RD)
3720 | lw SFRETLO, LO(RD)
2830 | ins_next1 3721 | ins_next1
2831 | sdc1 f0, 0(RA) 3722 | sw SFRETHI, HI(RA)
3723 | sw SFRETLO, LO(RA)
2832 | ins_next2 3724 | ins_next2
2833 break; 3725 break;
2834 case BC_KPRI: 3726 case BC_KPRI:
@@ -2864,9 +3756,11 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2864 | lw UPVAL:RB, LFUNC:RD->uvptr 3756 | lw UPVAL:RB, LFUNC:RD->uvptr
2865 | ins_next1 3757 | ins_next1
2866 | lw TMP1, UPVAL:RB->v 3758 | lw TMP1, UPVAL:RB->v
2867 | ldc1 f0, 0(TMP1) 3759 | lw SFRETHI, HI(TMP1)
3760 | lw SFRETLO, LO(TMP1)
2868 | addu RA, BASE, RA 3761 | addu RA, BASE, RA
2869 | sdc1 f0, 0(RA) 3762 | sw SFRETHI, HI(RA)
3763 | sw SFRETLO, LO(RA)
2870 | ins_next2 3764 | ins_next2
2871 break; 3765 break;
2872 case BC_USETV: 3766 case BC_USETV:
@@ -2875,26 +3769,27 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2875 | srl RA, RA, 1 3769 | srl RA, RA, 1
2876 | addu RD, BASE, RD 3770 | addu RD, BASE, RD
2877 | addu RA, RA, LFUNC:RB 3771 | addu RA, RA, LFUNC:RB
2878 | ldc1 f0, 0(RD)
2879 | lw UPVAL:RB, LFUNC:RA->uvptr 3772 | lw UPVAL:RB, LFUNC:RA->uvptr
3773 | lw SFRETHI, HI(RD)
3774 | lw SFRETLO, LO(RD)
2880 | lbu TMP3, UPVAL:RB->marked 3775 | lbu TMP3, UPVAL:RB->marked
2881 | lw CARG2, UPVAL:RB->v 3776 | lw CARG2, UPVAL:RB->v
2882 | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv) 3777 | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
2883 | lbu TMP0, UPVAL:RB->closed 3778 | lbu TMP0, UPVAL:RB->closed
2884 | lw TMP2, HI(RD) 3779 | sw SFRETHI, HI(CARG2)
2885 | sdc1 f0, 0(CARG2) 3780 | sw SFRETLO, LO(CARG2)
2886 | li AT, LJ_GC_BLACK|1 3781 | li AT, LJ_GC_BLACK|1
2887 | or TMP3, TMP3, TMP0 3782 | or TMP3, TMP3, TMP0
2888 | beq TMP3, AT, >2 // Upvalue is closed and black? 3783 | beq TMP3, AT, >2 // Upvalue is closed and black?
2889 |. addiu TMP2, TMP2, -(LJ_TNUMX+1) 3784 |. addiu TMP2, SFRETHI, -(LJ_TNUMX+1)
2890 |1: 3785 |1:
2891 | ins_next 3786 | ins_next
2892 | 3787 |
2893 |2: // Check if new value is collectable. 3788 |2: // Check if new value is collectable.
2894 | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1) 3789 | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
2895 | beqz AT, <1 // tvisgcv(v) 3790 | beqz AT, <1 // tvisgcv(v)
2896 |. lw TMP1, LO(RD) 3791 |. nop
2897 | lbu TMP3, GCOBJ:TMP1->gch.marked 3792 | lbu TMP3, GCOBJ:SFRETLO->gch.marked
2898 | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v) 3793 | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
2899 | beqz TMP3, <1 3794 | beqz TMP3, <1
2900 |. load_got lj_gc_barrieruv 3795 |. load_got lj_gc_barrieruv
@@ -2942,11 +3837,13 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2942 | srl RA, RA, 1 3837 | srl RA, RA, 1
2943 | addu RD, KBASE, RD 3838 | addu RD, KBASE, RD
2944 | addu RA, RA, LFUNC:RB 3839 | addu RA, RA, LFUNC:RB
2945 | ldc1 f0, 0(RD) 3840 | lw UPVAL:RB, LFUNC:RA->uvptr
2946 | lw UPVAL:RB, LFUNC:RA->uvptr 3841 | lw SFRETHI, HI(RD)
3842 | lw SFRETLO, LO(RD)
3843 | lw TMP1, UPVAL:RB->v
2947 | ins_next1 3844 | ins_next1
2948 | lw TMP1, UPVAL:RB->v 3845 | sw SFRETHI, HI(TMP1)
2949 | sdc1 f0, 0(TMP1) 3846 | sw SFRETLO, LO(TMP1)
2950 | ins_next2 3847 | ins_next2
2951 break; 3848 break;
2952 case BC_USETP: 3849 case BC_USETP:
@@ -2956,10 +3853,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2956 | srl TMP0, RD, 3 3853 | srl TMP0, RD, 3
2957 | addu RA, RA, LFUNC:RB 3854 | addu RA, RA, LFUNC:RB
2958 | not TMP0, TMP0 3855 | not TMP0, TMP0
2959 | lw UPVAL:RB, LFUNC:RA->uvptr 3856 | lw UPVAL:RB, LFUNC:RA->uvptr
2960 | ins_next1 3857 | ins_next1
2961 | lw TMP1, UPVAL:RB->v 3858 | lw TMP1, UPVAL:RB->v
2962 | sw TMP0, HI(TMP1) 3859 | sw TMP0, HI(TMP1)
2963 | ins_next2 3860 | ins_next2
2964 break; 3861 break;
2965 3862
@@ -2995,8 +3892,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2995 | li TMP0, LJ_TFUNC 3892 | li TMP0, LJ_TFUNC
2996 | ins_next1 3893 | ins_next1
2997 | addu RA, BASE, RA 3894 | addu RA, BASE, RA
2998 | sw TMP0, HI(RA)
2999 | sw LFUNC:CRET1, LO(RA) 3895 | sw LFUNC:CRET1, LO(RA)
3896 | sw TMP0, HI(RA)
3000 | ins_next2 3897 | ins_next2
3001 break; 3898 break;
3002 3899
@@ -3077,31 +3974,23 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3077 | lw TMP2, HI(CARG3) 3974 | lw TMP2, HI(CARG3)
3078 | lw TAB:RB, LO(CARG2) 3975 | lw TAB:RB, LO(CARG2)
3079 | li AT, LJ_TTAB 3976 | li AT, LJ_TTAB
3080 | ldc1 f0, 0(CARG3)
3081 | bne TMP1, AT, ->vmeta_tgetv 3977 | bne TMP1, AT, ->vmeta_tgetv
3082 |. addu RA, BASE, RA 3978 |. addu RA, BASE, RA
3083 | sltiu AT, TMP2, LJ_TISNUM 3979 | bne TMP2, TISNUM, >5
3084 | beqz AT, >5 3980 |. lw RC, LO(CARG3)
3085 |. li AT, LJ_TSTR 3981 | lw TMP0, TAB:RB->asize
3086 |
3087 | // Convert number key to integer, check for integerness and range.
3088 | cvt.w.d f2, f0
3089 | lw TMP0, TAB:RB->asize
3090 | mfc1 TMP2, f2
3091 | cvt.d.w f4, f2
3092 | lw TMP1, TAB:RB->array 3982 | lw TMP1, TAB:RB->array
3093 | c.eq.d f0, f4 3983 | sltu AT, RC, TMP0
3094 | sltu AT, TMP2, TMP0 3984 | sll TMP2, RC, 3
3095 | movf AT, r0
3096 | sll TMP2, TMP2, 3
3097 | beqz AT, ->vmeta_tgetv // Integer key and in array part? 3985 | beqz AT, ->vmeta_tgetv // Integer key and in array part?
3098 |. addu TMP2, TMP1, TMP2 3986 |. addu TMP2, TMP1, TMP2
3099 | lw TMP0, HI(TMP2) 3987 | lw SFRETHI, HI(TMP2)
3100 | beq TMP0, TISNIL, >2 3988 | beq SFRETHI, TISNIL, >2
3101 |. ldc1 f0, 0(TMP2) 3989 |. lw SFRETLO, LO(TMP2)
3102 |1: 3990 |1:
3103 | ins_next1 3991 | ins_next1
3104 | sdc1 f0, 0(RA) 3992 | sw SFRETHI, HI(RA)
3993 | sw SFRETLO, LO(RA)
3105 | ins_next2 3994 | ins_next2
3106 | 3995 |
3107 |2: // Check for __index if table value is nil. 3996 |2: // Check for __index if table value is nil.
@@ -3116,8 +4005,9 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3116 |. nop 4005 |. nop
3117 | 4006 |
3118 |5: 4007 |5:
4008 | li AT, LJ_TSTR
3119 | bne TMP2, AT, ->vmeta_tgetv 4009 | bne TMP2, AT, ->vmeta_tgetv
3120 |. lw STR:RC, LO(CARG3) 4010 |. nop
3121 | b ->BC_TGETS_Z // String key? 4011 | b ->BC_TGETS_Z // String key?
3122 |. nop 4012 |. nop
3123 break; 4013 break;
@@ -3138,9 +4028,9 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3138 |->BC_TGETS_Z: 4028 |->BC_TGETS_Z:
3139 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8 4029 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
3140 | lw TMP0, TAB:RB->hmask 4030 | lw TMP0, TAB:RB->hmask
3141 | lw TMP1, STR:RC->hash 4031 | lw TMP1, STR:RC->sid
3142 | lw NODE:TMP2, TAB:RB->node 4032 | lw NODE:TMP2, TAB:RB->node
3143 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 4033 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
3144 | sll TMP0, TMP1, 5 4034 | sll TMP0, TMP1, 5
3145 | sll TMP1, TMP1, 3 4035 | sll TMP1, TMP1, 3
3146 | subu TMP1, TMP0, TMP1 4036 | subu TMP1, TMP0, TMP1
@@ -3149,18 +4039,18 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3149 | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2) 4039 | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
3150 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) 4040 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
3151 | lw NODE:TMP1, NODE:TMP2->next 4041 | lw NODE:TMP1, NODE:TMP2->next
3152 | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2) 4042 | lw SFRETHI, offsetof(Node, val)+HI(NODE:TMP2)
3153 | addiu CARG1, CARG1, -LJ_TSTR 4043 | addiu CARG1, CARG1, -LJ_TSTR
3154 | xor TMP0, TMP0, STR:RC 4044 | xor TMP0, TMP0, STR:RC
3155 | or AT, CARG1, TMP0 4045 | or AT, CARG1, TMP0
3156 | bnez AT, >4 4046 | bnez AT, >4
3157 |. lw TAB:TMP3, TAB:RB->metatable 4047 |. lw TAB:TMP3, TAB:RB->metatable
3158 | beq CARG2, TISNIL, >5 // Key found, but nil value? 4048 | beq SFRETHI, TISNIL, >5 // Key found, but nil value?
3159 |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2) 4049 |. lw SFRETLO, offsetof(Node, val)+LO(NODE:TMP2)
3160 |3: 4050 |3:
3161 | ins_next1 4051 | ins_next1
3162 | sw CARG2, HI(RA) 4052 | sw SFRETHI, HI(RA)
3163 | sw CARG1, LO(RA) 4053 | sw SFRETLO, LO(RA)
3164 | ins_next2 4054 | ins_next2
3165 | 4055 |
3166 |4: // Follow hash chain. 4056 |4: // Follow hash chain.
@@ -3170,7 +4060,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3170 | 4060 |
3171 |5: // Check for __index if table value is nil. 4061 |5: // Check for __index if table value is nil.
3172 | beqz TAB:TMP3, <3 // No metatable: done. 4062 | beqz TAB:TMP3, <3 // No metatable: done.
3173 |. li CARG2, LJ_TNIL 4063 |. li SFRETHI, LJ_TNIL
3174 | lbu TMP0, TAB:TMP3->nomm 4064 | lbu TMP0, TAB:TMP3->nomm
3175 | andi TMP0, TMP0, 1<<MM_index 4065 | andi TMP0, TMP0, 1<<MM_index
3176 | bnez TMP0, <3 // 'no __index' flag set: done. 4066 | bnez TMP0, <3 // 'no __index' flag set: done.
@@ -3195,12 +4085,13 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3195 | sltu AT, TMP0, TMP1 4085 | sltu AT, TMP0, TMP1
3196 | beqz AT, ->vmeta_tgetb 4086 | beqz AT, ->vmeta_tgetb
3197 |. addu RC, TMP2, RC 4087 |. addu RC, TMP2, RC
3198 | lw TMP1, HI(RC) 4088 | lw SFRETHI, HI(RC)
3199 | beq TMP1, TISNIL, >5 4089 | beq SFRETHI, TISNIL, >5
3200 |. ldc1 f0, 0(RC) 4090 |. lw SFRETLO, LO(RC)
3201 |1: 4091 |1:
3202 | ins_next1 4092 | ins_next1
3203 | sdc1 f0, 0(RA) 4093 | sw SFRETHI, HI(RA)
4094 | sw SFRETLO, LO(RA)
3204 | ins_next2 4095 | ins_next2
3205 | 4096 |
3206 |5: // Check for __index if table value is nil. 4097 |5: // Check for __index if table value is nil.
@@ -3211,9 +4102,33 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3211 | andi TMP1, TMP1, 1<<MM_index 4102 | andi TMP1, TMP1, 1<<MM_index
3212 | bnez TMP1, <1 // 'no __index' flag set: done. 4103 | bnez TMP1, <1 // 'no __index' flag set: done.
3213 |. nop 4104 |. nop
3214 | b ->vmeta_tgetb // Caveat: preserve TMP0! 4105 | b ->vmeta_tgetb // Caveat: preserve TMP0 and CARG2!
3215 |. nop 4106 |. nop
3216 break; 4107 break;
4108 case BC_TGETR:
4109 | // RA = dst*8, RB = table*8, RC = key*8
4110 | decode_RB8a RB, INS
4111 | decode_RB8b RB
4112 | decode_RDtoRC8 RC, RD
4113 | addu RB, BASE, RB
4114 | addu RC, BASE, RC
4115 | lw TAB:CARG1, LO(RB)
4116 | lw CARG2, LO(RC)
4117 | addu RA, BASE, RA
4118 | lw TMP0, TAB:CARG1->asize
4119 | lw TMP1, TAB:CARG1->array
4120 | sltu AT, CARG2, TMP0
4121 | sll TMP2, CARG2, 3
4122 | beqz AT, ->vmeta_tgetr // In array part?
4123 |. addu CRET1, TMP1, TMP2
4124 | lw SFARG2HI, HI(CRET1)
4125 | lw SFARG2LO, LO(CRET1)
4126 |->BC_TGETR_Z:
4127 | ins_next1
4128 | sw SFARG2HI, HI(RA)
4129 | sw SFARG2LO, LO(RA)
4130 | ins_next2
4131 break;
3217 4132
3218 case BC_TSETV: 4133 case BC_TSETV:
3219 | // RA = src*8, RB = table*8, RC = key*8 4134 | // RA = src*8, RB = table*8, RC = key*8
@@ -3226,33 +4141,26 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3226 | lw TMP2, HI(CARG3) 4141 | lw TMP2, HI(CARG3)
3227 | lw TAB:RB, LO(CARG2) 4142 | lw TAB:RB, LO(CARG2)
3228 | li AT, LJ_TTAB 4143 | li AT, LJ_TTAB
3229 | ldc1 f0, 0(CARG3)
3230 | bne TMP1, AT, ->vmeta_tsetv 4144 | bne TMP1, AT, ->vmeta_tsetv
3231 |. addu RA, BASE, RA 4145 |. addu RA, BASE, RA
3232 | sltiu AT, TMP2, LJ_TISNUM 4146 | bne TMP2, TISNUM, >5
3233 | beqz AT, >5 4147 |. lw RC, LO(CARG3)
3234 |. li AT, LJ_TSTR 4148 | lw TMP0, TAB:RB->asize
3235 |
3236 | // Convert number key to integer, check for integerness and range.
3237 | cvt.w.d f2, f0
3238 | lw TMP0, TAB:RB->asize
3239 | mfc1 TMP2, f2
3240 | cvt.d.w f4, f2
3241 | lw TMP1, TAB:RB->array 4149 | lw TMP1, TAB:RB->array
3242 | c.eq.d f0, f4 4150 | sltu AT, RC, TMP0
3243 | sltu AT, TMP2, TMP0 4151 | sll TMP2, RC, 3
3244 | movf AT, r0
3245 | sll TMP2, TMP2, 3
3246 | beqz AT, ->vmeta_tsetv // Integer key and in array part? 4152 | beqz AT, ->vmeta_tsetv // Integer key and in array part?
3247 |. addu TMP1, TMP1, TMP2 4153 |. addu TMP1, TMP1, TMP2
3248 | lbu TMP3, TAB:RB->marked
3249 | lw TMP0, HI(TMP1) 4154 | lw TMP0, HI(TMP1)
4155 | lbu TMP3, TAB:RB->marked
4156 | lw SFRETHI, HI(RA)
3250 | beq TMP0, TISNIL, >3 4157 | beq TMP0, TISNIL, >3
3251 |. ldc1 f0, 0(RA) 4158 |. lw SFRETLO, LO(RA)
3252 |1: 4159 |1:
3253 | andi AT, TMP3, LJ_GC_BLACK // isblack(table) 4160 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
3254 | bnez AT, >7 4161 | sw SFRETHI, HI(TMP1)
3255 |. sdc1 f0, 0(TMP1) 4162 | bnez AT, >7
4163 |. sw SFRETLO, LO(TMP1)
3256 |2: 4164 |2:
3257 | ins_next 4165 | ins_next
3258 | 4166 |
@@ -3268,8 +4176,9 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3268 |. nop 4176 |. nop
3269 | 4177 |
3270 |5: 4178 |5:
4179 | li AT, LJ_TSTR
3271 | bne TMP2, AT, ->vmeta_tsetv 4180 | bne TMP2, AT, ->vmeta_tsetv
3272 |. lw STR:RC, LO(CARG3) 4181 |. nop
3273 | b ->BC_TSETS_Z // String key? 4182 | b ->BC_TSETS_Z // String key?
3274 |. nop 4183 |. nop
3275 | 4184 |
@@ -3293,15 +4202,20 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3293 |->BC_TSETS_Z: 4202 |->BC_TSETS_Z:
3294 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8 4203 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
3295 | lw TMP0, TAB:RB->hmask 4204 | lw TMP0, TAB:RB->hmask
3296 | lw TMP1, STR:RC->hash 4205 | lw TMP1, STR:RC->sid
3297 | lw NODE:TMP2, TAB:RB->node 4206 | lw NODE:TMP2, TAB:RB->node
3298 | sb r0, TAB:RB->nomm // Clear metamethod cache. 4207 | sb r0, TAB:RB->nomm // Clear metamethod cache.
3299 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 4208 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
3300 | sll TMP0, TMP1, 5 4209 | sll TMP0, TMP1, 5
3301 | sll TMP1, TMP1, 3 4210 | sll TMP1, TMP1, 3
3302 | subu TMP1, TMP0, TMP1 4211 | subu TMP1, TMP0, TMP1
3303 | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8) 4212 | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
4213 |.if FPU
3304 | ldc1 f20, 0(RA) 4214 | ldc1 f20, 0(RA)
4215 |.else
4216 | lw SFRETHI, HI(RA)
4217 | lw SFRETLO, LO(RA)
4218 |.endif
3305 |1: 4219 |1:
3306 | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2) 4220 | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
3307 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2) 4221 | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
@@ -3315,8 +4229,14 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3315 |. lw TAB:TMP0, TAB:RB->metatable 4229 |. lw TAB:TMP0, TAB:RB->metatable
3316 |2: 4230 |2:
3317 | andi AT, TMP3, LJ_GC_BLACK // isblack(table) 4231 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4232 |.if FPU
3318 | bnez AT, >7 4233 | bnez AT, >7
3319 |. sdc1 f20, NODE:TMP2->val 4234 |. sdc1 f20, NODE:TMP2->val
4235 |.else
4236 | sw SFRETHI, NODE:TMP2->val.u32.hi
4237 | bnez AT, >7
4238 |. sw SFRETLO, NODE:TMP2->val.u32.lo
4239 |.endif
3320 |3: 4240 |3:
3321 | ins_next 4241 | ins_next
3322 | 4242 |
@@ -3354,8 +4274,16 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3354 |. move CARG1, L 4274 |. move CARG1, L
3355 | // Returns TValue *. 4275 | // Returns TValue *.
3356 | lw BASE, L->base 4276 | lw BASE, L->base
4277 |.if FPU
3357 | b <3 // No 2nd write barrier needed. 4278 | b <3 // No 2nd write barrier needed.
3358 |. sdc1 f20, 0(CRET1) 4279 |. sdc1 f20, 0(CRET1)
4280 |.else
4281 | lw SFARG1HI, HI(RA)
4282 | lw SFARG1LO, LO(RA)
4283 | sw SFARG1HI, HI(CRET1)
4284 | b <3 // No 2nd write barrier needed.
4285 |. sw SFARG1LO, LO(CRET1)
4286 |.endif
3359 | 4287 |
3360 |7: // Possible table write barrier for the value. Skip valiswhite check. 4288 |7: // Possible table write barrier for the value. Skip valiswhite check.
3361 | barrierback TAB:RB, TMP3, TMP0, <3 4289 | barrierback TAB:RB, TMP3, TMP0, <3
@@ -3380,11 +4308,13 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3380 | lw TMP1, HI(RC) 4308 | lw TMP1, HI(RC)
3381 | lbu TMP3, TAB:RB->marked 4309 | lbu TMP3, TAB:RB->marked
3382 | beq TMP1, TISNIL, >5 4310 | beq TMP1, TISNIL, >5
3383 |. ldc1 f0, 0(RA)
3384 |1: 4311 |1:
4312 |. lw SFRETHI, HI(RA)
4313 | lw SFRETLO, LO(RA)
3385 | andi AT, TMP3, LJ_GC_BLACK // isblack(table) 4314 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4315 | sw SFRETHI, HI(RC)
3386 | bnez AT, >7 4316 | bnez AT, >7
3387 |. sdc1 f0, 0(RC) 4317 |. sw SFRETLO, LO(RC)
3388 |2: 4318 |2:
3389 | ins_next 4319 | ins_next
3390 | 4320 |
@@ -3396,12 +4326,43 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3396 | andi TMP1, TMP1, 1<<MM_newindex 4326 | andi TMP1, TMP1, 1<<MM_newindex
3397 | bnez TMP1, <1 // 'no __newindex' flag set: done. 4327 | bnez TMP1, <1 // 'no __newindex' flag set: done.
3398 |. nop 4328 |. nop
3399 | b ->vmeta_tsetb // Caveat: preserve TMP0! 4329 | b ->vmeta_tsetb // Caveat: preserve TMP0 and CARG2!
3400 |. nop 4330 |. nop
3401 | 4331 |
3402 |7: // Possible table write barrier for the value. Skip valiswhite check. 4332 |7: // Possible table write barrier for the value. Skip valiswhite check.
3403 | barrierback TAB:RB, TMP3, TMP0, <2 4333 | barrierback TAB:RB, TMP3, TMP0, <2
3404 break; 4334 break;
4335 case BC_TSETR:
4336 | // RA = dst*8, RB = table*8, RC = key*8
4337 | decode_RB8a RB, INS
4338 | decode_RB8b RB
4339 | decode_RDtoRC8 RC, RD
4340 | addu CARG1, BASE, RB
4341 | addu CARG3, BASE, RC
4342 | lw TAB:CARG2, LO(CARG1)
4343 | lw CARG3, LO(CARG3)
4344 | lbu TMP3, TAB:CARG2->marked
4345 | lw TMP0, TAB:CARG2->asize
4346 | lw TMP1, TAB:CARG2->array
4347 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4348 | bnez AT, >7
4349 |. addu RA, BASE, RA
4350 |2:
4351 | sltu AT, CARG3, TMP0
4352 | sll TMP2, CARG3, 3
4353 | beqz AT, ->vmeta_tsetr // In array part?
4354 |. addu CRET1, TMP1, TMP2
4355 |->BC_TSETR_Z:
4356 | lw SFARG1HI, HI(RA)
4357 | lw SFARG1LO, LO(RA)
4358 | ins_next1
4359 | sw SFARG1HI, HI(CRET1)
4360 | sw SFARG1LO, LO(CRET1)
4361 | ins_next2
4362 |
4363 |7: // Possible table write barrier for the value. Skip valiswhite check.
4364 | barrierback TAB:CARG2, TMP3, CRET1, <2
4365 break;
3405 4366
3406 case BC_TSETM: 4367 case BC_TSETM:
3407 | // RA = base*8 (table at base-1), RD = num_const*8 (start index) 4368 | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
@@ -3424,10 +4385,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3424 | addu TMP1, TMP1, CARG1 4385 | addu TMP1, TMP1, CARG1
3425 | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table) 4386 | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
3426 |3: // Copy result slots to table. 4387 |3: // Copy result slots to table.
3427 | ldc1 f0, 0(RA) 4388 | lw SFRETHI, HI(RA)
4389 | lw SFRETLO, LO(RA)
3428 | addiu RA, RA, 8 4390 | addiu RA, RA, 8
3429 | sltu AT, RA, TMP2 4391 | sltu AT, RA, TMP2
3430 | sdc1 f0, 0(TMP1) 4392 | sw SFRETHI, HI(TMP1)
4393 | sw SFRETLO, LO(TMP1)
3431 | bnez AT, <3 4394 | bnez AT, <3
3432 |. addiu TMP1, TMP1, 8 4395 |. addiu TMP1, TMP1, 8
3433 | bnez TMP0, >7 4396 | bnez TMP0, >7
@@ -3502,10 +4465,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3502 | beqz NARGS8:RC, >3 4465 | beqz NARGS8:RC, >3
3503 |. move TMP3, NARGS8:RC 4466 |. move TMP3, NARGS8:RC
3504 |2: 4467 |2:
3505 | ldc1 f0, 0(RA) 4468 | lw SFRETHI, HI(RA)
4469 | lw SFRETLO, LO(RA)
3506 | addiu RA, RA, 8 4470 | addiu RA, RA, 8
3507 | addiu TMP3, TMP3, -8 4471 | addiu TMP3, TMP3, -8
3508 | sdc1 f0, 0(TMP2) 4472 | sw SFRETHI, HI(TMP2)
4473 | sw SFRETLO, LO(TMP2)
3509 | bnez TMP3, <2 4474 | bnez TMP3, <2
3510 |. addiu TMP2, TMP2, 8 4475 |. addiu TMP2, TMP2, 8
3511 |3: 4476 |3:
@@ -3542,12 +4507,16 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3542 | li AT, LJ_TFUNC 4507 | li AT, LJ_TFUNC
3543 | lw TMP1, -24+HI(BASE) 4508 | lw TMP1, -24+HI(BASE)
3544 | lw LFUNC:RB, -24+LO(BASE) 4509 | lw LFUNC:RB, -24+LO(BASE)
3545 | ldc1 f2, -8(BASE) 4510 | lw SFARG1HI, -16+HI(BASE)
3546 | ldc1 f0, -16(BASE) 4511 | lw SFARG1LO, -16+LO(BASE)
4512 | lw SFARG2HI, -8+HI(BASE)
4513 | lw SFARG2LO, -8+LO(BASE)
3547 | sw TMP1, HI(BASE) // Copy callable. 4514 | sw TMP1, HI(BASE) // Copy callable.
3548 | sw LFUNC:RB, LO(BASE) 4515 | sw LFUNC:RB, LO(BASE)
3549 | sdc1 f2, 16(BASE) // Copy control var. 4516 | sw SFARG1HI, 8+HI(BASE) // Copy state.
3550 | sdc1 f0, 8(BASE) // Copy state. 4517 | sw SFARG1LO, 8+LO(BASE)
4518 | sw SFARG2HI, 16+HI(BASE) // Copy control var.
4519 | sw SFARG2LO, 16+LO(BASE)
3551 | addiu BASE, BASE, 8 4520 | addiu BASE, BASE, 8
3552 | bne TMP1, AT, ->vmeta_call 4521 | bne TMP1, AT, ->vmeta_call
3553 |. li NARGS8:RC, 16 // Iterators get 2 arguments. 4522 |. li NARGS8:RC, 16 // Iterators get 2 arguments.
@@ -3570,20 +4539,20 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3570 | beqz AT, >5 // Index points after array part? 4539 | beqz AT, >5 // Index points after array part?
3571 |. sll TMP3, RC, 3 4540 |. sll TMP3, RC, 3
3572 | addu TMP3, TMP1, TMP3 4541 | addu TMP3, TMP1, TMP3
3573 | lw TMP2, HI(TMP3) 4542 | lw SFARG1HI, HI(TMP3)
3574 | ldc1 f0, 0(TMP3) 4543 | lw SFARG1LO, LO(TMP3)
3575 | mtc1 RC, f2
3576 | lhu RD, -4+OFS_RD(PC) 4544 | lhu RD, -4+OFS_RD(PC)
3577 | beq TMP2, TISNIL, <1 // Skip holes in array part. 4545 | sw TISNUM, HI(RA)
4546 | sw RC, LO(RA)
4547 | beq SFARG1HI, TISNIL, <1 // Skip holes in array part.
3578 |. addiu RC, RC, 1 4548 |. addiu RC, RC, 1
3579 | cvt.d.w f2, f2 4549 | sw SFARG1HI, 8+HI(RA)
4550 | sw SFARG1LO, 8+LO(RA)
3580 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) 4551 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3581 | sdc1 f0, 8(RA)
3582 | decode_RD4b RD 4552 | decode_RD4b RD
3583 | addu RD, RD, TMP3 4553 | addu RD, RD, TMP3
3584 | sw RC, -8+LO(RA) // Update control var. 4554 | sw RC, -8+LO(RA) // Update control var.
3585 | addu PC, PC, RD 4555 | addu PC, PC, RD
3586 | sdc1 f2, 0(RA)
3587 |3: 4556 |3:
3588 | ins_next 4557 | ins_next
3589 | 4558 |
@@ -3598,18 +4567,21 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3598 | sll RB, RC, 3 4567 | sll RB, RC, 3
3599 | subu TMP3, TMP3, RB 4568 | subu TMP3, TMP3, RB
3600 | addu NODE:TMP3, TMP3, TMP2 4569 | addu NODE:TMP3, TMP3, TMP2
3601 | lw RB, HI(NODE:TMP3) 4570 | lw SFARG1HI, NODE:TMP3->val.u32.hi
3602 | ldc1 f0, 0(NODE:TMP3) 4571 | lw SFARG1LO, NODE:TMP3->val.u32.lo
3603 | lhu RD, -4+OFS_RD(PC) 4572 | lhu RD, -4+OFS_RD(PC)
3604 | beq RB, TISNIL, <6 // Skip holes in hash part. 4573 | beq SFARG1HI, TISNIL, <6 // Skip holes in hash part.
3605 |. addiu RC, RC, 1 4574 |. addiu RC, RC, 1
3606 | ldc1 f2, NODE:TMP3->key 4575 | lw SFARG2HI, NODE:TMP3->key.u32.hi
4576 | lw SFARG2LO, NODE:TMP3->key.u32.lo
3607 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) 4577 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3608 | sdc1 f0, 8(RA) 4578 | sw SFARG1HI, 8+HI(RA)
4579 | sw SFARG1LO, 8+LO(RA)
3609 | addu RC, RC, TMP0 4580 | addu RC, RC, TMP0
3610 | decode_RD4b RD 4581 | decode_RD4b RD
3611 | addu RD, RD, TMP3 4582 | addu RD, RD, TMP3
3612 | sdc1 f2, 0(RA) 4583 | sw SFARG2HI, HI(RA)
4584 | sw SFARG2LO, LO(RA)
3613 | addu PC, PC, RD 4585 | addu PC, PC, RD
3614 | b <3 4586 | b <3
3615 |. sw RC, -8+LO(RA) // Update control var. 4587 |. sw RC, -8+LO(RA) // Update control var.
@@ -3689,9 +4661,11 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3689 | bnez AT, >7 4661 | bnez AT, >7
3690 |. addiu MULTRES, TMP1, 8 4662 |. addiu MULTRES, TMP1, 8
3691 |6: 4663 |6:
3692 | ldc1 f0, 0(RC) 4664 | lw SFRETHI, HI(RC)
4665 | lw SFRETLO, LO(RC)
3693 | addiu RC, RC, 8 4666 | addiu RC, RC, 8
3694 | sdc1 f0, 0(RA) 4667 | sw SFRETHI, HI(RA)
4668 | sw SFRETLO, LO(RA)
3695 | sltu AT, RC, TMP3 4669 | sltu AT, RC, TMP3
3696 | bnez AT, <6 // More vararg slots? 4670 | bnez AT, <6 // More vararg slots?
3697 |. addiu RA, RA, 8 4671 |. addiu RA, RA, 8
@@ -3747,10 +4721,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3747 | beqz RC, >3 4721 | beqz RC, >3
3748 |. subu BASE, TMP2, TMP0 4722 |. subu BASE, TMP2, TMP0
3749 |2: 4723 |2:
3750 | ldc1 f0, 0(RA) 4724 | lw SFRETHI, HI(RA)
4725 | lw SFRETLO, LO(RA)
3751 | addiu RA, RA, 8 4726 | addiu RA, RA, 8
3752 | addiu RC, RC, -8 4727 | addiu RC, RC, -8
3753 | sdc1 f0, 0(TMP2) 4728 | sw SFRETHI, HI(TMP2)
4729 | sw SFRETLO, LO(TMP2)
3754 | bnez RC, <2 4730 | bnez RC, <2
3755 |. addiu TMP2, TMP2, 8 4731 |. addiu TMP2, TMP2, 8
3756 |3: 4732 |3:
@@ -3791,14 +4767,16 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3791 | lw INS, -4(PC) 4767 | lw INS, -4(PC)
3792 | addiu TMP2, BASE, -8 4768 | addiu TMP2, BASE, -8
3793 if (op == BC_RET1) { 4769 if (op == BC_RET1) {
3794 | ldc1 f0, 0(RA) 4770 | lw SFRETHI, HI(RA)
4771 | lw SFRETLO, LO(RA)
3795 } 4772 }
3796 | decode_RB8a RB, INS 4773 | decode_RB8a RB, INS
3797 | decode_RA8a RA, INS 4774 | decode_RA8a RA, INS
3798 | decode_RB8b RB 4775 | decode_RB8b RB
3799 | decode_RA8b RA 4776 | decode_RA8b RA
3800 if (op == BC_RET1) { 4777 if (op == BC_RET1) {
3801 | sdc1 f0, 0(TMP2) 4778 | sw SFRETHI, HI(TMP2)
4779 | sw SFRETLO, LO(TMP2)
3802 } 4780 }
3803 | subu BASE, TMP2, RA 4781 | subu BASE, TMP2, RA
3804 |5: 4782 |5:
@@ -3840,69 +4818,147 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3840 | // RA = base*8, RD = target (after end of loop or start of loop) 4818 | // RA = base*8, RD = target (after end of loop or start of loop)
3841 vk = (op == BC_IFORL || op == BC_JFORL); 4819 vk = (op == BC_IFORL || op == BC_JFORL);
3842 | addu RA, BASE, RA 4820 | addu RA, BASE, RA
3843 if (vk) { 4821 | lw SFARG1HI, FORL_IDX*8+HI(RA)
3844 | ldc1 f0, FORL_IDX*8(RA) 4822 | lw SFARG1LO, FORL_IDX*8+LO(RA)
3845 | ldc1 f4, FORL_STEP*8(RA)
3846 | ldc1 f2, FORL_STOP*8(RA)
3847 | lw TMP3, FORL_STEP*8+HI(RA)
3848 | add.d f0, f0, f4
3849 | sdc1 f0, FORL_IDX*8(RA)
3850 } else {
3851 | lw TMP1, FORL_IDX*8+HI(RA)
3852 | lw TMP3, FORL_STEP*8+HI(RA)
3853 | lw TMP2, FORL_STOP*8+HI(RA)
3854 | sltiu TMP1, TMP1, LJ_TISNUM
3855 | sltiu TMP0, TMP3, LJ_TISNUM
3856 | sltiu TMP2, TMP2, LJ_TISNUM
3857 | and TMP1, TMP1, TMP0
3858 | and TMP1, TMP1, TMP2
3859 | ldc1 f0, FORL_IDX*8(RA)
3860 | beqz TMP1, ->vmeta_for
3861 |. ldc1 f2, FORL_STOP*8(RA)
3862 }
3863 if (op != BC_JFORL) { 4823 if (op != BC_JFORL) {
3864 | srl RD, RD, 1 4824 | srl RD, RD, 1
3865 | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535) 4825 | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
4826 | addu TMP2, RD, TMP2
4827 }
4828 if (!vk) {
4829 | lw SFARG2HI, FORL_STOP*8+HI(RA)
4830 | lw SFARG2LO, FORL_STOP*8+LO(RA)
4831 | bne SFARG1HI, TISNUM, >5
4832 |. lw SFRETHI, FORL_STEP*8+HI(RA)
4833 | xor AT, SFARG2HI, TISNUM
4834 | lw SFRETLO, FORL_STEP*8+LO(RA)
4835 | xor TMP0, SFRETHI, TISNUM
4836 | or AT, AT, TMP0
4837 | bnez AT, ->vmeta_for
4838 |. slt AT, SFRETLO, r0
4839 | slt CRET1, SFARG2LO, SFARG1LO
4840 | slt TMP1, SFARG1LO, SFARG2LO
4841 | movn CRET1, TMP1, AT
4842 } else {
4843 | bne SFARG1HI, TISNUM, >5
4844 |. lw SFARG2LO, FORL_STEP*8+LO(RA)
4845 | lw SFRETLO, FORL_STOP*8+LO(RA)
4846 | move TMP3, SFARG1LO
4847 | addu SFARG1LO, SFARG1LO, SFARG2LO
4848 | xor TMP0, SFARG1LO, TMP3
4849 | xor TMP1, SFARG1LO, SFARG2LO
4850 | and TMP0, TMP0, TMP1
4851 | slt TMP1, SFARG1LO, SFRETLO
4852 | slt CRET1, SFRETLO, SFARG1LO
4853 | slt AT, SFARG2LO, r0
4854 | slt TMP0, TMP0, r0 // ((y^a) & (y^b)) < 0: overflow.
4855 | movn CRET1, TMP1, AT
4856 | or CRET1, CRET1, TMP0
4857 }
4858 |1:
4859 if (op == BC_FORI) {
4860 | movz TMP2, r0, CRET1
4861 | addu PC, PC, TMP2
4862 } else if (op == BC_JFORI) {
4863 | addu PC, PC, TMP2
4864 | lhu RD, -4+OFS_RD(PC)
4865 } else if (op == BC_IFORL) {
4866 | movn TMP2, r0, CRET1
4867 | addu PC, PC, TMP2
3866 } 4868 }
3867 | c.le.d 0, f0, f2 4869 if (vk) {
3868 | c.le.d 1, f2, f0 4870 | sw SFARG1HI, FORL_IDX*8+HI(RA)
3869 | sdc1 f0, FORL_EXT*8(RA) 4871 | sw SFARG1LO, FORL_IDX*8+LO(RA)
4872 }
4873 | ins_next1
4874 | sw SFARG1HI, FORL_EXT*8+HI(RA)
4875 | sw SFARG1LO, FORL_EXT*8+LO(RA)
4876 |2:
3870 if (op == BC_JFORI) { 4877 if (op == BC_JFORI) {
3871 | li TMP1, 1 4878 | beqz CRET1, =>BC_JLOOP
3872 | li TMP2, 1
3873 | addu TMP0, RD, TMP0
3874 | slt TMP3, TMP3, r0
3875 | movf TMP1, r0, 0
3876 | addu PC, PC, TMP0
3877 | movf TMP2, r0, 1
3878 | lhu RD, -4+OFS_RD(PC)
3879 | movn TMP1, TMP2, TMP3
3880 | bnez TMP1, =>BC_JLOOP
3881 |. decode_RD8b RD 4879 |. decode_RD8b RD
3882 } else if (op == BC_JFORL) { 4880 } else if (op == BC_JFORL) {
3883 | li TMP1, 1 4881 | beqz CRET1, =>BC_JLOOP
3884 | li TMP2, 1 4882 }
3885 | slt TMP3, TMP3, r0 4883 | ins_next2
3886 | movf TMP1, r0, 0 4884 |
3887 | movf TMP2, r0, 1 4885 |5: // FP loop.
3888 | movn TMP1, TMP2, TMP3 4886 |.if FPU
3889 | bnez TMP1, =>BC_JLOOP 4887 if (!vk) {
4888 | ldc1 f0, FORL_IDX*8(RA)
4889 | ldc1 f2, FORL_STOP*8(RA)
4890 | sltiu TMP0, SFARG1HI, LJ_TISNUM
4891 | sltiu TMP1, SFARG2HI, LJ_TISNUM
4892 | sltiu AT, SFRETHI, LJ_TISNUM
4893 | and TMP0, TMP0, TMP1
4894 | and AT, AT, TMP0
4895 | beqz AT, ->vmeta_for
4896 |. slt TMP3, SFRETHI, r0
4897 | c.ole.d 0, f0, f2
4898 | c.ole.d 1, f2, f0
4899 | li CRET1, 1
4900 | movt CRET1, r0, 0
4901 | movt AT, r0, 1
4902 | b <1
4903 |. movn CRET1, AT, TMP3
4904 } else {
4905 | ldc1 f0, FORL_IDX*8(RA)
4906 | ldc1 f4, FORL_STEP*8(RA)
4907 | ldc1 f2, FORL_STOP*8(RA)
4908 | lw SFARG2HI, FORL_STEP*8+HI(RA)
4909 | add.d f0, f0, f4
4910 | c.ole.d 0, f0, f2
4911 | c.ole.d 1, f2, f0
4912 | slt TMP3, SFARG2HI, r0
4913 | li CRET1, 1
4914 | li AT, 1
4915 | movt CRET1, r0, 0
4916 | movt AT, r0, 1
4917 | movn CRET1, AT, TMP3
4918 if (op == BC_IFORL) {
4919 | movn TMP2, r0, CRET1
4920 | addu PC, PC, TMP2
4921 }
4922 | sdc1 f0, FORL_IDX*8(RA)
4923 | ins_next1
4924 | b <2
4925 |. sdc1 f0, FORL_EXT*8(RA)
4926 }
4927 |.else
4928 if (!vk) {
4929 | sltiu TMP0, SFARG1HI, LJ_TISNUM
4930 | sltiu TMP1, SFARG2HI, LJ_TISNUM
4931 | sltiu AT, SFRETHI, LJ_TISNUM
4932 | and TMP0, TMP0, TMP1
4933 | and AT, AT, TMP0
4934 | beqz AT, ->vmeta_for
4935 |. nop
4936 | bal ->vm_sfcmpolex
4937 |. move TMP3, SFRETHI
4938 | b <1
3890 |. nop 4939 |. nop
3891 } else { 4940 } else {
3892 | addu TMP1, RD, TMP0 4941 | lw SFARG2HI, FORL_STEP*8+HI(RA)
3893 | slt TMP3, TMP3, r0 4942 | load_got __adddf3
3894 | move TMP2, TMP1 4943 | call_extern
3895 if (op == BC_FORI) { 4944 |. sw TMP2, ARG5
3896 | movt TMP1, r0, 0 4945 | lw SFARG2HI, FORL_STOP*8+HI(RA)
3897 | movt TMP2, r0, 1 4946 | lw SFARG2LO, FORL_STOP*8+LO(RA)
4947 | move SFARG1HI, SFRETHI
4948 | move SFARG1LO, SFRETLO
4949 | bal ->vm_sfcmpolex
4950 |. lw TMP3, FORL_STEP*8+HI(RA)
4951 if ( op == BC_JFORL ) {
4952 | lhu RD, -4+OFS_RD(PC)
4953 | lw TMP2, ARG5
4954 | b <1
4955 |. decode_RD8b RD
3898 } else { 4956 } else {
3899 | movf TMP1, r0, 0 4957 | b <1
3900 | movf TMP2, r0, 1 4958 |. lw TMP2, ARG5
3901 } 4959 }
3902 | movn TMP1, TMP2, TMP3
3903 | addu PC, PC, TMP1
3904 } 4960 }
3905 | ins_next 4961 |.endif
3906 break; 4962 break;
3907 4963
3908 case BC_ITERL: 4964 case BC_ITERL:
@@ -3961,8 +5017,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3961 | sw AT, DISPATCH_GL(vmstate)(DISPATCH) 5017 | sw AT, DISPATCH_GL(vmstate)(DISPATCH)
3962 | lw TRACE:TMP2, 0(TMP1) 5018 | lw TRACE:TMP2, 0(TMP1)
3963 | sw BASE, DISPATCH_GL(jit_base)(DISPATCH) 5019 | sw BASE, DISPATCH_GL(jit_base)(DISPATCH)
3964 | sw L, DISPATCH_GL(jit_L)(DISPATCH)
3965 | lw TMP2, TRACE:TMP2->mcode 5020 | lw TMP2, TRACE:TMP2->mcode
5021 | sw L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
3966 | jr TMP2 5022 | jr TMP2
3967 |. addiu JGL, DISPATCH, GG_DISP2G+32768 5023 |. addiu JGL, DISPATCH, GG_DISP2G+32768
3968 |.endif 5024 |.endif
@@ -4088,6 +5144,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4088 | li_vmstate INTERP 5144 | li_vmstate INTERP
4089 | lw PC, FRAME_PC(BASE) // Fetch PC of caller. 5145 | lw PC, FRAME_PC(BASE) // Fetch PC of caller.
4090 | subu RA, TMP1, RD // RA = L->top - nresults*8 5146 | subu RA, TMP1, RD // RA = L->top - nresults*8
5147 | sw L, DISPATCH_GL(cur_L)(DISPATCH)
4091 | b ->vm_returnc 5148 | b ->vm_returnc
4092 |. st_vmstate 5149 |. st_vmstate
4093 break; 5150 break;
@@ -4150,8 +5207,10 @@ static void emit_asm_debug(BuildCtx *ctx)
4150 fcofs, CFRAME_SIZE); 5207 fcofs, CFRAME_SIZE);
4151 for (i = 23; i >= 16; i--) 5208 for (i = 23; i >= 16; i--)
4152 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); 5209 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
5210#if !LJ_SOFTFP
4153 for (i = 30; i >= 20; i -= 2) 5211 for (i = 30; i >= 20; i -= 2)
4154 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); 5212 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
5213#endif
4155 fprintf(ctx->fp, 5214 fprintf(ctx->fp,
4156 "\t.align 2\n" 5215 "\t.align 2\n"
4157 ".LEFDE0:\n\n"); 5216 ".LEFDE0:\n\n");
@@ -4203,8 +5262,10 @@ static void emit_asm_debug(BuildCtx *ctx)
4203 fcofs, CFRAME_SIZE); 5262 fcofs, CFRAME_SIZE);
4204 for (i = 23; i >= 16; i--) 5263 for (i = 23; i >= 16; i--)
4205 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i); 5264 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
5265#if !LJ_SOFTFP
4206 for (i = 30; i >= 20; i -= 2) 5266 for (i = 30; i >= 20; i -= 2)
4207 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i); 5267 fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
5268#endif
4208 fprintf(ctx->fp, 5269 fprintf(ctx->fp,
4209 "\t.align 2\n" 5270 "\t.align 2\n"
4210 ".LEFDE2:\n\n"); 5271 ".LEFDE2:\n\n");
diff --git a/src/vm_mips64.dasc b/src/vm_mips64.dasc
new file mode 100644
index 00000000..8e71f362
--- /dev/null
+++ b/src/vm_mips64.dasc
@@ -0,0 +1,5453 @@
1|// Low-level VM code for MIPS64 CPUs.
2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4|//
5|// Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
6|// Sponsored by Cisco Systems, Inc.
7|
8|.arch mips64
9|.section code_op, code_sub
10|
11|.actionlist build_actionlist
12|.globals GLOB_
13|.globalnames globnames
14|.externnames extnames
15|
16|// Note: The ragged indentation of the instructions is intentional.
17|// The starting columns indicate data dependencies.
18|
19|//-----------------------------------------------------------------------
20|
21|// Fixed register assignments for the interpreter.
22|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
23|
24|.macro .FPU, a, b
25|.if FPU
26| a, b
27|.endif
28|.endmacro
29|
30|// The following must be C callee-save (but BASE is often refetched).
31|.define BASE, r16 // Base of current Lua stack frame.
32|.define KBASE, r17 // Constants of current Lua function.
33|.define PC, r18 // Next PC.
34|.define DISPATCH, r19 // Opcode dispatch table.
35|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
36|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
37|
38|.define JGL, r30 // On-trace: global_State + 32768.
39|
40|// Constants for type-comparisons, stores and conversions. C callee-save.
41|.define TISNIL, r30
42|.define TISNUM, r22
43|.if FPU
44|.define TOBIT, f30 // 2^52 + 2^51.
45|.endif
46|
47|// The following temporaries are not saved across C calls, except for RA.
48|.define RA, r23 // Callee-save.
49|.define RB, r8
50|.define RC, r9
51|.define RD, r10
52|.define INS, r11
53|
54|.define AT, r1 // Assembler temporary.
55|.define TMP0, r12
56|.define TMP1, r13
57|.define TMP2, r14
58|.define TMP3, r15
59|
60|// MIPS n64 calling convention.
61|.define CFUNCADDR, r25
62|.define CARG1, r4
63|.define CARG2, r5
64|.define CARG3, r6
65|.define CARG4, r7
66|.define CARG5, r8
67|.define CARG6, r9
68|.define CARG7, r10
69|.define CARG8, r11
70|
71|.define CRET1, r2
72|.define CRET2, r3
73|
74|.if FPU
75|.define FARG1, f12
76|.define FARG2, f13
77|.define FARG3, f14
78|.define FARG4, f15
79|.define FARG5, f16
80|.define FARG6, f17
81|.define FARG7, f18
82|.define FARG8, f19
83|
84|.define FRET1, f0
85|.define FRET2, f2
86|
87|.define FTMP0, f20
88|.define FTMP1, f21
89|.define FTMP2, f22
90|.endif
91|
92|// Stack layout while in interpreter. Must match with lj_frame.h.
93|.if FPU // MIPS64 hard-float.
94|
95|.define CFRAME_SPACE, 192 // Delta for sp.
96|
97|//----- 16 byte aligned, <-- sp entering interpreter
98|.define SAVE_ERRF, 188(sp) // 32 bit values.
99|.define SAVE_NRES, 184(sp)
100|.define SAVE_CFRAME, 176(sp) // 64 bit values.
101|.define SAVE_L, 168(sp)
102|.define SAVE_PC, 160(sp)
103|//----- 16 byte aligned
104|.define SAVE_GPR_, 80 // .. 80+10*8: 64 bit GPR saves.
105|.define SAVE_FPR_, 16 // .. 16+8*8: 64 bit FPR saves.
106|
107|.else // MIPS64 soft-float
108|
109|.define CFRAME_SPACE, 128 // Delta for sp.
110|
111|//----- 16 byte aligned, <-- sp entering interpreter
112|.define SAVE_ERRF, 124(sp) // 32 bit values.
113|.define SAVE_NRES, 120(sp)
114|.define SAVE_CFRAME, 112(sp) // 64 bit values.
115|.define SAVE_L, 104(sp)
116|.define SAVE_PC, 96(sp)
117|//----- 16 byte aligned
118|.define SAVE_GPR_, 16 // .. 16+10*8: 64 bit GPR saves.
119|
120|.endif
121|
122|.define TMPX, 8(sp) // Unused by interpreter, temp for JIT code.
123|.define TMPD, 0(sp)
124|//----- 16 byte aligned
125|
126|.define TMPD_OFS, 0
127|
128|.define SAVE_MULTRES, TMPD
129|
130|//-----------------------------------------------------------------------
131|
132|.macro saveregs
133| daddiu sp, sp, -CFRAME_SPACE
134| sd ra, SAVE_GPR_+9*8(sp)
135| sd r30, SAVE_GPR_+8*8(sp)
136| .FPU sdc1 f31, SAVE_FPR_+7*8(sp)
137| sd r23, SAVE_GPR_+7*8(sp)
138| .FPU sdc1 f30, SAVE_FPR_+6*8(sp)
139| sd r22, SAVE_GPR_+6*8(sp)
140| .FPU sdc1 f29, SAVE_FPR_+5*8(sp)
141| sd r21, SAVE_GPR_+5*8(sp)
142| .FPU sdc1 f28, SAVE_FPR_+4*8(sp)
143| sd r20, SAVE_GPR_+4*8(sp)
144| .FPU sdc1 f27, SAVE_FPR_+3*8(sp)
145| sd r19, SAVE_GPR_+3*8(sp)
146| .FPU sdc1 f26, SAVE_FPR_+2*8(sp)
147| sd r18, SAVE_GPR_+2*8(sp)
148| .FPU sdc1 f25, SAVE_FPR_+1*8(sp)
149| sd r17, SAVE_GPR_+1*8(sp)
150| .FPU sdc1 f24, SAVE_FPR_+0*8(sp)
151| sd r16, SAVE_GPR_+0*8(sp)
152|.endmacro
153|
154|.macro restoreregs_ret
155| ld ra, SAVE_GPR_+9*8(sp)
156| ld r30, SAVE_GPR_+8*8(sp)
157| ld r23, SAVE_GPR_+7*8(sp)
158| .FPU ldc1 f31, SAVE_FPR_+7*8(sp)
159| ld r22, SAVE_GPR_+6*8(sp)
160| .FPU ldc1 f30, SAVE_FPR_+6*8(sp)
161| ld r21, SAVE_GPR_+5*8(sp)
162| .FPU ldc1 f29, SAVE_FPR_+5*8(sp)
163| ld r20, SAVE_GPR_+4*8(sp)
164| .FPU ldc1 f28, SAVE_FPR_+4*8(sp)
165| ld r19, SAVE_GPR_+3*8(sp)
166| .FPU ldc1 f27, SAVE_FPR_+3*8(sp)
167| ld r18, SAVE_GPR_+2*8(sp)
168| .FPU ldc1 f26, SAVE_FPR_+2*8(sp)
169| ld r17, SAVE_GPR_+1*8(sp)
170| .FPU ldc1 f25, SAVE_FPR_+1*8(sp)
171| ld r16, SAVE_GPR_+0*8(sp)
172| .FPU ldc1 f24, SAVE_FPR_+0*8(sp)
173| jr ra
174| daddiu sp, sp, CFRAME_SPACE
175|.endmacro
176|
177|// Type definitions. Some of these are only used for documentation.
178|.type L, lua_State, LREG
179|.type GL, global_State
180|.type TVALUE, TValue
181|.type GCOBJ, GCobj
182|.type STR, GCstr
183|.type TAB, GCtab
184|.type LFUNC, GCfuncL
185|.type CFUNC, GCfuncC
186|.type PROTO, GCproto
187|.type UPVAL, GCupval
188|.type NODE, Node
189|.type NARGS8, int
190|.type TRACE, GCtrace
191|.type SBUF, SBuf
192|
193|//-----------------------------------------------------------------------
194|
195|// Trap for not-yet-implemented parts.
196|.macro NYI; .long 0xf0f0f0f0; .endmacro
197|
198|// Macros to mark delay slots.
199|.macro ., a; a; .endmacro
200|.macro ., a,b; a,b; .endmacro
201|.macro ., a,b,c; a,b,c; .endmacro
202|.macro ., a,b,c,d; a,b,c,d; .endmacro
203|
204|.define FRAME_PC, -8
205|.define FRAME_FUNC, -16
206|
207|//-----------------------------------------------------------------------
208|
209|// Endian-specific defines.
210|.if ENDIAN_LE
211|.define HI, 4
212|.define LO, 0
213|.define OFS_RD, 2
214|.define OFS_RA, 1
215|.define OFS_OP, 0
216|.else
217|.define HI, 0
218|.define LO, 4
219|.define OFS_RD, 0
220|.define OFS_RA, 2
221|.define OFS_OP, 3
222|.endif
223|
224|// Instruction decode.
225|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
226|.macro decode_OP8a, dst, ins; andi dst, ins, 0xff; .endmacro
227|.macro decode_OP8b, dst; sll dst, dst, 3; .endmacro
228|.macro decode_RC8a, dst, ins; srl dst, ins, 13; .endmacro
229|.macro decode_RC8b, dst; andi dst, dst, 0x7f8; .endmacro
230|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
231|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
232|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
233|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
234|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
235|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
236|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
237|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
238|
239|// Instruction fetch.
240|.macro ins_NEXT1
241| lw INS, 0(PC)
242| daddiu PC, PC, 4
243|.endmacro
244|// Instruction decode+dispatch.
245|.macro ins_NEXT2
246| decode_OP8a TMP1, INS
247| decode_OP8b TMP1
248| daddu TMP0, DISPATCH, TMP1
249| decode_RD8a RD, INS
250| ld AT, 0(TMP0)
251| decode_RA8a RA, INS
252| decode_RD8b RD
253| jr AT
254| decode_RA8b RA
255|.endmacro
256|.macro ins_NEXT
257| ins_NEXT1
258| ins_NEXT2
259|.endmacro
260|
261|// Instruction footer.
262|.if 1
263| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
264| .define ins_next, ins_NEXT
265| .define ins_next_, ins_NEXT
266| .define ins_next1, ins_NEXT1
267| .define ins_next2, ins_NEXT2
268|.else
269| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
270| // Affects only certain kinds of benchmarks (and only with -j off).
271| .macro ins_next
272| b ->ins_next
273| .endmacro
274| .macro ins_next1
275| .endmacro
276| .macro ins_next2
277| b ->ins_next
278| .endmacro
279| .macro ins_next_
280| ->ins_next:
281| ins_NEXT
282| .endmacro
283|.endif
284|
285|// Call decode and dispatch.
286|.macro ins_callt
287| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
288| ld PC, LFUNC:RB->pc
289| lw INS, 0(PC)
290| daddiu PC, PC, 4
291| decode_OP8a TMP1, INS
292| decode_RA8a RA, INS
293| decode_OP8b TMP1
294| decode_RA8b RA
295| daddu TMP0, DISPATCH, TMP1
296| ld TMP0, 0(TMP0)
297| jr TMP0
298| daddu RA, RA, BASE
299|.endmacro
300|
301|.macro ins_call
302| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
303| sd PC, FRAME_PC(BASE)
304| ins_callt
305|.endmacro
306|
307|//-----------------------------------------------------------------------
308|
309|.macro branch_RD
310| srl TMP0, RD, 1
311| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
312| addu TMP0, TMP0, AT
313| daddu PC, PC, TMP0
314|.endmacro
315|
316|// Assumes DISPATCH is relative to GL.
317#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
318#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
319#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
320#define DISPATCH_GOT(name) (GG_DISP2GOT + sizeof(void*)*LJ_GOT_##name)
321|
322#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
323|
324|.macro load_got, func
325| ld CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
326|.endmacro
327|// Much faster. Sadly, there's no easy way to force the required code layout.
328|// .macro call_intern, func; bal extern func; .endmacro
329|.macro call_intern, func; jalr CFUNCADDR; .endmacro
330|.macro call_extern; jalr CFUNCADDR; .endmacro
331|.macro jmp_extern; jr CFUNCADDR; .endmacro
332|
333|.macro hotcheck, delta, target
334| dsrl TMP1, PC, 1
335| andi TMP1, TMP1, 126
336| daddu TMP1, TMP1, DISPATCH
337| lhu TMP2, GG_DISP2HOT(TMP1)
338| addiu TMP2, TMP2, -delta
339| bltz TMP2, target
340|. sh TMP2, GG_DISP2HOT(TMP1)
341|.endmacro
342|
343|.macro hotloop
344| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
345|.endmacro
346|
347|.macro hotcall
348| hotcheck HOTCOUNT_CALL, ->vm_hotcall
349|.endmacro
350|
351|// Set current VM state. Uses TMP0.
352|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
353|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
354|
355|// Move table write barrier back. Overwrites mark and tmp.
356|.macro barrierback, tab, mark, tmp, target
357| ld tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
358| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
359| sd tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
360| sb mark, tab->marked
361| b target
362|. sd tmp, tab->gclist
363|.endmacro
364|
365|// Clear type tag. Isolate lowest 14+32+1=47 bits of reg.
366|.macro cleartp, reg; dextm reg, reg, 0, 14; .endmacro
367|.macro cleartp, dst, reg; dextm dst, reg, 0, 14; .endmacro
368|
369|// Set type tag: Merge 17 type bits into bits [15+32=47, 31+32+1=64) of dst.
370|.macro settp, dst, tp; dinsu dst, tp, 15, 31; .endmacro
371|
372|// Extract (negative) type tag.
373|.macro gettp, dst, src; dsra dst, src, 47; .endmacro
374|
375|// Macros to check the TValue type and extract the GCobj. Branch on failure.
376|.macro checktp, reg, tp, target
377| gettp AT, reg
378| daddiu AT, AT, tp
379| bnez AT, target
380|. cleartp reg
381|.endmacro
382|.macro checktp, dst, reg, tp, target
383| gettp AT, reg
384| daddiu AT, AT, tp
385| bnez AT, target
386|. cleartp dst, reg
387|.endmacro
388|.macro checkstr, reg, target; checktp reg, -LJ_TSTR, target; .endmacro
389|.macro checktab, reg, target; checktp reg, -LJ_TTAB, target; .endmacro
390|.macro checkfunc, reg, target; checktp reg, -LJ_TFUNC, target; .endmacro
391|.macro checkint, reg, target // Caveat: has delay slot!
392| gettp AT, reg
393| bne AT, TISNUM, target
394|.endmacro
395|.macro checknum, reg, target // Caveat: has delay slot!
396| gettp AT, reg
397| sltiu AT, AT, LJ_TISNUM
398| beqz AT, target
399|.endmacro
400|
401|.macro mov_false, reg
402| lu reg, 0x8000
403| dsll reg, reg, 32
404| not reg, reg
405|.endmacro
406|.macro mov_true, reg
407| li reg, 0x0001
408| dsll reg, reg, 48
409| not reg, reg
410|.endmacro
411|
412|//-----------------------------------------------------------------------
413
414/* Generate subroutines used by opcodes and other parts of the VM. */
415/* The .code_sub section should be last to help static branch prediction. */
416static void build_subroutines(BuildCtx *ctx)
417{
418 |.code_sub
419 |
420 |//-----------------------------------------------------------------------
421 |//-- Return handling ----------------------------------------------------
422 |//-----------------------------------------------------------------------
423 |
424 |->vm_returnp:
425 | // See vm_return. Also: TMP2 = previous base.
426 | andi AT, PC, FRAME_P
427 | beqz AT, ->cont_dispatch
428 |
429 | // Return from pcall or xpcall fast func.
430 |. mov_true TMP1 // Delay slot: preload true for the pcall result.
431 | ld PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
432 | move BASE, TMP2 // Restore caller base.
433 | // Prepending may overwrite the pcall frame, so do it at the end.
434 | sd TMP1, -8(RA) // Prepend true to results.
435 | daddiu RA, RA, -8
436 |
437 |->vm_returnc:
438 | addiu RD, RD, 8 // RD = (nresults+1)*8.
439 | andi TMP0, PC, FRAME_TYPE
440 | beqz RD, ->vm_unwind_c_eh // RD wrapped to 0: LUA_MULTRET return.
441 |. li CRET1, LUA_YIELD
442 | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
443 |. move MULTRES, RD
444 |
445 |->vm_return:
446 | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
447 | // TMP0 = PC & FRAME_TYPE
448 | li TMP2, -8 // Mask to clear the frame-type bits below.
449 | xori AT, TMP0, FRAME_C
450 | and TMP2, PC, TMP2 // TMP2 = frame delta (PC without type bits).
451 | bnez AT, ->vm_returnp
452 | dsubu TMP2, BASE, TMP2 // TMP2 = previous base.
453 |
454 | addiu TMP1, RD, -8
455 | sd TMP2, L->base
456 | li_vmstate C
457 | lw TMP2, SAVE_NRES
458 | daddiu BASE, BASE, -16
459 | st_vmstate
460 | beqz TMP1, >2
461 |. sll TMP2, TMP2, 3 // TMP2 = wanted nresults*8 (from SAVE_NRES).
462 |1: // Copy loop: move the RD/8-1 results from RA down to BASE.
463 | addiu TMP1, TMP1, -8
464 | ld CRET1, 0(RA)
465 | daddiu RA, RA, 8
466 | sd CRET1, 0(BASE)
467 | bnez TMP1, <1
468 |. daddiu BASE, BASE, 8
469 |
470 |2:
471 | bne TMP2, RD, >6 // Mismatch between wanted and got results?
472 |3:
473 |. sd BASE, L->top // Store new top.
474 |
475 |->vm_leave_cp:
476 | ld TMP0, SAVE_CFRAME // Restore previous C frame.
477 | move CRET1, r0 // Ok return status for vm_pcall.
478 | sd TMP0, L->cframe
479 |
480 |->vm_leave_unw:
481 | restoreregs_ret
482 |
483 |6:
484 | ld TMP1, L->maxstack
485 | slt AT, TMP2, RD
486 | bnez AT, >7 // Less results wanted?
487 | // More results wanted. Check stack size and fill up results with nil.
488 |. slt AT, BASE, TMP1
489 | beqz AT, >8
490 |. nop
491 | sd TISNIL, 0(BASE)
492 | addiu RD, RD, 8
493 | b <2
494 |. daddiu BASE, BASE, 8
495 |
496 |7: // Less results wanted.
497 | subu TMP0, RD, TMP2
498 | dsubu TMP0, BASE, TMP0 // Either keep top or shrink it.
499 |.if MIPSR6
500 | selnez TMP0, TMP0, TMP2 // LUA_MULTRET+1 case?
501 | seleqz BASE, BASE, TMP2
502 | b <3
503 |. or BASE, BASE, TMP0
504 |.else
505 | b <3
506 |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
507 |.endif
508 |
509 |8: // Corner case: need to grow stack for filling up results.
510 | // This can happen if:
511 | // - A C function grows the stack (a lot).
512 | // - The GC shrinks the stack in between.
513 | // - A return back from a lua_call() with (high) nresults adjustment.
514 | load_got lj_state_growstack
515 | move MULTRES, RD // Preserve RD across the C call.
516 | srl CARG2, TMP2, 3
517 | call_intern lj_state_growstack // (lua_State *L, int n)
518 |. move CARG1, L
519 | lw TMP2, SAVE_NRES
520 | ld BASE, L->top // Need the (realloced) L->top in BASE.
521 | move RD, MULTRES
522 | b <2
523 |. sll TMP2, TMP2, 3
524 |
525 |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
526 | // (void *cframe, int errcode)
527 | move sp, CARG1
528 | move CRET1, CARG2
529 |->vm_unwind_c_eh: // Landing pad for external unwinder.
530 | ld L, SAVE_L
531 | li TMP0, ~LJ_VMST_C
532 | ld GL:TMP1, L->glref
533 | b ->vm_leave_unw
534 |. sw TMP0, GL:TMP1->vmstate
535 |
536 |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
537 | // (void *cframe)
538 | li AT, -4 // NOTE(review): masks low 2 bits of cframe — presumably flag bits; confirm vs. CFRAME_* defs.
539 | and sp, CARG1, AT
540 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
541 | ld L, SAVE_L
542 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
543 | li TISNIL, LJ_TNIL
544 | li TISNUM, LJ_TISNUM
545 | ld BASE, L->base
546 | ld DISPATCH, L->glref // Setup pointer to dispatch table.
547 | .FPU mtc1 TMP3, TOBIT
548 | mov_false TMP1
549 | li_vmstate INTERP
550 | ld PC, FRAME_PC(BASE) // Fetch PC of previous frame.
551 | .FPU cvt.d.s TOBIT, TOBIT
552 | daddiu RA, BASE, -8 // Results start at BASE-8.
553 | daddiu DISPATCH, DISPATCH, GG_G2DISP
554 | sd TMP1, 0(RA) // Prepend false to error message.
555 | st_vmstate
556 | b ->vm_returnc
557 |. li RD, 16 // 2 results: false + error message.
558 |
559 |//-----------------------------------------------------------------------
560 |//-- Grow stack for calls -----------------------------------------------
561 |//-----------------------------------------------------------------------
562 |
563 |->vm_growstack_c: // Grow stack for C function.
564 | b >2
565 |. li CARG2, LUA_MINSTACK
566 |
567 |->vm_growstack_l: // Grow stack for Lua function.
568 | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
569 | daddu RC, BASE, RC
570 | dsubu RA, RA, BASE
571 | sd BASE, L->base
572 | daddiu PC, PC, 4 // Must point after first instruction.
573 | sd RC, L->top
574 | srl CARG2, RA, 3 // n = framesize in slots.
575 |2:
576 | // L->base = new base, L->top = top
577 | load_got lj_state_growstack
578 | sd PC, SAVE_PC
579 | call_intern lj_state_growstack // (lua_State *L, int n)
580 |. move CARG1, L
581 | ld BASE, L->base
582 | ld RC, L->top
583 | ld LFUNC:RB, FRAME_FUNC(BASE)
584 | dsubu RC, RC, BASE
585 | cleartp LFUNC:RB
586 | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
587 | ins_callt // Just retry the call.
588 |
589 |//-----------------------------------------------------------------------
590 |//-- Entry points into the assembler VM ---------------------------------
591 |//-----------------------------------------------------------------------
592 |
593 |->vm_resume: // Setup C frame and resume thread.
594 | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
595 | saveregs
596 | move L, CARG1
597 | ld DISPATCH, L->glref // Setup pointer to dispatch table.
598 | move BASE, CARG2
599 | lbu TMP1, L->status
600 | sd L, SAVE_L
601 | li PC, FRAME_CP
602 | daddiu TMP0, sp, CFRAME_RESUME
603 | daddiu DISPATCH, DISPATCH, GG_G2DISP
604 | sw r0, SAVE_NRES
605 | sw r0, SAVE_ERRF
606 | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
607 | sd r0, SAVE_CFRAME
608 | beqz TMP1, >3 // status == 0: start like a regular call (label 3).
609 |. sd TMP0, L->cframe
610 |
611 | // Resume after yield (like a return).
612 | sd L, DISPATCH_GL(cur_L)(DISPATCH)
613 | move RA, BASE
614 | ld BASE, L->base
615 | ld TMP1, L->top
616 | ld PC, FRAME_PC(BASE)
617 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
618 | dsubu RD, TMP1, BASE
619 | .FPU mtc1 TMP3, TOBIT
620 | sb r0, L->status
621 | .FPU cvt.d.s TOBIT, TOBIT
622 | li_vmstate INTERP
623 | daddiu RD, RD, 8 // RD = (nresults+1)*8.
624 | st_vmstate
625 | move MULTRES, RD
626 | andi TMP0, PC, FRAME_TYPE
627 | li TISNIL, LJ_TNIL
628 | beqz TMP0, ->BC_RET_Z
629 |. li TISNUM, LJ_TISNUM
630 | b ->vm_return
631 |. nop
632 |
633 |->vm_pcall: // Setup protected C frame and enter VM.
634 | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
635 | saveregs
636 | sw CARG4, SAVE_ERRF
637 | b >1
638 |. li PC, FRAME_CP
639 |
640 |->vm_call: // Setup C frame and enter VM.
641 | // (lua_State *L, TValue *base, int nres1)
642 | saveregs
643 | li PC, FRAME_C
644 |
645 |1: // Entry point for vm_pcall above (PC = ftype).
646 | ld TMP1, L:CARG1->cframe
647 | move L, CARG1
648 | sw CARG3, SAVE_NRES
649 | ld DISPATCH, L->glref // Setup pointer to dispatch table.
650 | sd CARG1, SAVE_L
651 | move BASE, CARG2
652 | daddiu DISPATCH, DISPATCH, GG_G2DISP
653 | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
654 | sd TMP1, SAVE_CFRAME
655 | sd sp, L->cframe // Add our C frame to cframe chain.
656 |
657 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
658 | sd L, DISPATCH_GL(cur_L)(DISPATCH)
659 | ld TMP2, L->base // TMP2 = old base (used in vmeta_call).
660 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
661 | ld TMP1, L->top
662 | .FPU mtc1 TMP3, TOBIT
663 | daddu PC, PC, BASE
664 | dsubu NARGS8:RC, TMP1, BASE
665 | li TISNUM, LJ_TISNUM
666 | dsubu PC, PC, TMP2 // PC = frame delta + frame type
667 | .FPU cvt.d.s TOBIT, TOBIT
668 | li_vmstate INTERP
669 | li TISNIL, LJ_TNIL
670 | st_vmstate
671 |
672 |->vm_call_dispatch:
673 | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
674 | ld LFUNC:RB, FRAME_FUNC(BASE)
675 | checkfunc LFUNC:RB, ->vmeta_call
676 |
677 |->vm_call_dispatch_f:
678 | ins_call
679 | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
680 |
681 |->vm_cpcall: // Setup protected C frame, call C.
682 | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
683 | saveregs
684 | move L, CARG1
685 | ld TMP0, L:CARG1->stack
686 | sd CARG1, SAVE_L
687 | ld TMP1, L->top
688 | ld DISPATCH, L->glref // Setup pointer to dispatch table.
689 | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
690 | dsubu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
691 | ld TMP1, L->cframe
692 | daddiu DISPATCH, DISPATCH, GG_G2DISP
693 | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
694 | sw r0, SAVE_ERRF // No error function.
695 | sd TMP1, SAVE_CFRAME
696 | sd sp, L->cframe // Add our C frame to cframe chain.
697 | sd L, DISPATCH_GL(cur_L)(DISPATCH)
698 | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
699 |. move CFUNCADDR, CARG4
700 | move BASE, CRET1 // cp() returns the new base or NULL.
701 | bnez CRET1, <3 // Else continue with the call.
702 |. li PC, FRAME_CP
703 | b ->vm_leave_cp // No base? Just remove C frame.
704 |. nop
705 |
706 |//-----------------------------------------------------------------------
707 |//-- Metamethod handling ------------------------------------------------
708 |//-----------------------------------------------------------------------
709 |
710 |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
711 |// stack, so BASE doesn't need to be reloaded across these calls.
712 |
713 |//-- Continuation dispatch ----------------------------------------------
714 |
715 |->cont_dispatch:
716 | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
717 | ld TMP0, -32(BASE) // Continuation.
718 | move RB, BASE
719 | move BASE, TMP2 // Restore caller BASE.
720 | ld LFUNC:TMP1, FRAME_FUNC(TMP2)
721 |.if FFI
722 | sltiu AT, TMP0, 2 // cont 0/1 are special markers, not code addresses.
723 |.endif
724 | ld PC, -24(RB) // Restore PC from [cont|PC].
725 | cleartp LFUNC:TMP1
726 | daddu TMP2, RA, RD // TMP2 = one past the last result.
727 | ld TMP1, LFUNC:TMP1->pc
728 |.if FFI
729 | bnez AT, >1
730 |.endif
731 |. sd TISNIL, -8(TMP2) // Ensure one valid arg.
732 | // BASE = base, RA = resultptr, RB = meta base
733 | jr TMP0 // Jump to continuation.
734 |. ld KBASE, PC2PROTO(k)(TMP1)
735 |
736 |.if FFI
737 |1:
738 | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
739 | // cont = 0: tailcall from C function.
740 |. daddiu TMP1, RB, -32
741 | b ->vm_call_tail
742 |. dsubu RC, TMP1, BASE
743 |.endif
744 |
745 |->cont_cat: // RA = resultptr, RB = meta base
746 | lw INS, -4(PC)
747 | daddiu CARG2, RB, -32
748 | ld CRET1, 0(RA)
749 | decode_RB8a MULTRES, INS
750 | decode_RA8a RA, INS
751 | decode_RB8b MULTRES
752 | decode_RA8b RA
753 | daddu TMP1, BASE, MULTRES
754 | sd BASE, L->base
755 | dsubu CARG3, CARG2, TMP1
756 | bne TMP1, CARG2, ->BC_CAT_Z // More concats pending?
757 |. sd CRET1, 0(CARG2)
758 | daddu RA, BASE, RA
759 | b ->cont_nop
760 |. sd CRET1, 0(RA)
761 |
762 |//-- Table indexing metamethods -----------------------------------------
763 |
764 |->vmeta_tgets1:
765 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv) // Use global tmptv as temp key TValue.
766 | li TMP0, LJ_TSTR
767 | settp STR:RC, TMP0
768 | b >1
769 |. sd STR:RC, 0(CARG3)
770 |
771 |->vmeta_tgets:
772 | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
773 | li TMP0, LJ_TTAB
774 | li TMP1, LJ_TSTR
775 | settp TAB:RB, TMP0
776 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
777 | sd TAB:RB, 0(CARG2)
778 | settp STR:RC, TMP1
779 | b >1
780 |. sd STR:RC, 0(CARG3)
781 |
782 |->vmeta_tgetb: // TMP0 = index
783 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
784 | settp TMP0, TISNUM
785 | sd TMP0, 0(CARG3)
786 |
787 |->vmeta_tgetv:
788 |1:
789 | load_got lj_meta_tget
790 | sd BASE, L->base
791 | sd PC, SAVE_PC
792 | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
793 |. move CARG1, L
794 | // Returns TValue * (finished) or NULL (metamethod).
795 | beqz CRET1, >3
796 |. daddiu TMP1, BASE, -FRAME_CONT
797 | ld CARG1, 0(CRET1)
798 | ins_next1
799 | sd CARG1, 0(RA)
800 | ins_next2
801 |
802 |3: // Call __index metamethod.
803 | // BASE = base, L->top = new base, stack = cont/func/t/k
804 | ld BASE, L->top
805 | sd PC, -24(BASE) // [cont|PC]
806 | dsubu PC, BASE, TMP1 // PC = frame delta + FRAME_CONT.
807 | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
808 | cleartp LFUNC:RB
809 | b ->vm_call_dispatch_f
810 |. li NARGS8:RC, 16 // 2 args for func(t, k).
811 |
812 |->vmeta_tgetr:
813 | load_got lj_tab_getinth
814 | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
815 |. nop
816 | // Returns cTValue * or NULL.
817 | beqz CRET1, ->BC_TGETR_Z
818 |. move CARG2, TISNIL // NULL: result is nil.
819 | b ->BC_TGETR_Z
820 |. ld CARG2, 0(CRET1)
821 |
822 |//-----------------------------------------------------------------------
823 |
824 |->vmeta_tsets1:
825 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
826 | li TMP0, LJ_TSTR
827 | settp STR:RC, TMP0
828 | b >1
829 |. sd STR:RC, 0(CARG3)
830 |
831 |->vmeta_tsets:
832 | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
833 | li TMP0, LJ_TTAB
834 | li TMP1, LJ_TSTR
835 | settp TAB:RB, TMP0
836 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
837 | sd TAB:RB, 0(CARG2)
838 | settp STR:RC, TMP1
839 | b >1
840 |. sd STR:RC, 0(CARG3)
841 |
842 |->vmeta_tsetb: // TMP0 = index
843 | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
844 | settp TMP0, TISNUM
845 | sd TMP0, 0(CARG3)
846 |
847 |->vmeta_tsetv:
848 |1:
849 | load_got lj_meta_tset
850 | sd BASE, L->base
851 | sd PC, SAVE_PC
852 | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
853 |. move CARG1, L
854 | // Returns TValue * (finished) or NULL (metamethod).
855 | beqz CRET1, >3
856 |. ld CARG1, 0(RA)
857 | // NOBARRIER: lj_meta_tset ensures the table is not black.
858 | ins_next1
859 | sd CARG1, 0(CRET1)
860 | ins_next2
861 |
862 |3: // Call __newindex metamethod.
863 | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
864 | daddiu TMP1, BASE, -FRAME_CONT
865 | ld BASE, L->top
866 | sd PC, -24(BASE) // [cont|PC]
867 | dsubu PC, BASE, TMP1
868 | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
869 | cleartp LFUNC:RB
870 | sd CARG1, 16(BASE) // Copy value to third argument.
871 | b ->vm_call_dispatch_f
872 |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
873 |
874 |->vmeta_tsetr:
875 | load_got lj_tab_setinth
876 | sd BASE, L->base
877 | sd PC, SAVE_PC
878 | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
879 |. move CARG1, L
880 | // Returns TValue *.
881 | b ->BC_TSETR_Z
882 |. nop
883 |
884 |//-- Comparison metamethods ---------------------------------------------
885 |
886 |->vmeta_comp:
887 | // RA/RD point to o1/o2.
888 | move CARG2, RA
889 | move CARG3, RD
890 | load_got lj_meta_comp
891 | daddiu PC, PC, -4 // Step PC back to the comparison ins itself.
892 | sd BASE, L->base
893 | sd PC, SAVE_PC
894 | decode_OP1 CARG4, INS
895 | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
896 |. move CARG1, L
897 | // Returns 0/1 or TValue * (metamethod).
898 |3:
899 | sltiu AT, CRET1, 2
900 | beqz AT, ->vmeta_binop
901 |. negu TMP2, CRET1 // FIX: restore delay-slot marker. TMP2 = 0/-1 branch mask.
902 |4:
903 | // Conditionally apply the biased jump offset: PC += mask & (RD*4 - BCBIAS_J*4).
904 | lhu RD, OFS_RD(PC)
905 | daddiu PC, PC, 4
906 | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
907 | sll RD, RD, 2
908 | addu RD, RD, TMP1
909 | and RD, RD, TMP2 // Offset or 0, depending on the mask.
910 | daddu PC, PC, RD
911 |->cont_nop:
912 | ins_next
913 |
914 |->cont_ra: // RA = resultptr
915 | lbu TMP1, -4+OFS_RA(PC) // Store result into the ins' RA slot.
916 | ld CRET1, 0(RA)
917 | sll TMP1, TMP1, 3
918 | daddu TMP1, BASE, TMP1
919 | b ->cont_nop
920 |. sd CRET1, 0(TMP1)
921 |
922 |->cont_condt: // RA = resultptr
923 | ld TMP0, 0(RA)
924 | gettp TMP0, TMP0
925 | sltiu AT, TMP0, LJ_TISTRUECOND
926 | b <4
927 |. negu TMP2, AT // Branch if result is true.
928 |
929 |->cont_condf: // RA = resultptr
930 | ld TMP0, 0(RA)
931 | gettp TMP0, TMP0
932 | sltiu AT, TMP0, LJ_TISTRUECOND
933 | b <4
934 |. addiu TMP2, AT, -1 // Branch if result is false.
935 |->vmeta_equal:
936 | // CARG1/CARG2 point to o1/o2. TMP0 is set to 0/1.
937 | load_got lj_meta_equal
938 | cleartp LFUNC:CARG3, CARG2
939 | cleartp LFUNC:CARG2, CARG1
940 | move CARG4, TMP0
941 | daddiu PC, PC, -4 // Step PC back to the comparison ins itself.
942 | sd BASE, L->base
943 | sd PC, SAVE_PC
944 | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
945 |. move CARG1, L
946 | // Returns 0/1 or TValue * (metamethod). Shares vmeta_comp's 0/1 handling.
947 | b <3
948 |. nop
949 |
950 |->vmeta_equal_cd:
951 |.if FFI
952 | load_got lj_meta_equal_cd
953 | move CARG2, INS
954 | daddiu PC, PC, -4
955 | sd BASE, L->base
956 | sd PC, SAVE_PC
957 | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
958 |. move CARG1, L
959 | // Returns 0/1 or TValue * (metamethod).
960 | b <3
961 |. nop
962 |.endif
963 |
964 |->vmeta_istype:
965 | load_got lj_meta_istype
966 | daddiu PC, PC, -4
967 | sd BASE, L->base
968 | srl CARG2, RA, 3 // Convert byte offsets back to slot numbers.
969 | srl CARG3, RD, 3
970 | sd PC, SAVE_PC
971 | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
972 |. move CARG1, L
973 | b ->cont_nop
974 |. nop
975 |
976 |//-- Arithmetic metamethods ---------------------------------------------
977 |
978 |->vmeta_unm:
979 | move RC, RB
980 |
981 |->vmeta_arith:
982 | load_got lj_meta_arith
983 | sd BASE, L->base
984 | move CARG2, RA
985 | sd PC, SAVE_PC
986 | move CARG3, RB
987 | move CARG4, RC
988 | decode_OP1 CARG5, INS // CARG5 == RB.
989 | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
990 |. move CARG1, L
991 | // Returns NULL (finished) or TValue * (metamethod).
992 | beqz CRET1, ->cont_nop
993 |. nop
994 |
995 | // Call metamethod for binary op.
996 |->vmeta_binop:
997 | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
998 | dsubu TMP1, CRET1, BASE
999 | sd PC, -24(CRET1) // [cont|PC]
1000 | move TMP2, BASE
1001 | daddiu PC, TMP1, FRAME_CONT
1002 | move BASE, CRET1
1003 | b ->vm_call_dispatch
1004 |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
1005 |
1006 |->vmeta_len:
1007 | // CARG2 already set by BC_LEN.
1008#if LJ_52
1009 | move MULTRES, CARG1 // Preserve CARG1 across the C call.
1010#endif
1011 | load_got lj_meta_len
1012 | sd BASE, L->base
1013 | sd PC, SAVE_PC
1014 | call_intern lj_meta_len // (lua_State *L, TValue *o)
1015 |. move CARG1, L
1016 | // Returns NULL (retry) or TValue * (metamethod base).
1017#if LJ_52
1018 | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
1019 |. nop
1020 | b ->BC_LEN_Z
1021 |. move CARG1, MULTRES
1022#else
1023 | b ->vmeta_binop // Binop call for compatibility.
1024 |. nop
1025#endif
1026 |
1027 |//-- Call metamethod ----------------------------------------------------
1028 |
1029 |->vmeta_call: // Resolve and call __call metamethod.
1030 | // TMP2 = old base, BASE = new base, RC = nargs*8
1031 | load_got lj_meta_call
1032 | sd TMP2, L->base // This is the callers base!
1033 | daddiu CARG2, BASE, -16
1034 | sd PC, SAVE_PC
1035 | daddu CARG3, BASE, RC
1036 | move MULTRES, NARGS8:RC // Preserve nargs*8 across the C call.
1037 | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
1038 |. move CARG1, L
1039 | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
1040 | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
1041 | cleartp LFUNC:RB
1042 | ins_call
1043 |
1044 |->vmeta_callt: // Resolve __call for BC_CALLT.
1045 | // BASE = old base, RA = new base, RC = nargs*8
1046 | load_got lj_meta_call
1047 | sd BASE, L->base
1048 | daddiu CARG2, RA, -16
1049 | sd PC, SAVE_PC
1050 | daddu CARG3, RA, RC
1051 | move MULTRES, NARGS8:RC
1052 | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
1053 |. move CARG1, L
1054 | ld RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
1055 | ld TMP1, FRAME_PC(BASE)
1056 | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
1057 | b ->BC_CALLT_Z
1058 |. cleartp LFUNC:CARG3, RB
1059 |
1060 |//-- Argument coercion for 'for' statement ------------------------------
1061 |
1062 |->vmeta_for:
1063 | load_got lj_meta_for
1064 | sd BASE, L->base
1065 | move CARG2, RA
1066 | sd PC, SAVE_PC
1067 | move MULTRES, INS // Preserve INS across the C call.
1068 | call_intern lj_meta_for // (lua_State *L, TValue *base)
1069 |. move CARG1, L
1070 |.if JIT
1071 | decode_OP1 TMP0, MULTRES
1072 | li AT, BC_JFORI
1073 |.endif
1074 | decode_RA8a RA, MULTRES
1075 | decode_RD8a RD, MULTRES
1076 | decode_RA8b RA
1077 |.if JIT
1078 | beq TMP0, AT, =>BC_JFORI // Re-dispatch on JFORI vs. FORI.
1079 |. decode_RD8b RD
1080 | b =>BC_FORI
1081 |. nop
1082 |.else
1083 | b =>BC_FORI
1084 |. decode_RD8b RD
1085 |.endif
1086 |
1087 |//-----------------------------------------------------------------------
1088 |//-- Fast functions -----------------------------------------------------
1089 |//-----------------------------------------------------------------------
1090 |
1091 |.macro .ffunc, name // Fast function entry, no arg checks.
1092 |->ff_ .. name:
1093 |.endmacro
1094 |
1095 |.macro .ffunc_1, name // Fast function with at least 1 arg; CARG1 = arg1.
1096 |->ff_ .. name:
1097 | beqz NARGS8:RC, ->fff_fallback
1098 |. ld CARG1, 0(BASE)
1099 |.endmacro
1100 |
1101 |.macro .ffunc_2, name // Fast function with at least 2 args; CARG1/CARG2 = arg1/arg2.
1102 |->ff_ .. name:
1103 | sltiu AT, NARGS8:RC, 16 // Need nargs*8 >= 16.
1104 | ld CARG1, 0(BASE)
1105 | bnez AT, ->fff_fallback
1106 |. ld CARG2, 8(BASE)
1107 |.endmacro
1108 |
1109 |.macro .ffunc_n, name // Caveat: has delay slot!
1110 |->ff_ .. name:
1111 | ld CARG1, 0(BASE)
1112 | beqz NARGS8:RC, ->fff_fallback
1113 | // Either ldc1 or the 1st instruction of checknum is in the delay slot.
1114 | .FPU ldc1 FARG1, 0(BASE)
1115 | checknum CARG1, ->fff_fallback
1116 |.endmacro
1117 |
1118 |.macro .ffunc_nn, name // Caveat: has delay slot!
1119 |->ff_ .. name:
1120 | ld CARG1, 0(BASE)
1121 | sltiu AT, NARGS8:RC, 16
1122 | ld CARG2, 8(BASE)
1123 | bnez AT, ->fff_fallback
1124 |. gettp TMP0, CARG1
1125 | gettp TMP1, CARG2
1126 | sltiu TMP0, TMP0, LJ_TISNUM // Both tags must be numeric.
1127 | sltiu TMP1, TMP1, LJ_TISNUM
1128 | .FPU ldc1 FARG1, 0(BASE)
1129 | and TMP0, TMP0, TMP1
1130 | .FPU ldc1 FARG2, 8(BASE)
1131 | beqz TMP0, ->fff_fallback
1132 |.endmacro
1133 |
1134 |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
1135 |// MIPSR6: no delay slot, but a forbidden slot.
1136 |.macro ffgccheck
1137 | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
1138 | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
1139 | dsubu AT, TMP0, TMP1
1140 |.if MIPSR6
1141 | bgezalc AT, ->fff_gcstep // Call GC step if total >= threshold.
1142 |.else
1143 | bgezal AT, ->fff_gcstep
1144 |.endif
1145 |.endmacro
1146 |
1147 |//-- Base library: checks -----------------------------------------------
1148 |.ffunc_1 assert
1149 | gettp AT, CARG1
1150 | sltiu AT, AT, LJ_TISTRUECOND // Truthy first argument?
1151 | beqz AT, ->fff_fallback
1152 |. daddiu RA, BASE, -16
1153 | ld PC, FRAME_PC(BASE)
1154 | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
1155 | daddu TMP2, RA, RD
1156 | daddiu TMP1, BASE, 8
1157 | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
1158 |. sd CARG1, 0(RA)
1159 |1: // Shift remaining args down by two slots to form the results.
1160 | ld CRET1, 0(TMP1)
1161 | sd CRET1, -16(TMP1)
1162 | bne TMP1, TMP2, <1
1163 |. daddiu TMP1, TMP1, 8
1164 | b ->fff_res
1165 |. nop
1166 |
1167 |.ffunc_1 type
1168 | gettp TMP0, CARG1
1169 | sltu TMP1, TISNUM, TMP0 // Non-number tag?
1170 | not TMP2, TMP0
1171 | li TMP3, ~LJ_TISNUM
1172 |.if MIPSR6
1173 | selnez TMP2, TMP2, TMP1
1174 | seleqz TMP3, TMP3, TMP1
1175 | or TMP2, TMP2, TMP3
1176 |.else
1177 | movz TMP2, TMP3, TMP1 // Numbers use the ~LJ_TISNUM slot.
1178 |.endif
1179 | dsll TMP2, TMP2, 3
1180 | daddu TMP2, CFUNC:RB, TMP2 // Index type-name upvalue by tag.
1181 | b ->fff_restv
1182 |. ld CARG1, CFUNC:TMP2->upvalue
1183 |
1184 |//-- Base library: getters and setters ---------------------------------
1185 |
1186 |.ffunc_1 getmetatable
1187 | gettp TMP2, CARG1
1188 | daddiu TMP0, TMP2, -LJ_TTAB
1189 | daddiu TMP1, TMP2, -LJ_TUDATA
1190 |.if MIPSR6
1191 | selnez TMP0, TMP1, TMP0
1192 |.else
1193 | movn TMP0, TMP1, TMP0 // TMP0 == 0 iff table or userdata.
1194 |.endif
1195 | bnez TMP0, >6
1196 |. cleartp TAB:CARG1
1197 |1: // Field metatable must be at same offset for GCtab and GCudata!
1198 | ld TAB:RB, TAB:CARG1->metatable
1199 |2:
1200 | ld STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
1201 | beqz TAB:RB, ->fff_restv
1202 |. li CARG1, LJ_TNIL
1203 | lw TMP0, TAB:RB->hmask
1204 | lw TMP1, STR:RC->sid
1205 | ld NODE:TMP2, TAB:RB->node
1206 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
1207 | dsll TMP0, TMP1, 5
1208 | dsll TMP1, TMP1, 3
1209 | dsubu TMP1, TMP0, TMP1
1210 | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
1211 | li CARG4, LJ_TSTR
1212 | settp STR:RC, CARG4 // Tagged key to look for.
1213 |3: // Rearranged logic, because we expect _not_ to find the key.
1214 | ld TMP0, NODE:TMP2->key
1215 | ld CARG1, NODE:TMP2->val
1216 | ld NODE:TMP2, NODE:TMP2->next
1217 | beq RC, TMP0, >5
1218 |. li AT, LJ_TTAB
1219 | bnez NODE:TMP2, <3
1220 |. nop
1221 |4:
1222 | move CARG1, RB
1223 | b ->fff_restv // Not found, keep default result.
1224 |. settp CARG1, AT
1225 |5:
1226 | bne CARG1, TISNIL, ->fff_restv // __metatable field found: return it.
1227 |. nop
1228 | b <4 // Ditto for nil value.
1229 |. nop
1230 |
1231 |6: // Not a table or userdata: use the per-type base metatable.
1232 | sltiu AT, TMP2, LJ_TISNUM
1233 |.if MIPSR6
1234 | selnez TMP0, TISNUM, AT
1235 | seleqz AT, TMP2, AT
1236 | or TMP2, TMP0, AT
1237 |.else
1238 | movn TMP2, TISNUM, AT // All numbers share the TISNUM slot.
1239 |.endif
1240 | dsll TMP2, TMP2, 3
1241 | dsubu TMP0, DISPATCH, TMP2
1242 | b <2
1243 |. ld TAB:RB, DISPATCH_GL(gcroot[GCROOT_BASEMT])-8(TMP0)
1244 |
1245 |.ffunc_2 setmetatable
1246 | // Fast path: no mt for table yet and not clearing the mt.
1247 | checktp TMP1, CARG1, -LJ_TTAB, ->fff_fallback
1248 | gettp TMP3, CARG2
1249 | ld TAB:TMP0, TAB:TMP1->metatable
1250 | lbu TMP2, TAB:TMP1->marked
1251 | daddiu AT, TMP3, -LJ_TTAB
1252 | cleartp TAB:CARG2
1253 | or AT, AT, TAB:TMP0 // Fallback if arg2 not a table or mt already set.
1254 | bnez AT, ->fff_fallback
1255 |. andi AT, TMP2, LJ_GC_BLACK // isblack(table)
1256 | beqz AT, ->fff_restv
1257 |. sd TAB:CARG2, TAB:TMP1->metatable
1258 | barrierback TAB:TMP1, TMP2, TMP0, ->fff_restv
1259 |
1260 |.ffunc rawget
1261 | ld CARG2, 0(BASE)
1262 | sltiu AT, NARGS8:RC, 16 // Need at least 2 args.
1263 | load_got lj_tab_get
1264 | gettp TMP0, CARG2
1265 | cleartp CARG2
1266 | daddiu TMP0, TMP0, -LJ_TTAB
1267 | or AT, AT, TMP0
1268 | bnez AT, ->fff_fallback
1269 |. daddiu CARG3, BASE, 8
1270 | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1271 |. move CARG1, L
1272 | b ->fff_restv
1273 |. ld CARG1, 0(CRET1)
1274 |
1275 |//-- Base library: conversions ------------------------------------------
1276 |
1277 |.ffunc tonumber
1278 | // Only handles the number case inline (without a base argument).
1279 | ld CARG1, 0(BASE)
1280 | xori AT, NARGS8:RC, 8 // Exactly one number argument.
1281 | gettp TMP1, CARG1
1282 | sltu TMP0, TISNUM, TMP1 // Non-zero if tag > TISNUM (not a number).
1283 | or AT, AT, TMP0
1284 | bnez AT, ->fff_fallback
1285 |. nop
1286 | b ->fff_restv
1287 |. nop
1288 |
1289 |.ffunc_1 tostring
1290 | // Only handles the string or number case inline.
1291 | gettp TMP0, CARG1
1292 | daddiu AT, TMP0, -LJ_TSTR
1293 | // A __tostring method in the string base metatable is ignored.
1294 | beqz AT, ->fff_restv // String key?
1295 | // Handle numbers inline, unless a number base metatable is present.
1296 |. ld TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
1297 | sltu TMP0, TISNUM, TMP0
1298 | or TMP0, TMP0, TMP1
1299 | bnez TMP0, ->fff_fallback
1300 |. sd BASE, L->base // Add frame since C call can throw.
1301 |.if MIPSR6
1302 | sd PC, SAVE_PC // Redundant (but a defined value).
1303 | ffgccheck
1304 |.else
1305 | ffgccheck
1306 |. sd PC, SAVE_PC // Redundant (but a defined value).
1307 |.endif
1308 | load_got lj_strfmt_number
1309 | move CARG1, L
1310 | call_intern lj_strfmt_number // (lua_State *L, cTValue *o)
1311 |. move CARG2, BASE
1312 | // Returns GCstr *.
1313 | li AT, LJ_TSTR
1314 | settp CRET1, AT // Tag the returned string.
1315 | b ->fff_restv
1316 |. move CARG1, CRET1
1317 |
1318 |//-- Base library: iterators -------------------------------------------
1319 |
1320 |.ffunc_1 next
1321 | checktp CARG2, CARG1, -LJ_TTAB, ->fff_fallback
1322 | daddu TMP2, BASE, NARGS8:RC
1323 | sd TISNIL, 0(TMP2) // Set missing 2nd arg to nil.
1324 | ld PC, FRAME_PC(BASE)
1325 | load_got lj_tab_next
1326 | sd BASE, L->base // Add frame since C call can throw.
1327 | sd BASE, L->top // Dummy frame length is ok.
1328 | daddiu CARG3, BASE, 8
1329 | sd PC, SAVE_PC
1330 | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
1331 |. move CARG1, L
1332 | // Returns 0 at end of traversal.
1333 | beqz CRET1, ->fff_restv // End of traversal: return nil.
1334 |. move CARG1, TISNIL
1335 | ld TMP0, 8(BASE) // Copy key and value to results.
1336 | daddiu RA, BASE, -16
1337 | ld TMP2, 16(BASE)
1338 | sd TMP0, 0(RA)
1339 | sd TMP2, 8(RA)
1340 | b ->fff_res
1341 |. li RD, (2+1)*8
1342 |
1343 |.ffunc_1 pairs
1344 | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
1345 | ld PC, FRAME_PC(BASE)
1346#if LJ_52
1347 | ld TAB:TMP2, TAB:TMP1->metatable
1348 | ld TMP0, CFUNC:RB->upvalue[0]
1349 | bnez TAB:TMP2, ->fff_fallback // 5.2: respect a __pairs metatable.
1350#else
1351 | ld TMP0, CFUNC:RB->upvalue[0]
1352#endif
1353 |. daddiu RA, BASE, -16
1354 | sd TISNIL, 0(BASE)
1355 | sd CARG1, -8(BASE)
1356 | sd TMP0, 0(RA) // Return next, t, nil.
1357 | b ->fff_res
1358 |. li RD, (3+1)*8
1359 |
1360 |.ffunc_2 ipairs_aux
1361 | checktab CARG1, ->fff_fallback
1362 | checkint CARG2, ->fff_fallback
1363 |. lw TMP0, TAB:CARG1->asize
1364 | ld TMP1, TAB:CARG1->array
1365 | ld PC, FRAME_PC(BASE)
1366 | sextw TMP2, CARG2
1367 | addiu TMP2, TMP2, 1 // Next index = key+1.
1368 | sltu AT, TMP2, TMP0
1369 | daddiu RA, BASE, -16
1370 | zextw TMP0, TMP2
1371 | settp TMP0, TISNUM
1372 | beqz AT, >2 // Not in array part?
1373 |. sd TMP0, 0(RA)
1374 | dsll TMP3, TMP2, 3
1375 | daddu TMP3, TMP1, TMP3
1376 | ld TMP1, 0(TMP3)
1377 |1:
1378 | beq TMP1, TISNIL, ->fff_res // End of iteration, return 0 results.
1379 |. li RD, (0+1)*8
1380 | sd TMP1, -8(BASE)
1381 | b ->fff_res
1382 |. li RD, (2+1)*8
1383 |2: // Check for empty hash part first. Otherwise call C function.
1384 | lw TMP0, TAB:CARG1->hmask
1385 | load_got lj_tab_getinth
1386 | beqz TMP0, ->fff_res
1387 |. li RD, (0+1)*8
1388 | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
1389 |. move CARG2, TMP2
1390 | // Returns cTValue * or NULL.
1391 | beqz CRET1, ->fff_res
1392 |. li RD, (0+1)*8
1393 | b <1
1394 |. ld TMP1, 0(CRET1)
1395 |
1396 |.ffunc_1 ipairs
1397 | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
1398 | ld PC, FRAME_PC(BASE)
1399#if LJ_52
1400 | ld TAB:TMP2, TAB:TMP1->metatable
1401 | ld CFUNC:TMP0, CFUNC:RB->upvalue[0]
1402 | bnez TAB:TMP2, ->fff_fallback // 5.2: respect an __ipairs metatable.
1403#else
1404 | ld TMP0, CFUNC:RB->upvalue[0]
1405#endif
1406 | daddiu RA, BASE, -16
1407 | dsll AT, TISNUM, 47 // AT = tagged integer 0.
1408 | sd CARG1, -8(BASE)
1409 | sd AT, 0(BASE)
1410 | sd CFUNC:TMP0, 0(RA) // Return ipairs_aux, t, 0.
1411 | b ->fff_res
1412 |. li RD, (3+1)*8
1413 |
1414 |//-- Base library: catch errors ----------------------------------------
1415 |
1416 |.ffunc pcall
1417 | daddiu NARGS8:RC, NARGS8:RC, -8 // Consume the function argument.
1418 | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
1419 | bltz NARGS8:RC, ->fff_fallback
1420 |. move TMP2, BASE
1421 | daddiu BASE, BASE, 16
1422 | // Remember active hook before pcall.
1423 | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
1424 | andi TMP3, TMP3, 1
1425 | daddiu PC, TMP3, 16+FRAME_PCALL // Encode hook bit in frame link.
1426 | beqz NARGS8:RC, ->vm_call_dispatch
1427 |1:
1428 |. daddu TMP0, BASE, NARGS8:RC
1429 |2: // Shift args up by one slot, top to bottom.
1430 | ld TMP1, -16(TMP0)
1431 | sd TMP1, -8(TMP0)
1432 | daddiu TMP0, TMP0, -8
1433 | bne TMP0, BASE, <2
1434 |. nop
1435 | b ->vm_call_dispatch
1436 |. nop
1437 |
1438 |.ffunc xpcall
1439 | daddiu NARGS8:TMP0, NARGS8:RC, -16 // Consume function + handler args.
1440 | ld CARG1, 0(BASE)
1441 | ld CARG2, 8(BASE)
1442 | bltz NARGS8:TMP0, ->fff_fallback
1443 |. lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
1444 | gettp AT, CARG2
1445 | daddiu AT, AT, -LJ_TFUNC
1446 | bnez AT, ->fff_fallback // Traceback must be a function.
1447 |. move TMP2, BASE
1448 | move NARGS8:RC, NARGS8:TMP0
1449 | daddiu BASE, BASE, 24
1450 | // Remember active hook before pcall.
1451 | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT // FIX: hookmask was loaded into TMP1 above, not TMP3.
1452 | sd CARG2, 0(TMP2) // Swap function and traceback.
1453 | andi TMP3, TMP3, 1
1454 | sd CARG1, 8(TMP2)
1455 | beqz NARGS8:RC, ->vm_call_dispatch
1456 |. daddiu PC, TMP3, 24+FRAME_PCALL // Encode hook bit in frame link.
1457 | b <1
1458 |. nop
1459 |
1460 |//-- Coroutine library --------------------------------------------------
1461 |
1462 |.macro coroutine_resume_wrap, resume
1463 |.if resume
1464 |.ffunc_1 coroutine_resume
1465 | checktp CARG1, CARG1, -LJ_TTHREAD, ->fff_fallback
1466 |.else
1467 |.ffunc coroutine_wrap_aux
1468 | ld L:CARG1, CFUNC:RB->upvalue[0].gcr
1469 | cleartp L:CARG1
1470 |.endif
1471 | lbu TMP0, L:CARG1->status
1472 | ld TMP1, L:CARG1->cframe
1473 | ld CARG2, L:CARG1->top
1474 | ld TMP2, L:CARG1->base
1475 | addiu AT, TMP0, -LUA_YIELD
1476 | daddu CARG3, CARG2, TMP0
1477 | daddiu TMP3, CARG2, 8
1478 |.if MIPSR6
1479 | seleqz CARG2, CARG2, AT
1480 | selnez TMP3, TMP3, AT
1481 | bgtz AT, ->fff_fallback // st > LUA_YIELD?
1482 |. or CARG2, TMP3, CARG2
1483 |.else
1484 | bgtz AT, ->fff_fallback // st > LUA_YIELD?
1485 |. movn CARG2, TMP3, AT
1486 |.endif
1487 | xor TMP2, TMP2, CARG3
1488 | bnez TMP1, ->fff_fallback // cframe != 0?
1489 |. or AT, TMP2, TMP0
1490 | ld TMP0, L:CARG1->maxstack
1491 | beqz AT, ->fff_fallback // base == top && st == 0?
1492 |. ld PC, FRAME_PC(BASE)
1493 | daddu TMP2, CARG2, NARGS8:RC
1494 | sltu AT, TMP0, TMP2
1495 | bnez AT, ->fff_fallback // Stack overflow?
1496 |. sd PC, SAVE_PC
1497 | sd BASE, L->base
1498 |1:
1499 |.if resume
1500 | daddiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
1501 | daddiu NARGS8:RC, NARGS8:RC, -8
1502 | daddiu TMP2, TMP2, -8
1503 |.endif
1504 | sd TMP2, L:CARG1->top
1505 | daddu TMP1, BASE, NARGS8:RC
1506 | move CARG3, CARG2
1507 | sd BASE, L->top
1508 |2: // Move args to coroutine.
1509 | ld CRET1, 0(BASE)
1510 | sltu AT, BASE, TMP1
1511 | beqz AT, >3
1512 |. daddiu BASE, BASE, 8
1513 | sd CRET1, 0(CARG3)
1514 | b <2
1515 |. daddiu CARG3, CARG3, 8
1516 |3:
1517 | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
1518 |. move L:RA, L:CARG1
1519 | // Returns thread status.
1520 |4:
1521 | ld TMP2, L:RA->base
1522 | sltiu AT, CRET1, LUA_YIELD+1
1523 | ld TMP3, L:RA->top
1524 | li_vmstate INTERP
1525 | ld BASE, L->base
1526 | sd L, DISPATCH_GL(cur_L)(DISPATCH)
1527 | st_vmstate
1528 | beqz AT, >8
1529 |. dsubu RD, TMP3, TMP2
1530 | ld TMP0, L->maxstack
1531 | beqz RD, >6 // No results?
1532 |. daddu TMP1, BASE, RD
1533 | sltu AT, TMP0, TMP1
1534 | bnez AT, >9 // Need to grow stack?
1535 |. daddu TMP3, TMP2, RD
1536 | sd TMP2, L:RA->top // Clear coroutine stack.
1537 | move TMP1, BASE
1538 |5: // Move results from coroutine.
1539 | ld CRET1, 0(TMP2)
1540 | daddiu TMP2, TMP2, 8
1541 | sltu AT, TMP2, TMP3
1542 | sd CRET1, 0(TMP1)
1543 | bnez AT, <5
1544 |. daddiu TMP1, TMP1, 8
1545 |6:
1546 | andi TMP0, PC, FRAME_TYPE
1547 |.if resume
1548 | mov_true TMP1
1549 | daddiu RA, BASE, -8
1550 | sd TMP1, -8(BASE) // Prepend true to results.
1551 | daddiu RD, RD, 16
1552 |.else
1553 | move RA, BASE
1554 | daddiu RD, RD, 8
1555 |.endif
1556 |7:
1557 | sd PC, SAVE_PC
1558 | beqz TMP0, ->BC_RET_Z
1559 |. move MULTRES, RD
1560 | b ->vm_return
1561 |. nop
1562 |
1563 |8: // Coroutine returned with error (at co->top-1).
1564 |.if resume
1565 | daddiu TMP3, TMP3, -8
1566 | mov_false TMP1
1567 | ld CRET1, 0(TMP3)
1568 | sd TMP3, L:RA->top // Remove error from coroutine stack.
1569 | li RD, (2+1)*8
1570 | sd TMP1, -8(BASE) // Prepend false to results.
1571 | daddiu RA, BASE, -8
1572 | sd CRET1, 0(BASE) // Copy error message.
1573 | b <7
1574 |. andi TMP0, PC, FRAME_TYPE
1575 |.else
1576 | load_got lj_ffh_coroutine_wrap_err
1577 | move CARG2, L:RA
1578 | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
1579 |. move CARG1, L
1580 |.endif
1581 |
1582 |9: // Handle stack expansion on return from yield.
1583 | load_got lj_state_growstack
1584 | srl CARG2, RD, 3
1585 | call_intern lj_state_growstack // (lua_State *L, int n)
1586 |. move CARG1, L
1587 | b <4
1588 |. li CRET1, 0
1589 |.endmacro
1590 |
|// Instantiate the combined resume/wrap fast-function template defined above:
|// resume==1 emits coroutine.resume, resume==0 emits the coroutine.wrap closure.
1591 | coroutine_resume_wrap 1 // coroutine.resume
1592 | coroutine_resume_wrap 0 // coroutine.wrap
1593 |
|// coroutine.yield fast path. Only yieldable when the current C frame was
|// entered via lua_resume (CFRAME_RESUME bit set in L->cframe); otherwise
|// fall back. Saves base/top, clears the cframe, sets L->status=LUA_YIELD
|// and unwinds back to the resume caller.
1594 |.ffunc coroutine_yield
1595 | ld TMP0, L->cframe
1596 | daddu TMP1, BASE, NARGS8:RC // TMP1 = top of yielded args.
1597 | sd BASE, L->base
1598 | andi TMP0, TMP0, CFRAME_RESUME
1599 | sd TMP1, L->top
1600 | beqz TMP0, ->fff_fallback // Not a resumable C frame.
1601 |. li CRET1, LUA_YIELD
1602 | sd r0, L->cframe
1603 | b ->vm_leave_unw
1604 |. sb CRET1, L->status // Delay slot: mark thread as yielded.
1605 |
1606 |//-- Math library -------------------------------------------------------
1607 |
|// math.abs fast function. Integer path negates via sign-extend/xor/sub;
|// the INT32_MIN overflow case (result still negative after abs) is turned
|// into the double 2^31. Number path clears the sign bit with dextm.
|// Falls through into the shared fast-function result dispatcher.
1608 |.ffunc_1 math_abs
1609 | gettp CARG2, CARG1
1610 | daddiu AT, CARG2, -LJ_TISNUM
1611 | bnez AT, >1 // Not an integer? Try number path.
1612 |. sextw TMP1, CARG1
1613 | sra TMP0, TMP1, 31 // Extract sign.
1614 | xor TMP1, TMP1, TMP0
1615 | dsubu CARG1, TMP1, TMP0 // abs(x) = (x ^ (x>>31)) - (x>>31).
1616 | dsll TMP3, CARG1, 32
1617 | bgez TMP3, ->fff_restv // No overflow (abs fits in int32)?
1618 |. settp CARG1, TISNUM
1619 | li CARG1, 0x41e0 // 2^31 as a double.
1620 | b ->fff_restv
1621 |. dsll CARG1, CARG1, 48
1622 |1:
1623 | sltiu AT, CARG2, LJ_TISNUM // Is it a number at all?
1624 | beqz AT, ->fff_fallback
1625 |. dextm CARG1, CARG1, 0, 30 // Delay slot: clear sign bit of double.
1626 |// fallthrough
1627 |
|// Store a single TValue result and return from a fast function.
1628 |->fff_restv:
1629 | // CARG1 = TValue result.
1630 | ld PC, FRAME_PC(BASE)
1631 | daddiu RA, BASE, -16
1632 | sd CARG1, -16(BASE)
1633 |->fff_res1:
1634 | // RA = results, PC = return.
1635 | li RD, (1+1)*8
1636 |->fff_res:
1637 | // RA = results, RD = (nresults+1)*8, PC = return.
1638 | andi TMP0, PC, FRAME_TYPE
1639 | bnez TMP0, ->vm_return // Non-Lua frame? Use generic return.
1640 |. move MULTRES, RD
1641 | lw INS, -4(PC) // Re-decode the CALL instruction.
1642 | decode_RB8a RB, INS
1643 | decode_RB8b RB
1644 |5:
1645 | sltu AT, RD, RB
1646 | bnez AT, >6 // More results expected?
1647 |. decode_RA8a TMP0, INS
1648 | decode_RA8b TMP0
1649 | ins_next1
1650 | // Adjust BASE. KBASE is assumed to be set for the calling frame.
1651 | dsubu BASE, RA, TMP0
1652 | ins_next2
1653 |
1654 |6: // Fill up results with nil.
1655 | daddu TMP1, RA, RD
1656 | daddiu RD, RD, 8
1657 | b <5
1658 |. sd TISNIL, -8(TMP1)
1659 |
|// Template for 1-argument math.* functions backed by an external C call
|// (e.g. log10, exp, sin). .ffunc_n checks/loads the number argument.
1660 |.macro math_extern, func
1661 | .ffunc_n math_ .. func
1662 | load_got func
1663 | call_extern
1664 |. nop
1665 | b ->fff_resn
1666 |. nop
1667 |.endmacro
1668 |
|// Same template for 2-argument functions (pow, atan2, fmod).
1669 |.macro math_extern2, func
1670 | .ffunc_nn math_ .. func
1671 |. load_got func
1672 | call_extern
1673 |. nop
1674 | b ->fff_resn
1675 |. nop
1676 |.endmacro
1677 |
1678 |// TODO: Return integer type if result is integer (own sf implementation).
|// math.floor/ceil: integers are returned unchanged via ->fff_restv;
|// numbers go to the in-VM rounding helper (FPU) or the C function (soft-float).
1679 |.macro math_round, func
1680 |->ff_math_ .. func:
1681 | ld CARG1, 0(BASE)
1682 | beqz NARGS8:RC, ->fff_fallback // Need at least 1 argument.
1683 |. gettp TMP0, CARG1
1684 | beq TMP0, TISNUM, ->fff_restv // Integer? Already rounded.
1685 |. sltu AT, TMP0, TISNUM
1686 | beqz AT, ->fff_fallback // Not a number? Fall back.
1687 |.if FPU
1688 |. ldc1 FARG1, 0(BASE)
1689 | bal ->vm_ .. func
1690 |. nop
1691 |.else
1692 |. load_got func
1693 | call_extern
1694 |. nop
1695 |.endif
1696 | b ->fff_resn
1697 |. nop
1698 |.endmacro
1699 |
1700 | math_round floor
1701 | math_round ceil
1702 |
|// math.log fast function. Only the 1-argument form is handled here;
|// log(x, base) falls back to the C handler.
1703 |.ffunc math_log
1704 | li AT, 8
1705 | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
1706 |. ld CARG1, 0(BASE)
1707 | checknum CARG1, ->fff_fallback
1708 |. load_got log
1709 |.if FPU
1710 | call_extern
1711 |. ldc1 FARG1, 0(BASE) // Delay slot: load FP argument.
1712 |.else
1713 | call_extern
1714 |. nop
1715 |.endif
1716 | b ->fff_resn
1717 |. nop
1718 |
|// Instantiate the external-call templates for the remaining math.* functions.
1719 | math_extern log10
1720 | math_extern exp
1721 | math_extern sin
1722 | math_extern cos
1723 | math_extern tan
1724 | math_extern asin
1725 | math_extern acos
1726 | math_extern atan
1727 | math_extern sinh
1728 | math_extern cosh
1729 | math_extern tanh
1730 | math_extern2 pow
1731 | math_extern2 atan2
1732 | math_extern2 fmod
1733 |
|// math.sqrt: single hardware instruction with FPU, external call otherwise.
1734 |.if FPU
1735 |.ffunc_n math_sqrt
1736 |. sqrt.d FRET1, FARG1
1737 |// fallthrough to ->fff_resn
1738 |.else
1739 | math_extern sqrt
1740 |.endif
1741 |
|// Store a number result (FRET1 or CRET1) at BASE-16 and return 1 result.
1742 |->fff_resn:
1743 | ld PC, FRAME_PC(BASE)
1744 | daddiu RA, BASE, -16
1745 | b ->fff_res1
1746 |.if FPU
1747 |. sdc1 FRET1, 0(RA) // Delay slot: store FP result.
1748 |.else
1749 |. sd CRET1, 0(RA) // Delay slot: store soft-float result.
1750 |.endif
1751 |
1752 |
|// math.ldexp(x, n): number x, integer n; calls C ldexp().
1753 |.ffunc_2 math_ldexp
1754 | checknum CARG1, ->fff_fallback
1755 | checkint CARG2, ->fff_fallback
1756 |. load_got ldexp
1757 | .FPU ldc1 FARG1, 0(BASE)
1758 | call_extern
1759 |. lw CARG2, 8+LO(BASE) // Delay slot: exponent (int32 part of arg 2).
1760 | b ->fff_resn
1761 |. nop
1762 |
|// math.frexp(x): calls C frexp() with the exponent out-parameter pointing
|// at the global tmptv slot, then returns (mantissa, exponent) as 2 results.
1763 |.ffunc_n math_frexp
1764 | load_got frexp
1765 | ld PC, FRAME_PC(BASE)
1766 | call_extern
1767 |. daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv) // &int exponent.
1768 | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH) // Reload exponent written by frexp.
1769 | daddiu RA, BASE, -16
1770 |.if FPU
1771 | mtc1 TMP1, FARG2
1772 | sdc1 FRET1, 0(RA) // Result 1: mantissa.
1773 | cvt.d.w FARG2, FARG2
1774 | sdc1 FARG2, 8(RA) // Result 2: exponent as double.
1775 |.else
1776 | sd CRET1, 0(RA)
1777 | zextw TMP1, TMP1
1778 | settp TMP1, TISNUM // Result 2: exponent as integer TValue.
1779 | sd TMP1, 8(RA)
1780 |.endif
1781 | b ->fff_res
1782 |. li RD, (2+1)*8
1783 |
|// math.modf(x): C modf() writes the integral part directly into the first
|// result slot (BASE-16); the fractional part (return value) goes to BASE-8.
1784 |.ffunc_n math_modf
1785 | load_got modf
1786 | ld PC, FRAME_PC(BASE)
1787 | call_extern
1788 |. daddiu CARG2, BASE, -16 // &double integral-part out-parameter.
1789 | daddiu RA, BASE, -16
1790 |.if FPU
1791 | sdc1 FRET1, -8(BASE)
1792 |.else
1793 | sd CRET1, -8(BASE)
1794 |.endif
1795 | b ->fff_res
1796 |. li RD, (2+1)*8
1797 |
|// Variadic math.min/math.max template. Iterates over all arguments,
|// folding with a conditional-move (pre-R6: intins) or a select pair
|// (R6: intins/intinsc) for integers and fpins (or a compare+movf.d /
|// sfcmp helper) for numbers. Switches from the integer loop (1:) to the
|// number loop (6:) on the first non-integer argument.
1798 |.macro math_minmax, name, intins, intinsc, fpins
1799 | .ffunc_1 name
1800 | daddu TMP3, BASE, NARGS8:RC // TMP3 = end of arguments.
1801 | checkint CARG1, >5
1802 |. daddiu TMP2, BASE, 8 // TMP2 = next argument slot.
1803 |1: // Handle integers.
1804 | beq TMP2, TMP3, ->fff_restv // All args consumed? Return CARG1.
1805 |. ld CARG2, 0(TMP2)
1806 | checkint CARG2, >3
1807 |. sextw CARG1, CARG1
1808 | lw CARG2, LO(TMP2)
1809 |. slt AT, CARG1, CARG2
1810 |.if MIPSR6
1811 | intins TMP1, CARG2, AT
1812 | intinsc CARG1, CARG1, AT
1813 | or CARG1, CARG1, TMP1 // R6 select pair replaces movn/movz.
1814 |.else
1815 | intins CARG1, CARG2, AT
1816 |.endif
1817 | daddiu TMP2, TMP2, 8
1818 | zextw CARG1, CARG1
1819 | b <1
1820 |. settp CARG1, TISNUM // Delay slot: re-tag accumulator.
1821 |
1822 |3: // Convert intermediate result to number and continue with number loop.
1823 | checknum CARG2, ->fff_fallback
1824 |.if FPU
1825 |. mtc1 CARG1, FRET1
1826 | cvt.d.w FRET1, FRET1
1827 | b >7
1828 |. ldc1 FARG1, 0(TMP2)
1829 |.else
1830 |. nop
1831 | bal ->vm_sfi2d_1 // Soft-float int->double of accumulator.
1832 |. nop
1833 | b >7
1834 |. nop
1835 |.endif
1836 |
1837 |5:
1838 | .FPU ldc1 FRET1, 0(BASE)
1839 | checknum CARG1, ->fff_fallback
1840 |6: // Handle numbers.
1841 |. ld CARG2, 0(TMP2)
1842 | beq TMP2, TMP3, ->fff_resn // All args consumed? Return FRET1/CRET1.
1843 |.if FPU
1844 | ldc1 FARG1, 0(TMP2)
1845 |.else
1846 | move CRET1, CARG1
1847 |.endif
1848 | checknum CARG2, >8
1849 |. nop
1850 |7:
1851 |.if FPU
1852 |.if MIPSR6
1853 | fpins FRET1, FRET1, FARG1 // R6 min.d/max.d does it all.
1854 |.else
1855 |.if fpins // ismax
1856 | c.olt.d FARG1, FRET1
1857 |.else
1858 | c.olt.d FRET1, FARG1
1859 |.endif
1860 | movf.d FRET1, FARG1 // Take other operand if compare was false.
1861 |.endif
1862 |.else
1863 |.if fpins // ismax
1864 | bal ->vm_sfcmpogt
1865 |.else
1866 | bal ->vm_sfcmpolt
1867 |.endif
1868 |. nop
1869 |.if MIPSR6
1870 | seleqz AT, CARG2, CRET1
1871 | selnez CARG1, CARG1, CRET1
1872 | or CARG1, CARG1, AT
1873 |.else
1874 | movz CARG1, CARG2, CRET1
1875 |.endif
1876 |.endif
1877 | b <6
1878 |. daddiu TMP2, TMP2, 8 // Delay slot: advance to next argument.
1879 |
1880 |8: // Convert integer to number and continue with number loop.
1881 | checkint CARG2, ->fff_fallback
1882 |.if FPU
1883 |. lwc1 FARG1, LO(TMP2)
1884 | b <7
1885 |. cvt.d.w FARG1, FARG1
1886 |.else
1887 |. lw CARG2, LO(TMP2)
1888 | bal ->vm_sfi2d_2 // Soft-float int->double of argument.
1889 |. nop
1890 | b <7
1891 |. nop
1892 |.endif
1893 |
1894 |.endmacro
1895 |
|// Instantiate: pre-R6 passes a movz/movn opcode plus an ismax flag;
|// R6 passes a seleqz/selnez pair plus the fused min.d/max.d opcode.
1896 |.if MIPSR6
1897 | math_minmax math_min, seleqz, selnez, min.d
1898 | math_minmax math_max, selnez, seleqz, max.d
1899 |.else
1900 | math_minmax math_min, movz, _, 0
1901 | math_minmax math_max, movn, _, 1
1902 |.endif
1903 |
1904 |//-- String library -----------------------------------------------------
1905 |
|// string.byte(s) fast path, 1-arg form only. Returns the first byte as an
|// integer, or no results for the empty string (RD computed from len != 0).
1906 |.ffunc string_byte // Only handle the 1-arg case here.
1907 | ld CARG1, 0(BASE)
1908 | gettp TMP0, CARG1
1909 | xori AT, NARGS8:RC, 8
1910 | daddiu TMP0, TMP0, -LJ_TSTR
1911 | or AT, AT, TMP0 // AT == 0 iff nargs==1 and type==string.
1912 | bnez AT, ->fff_fallback // Need exactly 1 string argument.
1913 |. cleartp STR:CARG1
1914 | lw TMP0, STR:CARG1->len
1915 | daddiu RA, BASE, -16
1916 | ld PC, FRAME_PC(BASE)
1917 | sltu RD, r0, TMP0 // RD = (len != 0).
1918 | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
1919 | addiu RD, RD, 1
1920 | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
1921 | settp TMP1, TISNUM
1922 | b ->fff_res
1923 |. sd TMP1, 0(RA)
1924 |
|// string.char(n) fast path, 1-arg form only. Requires one integer in
|// [0..255]; builds a 1-byte string in the TMPD stack slot, then interns it.
1925 |.ffunc string_char // Only handle the 1-arg case here.
1926 | ffgccheck
1927 |.if not MIPSR6
1928 |. nop
1929 |.endif
1930 | ld CARG1, 0(BASE)
1931 | gettp TMP0, CARG1
1932 | xori AT, NARGS8:RC, 8 // Exactly 1 argument.
1933 | daddiu TMP0, TMP0, -LJ_TISNUM // Integer.
1934 | li TMP1, 255
1935 | sextw CARG1, CARG1
1936 | or AT, AT, TMP0
1937 | sltu TMP1, TMP1, CARG1 // !(255 < n).
1938 | or AT, AT, TMP1
1939 | bnez AT, ->fff_fallback // Any check failed? Fall back.
1940 |. li CARG3, 1
1941 | daddiu CARG2, sp, TMPD_OFS
1942 | sb CARG1, TMPD // Store the single byte on the stack.
|// Intern a new string: CARG2 = chars, CARG3 = length.
1943 |->fff_newstr:
1944 | load_got lj_str_new
1945 | sd BASE, L->base
1946 | sd PC, SAVE_PC
1947 | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
1948 |. move CARG1, L
1949 | // Returns GCstr *.
1950 | ld BASE, L->base
|// Tag a GCstr * result and return it.
1951 |->fff_resstr:
1952 | li AT, LJ_TSTR
1953 | settp CRET1, AT
1954 | b ->fff_restv
1955 |. move CARG1, CRET1
1956 |
|// string.sub(s, i [, j]) fast path. Normalizes negative/out-of-range
|// indices per the Lua semantics (see inline comments), then either interns
|// the substring via ->fff_newstr or returns the interned empty string.
1957 |.ffunc string_sub
1958 | ffgccheck
1959 |.if not MIPSR6
1960 |. nop
1961 |.endif
1962 | addiu AT, NARGS8:RC, -16
1963 | ld TMP0, 0(BASE)
1964 | bltz AT, ->fff_fallback // Fewer than 2 arguments? Fall back.
1965 |. gettp TMP3, TMP0
1966 | cleartp STR:CARG1, TMP0
1967 | ld CARG2, 8(BASE)
1968 | beqz AT, >1 // Exactly 2 args: default end = -1.
1969 |. li CARG4, -1
1970 | ld CARG3, 16(BASE)
1971 | checkint CARG3, ->fff_fallback
1972 |. sextw CARG4, CARG3
1973 |1:
1974 | checkint CARG2, ->fff_fallback
1975 |. li AT, LJ_TSTR
1976 | bne TMP3, AT, ->fff_fallback // First argument must be a string.
1977 |. sextw CARG3, CARG2
1978 | lw CARG2, STR:CARG1->len
1979 | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
1980 | slt AT, CARG4, r0
1981 | addiu TMP0, CARG2, 1
1982 | addu TMP1, CARG4, TMP0
1983 | slt TMP3, CARG3, r0
1984 |.if MIPSR6
1985 | seleqz CARG4, CARG4, AT
1986 | selnez TMP1, TMP1, AT
1987 | or CARG4, TMP1, CARG4 // if (end < 0) end += len+1
1988 |.else
1989 | movn CARG4, TMP1, AT // if (end < 0) end += len+1
1990 |.endif
1991 | addu TMP1, CARG3, TMP0
1992 |.if MIPSR6
1993 | selnez TMP1, TMP1, TMP3
1994 | seleqz CARG3, CARG3, TMP3
1995 | or CARG3, TMP1, CARG3 // if (start < 0) start += len+1
1996 | li TMP2, 1
1997 | slt AT, CARG4, r0
1998 | slt TMP3, r0, CARG3
1999 | seleqz CARG4, CARG4, AT // if (end < 0) end = 0
2000 | selnez CARG3, CARG3, TMP3
2001 | seleqz TMP2, TMP2, TMP3
2002 | or CARG3, TMP2, CARG3 // if (start < 1) start = 1
2003 | slt AT, CARG2, CARG4
2004 | seleqz CARG4, CARG4, AT
2005 | selnez CARG2, CARG2, AT
2006 | or CARG4, CARG2, CARG4 // if (end > len) end = len
2007 |.else
2008 | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
2009 | li TMP2, 1
2010 | slt AT, CARG4, r0
2011 | slt TMP3, r0, CARG3
2012 | movn CARG4, r0, AT // if (end < 0) end = 0
2013 | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
2014 | slt AT, CARG2, CARG4
2015 | movn CARG4, CARG2, AT // if (end > len) end = len
2016 |.endif
2017 | daddu CARG2, STR:CARG1, CARG3
2018 | subu CARG3, CARG4, CARG3 // len = end - start
2019 | daddiu CARG2, CARG2, sizeof(GCstr)-1 // Char pointer to start index.
2020 | bgez CARG3, ->fff_newstr // Non-empty range? Intern it.
2021 |. addiu CARG3, CARG3, 1 // len++
2022 |->fff_emptystr: // Return empty string.
2023 | li AT, LJ_TSTR
2024 | daddiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
2025 | b ->fff_restv
2026 |. settp CARG1, AT
2027 |
|// Template for string.reverse/lower/upper: reset the global temp buffer,
|// call lj_buf_putstr_<name> on it, convert the buffer to a string and
|// return it via ->fff_resstr.
2028 |.macro ffstring_op, name
2029 | .ffunc string_ .. name
2030 | ffgccheck
2031 |. nop
2032 | beqz NARGS8:RC, ->fff_fallback // Need at least 1 argument.
2033 |. ld CARG2, 0(BASE)
2034 | checkstr STR:CARG2, ->fff_fallback
2035 | daddiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf)
2036 | load_got lj_buf_putstr_ .. name
2037 | ld TMP0, SBUF:CARG1->b
2038 | sd L, SBUF:CARG1->L
2039 | sd BASE, L->base
2040 | sd TMP0, SBUF:CARG1->p // Reset buffer write pointer to start.
2041 | call_intern extern lj_buf_putstr_ .. name
2042 |. sd PC, SAVE_PC
2043 | load_got lj_buf_tostr
2044 | call_intern lj_buf_tostr
2045 |. move SBUF:CARG1, SBUF:CRET1
2046 | b ->fff_resstr
2047 |. ld BASE, L->base // Delay slot: reload base (GC may have run).
2048 |.endmacro
2049 |
2050 |ffstring_op reverse
2051 |ffstring_op lower
2052 |ffstring_op upper
2053 |
2054 |//-- Bit library --------------------------------------------------------
2055 |
|// Fallback conversion of a TValue to a 32-bit integer for bit.* ops.
|// TMP1 != 0 means the value was a number (set by caller); otherwise fall back.
|// FPU path uses the TOBIT 2^52+2^51 bias trick; soft-float path decodes
|// the IEEE-754 double manually (exponent/mantissa extraction + shift).
2056 |->vm_tobit_fb:
2057 | beqz TMP1, ->fff_fallback
2058 |.if FPU
2059 |. ldc1 FARG1, 0(BASE)
2060 | add.d FARG1, FARG1, TOBIT // Bias forces mantissa into low 32 bits.
2061 | mfc1 CRET1, FARG1
2062 | jr ra
2063 |. zextw CRET1, CRET1
2064 |.else
2065 |// FP number to bit conversion for soft-float.
2066 |->vm_tobit:
2067 | dsll TMP0, CARG1, 1 // Drop sign bit.
2068 | li CARG3, 1076
2069 | dsrl AT, TMP0, 53 // Extract biased exponent.
2070 | dsubu CARG3, CARG3, AT // Right-shift amount = 1076 - exp.
2071 | sltiu AT, CARG3, 54
2072 | beqz AT, >1 // Out of range? Result is 0.
2073 |. dextm TMP0, TMP0, 0, 20
2074 | dinsu TMP0, AT, 21, 21 // Insert implicit leading mantissa bit.
2075 | slt AT, CARG1, r0 // Sign of the input.
2076 | dsrlv CRET1, TMP0, CARG3
2077 | dsubu TMP0, r0, CRET1 // Negated result for negative inputs.
2078 |.if MIPSR6
2079 | selnez TMP0, TMP0, AT
2080 | seleqz CRET1, CRET1, AT
2081 | or CRET1, CRET1, TMP0
2082 |.else
2083 | movn CRET1, TMP0, AT
2084 |.endif
2085 | jr ra
2086 |. zextw CRET1, CRET1
2087 |1:
2088 | jr ra
2089 |. move CRET1, r0
2090 |
2091 |// FP number to int conversion with a check for soft-float.
2092 |// Modifies CARG1, CRET1, CRET2, TMP0, AT.
|// JIT-only: convert a double TValue in CARG1 to int32 with an exactness
|// check. CRET1 = result, CRET2 = 0 on success / nonzero on failure
|// (non-integral value or out of int32 range). Same manual IEEE-754
|// decoding as ->vm_tobit, plus a left-shift test of the discarded
|// mantissa bits and a sign-extension range check.
2093 |->vm_tointg:
2094 |.if JIT
2095 | dsll CRET2, CARG1, 1 // Drop sign bit.
2096 | beqz CRET2, >2 // +-0? Result is 0.
2097 |. li TMP0, 1076
2098 | dsrl AT, CRET2, 53
2099 | dsubu TMP0, TMP0, AT
2100 | sltiu AT, TMP0, 54
2101 | beqz AT, >1 // Exponent out of range? Fail.
2102 |. dextm CRET2, CRET2, 0, 20
2103 | dinsu CRET2, AT, 21, 21 // Insert implicit leading mantissa bit.
2104 | slt AT, CARG1, r0
2105 | dsrlv CRET1, CRET2, TMP0
2106 | dsubu CARG1, r0, CRET1
2107 |.if MIPSR6
2108 | seleqz CRET1, CRET1, AT
2109 | selnez CARG1, CARG1, AT
2110 | or CRET1, CRET1, CARG1 // Apply sign.
2111 |.else
2112 | movn CRET1, CARG1, AT
2113 |.endif
2114 | li CARG1, 64
2115 | subu TMP0, CARG1, TMP0
2116 | dsllv CRET2, CRET2, TMP0 // Integer check.
2117 | sextw AT, CRET1
2118 | xor AT, CRET1, AT // Range check.
2119 |.if MIPSR6
2120 | seleqz AT, AT, CRET2
2121 | selnez CRET2, CRET2, CRET2
2122 | jr ra
2123 |. or CRET2, AT, CRET2 // CRET2 = 0 only if both checks pass.
2124 |.else
2125 | jr ra
2126 |. movz CRET2, AT, CRET2
2127 |.endif
2128 |1:
2129 | jr ra
2130 |. li CRET2, 1 // Failure.
2131 |2:
2132 | jr ra
2133 |. move CRET1, r0 // Zero input -> zero result, CRET2 already 0.
2134 |.endif
2135 |.endif
2136 |
|// Header template for bit.* fast functions: take the integer fast path
|// (CRET1 = zero-extended arg) or convert via ->vm_tobit_fb.
2137 |.macro .ffunc_bit, name
2138 | .ffunc_1 bit_..name
2139 | gettp TMP0, CARG1
2140 | beq TMP0, TISNUM, >6 // Integer? Skip conversion.
2141 |. zextw CRET1, CARG1
2142 | bal ->vm_tobit_fb
2143 |. sltiu TMP1, TMP0, LJ_TISNUM // TMP1 = is-number flag for fallback.
2144 |6:
2145 |.endmacro
2146 |
|// Variadic bit.band/bor/bxor template: fold all remaining args into CRET1
|// with bins, converting non-integer numbers on the fly.
2147 |.macro .ffunc_bit_op, name, bins
2148 | .ffunc_bit name
2149 | daddiu TMP2, BASE, 8 // TMP2 = next argument slot.
2150 | daddu TMP3, BASE, NARGS8:RC // TMP3 = end of arguments.
2151 |1:
2152 | beq TMP2, TMP3, ->fff_resi // All args consumed? Return.
2153 |. ld CARG1, 0(TMP2)
2154 | gettp TMP0, CARG1
2155 |.if FPU
2156 | bne TMP0, TISNUM, >2 // Not an integer? Convert via TOBIT.
2157 |. daddiu TMP2, TMP2, 8
2158 | zextw CARG1, CARG1
2159 | b <1
2160 |. bins CRET1, CRET1, CARG1
2161 |2:
2162 | ldc1 FARG1, -8(TMP2)
2163 | sltiu AT, TMP0, LJ_TISNUM
2164 | beqz AT, ->fff_fallback // Not a number either? Fall back.
2165 |. add.d FARG1, FARG1, TOBIT
2166 | mfc1 CARG1, FARG1
2167 | zextw CARG1, CARG1
2168 | b <1
2169 |. bins CRET1, CRET1, CARG1
2170 |.else
2171 | beq TMP0, TISNUM, >2
2172 |. move CRET2, CRET1 // Save accumulator across helper call.
2173 | bal ->vm_tobit_fb
2174 |. sltiu TMP1, TMP0, LJ_TISNUM
2175 | move CARG1, CRET2
2176 |2:
2177 | zextw CARG1, CARG1
2178 | bins CRET1, CRET1, CARG1
2179 | b <1
2180 |. daddiu TMP2, TMP2, 8
2181 |.endif
2182 |.endmacro
2183 |
2184 |.ffunc_bit_op band, and
2185 |.ffunc_bit_op bor, or
2186 |.ffunc_bit_op bxor, xor
2187 |
|// bit.bswap: byte-swap the low 32 bits using shift/insert pairs.
2188 |.ffunc_bit bswap
2189 | dsrl TMP0, CRET1, 8
2190 | dsrl TMP1, CRET1, 24
2191 | andi TMP2, TMP0, 0xff00
2192 | dins TMP1, CRET1, 24, 31
2193 | dins TMP2, TMP0, 16, 23
2194 | b ->fff_resi
2195 |. or CRET1, TMP1, TMP2
2196 |
|// bit.bnot: bitwise complement, zero-extended back to 32 bits.
2197 |.ffunc_bit bnot
2198 | not CRET1, CRET1
2199 | b ->fff_resi
2200 |. zextw CRET1, CRET1
2201 |
|// Shift/rotate template for bit.lshift/rshift/arshift/rol/ror.
|// shmod == 1 negates the shift count (rol is implemented as rotr by -n).
2202 |.macro .ffunc_bit_sh, name, shins, shmod
2203 | .ffunc_2 bit_..name
2204 | gettp TMP0, CARG1
2205 | beq TMP0, TISNUM, >1 // First arg already an integer?
2206 |. nop
2207 | bal ->vm_tobit_fb
2208 |. sltiu TMP1, TMP0, LJ_TISNUM
2209 | move CARG1, CRET1
2210 |1:
2211 | gettp TMP0, CARG2
2212 | bne TMP0, TISNUM, ->fff_fallback // Shift count must be an integer.
2213 |. zextw CARG2, CARG2
2214 | sextw CARG1, CARG1
2215 |.if shmod == 1
2216 | negu CARG2, CARG2 // rol x,n == rotr x,-n.
2217 |.endif
2218 | shins CRET1, CARG1, CARG2
2219 | b ->fff_resi
2220 |. zextw CRET1, CRET1
2221 |.endmacro
2222 |
2223 |.ffunc_bit_sh lshift, sllv, 0
2224 |.ffunc_bit_sh rshift, srlv, 0
2225 |.ffunc_bit_sh arshift, srav, 0
2226 |.ffunc_bit_sh rol, rotrv, 1
2227 |.ffunc_bit_sh ror, rotrv, 0
2228 |
|// bit.tobit is just the conversion header; falls through to the shared
|// integer-result epilogue.
2229 |.ffunc_bit tobit
2230 |->fff_resi:
2231 | ld PC, FRAME_PC(BASE)
2232 | daddiu RA, BASE, -16
2233 | settp CRET1, TISNUM
2234 | b ->fff_res1
2235 |. sd CRET1, -16(BASE)
2236 |
2237 |//-----------------------------------------------------------------------
2238 |->fff_fallback: // Call fast function fallback handler.
2239 | // BASE = new base, RB = CFUNC, RC = nargs*8
2240 | ld TMP3, CFUNC:RB->f // C fallback function pointer.
2241 | daddu TMP1, BASE, NARGS8:RC
2242 | ld PC, FRAME_PC(BASE) // Fallback may overwrite PC.
2243 | daddiu TMP0, TMP1, 8*LUA_MINSTACK // Ensure LUA_MINSTACK headroom.
2244 | ld TMP2, L->maxstack
2245 | sd PC, SAVE_PC // Redundant (but a defined value).
2246 | sltu AT, TMP2, TMP0
2247 | sd BASE, L->base
2248 | sd TMP1, L->top
2249 | bnez AT, >5 // Need to grow stack.
2250 |. move CFUNCADDR, TMP3
2251 | jalr TMP3 // (lua_State *L)
2252 |. move CARG1, L
2253 | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
2254 | ld BASE, L->base
2255 | sll RD, CRET1, 3
2256 | bgtz CRET1, ->fff_res // Returned nresults+1?
2257 |. daddiu RA, BASE, -16
2258 |1: // Returned 0 or -1: retry fast path.
2259 | ld LFUNC:RB, FRAME_FUNC(BASE)
2260 | ld TMP0, L->top
2261 | cleartp LFUNC:RB
2262 | bnez CRET1, ->vm_call_tail // Returned -1?
2263 |. dsubu NARGS8:RC, TMP0, BASE
2264 | ins_callt // Returned 0: retry fast path.
2265 |
2266 |// Reconstruct previous base for vmeta_call during tailcall.
2267 |->vm_call_tail:
2268 | andi TMP0, PC, FRAME_TYPE
2269 | li AT, -4
2270 | bnez TMP0, >3 // Non-Lua frame? Delta is in PC itself.
2271 |. and TMP1, PC, AT
2272 | lbu TMP1, OFS_RA(PC) // Lua frame: delta from RA of calling ins.
2273 | sll TMP1, TMP1, 3
2274 | addiu TMP1, TMP1, 16
2275 |3:
2276 | b ->vm_call_dispatch // Resolve again for tailcall.
2277 |. dsubu TMP2, BASE, TMP1
2278 |
2279 |5: // Grow stack for fallback handler.
2280 | load_got lj_state_growstack
2281 | li CARG2, LUA_MINSTACK
2282 | call_intern lj_state_growstack // (lua_State *L, int n)
2283 |. move CARG1, L
2284 | ld BASE, L->base
2285 | b <1
2286 |. li CRET1, 0 // Force retry.
2287 |
|// GC step from a fast function. Preserves ra in MULTRES across the call
|// and re-derives RB/RC since the GC may have moved the stack.
2288 |->fff_gcstep: // Call GC step function.
2289 | // BASE = new base, RC = nargs*8
2290 | move MULTRES, ra // Save return address (no free callee-save here).
2291 | load_got lj_gc_step
2292 | sd BASE, L->base
2293 | daddu TMP0, BASE, NARGS8:RC
2294 | sd PC, SAVE_PC // Redundant (but a defined value).
2295 | sd TMP0, L->top
2296 | call_intern lj_gc_step // (lua_State *L)
2297 |. move CARG1, L
2298 | ld BASE, L->base
2299 | move ra, MULTRES // Restore return address.
2300 | ld TMP0, L->top
2301 | ld CFUNC:RB, FRAME_FUNC(BASE)
2302 | cleartp CFUNC:RB
2303 | jr ra
2304 |. dsubu NARGS8:RC, TMP0, BASE
2305 |
2306 |//-----------------------------------------------------------------------
2307 |//-- Special dispatch targets -------------------------------------------
2308 |//-----------------------------------------------------------------------
2309 |
|// Hook/recording dispatch targets. They share the numbered local labels:
|// 5: re-dispatch to the static instruction, 1: call lj_dispatch_ins,
|// 3:/4: reload base and re-decode + dispatch the current instruction.
2310 |->vm_record: // Dispatch target for recording phase.
2311 |.if JIT
2312 | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
2313 | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
2314 | bnez AT, >5
2315 | // Decrement the hookcount for consistency, but always do the call.
2316 |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
2317 | andi AT, TMP3, HOOK_ACTIVE
2318 | bnez AT, >1
2319 |. addiu TMP2, TMP2, -1
2320 | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
2321 | beqz AT, >1
2322 |. nop
2323 | b >1
2324 |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
2325 |.endif
2326 |
2327 |->vm_rethook: // Dispatch target for return hooks.
2328 | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
2329 | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
2330 | beqz AT, >1
2331 |5: // Re-dispatch to static ins.
2332 |. ld AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
2333 | jr AT
2334 |. nop
2335 |
2336 |->vm_inshook: // Dispatch target for instr/line hooks.
2337 | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
2338 | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
2339 | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
2340 | bnez AT, <5
2341 |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
2342 | beqz AT, <5 // No hook enabled at all?
2343 |. addiu TMP2, TMP2, -1
2344 | beqz TMP2, >1 // Count hook fired?
2345 |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
2346 | andi AT, TMP3, LUA_MASKLINE
2347 | beqz AT, <5
2348 |1:
2349 |. load_got lj_dispatch_ins
2350 | sw MULTRES, SAVE_MULTRES
2351 | move CARG2, PC
2352 | sd BASE, L->base
2353 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
2354 | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
2355 |. move CARG1, L
2356 |3:
2357 | ld BASE, L->base
2358 |4: // Re-dispatch to static ins.
2359 | lw INS, -4(PC)
2360 | decode_OP8a TMP1, INS
2361 | decode_OP8b TMP1
2362 | daddu TMP0, DISPATCH, TMP1
2363 | decode_RD8a RD, INS
2364 | ld AT, GG_DISP2STATIC(TMP0)
2365 | decode_RA8a RA, INS
2366 | decode_RD8b RD
2367 | jr AT
2368 | decode_RA8b RA
2369 |
2370 |->cont_hook: // Continue from hook yield.
2371 | daddiu PC, PC, 4
2372 | b <4
2373 |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
2374 |
|// Hot loop counter underflow: start a trace at the current PC.
|// Sets L->top from the prototype's frame size before calling the tracer.
2375 |->vm_hotloop: // Hot loop counter underflow.
2376 |.if JIT
2377 | ld LFUNC:TMP1, FRAME_FUNC(BASE)
2378 | daddiu CARG1, DISPATCH, GG_DISP2J // CARG1 = jit_State *J.
2379 | cleartp LFUNC:TMP1
2380 | sd PC, SAVE_PC
2381 | ld TMP1, LFUNC:TMP1->pc
2382 | move CARG2, PC
2383 | sd L, DISPATCH_J(L)(DISPATCH)
2384 | lbu TMP1, PC2PROTO(framesize)(TMP1)
2385 | load_got lj_trace_hot
2386 | sd BASE, L->base
2387 | dsll TMP1, TMP1, 3
2388 | daddu TMP1, BASE, TMP1
2389 | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
2390 |. sd TMP1, L->top // Delay slot: top = base + framesize.
2391 | b <3
2392 |. nop
2393 |.endif
2394 |
2395 |
|// Call hook / hot call dispatch. Both funnel into lj_dispatch_call;
|// vm_hotcall tags PC with bit 0 to distinguish the hot-call case.
2396 |->vm_callhook: // Dispatch target for call hooks.
2397 |.if JIT
2398 | b >1
2399 |.endif
2400 |. move CARG2, PC
2401 |
2402 |->vm_hotcall: // Hot call counter underflow.
2403 |.if JIT
2404 | ori CARG2, PC, 1 // Tag PC: hot call, not hook.
2405 |1:
2406 |.endif
2407 | load_got lj_dispatch_call
2408 | daddu TMP0, BASE, RC
2409 | sd PC, SAVE_PC
2410 | sd BASE, L->base
2411 | dsubu RA, RA, BASE // Save RA as base-relative offset.
2412 | sd TMP0, L->top
2413 | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
2414 |. move CARG1, L
2415 | // Returns ASMFunction.
2416 | ld BASE, L->base
2417 | ld TMP0, L->top
2418 | sd r0, SAVE_PC // Invalidate for subsequent line hook.
2419 | dsubu NARGS8:RC, TMP0, BASE
2420 | daddu RA, BASE, RA // Restore RA from offset.
2421 | ld LFUNC:RB, FRAME_FUNC(BASE)
2422 | cleartp LFUNC:RB
2423 | jr CRET1 // Jump to returned ASMFunction.
2424 |. lw INS, -4(PC)
2425 |
|// Trace stitching continuation: move call results into place (padding
|// with nil via label 9), then either jump to an already-stitched trace,
|// give up if blacklisted, or ask lj_dispatch_stitch for a new trace.
2426 |->cont_stitch: // Trace stitching.
2427 |.if JIT
2428 | // RA = resultptr, RB = meta base
2429 | lw INS, -4(PC)
2430 | ld TRACE:TMP2, -40(RB) // Save previous trace.
2431 | decode_RA8a RC, INS
2432 | daddiu AT, MULTRES, -8 // AT = result byte count.
2433 | cleartp TRACE:TMP2
2434 | decode_RA8b RC
2435 | beqz AT, >2 // No results to move?
2436 |. daddu RC, BASE, RC // Call base.
2437 |1: // Move results down.
2438 | ld CARG1, 0(RA)
2439 | daddiu AT, AT, -8
2440 | daddiu RA, RA, 8
2441 | sd CARG1, 0(RC)
2442 | bnez AT, <1
2443 |. daddiu RC, RC, 8
2444 |2:
2445 | decode_RA8a RA, INS
2446 | decode_RB8a RB, INS
2447 | decode_RA8b RA
2448 | decode_RB8b RB
2449 | daddu RA, RA, RB
2450 | daddu RA, BASE, RA // RA = end of wanted result slots.
2451 |3:
2452 | sltu AT, RC, RA
2453 | bnez AT, >9 // More results wanted?
2454 |. nop
2455 |
2456 | lhu TMP3, TRACE:TMP2->traceno
2457 | lhu RD, TRACE:TMP2->link
2458 | beq RD, TMP3, ->cont_nop // Blacklisted.
2459 |. load_got lj_dispatch_stitch
2460 | bnez RD, =>BC_JLOOP // Jump to stitched trace.
2461 |. sll RD, RD, 3
2462 |
2463 | // Stitch a new trace to the previous trace.
2464 | sw TMP3, DISPATCH_J(exitno)(DISPATCH)
2465 | sd L, DISPATCH_J(L)(DISPATCH)
2466 | sd BASE, L->base
2467 | daddiu CARG1, DISPATCH, GG_DISP2J
2468 | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
2469 |. move CARG2, PC
2470 | b ->cont_nop
2471 |. ld BASE, L->base
2472 |
2473 |9: // Fill remaining wanted result slots with nil.
2474 | sd TISNIL, 0(RC)
2475 | b <3
2476 |. daddiu RC, RC, 8
2477 |.endif
2478 |
|// Profiler hook: call the profile dispatcher, then back up PC by one
|// instruction and re-dispatch it dynamically.
2479 |->vm_profhook: // Dispatch target for profiler hook.
2480#if LJ_HASPROFILE
2481 | load_got lj_dispatch_profile
2482 | sw MULTRES, SAVE_MULTRES
2483 | move CARG2, PC
2484 | sd BASE, L->base
2485 | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
2486 |. move CARG1, L
2487 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2488 | daddiu PC, PC, -4
2489 | b ->cont_nop
2490 |. ld BASE, L->base
2491#endif
2492 |
2493 |//-----------------------------------------------------------------------
2494 |//-- Trace exit handler -------------------------------------------------
2495 |//-----------------------------------------------------------------------
2496 |
|// Save a pair of registers (a, b) into the ExitState on the stack:
|// with FPU, FP regs go at slot*8 and GPRs at 32*8+slot*8; without FPU,
|// only the GPRs are saved at slot*8.
2497 |.macro savex_, a, b
2498 |.if FPU
2499 | sdc1 f..a, a*8(sp)
2500 | sdc1 f..b, b*8(sp)
2501 | sd r..a, 32*8+a*8(sp)
2502 | sd r..b, 32*8+b*8(sp)
2503 |.else
2504 | sd r..a, a*8(sp)
2505 | sd r..b, b*8(sp)
2506 |.endif
2507 |.endmacro
2508 |
|// Trace exit handler: spill the full register state (ExitState) onto the
|// stack, hand it to lj_trace_exit, then restore and fall into
|// ->vm_exit_interp which resumes the interpreter (or rethrows at label 9).
2509 |->vm_exit_handler:
2510 |.if JIT
2511 |.if FPU
2512 | daddiu sp, sp, -(32*8+32*8) // Room for 32 FPRs + 32 GPRs.
2513 |.else
2514 | daddiu sp, sp, -(32*8)
2515 |.endif
2516 | savex_ 0, 1
2517 | savex_ 2, 3
2518 | savex_ 4, 5
2519 | savex_ 6, 7
2520 | savex_ 8, 9
2521 | savex_ 10, 11
2522 | savex_ 12, 13
2523 | savex_ 14, 15
2524 | savex_ 16, 17
2525 | savex_ 18, 19
2526 | savex_ 20, 21
2527 | savex_ 22, 23
2528 | savex_ 24, 25
2529 | savex_ 26, 27
2530 | savex_ 28, 30 // r29 (sp) and r31 (ra) handled specially below.
2531 |.if FPU
2532 | sdc1 f29, 29*8(sp)
2533 | sdc1 f31, 31*8(sp)
2534 | sd r0, 32*8+31*8(sp) // Clear RID_TMP.
2535 | daddiu TMP2, sp, 32*8+32*8 // Recompute original value of sp.
2536 | sd TMP2, 32*8+29*8(sp) // Store sp in RID_SP
2537 |.else
2538 | sd r0, 31*8(sp) // Clear RID_TMP.
2539 | daddiu TMP2, sp, 32*8 // Recompute original value of sp.
2540 | sd TMP2, 29*8(sp) // Store sp in RID_SP
2541 |.endif
2542 | li_vmstate EXIT
2543 | daddiu DISPATCH, JGL, -GG_DISP2G-32768 // Rederive DISPATCH from JGL.
2544 | lw TMP1, 0(TMP2) // Load exit number.
2545 | st_vmstate
2546 | ld L, DISPATCH_GL(cur_L)(DISPATCH)
2547 | ld BASE, DISPATCH_GL(jit_base)(DISPATCH)
2548 | load_got lj_trace_exit
2549 | sd L, DISPATCH_J(L)(DISPATCH)
2550 | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
2551 | sd BASE, L->base
2552 | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
2553 | daddiu CARG1, DISPATCH, GG_DISP2J
2554 | sd r0, DISPATCH_GL(jit_base)(DISPATCH) // No longer on a trace.
2555 | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
2556 |. move CARG2, sp
2557 | // Returns MULTRES (unscaled) or negated error code.
2558 | ld TMP1, L->cframe
2559 | li AT, -4
2560 | ld BASE, L->base
2561 | and sp, TMP1, AT // Restore sp from (masked) cframe pointer.
2562 | ld PC, SAVE_PC // Get SAVE_PC.
2563 | b >1
2564 |. sd L, SAVE_L // Set SAVE_L (on-trace resume/yield).
2565 |.endif
2566 |->vm_exit_interp:
2567 |.if JIT
2568 | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
2569 | ld L, SAVE_L
2570 | daddiu DISPATCH, JGL, -GG_DISP2G-32768
2571 | sd BASE, L->base
2572 |1:
2573 | bltz CRET1, >9 // Check for error from exit.
2574 |. ld LFUNC:RB, FRAME_FUNC(BASE)
2575 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2576 | dsll MULTRES, CRET1, 3
2577 | cleartp LFUNC:RB
2578 | sw MULTRES, SAVE_MULTRES
2579 | li TISNIL, LJ_TNIL
2580 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
2581 | .FPU mtc1 TMP3, TOBIT
2582 | ld TMP1, LFUNC:RB->pc
2583 | sd r0, DISPATCH_GL(jit_base)(DISPATCH)
2584 | ld KBASE, PC2PROTO(k)(TMP1)
2585 | .FPU cvt.d.s TOBIT, TOBIT
2586 | // Modified copy of ins_next which handles function header dispatch, too.
2587 | lw INS, 0(PC)
2588 | daddiu PC, PC, 4
2589 | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
2590 | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
2591 | decode_OP8a TMP1, INS
2592 | decode_OP8b TMP1
2593 | sltiu TMP2, TMP1, BC_FUNCF*8 // Function header opcode?
2594 | daddu TMP0, DISPATCH, TMP1
2595 | decode_RD8a RD, INS
2596 | ld AT, 0(TMP0)
2597 | decode_RA8a RA, INS
2598 | beqz TMP2, >2
2599 |. decode_RA8b RA
2600 | jr AT // Regular instruction: plain dispatch.
2601 |. decode_RD8b RD
2602 |2:
2603 | sltiu TMP2, TMP1, (BC_FUNCC+2)*8 // Fast function?
2604 | bnez TMP2, >3
2605 |. ld TMP1, FRAME_PC(BASE)
2606 | // Check frame below fast function.
2607 | andi TMP0, TMP1, FRAME_TYPE
2608 | bnez TMP0, >3 // Trace stitching continuation?
2609 |. nop
2610 | // Otherwise set KBASE for Lua function below fast function.
2611 | lw TMP2, -4(TMP1)
2612 | decode_RA8a TMP0, TMP2
2613 | decode_RA8b TMP0
2614 | dsubu TMP1, BASE, TMP0
2615 | ld LFUNC:TMP2, -32(TMP1)
2616 | cleartp LFUNC:TMP2
2617 | ld TMP1, LFUNC:TMP2->pc
2618 | ld KBASE, PC2PROTO(k)(TMP1)
2619 |3:
2620 | daddiu RC, MULTRES, -8
2621 | jr AT // Dispatch to function header.
2622 |. daddu RA, RA, BASE
2623 |
2624 |9: // Rethrow error from the right C frame.
2625 | load_got lj_err_run
2626 | call_intern lj_err_run // (lua_State *L)
2627 |. move CARG1, L
2628 |.endif
2629 |
2630 |//-----------------------------------------------------------------------
2631 |//-- Math helper functions ----------------------------------------------
2632 |//-----------------------------------------------------------------------
2633 |
2634 |// Hard-float round to integer.
2635 |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
2636 |// MIPSR6: Modifies FTMP1, too.
2637 |.macro vm_round_hf, func
|// Rounds FARG1 to an integral double via the |x| + 2^52 magnitude trick:
|// adding and re-subtracting 2^52 forces rounding at the integer boundary.
|// Result in FRET1. AT holds the raw bits of x, so its sign (bit 63)
|// drives the sign-merge at the end. f4 holds the 2^52 constant.
2638 | lui TMP0, 0x4330 // Hiword of 2^52 (double).
2639 | dsll TMP0, TMP0, 32
2640 | dmtc1 TMP0, f4
2641 | abs.d FRET2, FARG1 // |x|
2642 | dmfc1 AT, FARG1
2643 |.if MIPSR6
2644 | cmp.lt.d FTMP1, FRET2, f4
2645 | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
2646 | bc1eqz FTMP1, >1 // Truncate only if |x| < 2^52.
2647 |.else
2648 | c.olt.d 0, FRET2, f4
2649 | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
2650 | bc1f 0, >1 // Truncate only if |x| < 2^52.
2651 |.endif
2652 |. sub.d FRET1, FRET1, f4
2653 | slt AT, AT, r0
|// TMP0 now becomes the +-1.0 correction constant used below.
2654 |.if "func" == "ceil"
2655 | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
2656 |.else
2657 | lui TMP0, 0x3ff0 // Hiword of +1 (double).
2658 |.endif
2659 |.if "func" == "trunc"
2660 | dsll TMP0, TMP0, 32
2661 | dmtc1 TMP0, f4
2662 |.if MIPSR6
2663 | cmp.lt.d FTMP1, FRET2, FRET1 // |x| < result?
2664 | sub.d FRET2, FRET1, f4
2665 | sel.d FTMP1, FRET1, FRET2 // If yes, subtract +1.
2666 | dmtc1 AT, FRET1
2667 | neg.d FRET2, FTMP1
2668 | jr ra
2669 |. sel.d FRET1, FTMP1, FRET2 // Merge sign bit back in.
2670 |.else
2671 | c.olt.d 0, FRET2, FRET1 // |x| < result?
2672 | sub.d FRET2, FRET1, f4
2673 | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
2674 | neg.d FRET2, FRET1
2675 | jr ra
2676 |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
2677 |.endif
2678 |.else
|// floor/ceil: restore the sign first, then correct by -+1 if rounding
|// went the wrong way relative to the requested direction.
2679 | neg.d FRET2, FRET1
2680 | dsll TMP0, TMP0, 32
2681 | dmtc1 TMP0, f4
2682 |.if MIPSR6
2683 | dmtc1 AT, FTMP1
2684 | sel.d FTMP1, FRET1, FRET2
2685 |.if "func" == "ceil"
2686 | cmp.lt.d FRET1, FTMP1, FARG1 // x > result?
2687 |.else
2688 | cmp.lt.d FRET1, FARG1, FTMP1 // x < result?
2689 |.endif
2690 | sub.d FRET2, FTMP1, f4 // If yes, subtract +-1.
2691 | jr ra
2692 |. sel.d FRET1, FTMP1, FRET2
2693 |.else
2694 | movn.d FRET1, FRET2, AT // Merge sign bit back in.
2695 |.if "func" == "ceil"
2696 | c.olt.d 0, FRET1, FARG1 // x > result?
2697 |.else
2698 | c.olt.d 0, FARG1, FRET1 // x < result?
2699 |.endif
2700 | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
2701 | jr ra
2702 |. movt.d FRET1, FRET2, 0
2703 |.endif
2704 |.endif
2705 |1:
|// |x| >= 2^52 (or NaN): already integral, return the input unchanged.
2706 | jr ra
2707 |. mov.d FRET1, FARG1
2708 |.endmacro
2709 |
2710 |.macro vm_round, func
|// Dispatcher for the rounding entry points below. On soft-float builds
|// (not FPU) this expands to nothing: the routines are simply not emitted.
2711 |.if FPU
2712 | vm_round_hf, func
2713 |.endif
2714 |.endmacro
2715 |
|// Public rounding entry points. Input/output in FRET1/FARG1 per
|// vm_round_hf above. vm_trunc is only needed by the JIT compiler.
2716 |->vm_floor:
2717 | vm_round floor
2718 |->vm_ceil:
2719 | vm_round ceil
2720 |->vm_trunc:
2721 |.if JIT
2722 | vm_round trunc
2723 |.endif
2724 |
2725 |// Soft-float integer to number conversion.
2726 |.macro sfi2d, ARG
|// Builds an IEEE-754 double from a 32-bit signed integer entirely in
|// integer registers: take |value|, normalize with count-leading-zeros,
|// then assemble sign | exponent | mantissa by shifting and adding.
2727 |.if not FPU
2728 | beqz ARG, >9 // Handle zero first.
2729 |. sra TMP0, ARG, 31
2730 | xor TMP1, ARG, TMP0
2731 | dsubu TMP1, TMP1, TMP0 // Absolute value in TMP1.
2732 | dclz ARG, TMP1
2733 | addiu ARG, ARG, -11
2734 | li AT, 0x3ff+63-11-1
2735 | dsllv TMP1, TMP1, ARG // Align mantissa left with leading 1.
2736 | subu ARG, AT, ARG // Exponent - 1.
2737 | ins ARG, TMP0, 11, 11 // Sign | Exponent.
2738 | dsll ARG, ARG, 52 // Align left.
2739 | jr ra
|// The mantissa still carries its leading 1; adding it carries into the
|// exponent field, which is why the exponent above is one too small.
2740 |. daddu ARG, ARG, TMP1 // Add mantissa, increment exponent.
2741 |9:
|// Zero input: +0.0 is the all-zero bit pattern, so return ARG as-is.
2742 | jr ra
2743 |. nop
2744 |.endif
2745 |.endmacro
2746 |
2747 |// Input CARG1. Output: CARG1. Temporaries: AT, TMP0, TMP1.
2748 |->vm_sfi2d_1:
2749 | sfi2d CARG1
2750 |
2751 |// Input CARG2. Output: CARG2. Temporaries: AT, TMP0, TMP1.
2752 |->vm_sfi2d_2:
2753 | sfi2d CARG2
2754 |
2755 |// Soft-float comparison. Equivalent to c.eq.d.
2756 |// Input: CARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1.
2757 |->vm_sfcmpeq:
2758 |.if not FPU
|// Shift left by 1 to drop the sign bit: +0/-0 both become all-zero.
2759 | dsll AT, CARG1, 1
2760 | dsll TMP0, CARG2, 1
2761 | or TMP1, AT, TMP0
2762 | beqz TMP1, >8 // Both args +-0: return 1.
|// 0xffe0...0 is the sign-stripped NaN threshold: any shifted value
|// strictly above it has exponent all-ones and a nonzero mantissa.
2763 |. lui TMP1, 0xffe0
2764 | dsll TMP1, TMP1, 32
2765 | sltu AT, TMP1, AT
2766 | sltu TMP0, TMP1, TMP0
2767 | or TMP1, AT, TMP0
2768 | bnez TMP1, >9 // Either arg is NaN: return 0;
2769 |. xor AT, CARG1, CARG2
2770 | jr ra
2771 |. sltiu CRET1, AT, 1 // Same values: return 1.
2772 |8:
2773 | jr ra
2774 |. li CRET1, 1
2775 |9:
2776 | jr ra
2777 |. li CRET1, 0
2778 |.endif
2779 |
2780 |// Soft-float comparison. Equivalent to c.ult.d and c.olt.d.
2781 |// Input: CARG1, CARG2. Output: CRET1. Temporaries: AT, TMP0, TMP1, CRET2.
|// The two entry points differ only in CRET2, the value returned when a
|// NaN is involved: ult (unordered-or-less-than) yields 1, olt yields 0.
2782 |->vm_sfcmpult:
2783 |.if not FPU
2784 | b >1
2785 |. li CRET2, 1
2786 |.endif
2787 |
2788 |->vm_sfcmpolt:
2789 |.if not FPU
2790 | li CRET2, 0
2791 |1:
|// Sign-stripped magnitudes; see vm_sfcmpeq for the NaN threshold trick.
2792 | dsll AT, CARG1, 1
2793 | dsll TMP0, CARG2, 1
2794 | or TMP1, AT, TMP0
2795 | beqz TMP1, >8 // Both args +-0: return 0.
2796 |. lui TMP1, 0xffe0
2797 | dsll TMP1, TMP1, 32
2798 | sltu AT, TMP1, AT
2799 | sltu TMP0, TMP1, TMP0
2800 | or TMP1, AT, TMP0
2801 | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
|// IEEE doubles of the same sign compare like their integer bit patterns;
|// for two negatives the order is reversed, hence the swap at 5.
2802 |. and AT, CARG1, CARG2
2803 | bltz AT, >5 // Both args negative?
2804 |. nop
2805 | jr ra
2806 |. slt CRET1, CARG1, CARG2
2807 |5: // Swap conditions if both operands are negative.
2808 | jr ra
2809 |. slt CRET1, CARG2, CARG1
2810 |8:
2811 | jr ra
2812 |. li CRET1, 0
2813 |9:
2814 | jr ra
2815 |. move CRET1, CRET2
2816 |.endif
2817 |
2818 |->vm_sfcmpogt:
|// Soft-float ordered greater-than: vm_sfcmpolt with the operands
|// swapped throughout. NaN always yields 0 (both exits 8 and 9).
|// Input: CARG1, CARG2. Output: CRET1. Temporaries: AT, TMP0, TMP1.
2819 |.if not FPU
2820 | dsll AT, CARG2, 1
2821 | dsll TMP0, CARG1, 1
2822 | or TMP1, AT, TMP0
2823 | beqz TMP1, >8 // Both args +-0: return 0.
2824 |. lui TMP1, 0xffe0
2825 | dsll TMP1, TMP1, 32
2826 | sltu AT, TMP1, AT
2827 | sltu TMP0, TMP1, TMP0
2828 | or TMP1, AT, TMP0
2829 | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
2830 |. and AT, CARG2, CARG1
2831 | bltz AT, >5 // Both args negative?
2832 |. nop
2833 | jr ra
2834 |. slt CRET1, CARG2, CARG1
2835 |5: // Swap conditions if both operands are negative.
2836 | jr ra
2837 |. slt CRET1, CARG1, CARG2
2838 |8:
2839 | jr ra
2840 |. li CRET1, 0
2841 |9:
2842 | jr ra
2843 |. li CRET1, 0
2844 |.endif
2845 |
2846 |// Soft-float comparison. Equivalent to c.ole.d a, b or c.ole.d b, a.
2847 |// Input: CARG1, CARG2, TMP3. Output: CRET1. Temporaries: AT, TMP0, TMP1.
|// TMP3 selects the operand order: it is XORed into the sign test below,
|// flipping which branch of the <=/>= pair is taken (set by the caller;
|// presumably 0 or a sign-bit mask — confirm against call sites).
2848 |->vm_sfcmpolex:
2849 |.if not FPU
2850 | dsll AT, CARG1, 1
2851 | dsll TMP0, CARG2, 1
2852 | or TMP1, AT, TMP0
2853 | beqz TMP1, >8 // Both args +-0: return 1.
2854 |. lui TMP1, 0xffe0
2855 | dsll TMP1, TMP1, 32
2856 | sltu AT, TMP1, AT
2857 | sltu TMP0, TMP1, TMP0
2858 | or TMP1, AT, TMP0
2859 | bnez TMP1, >9 // Either arg is NaN: return 0;
2860 |. and AT, CARG1, CARG2
2861 | xor AT, AT, TMP3
2862 | bltz AT, >5 // Both args negative?
2863 |. nop
2864 | jr ra
2865 |. slt CRET1, CARG2, CARG1
2866 |5: // Swap conditions if both operands are negative.
2867 | jr ra
2868 |. slt CRET1, CARG1, CARG2
2869 |8:
2870 | jr ra
2871 |. li CRET1, 1
2872 |9:
2873 | jr ra
2874 |. li CRET1, 0
2875 |.endif
2876 |
2877 |.macro sfmin_max, name, fpcall
|// Soft-float min/max for the JIT: compare CARG1/CARG2 via the given
|// soft-float comparison routine, then select the winner into CRET1.
|// bal clobbers ra, so it is parked in TMP2 around the nested call.
2878 |->vm_sf .. name:
2879 |.if JIT and not FPU
2880 | move TMP2, ra
2881 | bal ->fpcall
2882 |. nop
2883 | move ra, TMP2
2884 | move TMP0, CRET1
2885 | move CRET1, CARG1
2886 |.if MIPSR6
|// R6 has no movz/movn; emulate the conditional move with
|// selnez/seleqz on the comparison result and OR the halves.
2887 | selnez CRET1, CRET1, TMP0
2888 | seleqz TMP0, CARG2, TMP0
2889 | jr ra
2890 |. or CRET1, CRET1, TMP0
2891 |.else
2892 | jr ra
2893 |. movz CRET1, CARG2, TMP0
2894 |.endif
2895 |.endif
2896 |.endmacro
2897 |
|// min keeps CARG1 when CARG1 < CARG2; max when CARG1 > CARG2.
2898 | sfmin_max min, vm_sfcmpolt
2899 | sfmin_max max, vm_sfcmpogt
2900 |
2901 |//-----------------------------------------------------------------------
2902 |//-- Miscellaneous functions --------------------------------------------
2903 |//-----------------------------------------------------------------------
2904 |
2905 |//-----------------------------------------------------------------------
2906 |//-- FFI helper functions -----------------------------------------------
2907 |//-----------------------------------------------------------------------
2908 |
2909 |// Handler for callback functions. Callback slot number in r1, g in r2.
2910 |->vm_ffi_callback:
2911 |.if FFI
2912 |.type CTSTATE, CTState, PC
2913 | saveregs
2914 | ld CTSTATE, GL:r2->ctype_state
2915 | daddiu DISPATCH, r2, GG_G2DISP
2916 | load_got lj_ccallback_enter
|// Spill all eight argument slots into the callback state, both the
|// integer and (on hard-float builds) the FP registers: the callback's
|// C signature is not known here, so lj_ccallback_enter/the interpreter
|// pick the right representation later.
2917 | sw r1, CTSTATE->cb.slot
2918 | sd CARG1, CTSTATE->cb.gpr[0]
2919 | .FPU sdc1 FARG1, CTSTATE->cb.fpr[0]
2920 | sd CARG2, CTSTATE->cb.gpr[1]
2921 | .FPU sdc1 FARG2, CTSTATE->cb.fpr[1]
2922 | sd CARG3, CTSTATE->cb.gpr[2]
2923 | .FPU sdc1 FARG3, CTSTATE->cb.fpr[2]
2924 | sd CARG4, CTSTATE->cb.gpr[3]
2925 | .FPU sdc1 FARG4, CTSTATE->cb.fpr[3]
2926 | sd CARG5, CTSTATE->cb.gpr[4]
2927 | .FPU sdc1 FARG5, CTSTATE->cb.fpr[4]
2928 | sd CARG6, CTSTATE->cb.gpr[5]
2929 | .FPU sdc1 FARG6, CTSTATE->cb.fpr[5]
2930 | sd CARG7, CTSTATE->cb.gpr[6]
2931 | .FPU sdc1 FARG7, CTSTATE->cb.fpr[6]
2932 | sd CARG8, CTSTATE->cb.gpr[7]
2933 | .FPU sdc1 FARG8, CTSTATE->cb.fpr[7]
2934 | daddiu TMP0, sp, CFRAME_SPACE
2935 | sd TMP0, CTSTATE->cb.stack
2936 | sd r0, SAVE_PC // Any value outside of bytecode is ok.
2937 | move CARG2, sp
2938 | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
2939 |. move CARG1, CTSTATE
2940 | // Returns lua_State *.
|// Re-establish the full interpreter register context (BASE, L, the
|// type/constant registers, vmstate) before dispatching into the
|// callback's Lua function via ins_callt.
2941 | ld BASE, L:CRET1->base
2942 | ld RC, L:CRET1->top
2943 | move L, CRET1
2944 | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2945 | ld LFUNC:RB, FRAME_FUNC(BASE)
2946 | .FPU mtc1 TMP3, TOBIT
2947 | li TISNIL, LJ_TNIL
2948 | li TISNUM, LJ_TISNUM
2949 | li_vmstate INTERP
2950 | subu RC, RC, BASE
2951 | cleartp LFUNC:RB
2952 | st_vmstate
2953 | .FPU cvt.d.s TOBIT, TOBIT
2954 | ins_callt
2955 |.endif
2956 |
2957 |->cont_ffi_callback: // Return from FFI callback.
2958 |.if FFI
|// Hand the Lua return value (at RA) to lj_ccallback_leave, which
|// converts it into the C return representation in cb.gpr/cb.fpr;
|// then reload those slots and unwind back to the C caller.
2959 | load_got lj_ccallback_leave
2960 | ld CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
2961 | sd BASE, L->base
2962 | sd RB, L->top
2963 | sd L, CTSTATE->L
2964 | move CARG2, RA
2965 | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
2966 |. move CARG1, CTSTATE
|// Load both integer and FP return registers; the callee-side caller
|// reads whichever matches the callback's declared return type.
2967 | .FPU ldc1 FRET1, CTSTATE->cb.fpr[0]
2968 | ld CRET1, CTSTATE->cb.gpr[0]
2969 | .FPU ldc1 FRET2, CTSTATE->cb.fpr[1]
2970 | b ->vm_leave_unw
2971 |. ld CRET2, CTSTATE->cb.gpr[1]
2972 |.endif
2973 |
2974 |->vm_ffi_call: // Call C function via FFI.
2975 | // Caveat: needs special frame unwinding, see below.
2976 |.if FFI
2977 | .type CCSTATE, CCallState, CARG1
|// Frame setup: grow the stack by the precomputed spadj, and stash
|// ra, r16 (callee-saved, reused as frame pointer) and CCSTATE just
|// below the old sp so they survive the C call.
2978 | lw TMP1, CCSTATE->spadj
2979 | lbu CARG2, CCSTATE->nsp
2980 | move TMP2, sp
2981 | dsubu sp, sp, TMP1
2982 | sd ra, -8(TMP2)
2983 | sll CARG2, CARG2, 3
2984 | sd r16, -16(TMP2)
2985 | sd CCSTATE, -24(TMP2)
2986 | move r16, TMP2
|// Copy nsp 8-byte stack-argument slots from CCSTATE->stack to the
|// new stack top (skipped entirely when there are none).
2987 | daddiu TMP1, CCSTATE, offsetof(CCallState, stack)
2988 | move TMP2, sp
2989 | beqz CARG2, >2
2990 |. daddu TMP3, TMP1, CARG2
2991 |1:
2992 | ld TMP0, 0(TMP1)
2993 | daddiu TMP1, TMP1, 8
2994 | sltu AT, TMP1, TMP3
2995 | sd TMP0, 0(TMP2)
2996 | bnez AT, <1
2997 |. daddiu TMP2, TMP2, 8
2998 |2:
|// Each argument slot i is loaded into BOTH CARG(i+1) and FARG(i+1)
|// from gpr[i]: the MIPS n64 ABI passes argument i in either the GPR
|// or the FPR of that slot, and filling the unused one is harmless.
|// NOTE(review): this assumes lj_ccall.c stores FP arguments into the
|// unified gpr[] array on this target — confirm against lj_ccall.h.
2999 | ld CFUNCADDR, CCSTATE->func
3000 | .FPU ldc1 FARG1, CCSTATE->gpr[0]
3001 | ld CARG2, CCSTATE->gpr[1]
3002 | .FPU ldc1 FARG2, CCSTATE->gpr[1]
3003 | ld CARG3, CCSTATE->gpr[2]
3004 | .FPU ldc1 FARG3, CCSTATE->gpr[2]
3005 | ld CARG4, CCSTATE->gpr[3]
3006 | .FPU ldc1 FARG4, CCSTATE->gpr[3]
3007 | ld CARG5, CCSTATE->gpr[4]
3008 | .FPU ldc1 FARG5, CCSTATE->gpr[4]
3009 | ld CARG6, CCSTATE->gpr[5]
3010 | .FPU ldc1 FARG6, CCSTATE->gpr[5]
3011 | ld CARG7, CCSTATE->gpr[6]
3012 | .FPU ldc1 FARG7, CCSTATE->gpr[6]
3013 | ld CARG8, CCSTATE->gpr[7]
3014 | .FPU ldc1 FARG8, CCSTATE->gpr[7]
3015 | jalr CFUNCADDR
3016 |. ld CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
|// Recover the saved frame via r16 (CARG1/CCSTATE was clobbered by
|// the call), store both integer and FP return values, then restore
|// sp, ra and r16 and return.
3017 | ld CCSTATE:TMP1, -24(r16)
3018 | ld TMP2, -16(r16)
3019 | ld ra, -8(r16)
3020 | sd CRET1, CCSTATE:TMP1->gpr[0]
3021 | sd CRET2, CCSTATE:TMP1->gpr[1]
3022 |.if FPU
3023 | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
3024 | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
3025 |.else
3026 | sd CARG1, CCSTATE:TMP1->gpr[2] // 2nd FP struct field for soft-float.
3027 |.endif
3028 | move sp, r16
3029 | jr ra
3030 |. move r16, TMP2
3031 |.endif
3032 |// Note: vm_ffi_call must be the last function in this object file!
3033 |
3034 |//-----------------------------------------------------------------------
3035}
3036
3037/* Generate the code for a single instruction. */
3038static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3039{
3040 int vk = 0;
3041 |=>defop:
3042
3043 switch (op) {
3044
3045 /* -- Comparison ops ---------------------------------------------------- */
3046
3047 /* Remember: all ops branch for a true comparison, fall through otherwise. */
3048
3049 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
3050 | // RA = src1*8, RD = src2*8, JMP with RD = target
3051 |.macro bc_comp, FRA, FRD, ARGRA, ARGRD, movop, fmovop, fcomp, sfcomp
3052 | daddu RA, BASE, RA
3053 | daddu RD, BASE, RD
3054 | ld ARGRA, 0(RA)
3055 | ld ARGRD, 0(RD)
3056 | lhu TMP2, OFS_RD(PC)
3057 | gettp CARG3, ARGRA
3058 | gettp CARG4, ARGRD
3059 | bne CARG3, TISNUM, >2
3060 |. daddiu PC, PC, 4
3061 | bne CARG4, TISNUM, >5
3062 |. decode_RD4b TMP2
3063 | sextw ARGRA, ARGRA
3064 | sextw ARGRD, ARGRD
3065 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3066 | slt AT, CARG1, CARG2
3067 | addu TMP2, TMP2, TMP3
3068 |.if MIPSR6
3069 | movop TMP2, TMP2, AT
3070 |.else
3071 | movop TMP2, r0, AT
3072 |.endif
3073 |1:
3074 | daddu PC, PC, TMP2
3075 | ins_next
3076 |
3077 |2: // RA is not an integer.
3078 | sltiu AT, CARG3, LJ_TISNUM
3079 | beqz AT, ->vmeta_comp
3080 |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3081 | sltiu AT, CARG4, LJ_TISNUM
3082 | beqz AT, >4
3083 |. decode_RD4b TMP2
3084 |.if FPU
3085 | ldc1 FRA, 0(RA)
3086 | ldc1 FRD, 0(RD)
3087 |.endif
3088 |3: // RA and RD are both numbers.
3089 |.if FPU
3090 |.if MIPSR6
3091 | fcomp FTMP0, FTMP0, FTMP2
3092 | addu TMP2, TMP2, TMP3
3093 | mfc1 TMP3, FTMP0
3094 | b <1
3095 |. fmovop TMP2, TMP2, TMP3
3096 |.else
3097 | fcomp FTMP0, FTMP2
3098 | addu TMP2, TMP2, TMP3
3099 | b <1
3100 |. fmovop TMP2, r0
3101 |.endif
3102 |.else
3103 | bal sfcomp
3104 |. addu TMP2, TMP2, TMP3
3105 | b <1
3106 |.if MIPSR6
3107 |. movop TMP2, TMP2, CRET1
3108 |.else
3109 |. movop TMP2, r0, CRET1
3110 |.endif
3111 |.endif
3112 |
3113 |4: // RA is a number, RD is not a number.
3114 | bne CARG4, TISNUM, ->vmeta_comp
3115 | // RA is a number, RD is an integer. Convert RD to a number.
3116 |.if FPU
3117 |. lwc1 FRD, LO(RD)
3118 | ldc1 FRA, 0(RA)
3119 | b <3
3120 |. cvt.d.w FRD, FRD
3121 |.else
3122 |.if "ARGRD" == "CARG1"
3123 |. sextw CARG1, CARG1
3124 | bal ->vm_sfi2d_1
3125 |. nop
3126 |.else
3127 |. sextw CARG2, CARG2
3128 | bal ->vm_sfi2d_2
3129 |. nop
3130 |.endif
3131 | b <3
3132 |. nop
3133 |.endif
3134 |
3135 |5: // RA is an integer, RD is not an integer
3136 | sltiu AT, CARG4, LJ_TISNUM
3137 | beqz AT, ->vmeta_comp
3138 |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3139 | // RA is an integer, RD is a number. Convert RA to a number.
3140 |.if FPU
3141 | lwc1 FRA, LO(RA)
3142 | ldc1 FRD, 0(RD)
3143 | b <3
3144 | cvt.d.w FRA, FRA
3145 |.else
3146 |.if "ARGRA" == "CARG1"
3147 | bal ->vm_sfi2d_1
3148 |. sextw CARG1, CARG1
3149 |.else
3150 | bal ->vm_sfi2d_2
3151 |. sextw CARG2, CARG2
3152 |.endif
3153 | b <3
3154 |. nop
3155 |.endif
3156 |.endmacro
3157 |
3158 |.if MIPSR6
3159 if (op == BC_ISLT) {
3160 | bc_comp FTMP0, FTMP2, CARG1, CARG2, selnez, selnez, cmp.lt.d, ->vm_sfcmpolt
3161 } else if (op == BC_ISGE) {
3162 | bc_comp FTMP0, FTMP2, CARG1, CARG2, seleqz, seleqz, cmp.lt.d, ->vm_sfcmpolt
3163 } else if (op == BC_ISLE) {
3164 | bc_comp FTMP2, FTMP0, CARG2, CARG1, seleqz, seleqz, cmp.ult.d, ->vm_sfcmpult
3165 } else {
3166 | bc_comp FTMP2, FTMP0, CARG2, CARG1, selnez, selnez, cmp.ult.d, ->vm_sfcmpult
3167 }
3168 |.else
3169 if (op == BC_ISLT) {
3170 | bc_comp FTMP0, FTMP2, CARG1, CARG2, movz, movf, c.olt.d, ->vm_sfcmpolt
3171 } else if (op == BC_ISGE) {
3172 | bc_comp FTMP0, FTMP2, CARG1, CARG2, movn, movt, c.olt.d, ->vm_sfcmpolt
3173 } else if (op == BC_ISLE) {
3174 | bc_comp FTMP2, FTMP0, CARG2, CARG1, movn, movt, c.ult.d, ->vm_sfcmpult
3175 } else {
3176 | bc_comp FTMP2, FTMP0, CARG2, CARG1, movz, movf, c.ult.d, ->vm_sfcmpult
3177 }
3178 |.endif
3179 break;
3180
3181 case BC_ISEQV: case BC_ISNEV:
3182 vk = op == BC_ISEQV;
3183 | // RA = src1*8, RD = src2*8, JMP with RD = target
3184 | daddu RA, BASE, RA
3185 | daddiu PC, PC, 4
3186 | daddu RD, BASE, RD
3187 | ld CARG1, 0(RA)
3188 | lhu TMP2, -4+OFS_RD(PC)
3189 | ld CARG2, 0(RD)
3190 | gettp CARG3, CARG1
3191 | gettp CARG4, CARG2
3192 | sltu AT, TISNUM, CARG3
3193 | sltu TMP1, TISNUM, CARG4
3194 | or AT, AT, TMP1
3195 if (vk) {
3196 | beqz AT, ->BC_ISEQN_Z
3197 } else {
3198 | beqz AT, ->BC_ISNEN_Z
3199 }
3200 | // Either or both types are not numbers.
3201 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3202 |.if FFI
3203 |. li AT, LJ_TCDATA
3204 | beq CARG3, AT, ->vmeta_equal_cd
3205 |.endif
3206 | decode_RD4b TMP2
3207 |.if FFI
3208 | beq CARG4, AT, ->vmeta_equal_cd
3209 |. nop
3210 |.endif
3211 | bne CARG1, CARG2, >2
3212 |. addu TMP2, TMP2, TMP3
3213 | // Tag and value are equal.
3214 if (vk) {
3215 |->BC_ISEQV_Z:
3216 | daddu PC, PC, TMP2
3217 }
3218 |1:
3219 | ins_next
3220 |
3221 |2: // Check if the tags are the same and it's a table or userdata.
3222 | xor AT, CARG3, CARG4 // Same type?
3223 | sltiu TMP0, CARG3, LJ_TISTABUD+1 // Table or userdata?
3224 |.if MIPSR6
3225 | seleqz TMP0, TMP0, AT
3226 |.else
3227 | movn TMP0, r0, AT
3228 |.endif
3229 if (vk) {
3230 | beqz TMP0, <1
3231 } else {
3232 | beqz TMP0, ->BC_ISEQV_Z // Reuse code from opposite instruction.
3233 }
3234 | // Different tables or userdatas. Need to check __eq metamethod.
3235 | // Field metatable must be at same offset for GCtab and GCudata!
3236 |. cleartp TAB:TMP1, CARG1
3237 | ld TAB:TMP3, TAB:TMP1->metatable
3238 if (vk) {
3239 | beqz TAB:TMP3, <1 // No metatable?
3240 |. nop
3241 | lbu TMP3, TAB:TMP3->nomm
3242 | andi TMP3, TMP3, 1<<MM_eq
3243 | bnez TMP3, >1 // Or 'no __eq' flag set?
3244 } else {
3245 | beqz TAB:TMP3,->BC_ISEQV_Z // No metatable?
3246 |. nop
3247 | lbu TMP3, TAB:TMP3->nomm
3248 | andi TMP3, TMP3, 1<<MM_eq
3249 | bnez TMP3, ->BC_ISEQV_Z // Or 'no __eq' flag set?
3250 }
3251 |. nop
3252 | b ->vmeta_equal // Handle __eq metamethod.
3253 |. li TMP0, 1-vk // ne = 0 or 1.
3254 break;
3255
3256 case BC_ISEQS: case BC_ISNES:
3257 vk = op == BC_ISEQS;
3258 | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
3259 | daddu RA, BASE, RA
3260 | daddiu PC, PC, 4
3261 | ld CARG1, 0(RA)
3262 | dsubu RD, KBASE, RD
3263 | lhu TMP2, -4+OFS_RD(PC)
3264 | ld CARG2, -8(RD) // KBASE-8-str_const*8
3265 |.if FFI
3266 | gettp TMP0, CARG1
3267 | li AT, LJ_TCDATA
3268 |.endif
3269 | li TMP1, LJ_TSTR
3270 | decode_RD4b TMP2
3271 |.if FFI
3272 | beq TMP0, AT, ->vmeta_equal_cd
3273 |.endif
3274 |. settp CARG2, TMP1
3275 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3276 | xor TMP1, CARG1, CARG2
3277 | addu TMP2, TMP2, TMP3
3278 |.if MIPSR6
3279 if (vk) {
3280 | seleqz TMP2, TMP2, TMP1
3281 } else {
3282 | selnez TMP2, TMP2, TMP1
3283 }
3284 |.else
3285 if (vk) {
3286 | movn TMP2, r0, TMP1
3287 } else {
3288 | movz TMP2, r0, TMP1
3289 }
3290 |.endif
3291 | daddu PC, PC, TMP2
3292 | ins_next
3293 break;
3294
3295 case BC_ISEQN: case BC_ISNEN:
3296 vk = op == BC_ISEQN;
3297 | // RA = src*8, RD = num_const*8, JMP with RD = target
3298 | daddu RA, BASE, RA
3299 | daddu RD, KBASE, RD
3300 | ld CARG1, 0(RA)
3301 | ld CARG2, 0(RD)
3302 | lhu TMP2, OFS_RD(PC)
3303 | gettp CARG3, CARG1
3304 | gettp CARG4, CARG2
3305 | daddiu PC, PC, 4
3306 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3307 if (vk) {
3308 |->BC_ISEQN_Z:
3309 } else {
3310 |->BC_ISNEN_Z:
3311 }
3312 | bne CARG3, TISNUM, >3
3313 |. decode_RD4b TMP2
3314 | bne CARG4, TISNUM, >6
3315 |. addu TMP2, TMP2, TMP3
3316 | xor AT, CARG1, CARG2
3317 |.if MIPSR6
3318 if (vk) {
3319 | seleqz TMP2, TMP2, AT
3320 |1:
3321 | daddu PC, PC, TMP2
3322 |2:
3323 } else {
3324 | selnez TMP2, TMP2, AT
3325 |1:
3326 |2:
3327 | daddu PC, PC, TMP2
3328 }
3329 |.else
3330 if (vk) {
3331 | movn TMP2, r0, AT
3332 |1:
3333 | daddu PC, PC, TMP2
3334 |2:
3335 } else {
3336 | movz TMP2, r0, AT
3337 |1:
3338 |2:
3339 | daddu PC, PC, TMP2
3340 }
3341 |.endif
3342 | ins_next
3343 |
3344 |3: // RA is not an integer.
3345 | sltu AT, CARG3, TISNUM
3346 |.if FFI
3347 | beqz AT, >8
3348 |.else
3349 | beqz AT, <2
3350 |.endif
3351 |. addu TMP2, TMP2, TMP3
3352 | sltu AT, CARG4, TISNUM
3353 |.if FPU
3354 | ldc1 FTMP0, 0(RA)
3355 | ldc1 FTMP2, 0(RD)
3356 |.endif
3357 | beqz AT, >5
3358 |. nop
3359 |4: // RA and RD are both numbers.
3360 |.if FPU
3361 |.if MIPSR6
3362 | cmp.eq.d FTMP0, FTMP0, FTMP2
3363 | dmfc1 TMP1, FTMP0
3364 | b <1
3365 if (vk) {
3366 |. selnez TMP2, TMP2, TMP1
3367 } else {
3368 |. seleqz TMP2, TMP2, TMP1
3369 }
3370 |.else
3371 | c.eq.d FTMP0, FTMP2
3372 | b <1
3373 if (vk) {
3374 |. movf TMP2, r0
3375 } else {
3376 |. movt TMP2, r0
3377 }
3378 |.endif
3379 |.else
3380 | bal ->vm_sfcmpeq
3381 |. nop
3382 | b <1
3383 |.if MIPSR6
3384 if (vk) {
3385 |. selnez TMP2, TMP2, CRET1
3386 } else {
3387 |. seleqz TMP2, TMP2, CRET1
3388 }
3389 |.else
3390 if (vk) {
3391 |. movz TMP2, r0, CRET1
3392 } else {
3393 |. movn TMP2, r0, CRET1
3394 }
3395 |.endif
3396 |.endif
3397 |
3398 |5: // RA is a number, RD is not a number.
3399 |.if FFI
3400 | bne CARG4, TISNUM, >9
3401 |.else
3402 | bne CARG4, TISNUM, <2
3403 |.endif
3404 | // RA is a number, RD is an integer. Convert RD to a number.
3405 |.if FPU
3406 |. lwc1 FTMP2, LO(RD)
3407 | b <4
3408 |. cvt.d.w FTMP2, FTMP2
3409 |.else
3410 |. sextw CARG2, CARG2
3411 | bal ->vm_sfi2d_2
3412 |. nop
3413 | b <4
3414 |. nop
3415 |.endif
3416 |
3417 |6: // RA is an integer, RD is not an integer
3418 | sltu AT, CARG4, TISNUM
3419 |.if FFI
3420 | beqz AT, >9
3421 |.else
3422 | beqz AT, <2
3423 |.endif
3424 | // RA is an integer, RD is a number. Convert RA to a number.
3425 |.if FPU
3426 |. lwc1 FTMP0, LO(RA)
3427 | ldc1 FTMP2, 0(RD)
3428 | b <4
3429 | cvt.d.w FTMP0, FTMP0
3430 |.else
3431 |. sextw CARG1, CARG1
3432 | bal ->vm_sfi2d_1
3433 |. nop
3434 | b <4
3435 |. nop
3436 |.endif
3437 |
3438 |.if FFI
3439 |8:
3440 | li AT, LJ_TCDATA
3441 | bne CARG3, AT, <2
3442 |. nop
3443 | b ->vmeta_equal_cd
3444 |. nop
3445 |9:
3446 | li AT, LJ_TCDATA
3447 | bne CARG4, AT, <2
3448 |. nop
3449 | b ->vmeta_equal_cd
3450 |. nop
3451 |.endif
3452 break;
3453
3454 case BC_ISEQP: case BC_ISNEP:
3455 vk = op == BC_ISEQP;
3456 | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
3457 | daddu RA, BASE, RA
3458 | srl TMP1, RD, 3
3459 | ld TMP0, 0(RA)
3460 | lhu TMP2, OFS_RD(PC)
3461 | not TMP1, TMP1
3462 | gettp TMP0, TMP0
3463 | daddiu PC, PC, 4
3464 |.if FFI
3465 | li AT, LJ_TCDATA
3466 | beq TMP0, AT, ->vmeta_equal_cd
3467 |.endif
3468 |. xor TMP0, TMP0, TMP1
3469 | decode_RD4b TMP2
3470 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3471 | addu TMP2, TMP2, TMP3
3472 |.if MIPSR6
3473 if (vk) {
3474 | seleqz TMP2, TMP2, TMP0
3475 } else {
3476 | selnez TMP2, TMP2, TMP0
3477 }
3478 |.else
3479 if (vk) {
3480 | movn TMP2, r0, TMP0
3481 } else {
3482 | movz TMP2, r0, TMP0
3483 }
3484 |.endif
3485 | daddu PC, PC, TMP2
3486 | ins_next
3487 break;
3488
3489 /* -- Unary test and copy ops ------------------------------------------- */
3490
3491 case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
3492 | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
3493 | daddu RD, BASE, RD
3494 | lhu TMP2, OFS_RD(PC)
3495 | ld TMP0, 0(RD)
3496 | daddiu PC, PC, 4
3497 | gettp TMP0, TMP0
3498 | sltiu TMP0, TMP0, LJ_TISTRUECOND
3499 if (op == BC_IST || op == BC_ISF) {
3500 | decode_RD4b TMP2
3501 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3502 | addu TMP2, TMP2, TMP3
3503 |.if MIPSR6
3504 if (op == BC_IST) {
3505 | selnez TMP2, TMP2, TMP0;
3506 } else {
3507 | seleqz TMP2, TMP2, TMP0;
3508 }
3509 |.else
3510 if (op == BC_IST) {
3511 | movz TMP2, r0, TMP0
3512 } else {
3513 | movn TMP2, r0, TMP0
3514 }
3515 |.endif
3516 | daddu PC, PC, TMP2
3517 } else {
3518 | ld CRET1, 0(RD)
3519 if (op == BC_ISTC) {
3520 | beqz TMP0, >1
3521 } else {
3522 | bnez TMP0, >1
3523 }
3524 |. daddu RA, BASE, RA
3525 | decode_RD4b TMP2
3526 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
3527 | addu TMP2, TMP2, TMP3
3528 | sd CRET1, 0(RA)
3529 | daddu PC, PC, TMP2
3530 |1:
3531 }
3532 | ins_next
3533 break;
3534
3535 case BC_ISTYPE:
3536 | // RA = src*8, RD = -type*8
3537 | daddu TMP2, BASE, RA
3538 | srl TMP1, RD, 3
3539 | ld TMP0, 0(TMP2)
3540 | ins_next1
3541 | gettp TMP0, TMP0
3542 | daddu AT, TMP0, TMP1
3543 | bnez AT, ->vmeta_istype
3544 |. ins_next2
3545 break;
3546 case BC_ISNUM:
3547 | // RA = src*8, RD = -(TISNUM-1)*8
3548 | daddu TMP2, BASE, RA
3549 | ld TMP0, 0(TMP2)
3550 | ins_next1
3551 | checknum TMP0, ->vmeta_istype
3552 |. ins_next2
3553 break;
3554
3555 /* -- Unary ops --------------------------------------------------------- */
3556
3557 case BC_MOV:
3558 | // RA = dst*8, RD = src*8
3559 | daddu RD, BASE, RD
3560 | daddu RA, BASE, RA
3561 | ld CRET1, 0(RD)
3562 | ins_next1
3563 | sd CRET1, 0(RA)
3564 | ins_next2
3565 break;
3566 case BC_NOT:
3567 | // RA = dst*8, RD = src*8
3568 | daddu RD, BASE, RD
3569 | daddu RA, BASE, RA
3570 | ld TMP0, 0(RD)
3571 | li AT, LJ_TTRUE
3572 | gettp TMP0, TMP0
3573 | sltu TMP0, AT, TMP0
3574 | addiu TMP0, TMP0, 1
3575 | dsll TMP0, TMP0, 47
3576 | not TMP0, TMP0
3577 | ins_next1
3578 | sd TMP0, 0(RA)
3579 | ins_next2
3580 break;
3581 case BC_UNM:
3582 | // RA = dst*8, RD = src*8
3583 | daddu RB, BASE, RD
3584 | ld CARG1, 0(RB)
3585 | daddu RA, BASE, RA
3586 | gettp CARG3, CARG1
3587 | bne CARG3, TISNUM, >2
3588 |. lui TMP1, 0x8000
3589 | sextw CARG1, CARG1
3590 | beq CARG1, TMP1, ->vmeta_unm // Meta handler deals with -2^31.
3591 |. negu CARG1, CARG1
3592 | zextw CARG1, CARG1
3593 | settp CARG1, TISNUM
3594 |1:
3595 | ins_next1
3596 | sd CARG1, 0(RA)
3597 | ins_next2
3598 |2:
3599 | sltiu AT, CARG3, LJ_TISNUM
3600 | beqz AT, ->vmeta_unm
3601 |. dsll TMP1, TMP1, 32
3602 | b <1
3603 |. xor CARG1, CARG1, TMP1
3604 break;
3605 case BC_LEN:
3606 | // RA = dst*8, RD = src*8
3607 | daddu CARG2, BASE, RD
3608 | daddu RA, BASE, RA
3609 | ld TMP0, 0(CARG2)
3610 | gettp TMP1, TMP0
3611 | daddiu AT, TMP1, -LJ_TSTR
3612 | bnez AT, >2
3613 |. cleartp STR:CARG1, TMP0
3614 | lw CRET1, STR:CARG1->len
3615 |1:
3616 | settp CRET1, TISNUM
3617 | ins_next1
3618 | sd CRET1, 0(RA)
3619 | ins_next2
3620 |2:
3621 | daddiu AT, TMP1, -LJ_TTAB
3622 | bnez AT, ->vmeta_len
3623 |. nop
3624#if LJ_52
3625 | ld TAB:TMP2, TAB:CARG1->metatable
3626 | bnez TAB:TMP2, >9
3627 |. nop
3628 |3:
3629#endif
3630 |->BC_LEN_Z:
3631 | load_got lj_tab_len
3632 | call_intern lj_tab_len // (GCtab *t)
3633 |. nop
3634 | // Returns uint32_t (but less than 2^31).
3635 | b <1
3636 |. nop
3637#if LJ_52
3638 |9:
3639 | lbu TMP0, TAB:TMP2->nomm
3640 | andi TMP0, TMP0, 1<<MM_len
3641 | bnez TMP0, <3 // 'no __len' flag set: done.
3642 |. nop
3643 | b ->vmeta_len
3644 |. nop
3645#endif
3646 break;
3647
3648 /* -- Binary ops -------------------------------------------------------- */
3649
 |.macro fpmod, a, b, c
|// Hard-float modulo: a = b - floor(b/c)*c. The bal to vm_floor puts
|// the div.d in its delay slot, so FARG1 = b/c is ready on entry there.
 | bal ->vm_floor // floor(b/c)
 |. div.d FARG1, b, c
 | mul.d a, FRET1, c
 | sub.d a, b, a // b - floor(b/c)*c
 |.endmacro
3656
 |.macro sfpmod
|// Soft-float modulo via libgcc/libm helpers: CRET1 = CARG1 - floor(
|// CARG1/CARG2)*CARG2. The original operands are kept in a 16-byte
|// stack scratch area across the four external calls.
 | daddiu sp, sp, -16
 |
 | load_got __divdf3
 | sd CARG1, 0(sp)
 | call_extern
 |. sd CARG2, 8(sp)
 |
 | load_got floor
 | call_extern
 |. move CARG1, CRET1
 |
 | load_got __muldf3
 | move CARG1, CRET1
 | call_extern
 |. ld CARG2, 8(sp)
 |
 | load_got __subdf3
 | ld CARG1, 0(sp)
 | call_extern
 |. move CARG2, CRET1
 |
 | daddiu sp, sp, 16
 |.endmacro
3681
3682 |.macro ins_arithpre, label
3683 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
3684 | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
3685 ||switch (vk) {
3686 ||case 0:
3687 | decode_RB8a RB, INS
3688 | decode_RB8b RB
3689 | decode_RDtoRC8 RC, RD
3690 | // RA = dst*8, RB = src1*8, RC = num_const*8
3691 | daddu RB, BASE, RB
3692 |.if "label" ~= "none"
3693 | b label
3694 |.endif
3695 |. daddu RC, KBASE, RC
3696 || break;
3697 ||case 1:
3698 | decode_RB8a RC, INS
3699 | decode_RB8b RC
3700 | decode_RDtoRC8 RB, RD
3701 | // RA = dst*8, RB = num_const*8, RC = src1*8
3702 | daddu RC, BASE, RC
3703 |.if "label" ~= "none"
3704 | b label
3705 |.endif
3706 |. daddu RB, KBASE, RB
3707 || break;
3708 ||default:
3709 | decode_RB8a RB, INS
3710 | decode_RB8b RB
3711 | decode_RDtoRC8 RC, RD
3712 | // RA = dst*8, RB = src1*8, RC = src2*8
3713 | daddu RB, BASE, RB
3714 |.if "label" ~= "none"
3715 | b label
3716 |.endif
3717 |. daddu RC, BASE, RC
3718 || break;
3719 ||}
3720 |.endmacro
3721 |
3722 |.macro ins_arith, intins, fpins, fpcall, label
3723 | ins_arithpre none
3724 |
3725 |.if "label" ~= "none"
3726 |label:
3727 |.endif
3728 |
3729 |// Used in 5.
3730 | ld CARG1, 0(RB)
3731 | ld CARG2, 0(RC)
3732 | gettp TMP0, CARG1
3733 | gettp TMP1, CARG2
3734 |
3735 |.if "intins" ~= "div"
3736 |
3737 | // Check for two integers.
3738 | sextw CARG3, CARG1
3739 | bne TMP0, TISNUM, >5
3740 |. sextw CARG4, CARG2
3741 | bne TMP1, TISNUM, >5
3742 |
3743 |.if "intins" == "addu"
3744 |. intins CRET1, CARG3, CARG4
3745 | xor TMP1, CRET1, CARG3 // ((y^a) & (y^b)) < 0: overflow.
3746 | xor TMP2, CRET1, CARG4
3747 | and TMP1, TMP1, TMP2
3748 | bltz TMP1, ->vmeta_arith
3749 |. daddu RA, BASE, RA
3750 |.elif "intins" == "subu"
3751 |. intins CRET1, CARG3, CARG4
3752 | xor TMP1, CRET1, CARG3 // ((y^a) & (a^b)) < 0: overflow.
3753 | xor TMP2, CARG3, CARG4
3754 | and TMP1, TMP1, TMP2
3755 | bltz TMP1, ->vmeta_arith
3756 |. daddu RA, BASE, RA
3757 |.elif "intins" == "mult"
3758 |.if MIPSR6
3759 |. nop
3760 | mul CRET1, CARG3, CARG4
3761 | muh TMP2, CARG3, CARG4
3762 |.else
3763 |. intins CARG3, CARG4
3764 | mflo CRET1
3765 | mfhi TMP2
3766 |.endif
3767 | sra TMP1, CRET1, 31
3768 | bne TMP1, TMP2, ->vmeta_arith
3769 |. daddu RA, BASE, RA
3770 |.else
3771 |. load_got lj_vm_modi
3772 | beqz CARG4, ->vmeta_arith
3773 |. daddu RA, BASE, RA
3774 | move CARG1, CARG3
3775 | call_extern
3776 |. move CARG2, CARG4
3777 |.endif
3778 |
3779 | zextw CRET1, CRET1
3780 | settp CRET1, TISNUM
3781 | ins_next1
3782 | sd CRET1, 0(RA)
3783 |3:
3784 | ins_next2
3785 |
3786 |.endif
3787 |
3788 |5: // Check for two numbers.
3789 | .FPU ldc1 FTMP0, 0(RB)
3790 | sltu AT, TMP0, TISNUM
3791 | sltu TMP0, TMP1, TISNUM
3792 | .FPU ldc1 FTMP2, 0(RC)
3793 | and AT, AT, TMP0
3794 | beqz AT, ->vmeta_arith
3795 |. daddu RA, BASE, RA
3796 |
3797 |.if FPU
3798 | fpins FRET1, FTMP0, FTMP2
3799 |.elif "fpcall" == "sfpmod"
3800 | sfpmod
3801 |.else
3802 | load_got fpcall
3803 | call_extern
3804 |. nop
3805 |.endif
3806 |
3807 | ins_next1
3808 |.if "intins" ~= "div"
3809 | b <3
3810 |.endif
3811 |.if FPU
3812 |. sdc1 FRET1, 0(RA)
3813 |.else
3814 |. sd CRET1, 0(RA)
3815 |.endif
3816 |.if "intins" == "div"
3817 | ins_next2
3818 |.endif
3819 |
3820 |.endmacro
3821
3822 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
3823 | ins_arith addu, add.d, __adddf3, none
3824 break;
3825 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
3826 | ins_arith subu, sub.d, __subdf3, none
3827 break;
3828 case BC_MULVN: case BC_MULNV: case BC_MULVV:
3829 | ins_arith mult, mul.d, __muldf3, none
3830 break;
3831 case BC_DIVVN:
3832 | ins_arith div, div.d, __divdf3, ->BC_DIVVN_Z
3833 break;
3834 case BC_DIVNV: case BC_DIVVV:
3835 | ins_arithpre ->BC_DIVVN_Z
3836 break;
3837 case BC_MODVN:
3838 | ins_arith modi, fpmod, sfpmod, ->BC_MODVN_Z
3839 break;
3840 case BC_MODNV: case BC_MODVV:
3841 | ins_arithpre ->BC_MODVN_Z
3842 break;
3843 case BC_POW:
3844 | ins_arithpre none
3845 | ld CARG1, 0(RB)
3846 | ld CARG2, 0(RC)
3847 | gettp TMP0, CARG1
3848 | gettp TMP1, CARG2
3849 | sltiu TMP0, TMP0, LJ_TISNUM
3850 | sltiu TMP1, TMP1, LJ_TISNUM
3851 | and AT, TMP0, TMP1
3852 | load_got pow
3853 | beqz AT, ->vmeta_arith
3854 |. daddu RA, BASE, RA
3855 |.if FPU
3856 | ldc1 FARG1, 0(RB)
3857 | ldc1 FARG2, 0(RC)
3858 |.endif
3859 | call_extern
3860 |. nop
3861 | ins_next1
3862 |.if FPU
3863 | sdc1 FRET1, 0(RA)
3864 |.else
3865 | sd CRET1, 0(RA)
3866 |.endif
3867 | ins_next2
3868 break;
3869
3870 case BC_CAT:
3871 | // RA = dst*8, RB = src_start*8, RC = src_end*8
3872 | decode_RB8a RB, INS
3873 | decode_RB8b RB
3874 | decode_RDtoRC8 RC, RD
3875 | dsubu CARG3, RC, RB
3876 | sd BASE, L->base
3877 | daddu CARG2, BASE, RC
3878 | move MULTRES, RB
3879 |->BC_CAT_Z:
3880 | load_got lj_meta_cat
3881 | srl CARG3, CARG3, 3
3882 | sd PC, SAVE_PC
3883 | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
3884 |. move CARG1, L
3885 | // Returns NULL (finished) or TValue * (metamethod).
3886 | bnez CRET1, ->vmeta_binop
3887 |. ld BASE, L->base
3888 | daddu RB, BASE, MULTRES
3889 | ld CRET1, 0(RB)
3890 | daddu RA, BASE, RA
3891 | ins_next1
3892 | sd CRET1, 0(RA)
3893 | ins_next2
3894 break;
3895
3896 /* -- Constant ops ------------------------------------------------------ */
3897
3898 case BC_KSTR:
3899 | // RA = dst*8, RD = str_const*8 (~)
3900 | dsubu TMP1, KBASE, RD
3901 | ins_next1
3902 | li TMP2, LJ_TSTR
3903 | ld TMP0, -8(TMP1) // KBASE-8-str_const*8
3904 | daddu RA, BASE, RA
3905 | settp TMP0, TMP2
3906 | sd TMP0, 0(RA)
3907 | ins_next2
3908 break;
3909 case BC_KCDATA:
3910 |.if FFI
3911 | // RA = dst*8, RD = cdata_const*8 (~)
3912 | dsubu TMP1, KBASE, RD
3913 | ins_next1
3914 | ld TMP0, -8(TMP1) // KBASE-8-cdata_const*8
3915 | li TMP2, LJ_TCDATA
3916 | daddu RA, BASE, RA
3917 | settp TMP0, TMP2
3918 | sd TMP0, 0(RA)
3919 | ins_next2
3920 |.endif
3921 break;
3922 case BC_KSHORT:
3923 | // RA = dst*8, RD = int16_literal*8
3924 | sra RD, INS, 16
3925 | daddu RA, BASE, RA
3926 | zextw RD, RD
3927 | ins_next1
3928 | settp RD, TISNUM
3929 | sd RD, 0(RA)
3930 | ins_next2
3931 break;
3932 case BC_KNUM:
3933 | // RA = dst*8, RD = num_const*8
3934 | daddu RD, KBASE, RD
3935 | daddu RA, BASE, RA
3936 | ld CRET1, 0(RD)
3937 | ins_next1
3938 | sd CRET1, 0(RA)
3939 | ins_next2
3940 break;
3941 case BC_KPRI:
3942 | // RA = dst*8, RD = primitive_type*8 (~)
3943 | daddu RA, BASE, RA
3944 | dsll TMP0, RD, 44 // (type*8) << 44 == type << 47.
3945 | not TMP0, TMP0 // ~(type << 47) is the tagged primitive TValue.
3946 | ins_next1
3947 | sd TMP0, 0(RA)
3948 | ins_next2
3949 break;
3950 case BC_KNIL:
3951 | // RA = base*8, RD = end*8
3952 | daddu RA, BASE, RA
3953 | sd TISNIL, 0(RA) // First slot; the loop below writes the rest.
3954 | daddiu RA, RA, 8
3955 | daddu RD, BASE, RD
3956 |1: // Fill slots [base .. end] (inclusive) with nil.
3957 | sd TISNIL, 0(RA)
3958 | slt AT, RA, RD
3959 | bnez AT, <1
3960 |. daddiu RA, RA, 8 // Delay slot: advance to the next slot.
3961 | ins_next_
3962 break;
3963
3964 /* -- Upvalue and function ops ------------------------------------------ */
3965
3966 case BC_UGET:
3967 | // RA = dst*8, RD = uvnum*8
3968 | ld LFUNC:RB, FRAME_FUNC(BASE)
3969 | daddu RA, BASE, RA
3970 | cleartp LFUNC:RB // Strip the type tag from the function pointer.
3971 | daddu RD, RD, LFUNC:RB
3972 | ld UPVAL:RB, LFUNC:RD->uvptr // Upvalue object of this closure.
3973 | ins_next1
3974 | ld TMP1, UPVAL:RB->v // v points at the current value (open or closed).
3975 | ld CRET1, 0(TMP1)
3976 | sd CRET1, 0(RA)
3977 | ins_next2
3978 break;
3979 case BC_USETV:
3980 | // RA = uvnum*8, RD = src*8
3981 | ld LFUNC:RB, FRAME_FUNC(BASE)
3982 | daddu RD, BASE, RD
3983 | cleartp LFUNC:RB
3984 | daddu RA, RA, LFUNC:RB
3985 | ld UPVAL:RB, LFUNC:RA->uvptr
3986 | ld CRET1, 0(RD) // Value to store.
3987 | lbu TMP3, UPVAL:RB->marked
3988 | ld CARG2, UPVAL:RB->v
3989 | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
3990 | lbu TMP0, UPVAL:RB->closed
3991 | gettp TMP2, CRET1
3992 | sd CRET1, 0(CARG2) // Store value first; barrier check follows.
3993 | li AT, LJ_GC_BLACK|1 // Black bit plus closed flag (closed == 1).
3994 | or TMP3, TMP3, TMP0
3995 | beq TMP3, AT, >2 // Upvalue is closed and black?
3996 |. daddiu TMP2, TMP2, -(LJ_TNUMX+1) // Delay slot: bias type for GCV range check.
3997 |1:
3998 | ins_next
3999 |
4000 |2: // Check if new value is collectable.
4001 | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
4002 | beqz AT, <1 // tvisgcv(v)
4003 |. cleartp GCOBJ:CRET1, CRET1 // Delay slot: strip tag to get the GCobj.
4004 | lbu TMP3, GCOBJ:CRET1->gch.marked
4005 | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
4006 | beqz TMP3, <1
4007 |. load_got lj_gc_barrieruv
4008 | // Crossed a write barrier. Move the barrier forward.
4009 | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
4010 |. daddiu CARG1, DISPATCH, GG_DISP2G // Delay slot: CARG1 = g.
4011 | b <1
4012 |. nop
4013 break;
4014 case BC_USETS:
4015 | // RA = uvnum*8, RD = str_const*8 (~)
4016 | ld LFUNC:RB, FRAME_FUNC(BASE)
4017 | dsubu TMP1, KBASE, RD
4018 | cleartp LFUNC:RB
4019 | daddu RA, RA, LFUNC:RB
4020 | ld UPVAL:RB, LFUNC:RA->uvptr
4021 | ld STR:TMP1, -8(TMP1) // KBASE-8-str_const*8
4022 | lbu TMP2, UPVAL:RB->marked
4023 | ld CARG2, UPVAL:RB->v
4024 | lbu TMP3, STR:TMP1->marked
4025 | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
4026 | lbu TMP2, UPVAL:RB->closed
4027 | li TMP0, LJ_TSTR
4028 | settp TMP1, TMP0 // Tag the string before storing it.
4029 | bnez AT, >2
4030 |. sd TMP1, 0(CARG2) // Delay slot: store happens on both paths.
4031 |1:
4032 | ins_next
4033 |
4034 |2: // Check if string is white and ensure upvalue is closed.
4035 | beqz TMP2, <1 // Upvalue not closed: no barrier needed.
4036 |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
4037 | beqz AT, <1
4038 |. load_got lj_gc_barrieruv
4039 | // Crossed a write barrier. Move the barrier forward.
4040 | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
4041 |. daddiu CARG1, DISPATCH, GG_DISP2G // Delay slot: CARG1 = g.
4042 | b <1
4043 |. nop
4044 break;
4045 case BC_USETN:
4046 | // RA = uvnum*8, RD = num_const*8
4047 | ld LFUNC:RB, FRAME_FUNC(BASE)
4048 | daddu RD, KBASE, RD
4049 | cleartp LFUNC:RB
4050 | daddu RA, RA, LFUNC:RB
4051 | ld UPVAL:RB, LFUNC:RA->uvptr
4052 | ld CRET1, 0(RD) // Number constant (no barrier needed for numbers).
4053 | ld TMP1, UPVAL:RB->v
4054 | ins_next1
4055 | sd CRET1, 0(TMP1) // *uv->v = constant.
4056 | ins_next2
4057 break;
4058 case BC_USETP:
4059 | // RA = uvnum*8, RD = primitive_type*8 (~)
4060 | ld LFUNC:RB, FRAME_FUNC(BASE)
4061 | dsll TMP0, RD, 44 // (type*8) << 44 == type << 47.
4062 | cleartp LFUNC:RB
4063 | daddu RA, RA, LFUNC:RB
4064 | not TMP0, TMP0 // ~(type << 47) is the tagged primitive TValue.
4065 | ld UPVAL:RB, LFUNC:RA->uvptr
4066 | ins_next1
4067 | ld TMP1, UPVAL:RB->v
4068 | sd TMP0, 0(TMP1) // No barrier needed for primitives.
4069 | ins_next2
4070 break;
4071
4072 case BC_UCLO:
4073 | // RA = level*8, RD = target
4074 | ld TMP2, L->openupval
4075 | branch_RD // Do this first since RD is not saved.
4076 | load_got lj_func_closeuv
4077 | sd BASE, L->base
4078 | beqz TMP2, >1 // No open upvalues: skip the call.
4079 |. move CARG1, L // Delay slot: CARG1 = L.
4080 | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
4081 |. daddu CARG2, BASE, RA // Delay slot: level = BASE + level*8.
4082 | ld BASE, L->base // BASE may have changed across the call.
4083 |1:
4084 | ins_next
4085 break;
4086
4087 case BC_FNEW:
4088 | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
4089 | load_got lj_func_newL_gc
4090 | dsubu TMP1, KBASE, RD
4091 | ld CARG3, FRAME_FUNC(BASE)
4092 | ld CARG2, -8(TMP1) // KBASE-8-proto_const*8 (GCproto *pt).
4093 | sd BASE, L->base
4094 | sd PC, SAVE_PC
4095 | cleartp CARG3 // parent = untagged current function.
4096 | // (lua_State *L, GCproto *pt, GCfuncL *parent)
4097 | call_intern lj_func_newL_gc
4098 |. move CARG1, L // Delay slot: CARG1 = L.
4099 | // Returns GCfuncL *.
4100 | li TMP0, LJ_TFUNC
4101 | ld BASE, L->base // Reload BASE: the call may run the GC.
4102 | ins_next1
4103 | settp CRET1, TMP0 // Tag the new closure with LJ_TFUNC.
4104 | daddu RA, BASE, RA
4105 | sd CRET1, 0(RA)
4106 | ins_next2
4107 break;
4108
4109 /* -- Table ops --------------------------------------------------------- */
4110
4111 case BC_TNEW:
4112 case BC_TDUP:
4113 | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
4114 | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
4115 | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
4116 | sd BASE, L->base
4117 | sd PC, SAVE_PC
4118 | sltu AT, TMP0, TMP1
4119 | beqz AT, >5 // GC threshold reached: run a GC step first.
4120 |1:
4121 if (op == BC_TNEW) {
4122 | load_got lj_tab_new
4123 | srl CARG2, RD, 3
4124 | andi CARG2, CARG2, 0x7ff // asize = low 11 bits of the operand.
4125 | li TMP0, 0x801
4126 | addiu AT, CARG2, -0x7ff // AT == 0 iff asize is the 0x7ff escape value.
4127 | srl CARG3, RD, 14 // hbits = upper bits of the operand.
4128 |.if MIPSR6
4129 | seleqz TMP0, TMP0, AT
4130 | selnez CARG2, CARG2, AT
4131 | or CARG2, CARG2, TMP0 // asize 0x7ff -> 0x801 (R6 has no movz).
4132 |.else
4133 | movz CARG2, TMP0, AT // asize 0x7ff -> 0x801.
4134 |.endif
4135 | // (lua_State *L, int32_t asize, uint32_t hbits)
4136 | call_intern lj_tab_new
4137 |. move CARG1, L // Delay slot: CARG1 = L.
4138 | // Returns Table *.
4139 } else {
4140 | load_got lj_tab_dup
4141 | dsubu TMP1, KBASE, RD
4142 | move CARG1, L
4143 | call_intern lj_tab_dup // (lua_State *L, Table *kt)
4144 |. ld CARG2, -8(TMP1) // KBASE-8-tab_const*8 (template table).
4145 | // Returns Table *.
4146 }
4147 | li TMP0, LJ_TTAB
4148 | ld BASE, L->base
4149 | ins_next1
4150 | daddu RA, BASE, RA
4151 | settp CRET1, TMP0 // Tag the new table with LJ_TTAB.
4152 | sd CRET1, 0(RA)
4153 | ins_next2
4154 |5: // Run a GC step, then retry.
4155 | load_got lj_gc_step_fixtop
4156 | move MULTRES, RD // Save RD across the call.
4157 | call_intern lj_gc_step_fixtop // (lua_State *L)
4158 |. move CARG1, L // Delay slot: CARG1 = L.
4159 | b <1
4160 |. move RD, MULTRES // Delay slot: restore RD.
4161 break;
4162
4163 case BC_GGET:
4164 | // RA = dst*8, RD = str_const*8 (~)
4165 case BC_GSET:
4166 | // RA = src*8, RD = str_const*8 (~)
4167 | ld LFUNC:TMP2, FRAME_FUNC(BASE)
4168 | dsubu TMP1, KBASE, RD
4169 | ld STR:RC, -8(TMP1) // KBASE-8-str_const*8
4170 | cleartp LFUNC:TMP2
4171 | ld TAB:RB, LFUNC:TMP2->env // Globals table = function environment.
4172 if (op == BC_GGET) {
4173 | b ->BC_TGETS_Z // Tail into the common string-key table lookup.
4174 } else {
4175 | b ->BC_TSETS_Z // Tail into the common string-key table store.
4176 }
4177 |. daddu RA, BASE, RA // Delay slot shared by both branches.
4178 break;
4179
4180 case BC_TGETV:
4181 | // RA = dst*8, RB = table*8, RC = key*8
4182 | decode_RB8a RB, INS
4183 | decode_RB8b RB
4184 | decode_RDtoRC8 RC, RD
4185 | daddu CARG2, BASE, RB
4186 | daddu CARG3, BASE, RC
4187 | ld TAB:RB, 0(CARG2)
4188 | ld TMP2, 0(CARG3)
4189 | daddu RA, BASE, RA
4190 | checktab TAB:RB, ->vmeta_tgetv
4191 | gettp TMP3, TMP2
4192 | bne TMP3, TISNUM, >5 // Integer key?
4193 |. lw TMP0, TAB:RB->asize // Delay slot: array part size.
4194 | sextw TMP2, TMP2 // Sign-extend the 32 bit integer key.
4195 | ld TMP1, TAB:RB->array
4196 | sltu AT, TMP2, TMP0
4197 | sll TMP2, TMP2, 3
4198 | beqz AT, ->vmeta_tgetv // Integer key and in array part?
4199 |. daddu TMP2, TMP1, TMP2 // Delay slot: element address.
4200 | ld AT, 0(TMP2)
4201 | beq AT, TISNIL, >2
4202 |. ld CRET1, 0(TMP2) // Delay slot: load value (used on both paths).
4203 |1:
4204 | ins_next1
4205 | sd CRET1, 0(RA)
4206 | ins_next2
4207 |
4208 |2: // Check for __index if table value is nil.
4209 | ld TAB:TMP2, TAB:RB->metatable
4210 | beqz TAB:TMP2, <1 // No metatable: done.
4211 |. nop
4212 | lbu TMP0, TAB:TMP2->nomm
4213 | andi TMP0, TMP0, 1<<MM_index
4214 | bnez TMP0, <1 // 'no __index' flag set: done.
4215 |. nop
4216 | b ->vmeta_tgetv
4217 |. nop
4218 |
4219 |5: // Non-integer key.
4220 | li AT, LJ_TSTR
4221 | bne TMP3, AT, ->vmeta_tgetv
4222 |. cleartp RC, TMP2 // Delay slot: untagged GCstr for BC_TGETS_Z.
4223 | b ->BC_TGETS_Z // String key?
4224 |. nop
4225 break;
4226 case BC_TGETS:
4227 | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
4228 | decode_RB8a RB, INS
4229 | decode_RB8b RB
4230 | decode_RC8a RC, INS
4231 | daddu CARG2, BASE, RB
4232 | decode_RC8b RC
4233 | ld TAB:RB, 0(CARG2)
4234 | dsubu CARG3, KBASE, RC
4235 | daddu RA, BASE, RA
4236 | ld STR:RC, -8(CARG3) // KBASE-8-str_const*8
4237 | checktab TAB:RB, ->vmeta_tgets1
4238 |->BC_TGETS_Z:
4239 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
4240 | lw TMP0, TAB:RB->hmask
4241 | lw TMP1, STR:RC->sid
4242 | ld NODE:TMP2, TAB:RB->node
4243 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
4244 | sll TMP0, TMP1, 5
4245 | sll TMP1, TMP1, 3
4246 | subu TMP1, TMP0, TMP1 // idx*24 (node size) = idx*32 - idx*8.
4247 | li TMP3, LJ_TSTR
4248 | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
4249 | settp STR:RC, TMP3 // Tagged key to look for.
4250 |1: // Walk the hash chain.
4251 | ld CARG1, NODE:TMP2->key
4252 | ld CRET1, NODE:TMP2->val
4253 | ld NODE:TMP1, NODE:TMP2->next
4254 | bne CARG1, RC, >4
4255 |. ld TAB:TMP3, TAB:RB->metatable // Delay slot: needed by path 5.
4256 | beq CRET1, TISNIL, >5 // Key found, but nil value?
4257 |. nop
4258 |3:
4259 | ins_next1
4260 | sd CRET1, 0(RA)
4261 | ins_next2
4262 |
4263 |4: // Follow hash chain.
4264 | bnez NODE:TMP1, <1
4265 |. move NODE:TMP2, NODE:TMP1 // Delay slot: advance to next node.
4266 | // End of hash chain: key not found, nil result.
4267 |
4268 |5: // Check for __index if table value is nil.
4269 | beqz TAB:TMP3, <3 // No metatable: done.
4270 |. move CRET1, TISNIL // Delay slot: result defaults to nil.
4271 | lbu TMP0, TAB:TMP3->nomm
4272 | andi TMP0, TMP0, 1<<MM_index
4273 | bnez TMP0, <3 // 'no __index' flag set: done.
4274 |. nop
4275 | b ->vmeta_tgets
4276 |. nop
4277 break;
4278 case BC_TGETB:
4279 | // RA = dst*8, RB = table*8, RC = index*8
4280 | decode_RB8a RB, INS
4281 | decode_RB8b RB
4282 | daddu CARG2, BASE, RB
4283 | decode_RDtoRC8 RC, RD
4284 | ld TAB:RB, 0(CARG2)
4285 | daddu RA, BASE, RA
4286 | srl TMP0, RC, 3 // TMP0 = raw byte index (vmeta_tgetb needs it).
4287 | checktab TAB:RB, ->vmeta_tgetb
4288 | lw TMP1, TAB:RB->asize
4289 | ld TMP2, TAB:RB->array
4290 | sltu AT, TMP0, TMP1
4291 | beqz AT, ->vmeta_tgetb // Outside the array part: slow path.
4292 |. daddu RC, TMP2, RC // Delay slot: element address (index*8 offset).
4293 | ld AT, 0(RC)
4294 | beq AT, TISNIL, >5
4295 |. ld CRET1, 0(RC) // Delay slot: load value (used on both paths).
4296 |1:
4297 | ins_next1
4298 | sd CRET1, 0(RA)
4299 | ins_next2
4300 |
4301 |5: // Check for __index if table value is nil.
4302 | ld TAB:TMP2, TAB:RB->metatable
4303 | beqz TAB:TMP2, <1 // No metatable: done.
4304 |. nop
4305 | lbu TMP1, TAB:TMP2->nomm
4306 | andi TMP1, TMP1, 1<<MM_index
4307 | bnez TMP1, <1 // 'no __index' flag set: done.
4308 |. nop
4309 | b ->vmeta_tgetb // Caveat: preserve TMP0 and CARG2!
4310 |. nop
4311 break;
4312 case BC_TGETR:
4313 | // RA = dst*8, RB = table*8, RC = key*8
4314 | decode_RB8a RB, INS
4315 | decode_RB8b RB
4316 | decode_RDtoRC8 RC, RD
4317 | daddu RB, BASE, RB
4318 | daddu RC, BASE, RC
4319 | ld TAB:CARG1, 0(RB) // Raw get: no type check, operand is known to be a table.
4320 | lw CARG2, LO(RC) // Integer key is in the lo-word of the TValue.
4321 | daddu RA, BASE, RA
4322 | cleartp TAB:CARG1
4323 | lw TMP0, TAB:CARG1->asize
4324 | ld TMP1, TAB:CARG1->array
4325 | sltu AT, CARG2, TMP0
4326 | sll TMP2, CARG2, 3
4327 | beqz AT, ->vmeta_tgetr // In array part?
4328 |. daddu CRET1, TMP1, TMP2 // Delay slot: element address.
4329 | ld CARG2, 0(CRET1)
4330 |->BC_TGETR_Z:
4331 | ins_next1
4332 | sd CARG2, 0(RA)
4333 | ins_next2
4334 break;
4335
4336 case BC_TSETV:
4337 | // RA = src*8, RB = table*8, RC = key*8
4338 | decode_RB8a RB, INS
4339 | decode_RB8b RB
4340 | decode_RDtoRC8 RC, RD
4341 | daddu CARG2, BASE, RB
4342 | daddu CARG3, BASE, RC
4343 | ld RB, 0(CARG2)
4344 | ld TMP2, 0(CARG3)
4345 | daddu RA, BASE, RA
4346 | checktab RB, ->vmeta_tsetv
4347 | checkint TMP2, >5
4348 |. sextw RC, TMP2 // Delay slot: sign-extended integer key.
4349 | lw TMP0, TAB:RB->asize
4350 | ld TMP1, TAB:RB->array
4351 | sltu AT, RC, TMP0
4352 | sll TMP2, RC, 3
4353 | beqz AT, ->vmeta_tsetv // Integer key and in array part?
4354 |. daddu TMP1, TMP1, TMP2 // Delay slot: element address.
4355 | ld TMP0, 0(TMP1)
4356 | lbu TMP3, TAB:RB->marked
4357 | beq TMP0, TISNIL, >3 // Previous value nil: may need __newindex check.
4358 |. ld CRET1, 0(RA) // Delay slot: value to store.
4359 |1:
4360 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4361 | bnez AT, >7
4362 |. sd CRET1, 0(TMP1) // Delay slot: store happens on both paths.
4363 |2:
4364 | ins_next
4365 |
4366 |3: // Check for __newindex if previous value is nil.
4367 | ld TAB:TMP2, TAB:RB->metatable
4368 | beqz TAB:TMP2, <1 // No metatable: done.
4369 |. nop
4370 | lbu TMP2, TAB:TMP2->nomm
4371 | andi TMP2, TMP2, 1<<MM_newindex
4372 | bnez TMP2, <1 // 'no __newindex' flag set: done.
4373 |. nop
4374 | b ->vmeta_tsetv
4375 |. nop
4376 |
4377 |5: // Non-integer key.
4378 | gettp AT, TMP2
4379 | daddiu AT, AT, -LJ_TSTR
4380 | bnez AT, ->vmeta_tsetv
4381 |. nop
4382 | b ->BC_TSETS_Z // String key?
4383 |. cleartp STR:RC, TMP2 // Delay slot: untagged GCstr key.
4384 |
4385 |7: // Possible table write barrier for the value. Skip valiswhite check.
4386 | barrierback TAB:RB, TMP3, TMP0, <2
4387 break;
4388 case BC_TSETS:
4389 | // RA = src*8, RB = table*8, RC = str_const*8 (~)
4390 | decode_RB8a RB, INS
4391 | decode_RB8b RB
4392 | daddu CARG2, BASE, RB
4393 | decode_RC8a RC, INS
4394 | ld TAB:RB, 0(CARG2)
4395 | decode_RC8b RC
4396 | dsubu CARG3, KBASE, RC
4397 | ld RC, -8(CARG3) // KBASE-8-str_const*8
4398 | daddu RA, BASE, RA
4399 | cleartp STR:RC
4400 | checktab TAB:RB, ->vmeta_tsets1
4401 |->BC_TSETS_Z:
4402 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
4403 | lw TMP0, TAB:RB->hmask
4404 | lw TMP1, STR:RC->sid
4405 | ld NODE:TMP2, TAB:RB->node
4406 | sb r0, TAB:RB->nomm // Clear metamethod cache.
4407 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
4408 | sll TMP0, TMP1, 5
4409 | sll TMP1, TMP1, 3
4410 | subu TMP1, TMP0, TMP1 // idx*24 (node size) = idx*32 - idx*8.
4411 | li TMP3, LJ_TSTR
4412 | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
4413 | settp STR:RC, TMP3 // Tagged key to look for.
4414 |.if FPU
4415 | ldc1 FTMP0, 0(RA) // Load the value to store once, up front.
4416 |.else
4417 | ld CRET1, 0(RA)
4418 |.endif
4419 |1: // Walk the hash chain.
4420 | ld TMP0, NODE:TMP2->key
4421 | ld CARG2, NODE:TMP2->val
4422 | ld NODE:TMP1, NODE:TMP2->next
4423 | bne TMP0, RC, >5
4424 |. lbu TMP3, TAB:RB->marked // Delay slot: needed for the barrier check.
4425 | beq CARG2, TISNIL, >4 // Key found, but nil value?
4426 |. ld TAB:TMP0, TAB:RB->metatable // Delay slot: needed by path 4.
4427 |2:
4428 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4429 | bnez AT, >7
4430 |.if FPU
4431 |. sdc1 FTMP0, NODE:TMP2->val // Delay slot: store happens on both paths.
4432 |.else
4433 |. sd CRET1, NODE:TMP2->val // Delay slot: store happens on both paths.
4434 |.endif
4435 |3:
4436 | ins_next
4437 |
4438 |4: // Check for __newindex if previous value is nil.
4439 | beqz TAB:TMP0, <2 // No metatable: done.
4440 |. nop
4441 | lbu TMP0, TAB:TMP0->nomm
4442 | andi TMP0, TMP0, 1<<MM_newindex
4443 | bnez TMP0, <2 // 'no __newindex' flag set: done.
4444 |. nop
4445 | b ->vmeta_tsets
4446 |. nop
4447 |
4448 |5: // Follow hash chain.
4449 | bnez NODE:TMP1, <1
4450 |. move NODE:TMP2, NODE:TMP1 // Delay slot: advance to next node.
4451 | // End of hash chain: key not found, add a new one
4452 |
4453 | // But check for __newindex first.
4454 | ld TAB:TMP2, TAB:RB->metatable
4455 | beqz TAB:TMP2, >6 // No metatable: continue.
4456 |. daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv) // Delay slot: scratch TValue for the key.
4457 | lbu TMP0, TAB:TMP2->nomm
4458 | andi TMP0, TMP0, 1<<MM_newindex
4459 | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
4460 |6: // Insert a new key.
4461 | load_got lj_tab_newkey
4462 | sd RC, 0(CARG3) // Store the tagged key in g->tmptv.
4463 | sd BASE, L->base
4464 | move CARG2, TAB:RB
4465 | sd PC, SAVE_PC
4466 | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
4467 |. move CARG1, L // Delay slot: CARG1 = L.
4468 | // Returns TValue *.
4469 | ld BASE, L->base
4470 |.if FPU
4471 | b <3 // No 2nd write barrier needed.
4472 |. sdc1 FTMP0, 0(CRET1) // Delay slot: store the kept FP copy of the value.
4473 |.else
4474 | ld CARG1, 0(RA) // Reload the value (RA survived the call).
4475 | b <3 // No 2nd write barrier needed.
4476 |. sd CARG1, 0(CRET1)
4477 |.endif
4478 |
4479 |7: // Possible table write barrier for the value. Skip valiswhite check.
4480 | barrierback TAB:RB, TMP3, TMP0, <3
4481 break;
4482 case BC_TSETB:
4483 | // RA = src*8, RB = table*8, RC = index*8
4484 | decode_RB8a RB, INS
4485 | decode_RB8b RB
4486 | daddu CARG2, BASE, RB
4487 | decode_RDtoRC8 RC, RD
4488 | ld TAB:RB, 0(CARG2)
4489 | daddu RA, BASE, RA
4490 | srl TMP0, RC, 3 // TMP0 = raw byte index (vmeta_tsetb needs it).
4491 | checktab RB, ->vmeta_tsetb
4492 | lw TMP1, TAB:RB->asize
4493 | ld TMP2, TAB:RB->array
4494 | sltu AT, TMP0, TMP1
4495 | beqz AT, ->vmeta_tsetb // Outside the array part: slow path.
4496 |. daddu RC, TMP2, RC // Delay slot: element address (index*8 offset).
4497 | ld TMP1, 0(RC)
4498 | lbu TMP3, TAB:RB->marked
4499 | beq TMP1, TISNIL, >5 // Previous value nil: may need __newindex check.
4500 |1:
4501 |. ld CRET1, 0(RA) // Delay slot of the beq above; also first insn after <1.
4502 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4503 | bnez AT, >7
4504 |. sd CRET1, 0(RC) // Delay slot: store happens on both paths.
4505 |2:
4506 | ins_next
4507 |
4508 |5: // Check for __newindex if previous value is nil.
4509 | ld TAB:TMP2, TAB:RB->metatable
4510 | beqz TAB:TMP2, <1 // No metatable: done.
4511 |. nop
4512 | lbu TMP1, TAB:TMP2->nomm
4513 | andi TMP1, TMP1, 1<<MM_newindex
4514 | bnez TMP1, <1 // 'no __newindex' flag set: done.
4515 |. nop
4516 | b ->vmeta_tsetb // Caveat: preserve TMP0 and CARG2!
4517 |. nop
4518 |
4519 |7: // Possible table write barrier for the value. Skip valiswhite check.
4520 | barrierback TAB:RB, TMP3, TMP0, <2
4521 break;
4522 case BC_TSETR:
4523 | // RA = dst*8, RB = table*8, RC = key*8
4524 | decode_RB8a RB, INS
4525 | decode_RB8b RB
4526 | decode_RDtoRC8 RC, RD
4527 | daddu CARG1, BASE, RB
4528 | daddu CARG3, BASE, RC
4529 | ld TAB:CARG2, 0(CARG1) // Raw set: operand is known to be a table.
4530 | lw CARG3, LO(CARG3) // Integer key is in the lo-word of the TValue.
4531 | cleartp TAB:CARG2
4532 | lbu TMP3, TAB:CARG2->marked
4533 | lw TMP0, TAB:CARG2->asize
4534 | ld TMP1, TAB:CARG2->array
4535 | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
4536 | bnez AT, >7 // Barrier check first; store happens at BC_TSETR_Z.
4537 |. daddu RA, BASE, RA // Delay slot.
4538 |2:
4539 | sltu AT, CARG3, TMP0
4540 | sll TMP2, CARG3, 3
4541 | beqz AT, ->vmeta_tsetr // In array part?
4542 |. daddu CRET1, TMP1, TMP2 // Delay slot: element address.
4543 |->BC_TSETR_Z:
4544 | ld CARG1, 0(RA)
4545 | ins_next1
4546 | sd CARG1, 0(CRET1)
4547 | ins_next2
4548 |
4549 |7: // Possible table write barrier for the value. Skip valiswhite check.
4550 | barrierback TAB:CARG2, TMP3, CRET1, <2
4551 break;
4552
4553 case BC_TSETM:
4554 | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
4555 | daddu RA, BASE, RA
4556 |1:
4557 | daddu TMP3, KBASE, RD
4558 | ld TAB:CARG2, -8(RA) // Guaranteed to be a table.
4559 | addiu TMP0, MULTRES, -8 // TMP0 = nresults*8 (MULTRES = (nresults+1)*8).
4560 | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
4561 | beqz TMP0, >4 // Nothing to copy?
4562 |. srl CARG3, TMP0, 3 // Delay slot: number of results.
4563 | cleartp CARG2
4564 | addu CARG3, CARG3, TMP3 // CARG3 = start index + nresults (needed asize).
4565 | lw TMP2, TAB:CARG2->asize
4566 | sll TMP1, TMP3, 3
4567 | lbu TMP3, TAB:CARG2->marked
4568 | ld CARG1, TAB:CARG2->array
4569 | sltu AT, TMP2, CARG3
4570 | bnez AT, >5 // Array part too small: resize.
4571 |. daddu TMP2, RA, TMP0 // Delay slot: end of source slots.
4572 | daddu TMP1, TMP1, CARG1 // Destination in the array part.
4573 | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
4574 |3: // Copy result slots to table.
4575 | ld CRET1, 0(RA)
4576 | daddiu RA, RA, 8
4577 | sltu AT, RA, TMP2
4578 | sd CRET1, 0(TMP1)
4579 | bnez AT, <3
4580 |. daddiu TMP1, TMP1, 8 // Delay slot: advance destination.
4581 | bnez TMP0, >7 // Black table: write barrier needed.
4582 |. nop
4583 |4:
4584 | ins_next
4585 |
4586 |5: // Need to resize array part.
4587 | load_got lj_tab_reasize
4588 | sd BASE, L->base
4589 | sd PC, SAVE_PC
4590 | move BASE, RD // Stash RD in a saved register across the call.
4591 | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
4592 |. move CARG1, L // Delay slot: CARG1 = L.
4593 | // Must not reallocate the stack.
4594 | move RD, BASE
4595 | b <1
4596 |. ld BASE, L->base // Reload BASE for lack of a saved register.
4597 |
4598 |7: // Possible table write barrier for any value. Skip valiswhite check.
4599 | barrierback TAB:CARG2, TMP3, TMP0, <4
4600 break;
4601
4602 /* -- Calls and vararg handling ----------------------------------------- */
4603
4604 case BC_CALLM:
4605 | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
4606 | decode_RDtoRC8 NARGS8:RC, RD
4607 | b ->BC_CALL_Z
4608 |. addu NARGS8:RC, NARGS8:RC, MULTRES // Delay slot: add multi-result count.
4609 break;
4610 case BC_CALL:
4611 | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
4612 | decode_RDtoRC8 NARGS8:RC, RD
4613 |->BC_CALL_Z:
4614 | move TMP2, BASE // Save old BASE for vmeta_call.
4615 | daddu BASE, BASE, RA
4616 | ld LFUNC:RB, 0(BASE)
4617 | daddiu BASE, BASE, 16 // New base is past the frame slots.
4618 | addiu NARGS8:RC, NARGS8:RC, -8 // NARGS8 = nargs*8 (drop the +1 bias).
4619 | checkfunc RB, ->vmeta_call
4620 | ins_call
4621 break;
4622
4623 case BC_CALLMT:
4624 | // RA = base*8, (RB = 0,) RC = extra_nargs*8
4625 | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
4626 | // Fall through. Assumes BC_CALLT follows.
4627 break;
4628 case BC_CALLT:
4629 | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
4630 | daddu RA, BASE, RA
4631 | ld RB, 0(RA) // Tagged function to tail-call.
4632 | move NARGS8:RC, RD
4633 | ld TMP1, FRAME_PC(BASE)
4634 | daddiu RA, RA, 16 // First argument slot.
4635 | addiu NARGS8:RC, NARGS8:RC, -8 // NARGS8 = nargs*8 (drop the +1 bias).
4636 | checktp CARG3, RB, -LJ_TFUNC, ->vmeta_callt
4637 |->BC_CALLT_Z:
4638 | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
4639 | lbu TMP3, LFUNC:CARG3->ffid
4640 | bnez TMP0, >7 // Not returning to a Lua frame?
4641 |. xori TMP2, TMP1, FRAME_VARG // Delay slot: vararg-frame delta.
4642 |1:
4643 | sd RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
4644 | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function? AT=1 for a regular Lua/C function.
4645 | move TMP2, BASE
4646 | move RB, CARG3
4647 | beqz NARGS8:RC, >3 // No arguments to copy?
4648 |. move TMP3, NARGS8:RC // Delay slot: copy counter.
4649 |2: // Copy arguments down over the current frame.
4650 | ld CRET1, 0(RA)
4651 | daddiu RA, RA, 8
4652 | addiu TMP3, TMP3, -8
4653 | sd CRET1, 0(TMP2)
4654 | bnez TMP3, <2
4655 |. daddiu TMP2, TMP2, 8 // Delay slot: advance destination.
4656 |3:
4657 | or TMP0, TMP0, AT
4658 | beqz TMP0, >5 // Fast function and a Lua frame below: fix KBASE first.
4659 |. nop
4660 |4:
4661 | ins_callt
4662 |
4663 |5: // Tailcall to a fast function with a Lua frame below.
4664 | lw INS, -4(TMP1) // Caller's CALL instruction.
4665 | decode_RA8a RA, INS
4666 | decode_RA8b RA
4667 | dsubu TMP1, BASE, RA
4668 | ld TMP1, -32(TMP1) // Caller's Lua function.
4669 | cleartp LFUNC:TMP1
4670 | ld TMP1, LFUNC:TMP1->pc
4671 | b <4
4672 |. ld KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
4673 |
4674 |7: // Tailcall from a vararg function.
4675 | andi AT, TMP2, FRAME_TYPEP
4676 | bnez AT, <1 // Vararg frame below?
4677 |. dsubu TMP2, BASE, TMP2 // Relocate BASE down.
4678 | move BASE, TMP2
4679 | ld TMP1, FRAME_PC(TMP2)
4680 | b <1
4681 |. andi TMP0, TMP1, FRAME_TYPE // Delay slot: recompute frame-type bits.
4682 break;
4683
4684 case BC_ITERC:
4685 | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
4686 | move TMP2, BASE // Save old BASE for vmeta_call.
4687 | daddu BASE, BASE, RA
4688 | ld RB, -24(BASE) // Iterator function.
4689 | ld CARG1, -16(BASE) // State.
4690 | ld CARG2, -8(BASE) // Control var.
4691 | li NARGS8:RC, 16 // Iterators get 2 arguments.
4692 | sd RB, 0(BASE) // Copy callable.
4693 | sd CARG1, 16(BASE) // Copy state.
4694 | sd CARG2, 24(BASE) // Copy control var.
4695 | daddiu BASE, BASE, 16 // New frame base.
4696 | checkfunc RB, ->vmeta_call
4697 | ins_call
4698 break;
4699
4700 case BC_ITERN:
4701 | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
4702 |.if JIT
4703 | // NYI: add hotloop, record BC_ITERN.
4704 |.endif
4705 | daddu RA, BASE, RA
4706 | ld TAB:RB, -16(RA) // Table being iterated (state slot).
4707 | lw RC, -8+LO(RA) // Get index from control var.
4708 | cleartp TAB:RB
4709 | daddiu PC, PC, 4 // PC now points at the following ITERL.
4710 | lw TMP0, TAB:RB->asize
4711 | ld TMP1, TAB:RB->array
4712 | dsll CARG3, TISNUM, 47 // Integer type tag, in tag position.
4713 |1: // Traverse array part.
4714 | sltu AT, RC, TMP0
4715 | beqz AT, >5 // Index points after array part?
4716 |. sll TMP3, RC, 3 // Delay slot: index*8.
4717 | daddu TMP3, TMP1, TMP3
4718 | ld CARG1, 0(TMP3)
4719 | lhu RD, -4+OFS_RD(PC) // RD operand of the ITERL.
4720 | or TMP2, RC, CARG3 // Tag the integer key.
4721 | beq CARG1, TISNIL, <1 // Skip holes in array part.
4722 |. addiu RC, RC, 1 // Delay slot: next index.
4723 | sd TMP2, 0(RA) // Store key.
4724 | sd CARG1, 8(RA) // Store value.
4725 | or TMP0, RC, CARG3 // Tagged next index.
4726 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) // Jump bias (BCBIAS_J).
4727 | decode_RD4b RD
4728 | daddu RD, RD, TMP3
4729 | sw TMP0, -8+LO(RA) // Update control var.
4730 | daddu PC, PC, RD // Take the ITERL backward branch.
4731 |3:
4732 | ins_next
4733 |
4734 |5: // Traverse hash part.
4735 | lw TMP1, TAB:RB->hmask
4736 | subu RC, RC, TMP0 // Hash index = control var - asize.
4737 | ld TMP2, TAB:RB->node
4738 |6:
4739 | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
4740 | bnez AT, <3
4741 |. sll TMP3, RC, 5 // Delay slot: idx*32.
4742 | sll RB, RC, 3
4743 | subu TMP3, TMP3, RB // idx*24 (node size) = idx*32 - idx*8.
4744 | daddu NODE:TMP3, TMP3, TMP2
4745 | ld CARG1, 0(NODE:TMP3) // Node value.
4746 | lhu RD, -4+OFS_RD(PC)
4747 | beq CARG1, TISNIL, <6 // Skip holes in hash part.
4748 |. addiu RC, RC, 1 // Delay slot: next hash index.
4749 | ld CARG2, NODE:TMP3->key
4750 | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535) // Jump bias (BCBIAS_J).
4751 | sd CARG1, 8(RA) // Store value.
4752 | addu RC, RC, TMP0 // Back to the combined (asize-biased) index.
4753 | decode_RD4b RD
4754 | addu RD, RD, TMP3
4755 | sd CARG2, 0(RA) // Store key.
4756 | daddu PC, PC, RD // Take the ITERL backward branch.
4757 | b <3
4758 |. sw RC, -8+LO(RA) // Update control var.
4759 break;
4760
4761 case BC_ISNEXT:
4762 | // RA = base*8, RD = target (points to ITERN)
4763 | daddu RA, BASE, RA
4764 | srl TMP0, RD, 1
4765 | ld CFUNC:CARG1, -24(RA) // Iterator function slot.
4766 | daddu TMP0, PC, TMP0
4767 | ld CARG2, -16(RA) // State slot: must be a table.
4768 | ld CARG3, -8(RA) // Control slot: must be nil.
4769 | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535) // Jump bias (BCBIAS_J).
4770 | checkfunc CFUNC:CARG1, >5
4771 | gettp CARG2, CARG2
4772 | daddiu CARG2, CARG2, -LJ_TTAB
4773 | lbu TMP1, CFUNC:CARG1->ffid
4774 | daddiu CARG3, CARG3, -LJ_TNIL
4775 | or AT, CARG2, CARG3
4776 | daddiu TMP1, TMP1, -FF_next_N // Must be the next() fast function.
4777 | or AT, AT, TMP1 // AT == 0 iff all three checks pass.
4778 | bnez AT, >5
4779 |. lui TMP1, 0xfffe
4780 | daddu PC, TMP0, TMP2 // Branch to the ITERN.
4781 | ori TMP1, TMP1, 0x7fff
4782 | dsll TMP1, TMP1, 32 // 0xfffe7fff << 32: ITERN control-var marker.
4783 | sd TMP1, -8(RA) // Initialize control var.
4784 |1:
4785 | ins_next
4786 |5: // Despecialize bytecode if any of the checks fail.
4787 | li TMP3, BC_JMP
4788 | li TMP1, BC_ITERC
4789 | sb TMP3, -4+OFS_OP(PC) // ISNEXT -> JMP.
4790 | daddu PC, TMP0, TMP2
4791 | b <1
4792 |. sb TMP1, OFS_OP(PC) // Delay slot: ITERN -> ITERC.
4793 break;
4794
4795 case BC_VARG:
4796 | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
4797 | ld TMP0, FRAME_PC(BASE)
4798 | decode_RDtoRC8 RC, RD
4799 | decode_RB8a RB, INS
4800 | daddu RC, BASE, RC
4801 | decode_RB8b RB
4802 | daddu RA, BASE, RA
4803 | daddiu RC, RC, FRAME_VARG
4804 | daddu TMP2, RA, RB
4805 | daddiu TMP3, BASE, -16 // TMP3 = vtop
4806 | dsubu RC, RC, TMP0 // RC = vbase
4807 | // Note: RC may now be even _above_ BASE if nargs was < numparams.
4808 | beqz RB, >5 // Copy all varargs?
4809 |. dsubu TMP1, TMP3, RC // Delay slot: available vararg bytes.
4810 | daddiu TMP2, TMP2, -16 // Last destination slot.
4811 |1: // Copy vararg slots to destination slots.
4812 | ld CARG1, 0(RC)
4813 | sltu AT, RC, TMP3
4814 | daddiu RC, RC, 8
4815 |.if MIPSR6
4816 | selnez CARG1, CARG1, AT
4817 | seleqz AT, TISNIL, AT
4818 | or CARG1, CARG1, AT // Past vtop: use nil (R6 has no movz).
4819 |.else
4820 | movz CARG1, TISNIL, AT // Past vtop: use nil.
4821 |.endif
4822 | sd CARG1, 0(RA)
4823 | sltu AT, RA, TMP2
4824 | bnez AT, <1
4825 |. daddiu RA, RA, 8 // Delay slot: next destination.
4826 |3:
4827 | ins_next
4828 |
4829 |5: // Copy all varargs.
4830 | ld TMP0, L->maxstack
4831 | blez TMP1, <3 // No vararg slots?
4832 |. li MULTRES, 8 // MULTRES = (0+1)*8
4833 | daddu TMP2, RA, TMP1
4834 | sltu AT, TMP0, TMP2
4835 | bnez AT, >7 // Would overflow the stack: grow it.
4836 |. daddiu MULTRES, TMP1, 8 // Delay slot: MULTRES = (n+1)*8.
4837 |6:
4838 | ld CRET1, 0(RC)
4839 | daddiu RC, RC, 8
4840 | sd CRET1, 0(RA)
4841 | sltu AT, RC, TMP3
4842 | bnez AT, <6 // More vararg slots?
4843 |. daddiu RA, RA, 8 // Delay slot: next destination.
4844 | b <3
4845 |. nop
4846 |
4847 |7: // Grow stack for varargs.
4848 | load_got lj_state_growstack
4849 | sd RA, L->top
4850 | dsubu RA, RA, BASE // Save as delta, because BASE may change.
4851 | sd BASE, L->base
4852 | dsubu BASE, RC, BASE // Need delta, because BASE may change.
4853 | sd PC, SAVE_PC
4854 | srl CARG2, TMP1, 3 // n = number of needed slots.
4855 | call_intern lj_state_growstack // (lua_State *L, int n)
4856 |. move CARG1, L // Delay slot: CARG1 = L.
4857 | move RC, BASE
4858 | ld BASE, L->base
4859 | daddu RA, BASE, RA // Rebase the saved deltas.
4860 | daddu RC, BASE, RC
4861 | b <6
4862 |. daddiu TMP3, BASE, -16 // Delay slot: recompute vtop.
4863 break;
4864
4865 /* -- Returns ----------------------------------------------------------- */
4866
4867 case BC_RETM:
4868 | // RA = results*8, RD = extra_nresults*8
4869 | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
4870 | // Fall through. Assumes BC_RET follows.
4871 break;
4872
4873 case BC_RET:
4874 | // RA = results*8, RD = (nresults+1)*8
4875 | ld PC, FRAME_PC(BASE)
4876 | daddu RA, BASE, RA
4877 | move MULTRES, RD
4878 |1:
4879 | andi TMP0, PC, FRAME_TYPE
4880 | bnez TMP0, ->BC_RETV_Z // Non-Lua frame below?
4881 |. xori TMP1, PC, FRAME_VARG // Delay slot: vararg-frame delta.
4882 |
4883 |->BC_RET_Z:
4884 | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
4885 | lw INS, -4(PC) // Caller's CALL instruction.
4886 | daddiu TMP2, BASE, -16 // First result destination.
4887 | daddiu RC, RD, -8 // RC = nresults*8.
4888 | decode_RA8a TMP0, INS
4889 | decode_RB8a RB, INS
4890 | decode_RA8b TMP0
4891 | decode_RB8b RB
4892 | daddu TMP3, TMP2, RB // End of wanted result slots.
4893 | beqz RC, >3 // No results to copy?
4894 |. dsubu BASE, TMP2, TMP0 // Delay slot: recompute caller's BASE.
4895 |2: // Copy results down.
4896 | ld CRET1, 0(RA)
4897 | daddiu RA, RA, 8
4898 | daddiu RC, RC, -8
4899 | sd CRET1, 0(TMP2)
4900 | bnez RC, <2
4901 |. daddiu TMP2, TMP2, 8 // Delay slot: next destination.
4902 |3:
4903 | daddiu TMP3, TMP3, -8
4904 |5:
4905 | sltu AT, TMP2, TMP3
4906 | bnez AT, >6 // Caller wants more results than returned?
4907 |. ld LFUNC:TMP1, FRAME_FUNC(BASE) // Delay slot: caller's function.
4908 | ins_next1
4909 | cleartp LFUNC:TMP1
4910 | ld TMP1, LFUNC:TMP1->pc
4911 | ld KBASE, PC2PROTO(k)(TMP1) // Restore caller's KBASE.
4912 | ins_next2
4913 |
4914 |6: // Fill up results with nil.
4915 | sd TISNIL, 0(TMP2)
4916 | b <5
4917 |. daddiu TMP2, TMP2, 8 // Delay slot: next slot to fill.
4918 |
4919 |->BC_RETV_Z: // Non-standard return case.
4920 | andi TMP2, TMP1, FRAME_TYPEP
4921 | bnez TMP2, ->vm_return // Not a vararg frame either: leave the VM.
4922 |. nop
4923 | // Return from vararg function: relocate BASE down.
4924 | dsubu BASE, BASE, TMP1
4925 | b <1
4926 |. ld PC, FRAME_PC(BASE) // Delay slot: reload the real return PC.
4927 break;
4928
4929 case BC_RET0: case BC_RET1:
4930 | // RA = results*8, RD = (nresults+1)*8
4931 | ld PC, FRAME_PC(BASE)
4932 | daddu RA, BASE, RA
4933 | move MULTRES, RD
4934 | andi TMP0, PC, FRAME_TYPE
4935 | bnez TMP0, ->BC_RETV_Z // Non-Lua frame below?
4936 |. xori TMP1, PC, FRAME_VARG // Delay slot: vararg-frame delta.
4937 | lw INS, -4(PC) // Caller's CALL instruction.
4938 | daddiu TMP2, BASE, -16 // First result destination.
4939 if (op == BC_RET1) {
4940 | ld CRET1, 0(RA) // Load the single result.
4941 }
4942 | decode_RB8a RB, INS
4943 | decode_RA8a RA, INS
4944 | decode_RB8b RB
4945 | decode_RA8b RA
4946 | dsubu BASE, TMP2, RA // Recompute caller's BASE.
4947 if (op == BC_RET1) {
4948 | sd CRET1, 0(TMP2)
4949 }
4950 |5:
4951 | sltu AT, RD, RB // Caller wants more results than returned?
4952 | bnez AT, >6
4953 |. ld TMP1, FRAME_FUNC(BASE) // Delay slot: caller's function.
4954 | ins_next1
4955 | cleartp LFUNC:TMP1
4956 | ld TMP1, LFUNC:TMP1->pc
4957 | ld KBASE, PC2PROTO(k)(TMP1) // Restore caller's KBASE.
4958 | ins_next2
4959 |
4960 |6: // Fill up results with nil.
4961 | daddiu TMP2, TMP2, 8
4962 | daddiu RD, RD, 8
4963 | b <5
4964 if (op == BC_RET1) {
4965 |. sd TISNIL, 0(TMP2) // RET1: first nil goes after the copied result.
4966 } else {
4967 |. sd TISNIL, -8(TMP2) // RET0: first nil goes into the result slot itself.
4968 }
4969 break;
4970
4971 /* -- Loops and branches ------------------------------------------------ */
4972
4973 case BC_FORL:
4974 |.if JIT
4975 | hotloop // JIT: hot-loop counting (see the hotloop macro).
4976 |.endif
4977 | // Fall through. Assumes BC_IFORL follows.
4978 break;
4979
4980 case BC_JFORI:
4981 case BC_JFORL:
4982#if !LJ_HASJIT
4983 break;
4984#endif
4985 case BC_FORI:
4986 case BC_IFORL:
4987 | // RA = base*8, RD = target (after end of loop or start of loop)
4988 vk = (op == BC_IFORL || op == BC_JFORL);
4989 | daddu RA, BASE, RA
4990 | ld CARG1, FORL_IDX*8(RA) // IDX CARG1 - CARG3 type
4991 | gettp CARG3, CARG1
4992 if (op != BC_JFORL) {
4993 | srl RD, RD, 1
4994 | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
4995 | daddu TMP2, RD, TMP2
4996 }
4997 if (!vk) {
4998 | ld CARG2, FORL_STOP*8(RA) // STOP CARG2 - CARG4 type
4999 | ld CRET1, FORL_STEP*8(RA) // STEP CRET1 - CRET2 type
5000 | gettp CARG4, CARG2
5001 | bne CARG3, TISNUM, >5
5002 |. gettp CRET2, CRET1
5003 | bne CARG4, TISNUM, ->vmeta_for
5004 |. sextw CARG3, CARG1
5005 | bne CRET2, TISNUM, ->vmeta_for
5006 |. sextw CARG2, CARG2
5007 | dext AT, CRET1, 31, 0
5008 | slt CRET1, CARG2, CARG3
5009 | slt TMP1, CARG3, CARG2
5010 |.if MIPSR6
5011 | selnez TMP1, TMP1, AT
5012 | seleqz CRET1, CRET1, AT
5013 | or CRET1, CRET1, TMP1
5014 |.else
5015 | movn CRET1, TMP1, AT
5016 |.endif
5017 } else {
5018 | bne CARG3, TISNUM, >5
5019 |. ld CARG2, FORL_STEP*8(RA) // STEP CARG2 - CARG4 type
5020 | ld CRET1, FORL_STOP*8(RA) // STOP CRET1 - CRET2 type
5021 | sextw TMP3, CARG1
5022 | sextw CARG2, CARG2
5023 | sextw CRET1, CRET1
5024 | addu CARG1, TMP3, CARG2
5025 | xor TMP0, CARG1, TMP3
5026 | xor TMP1, CARG1, CARG2
5027 | and TMP0, TMP0, TMP1
5028 | slt TMP1, CARG1, CRET1
5029 | slt CRET1, CRET1, CARG1
5030 | slt AT, CARG2, r0
5031 | slt TMP0, TMP0, r0 // ((y^a) & (y^b)) < 0: overflow.
5032 |.if MIPSR6
5033 | selnez TMP1, TMP1, AT
5034 | seleqz CRET1, CRET1, AT
5035 | or CRET1, CRET1, TMP1
5036 |.else
5037 | movn CRET1, TMP1, AT
5038 |.endif
5039 | or CRET1, CRET1, TMP0
5040 | zextw CARG1, CARG1
5041 | settp CARG1, TISNUM
5042 }
5043 |1:
5044 if (op == BC_FORI) {
5045 |.if MIPSR6
5046 | selnez TMP2, TMP2, CRET1
5047 |.else
5048 | movz TMP2, r0, CRET1
5049 |.endif
5050 | daddu PC, PC, TMP2
5051 } else if (op == BC_JFORI) {
5052 | daddu PC, PC, TMP2
5053 | lhu RD, -4+OFS_RD(PC)
5054 } else if (op == BC_IFORL) {
5055 |.if MIPSR6
5056 | seleqz TMP2, TMP2, CRET1
5057 |.else
5058 | movn TMP2, r0, CRET1
5059 |.endif
5060 | daddu PC, PC, TMP2
5061 }
5062 if (vk) {
5063 | sd CARG1, FORL_IDX*8(RA)
5064 }
5065 | ins_next1
5066 | sd CARG1, FORL_EXT*8(RA)
5067 |2:
5068 if (op == BC_JFORI) {
5069 | beqz CRET1, =>BC_JLOOP
5070 |. decode_RD8b RD
5071 } else if (op == BC_JFORL) {
5072 | beqz CRET1, =>BC_JLOOP
5073 }
5074 | ins_next2
5075 |
5076 |5: // FP loop.
5077 |.if FPU
5078 if (!vk) {
5079 | ldc1 f0, FORL_IDX*8(RA)
5080 | ldc1 f2, FORL_STOP*8(RA)
5081 | sltiu TMP0, CARG3, LJ_TISNUM
5082 | sltiu TMP1, CARG4, LJ_TISNUM
5083 | sltiu AT, CRET2, LJ_TISNUM
5084 | ld TMP3, FORL_STEP*8(RA)
5085 | and TMP0, TMP0, TMP1
5086 | and AT, AT, TMP0
5087 | beqz AT, ->vmeta_for
5088 |. slt TMP3, TMP3, r0
5089 |.if MIPSR6
5090 | dmtc1 TMP3, FTMP2
5091 | cmp.lt.d FTMP0, f0, f2
5092 | cmp.lt.d FTMP1, f2, f0
5093 | sel.d FTMP2, FTMP1, FTMP0
5094 | b <1
5095 |. dmfc1 CRET1, FTMP2
5096 |.else
5097 | c.ole.d 0, f0, f2
5098 | c.ole.d 1, f2, f0
5099 | li CRET1, 1
5100 | movt CRET1, r0, 0
5101 | movt AT, r0, 1
5102 | b <1
5103 |. movn CRET1, AT, TMP3
5104 |.endif
5105 } else {
5106 | ldc1 f0, FORL_IDX*8(RA)
5107 | ldc1 f4, FORL_STEP*8(RA)
5108 | ldc1 f2, FORL_STOP*8(RA)
5109 | ld TMP3, FORL_STEP*8(RA)
5110 | add.d f0, f0, f4
5111 |.if MIPSR6
5112 | slt TMP3, TMP3, r0
5113 | dmtc1 TMP3, FTMP2
5114 | cmp.lt.d FTMP0, f0, f2
5115 | cmp.lt.d FTMP1, f2, f0
5116 | sel.d FTMP2, FTMP1, FTMP0
5117 | dmfc1 CRET1, FTMP2
5118 if (op == BC_IFORL) {
5119 | seleqz TMP2, TMP2, CRET1
5120 | daddu PC, PC, TMP2
5121 }
5122 |.else
5123 | c.ole.d 0, f0, f2
5124 | c.ole.d 1, f2, f0
5125 | slt TMP3, TMP3, r0
5126 | li CRET1, 1
5127 | li AT, 1
5128 | movt CRET1, r0, 0
5129 | movt AT, r0, 1
5130 | movn CRET1, AT, TMP3
5131 if (op == BC_IFORL) {
5132 | movn TMP2, r0, CRET1
5133 | daddu PC, PC, TMP2
5134 }
5135 |.endif
5136 | sdc1 f0, FORL_IDX*8(RA)
5137 | ins_next1
5138 | b <2
5139 |. sdc1 f0, FORL_EXT*8(RA)
5140 }
5141 |.else
5142 if (!vk) {
5143 | sltiu TMP0, CARG3, LJ_TISNUM
5144 | sltiu TMP1, CARG4, LJ_TISNUM
5145 | sltiu AT, CRET2, LJ_TISNUM
5146 | and TMP0, TMP0, TMP1
5147 | and AT, AT, TMP0
5148 | beqz AT, ->vmeta_for
5149 |. nop
5150 | bal ->vm_sfcmpolex
5151 |. lw TMP3, FORL_STEP*8+HI(RA)
5152 | b <1
5153 |. nop
5154 } else {
5155 | load_got __adddf3
5156 | call_extern
5157 |. sw TMP2, TMPD
5158 | ld CARG2, FORL_STOP*8(RA)
5159 | move CARG1, CRET1
5160 if ( op == BC_JFORL ) {
5161 | lhu RD, -4+OFS_RD(PC)
5162 | decode_RD8b RD
5163 }
5164 | bal ->vm_sfcmpolex
5165 |. lw TMP3, FORL_STEP*8+HI(RA)
5166 | b <1
5167 |. lw TMP2, TMPD
5168 }
5169 |.endif
5170 break;
5171
5172 case BC_ITERL:
5173 |.if JIT
5174 | hotloop
5175 |.endif
5176 | // Fall through. Assumes BC_IITERL follows.
5177 break;
5178
5179 case BC_JITERL:
5180#if !LJ_HASJIT
5181 break;
5182#endif
5183 case BC_IITERL:
5184 | // RA = base*8, RD = target
5185 | daddu RA, BASE, RA
5186 | ld TMP1, 0(RA)
5187 | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
5188 |. nop
5189 if (op == BC_JITERL) {
5190 | b =>BC_JLOOP
5191 |. sd TMP1, -8(RA)
5192 } else {
5193 | branch_RD // Otherwise save control var + branch.
5194 | sd TMP1, -8(RA)
5195 }
5196 |1:
5197 | ins_next
5198 break;
5199
5200 case BC_LOOP:
5201 | // RA = base*8, RD = target (loop extent)
5202 | // Note: RA/RD is only used by trace recorder to determine scope/extent
5203 | // This opcode does NOT jump, it's only purpose is to detect a hot loop.
5204 |.if JIT
5205 | hotloop
5206 |.endif
5207 | // Fall through. Assumes BC_ILOOP follows.
5208 break;
5209
5210 case BC_ILOOP:
5211 | // RA = base*8, RD = target (loop extent)
5212 | ins_next
5213 break;
5214
5215 case BC_JLOOP:
5216 |.if JIT
5217 | // RA = base*8 (ignored), RD = traceno*8
5218 | ld TMP1, DISPATCH_J(trace)(DISPATCH)
5219 | li AT, 0
5220 | daddu TMP1, TMP1, RD
5221 | // Traces on MIPS don't store the trace number, so use 0.
5222 | sd AT, DISPATCH_GL(vmstate)(DISPATCH)
5223 | ld TRACE:TMP2, 0(TMP1)
5224 | sd BASE, DISPATCH_GL(jit_base)(DISPATCH)
5225 | ld TMP2, TRACE:TMP2->mcode
5226 | sd L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
5227 | jr TMP2
5228 |. daddiu JGL, DISPATCH, GG_DISP2G+32768
5229 |.endif
5230 break;
5231
5232 case BC_JMP:
5233 | // RA = base*8 (only used by trace recorder), RD = target
5234 | branch_RD
5235 | ins_next
5236 break;
5237
5238 /* -- Function headers -------------------------------------------------- */
5239
5240 case BC_FUNCF:
5241 |.if JIT
5242 | hotcall
5243 |.endif
5244 case BC_FUNCV: /* NYI: compiled vararg functions. */
5245 | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
5246 break;
5247
5248 case BC_JFUNCF:
5249#if !LJ_HASJIT
5250 break;
5251#endif
5252 case BC_IFUNCF:
5253 | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
5254 | ld TMP2, L->maxstack
5255 | lbu TMP1, -4+PC2PROTO(numparams)(PC)
5256 | ld KBASE, -4+PC2PROTO(k)(PC)
5257 | sltu AT, TMP2, RA
5258 | bnez AT, ->vm_growstack_l
5259 |. sll TMP1, TMP1, 3
5260 if (op != BC_JFUNCF) {
5261 | ins_next1
5262 }
5263 |2:
5264 | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
5265 | bnez AT, >3
5266 |. daddu AT, BASE, NARGS8:RC
5267 if (op == BC_JFUNCF) {
5268 | decode_RD8a RD, INS
5269 | b =>BC_JLOOP
5270 |. decode_RD8b RD
5271 } else {
5272 | ins_next2
5273 }
5274 |
5275 |3: // Clear missing parameters.
5276 | sd TISNIL, 0(AT)
5277 | b <2
5278 |. addiu NARGS8:RC, NARGS8:RC, 8
5279 break;
5280
5281 case BC_JFUNCV:
5282#if !LJ_HASJIT
5283 break;
5284#endif
5285 | NYI // NYI: compiled vararg functions
5286 break; /* NYI: compiled vararg functions. */
5287
5288 case BC_IFUNCV:
5289 | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
5290 | li TMP0, LJ_TFUNC
5291 | daddu TMP1, BASE, RC
5292 | ld TMP2, L->maxstack
5293 | settp LFUNC:RB, TMP0
5294 | daddu TMP0, RA, RC
5295 | sd LFUNC:RB, 0(TMP1) // Store (tagged) copy of LFUNC.
5296 | daddiu TMP3, RC, 16+FRAME_VARG
5297 | sltu AT, TMP0, TMP2
5298 | ld KBASE, -4+PC2PROTO(k)(PC)
5299 | beqz AT, ->vm_growstack_l
5300 |. sd TMP3, 8(TMP1) // Store delta + FRAME_VARG.
5301 | lbu TMP2, -4+PC2PROTO(numparams)(PC)
5302 | move RA, BASE
5303 | move RC, TMP1
5304 | ins_next1
5305 | beqz TMP2, >3
5306 |. daddiu BASE, TMP1, 16
5307 |1:
5308 | ld TMP0, 0(RA)
5309 | sltu AT, RA, RC // Less args than parameters?
5310 | move CARG1, TMP0
5311 |.if MIPSR6
5312 | selnez TMP0, TMP0, AT
5313 | seleqz TMP3, TISNIL, AT
5314 | or TMP0, TMP0, TMP3
5315 | seleqz TMP3, CARG1, AT
5316 | selnez CARG1, TISNIL, AT
5317 | or CARG1, CARG1, TMP3
5318 |.else
5319 | movz TMP0, TISNIL, AT // Clear missing parameters.
5320 | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
5321 |.endif
5322 | addiu TMP2, TMP2, -1
5323 | sd TMP0, 16(TMP1)
5324 | daddiu TMP1, TMP1, 8
5325 | sd CARG1, 0(RA)
5326 | bnez TMP2, <1
5327 |. daddiu RA, RA, 8
5328 |3:
5329 | ins_next2
5330 break;
5331
5332 case BC_FUNCC:
5333 case BC_FUNCCW:
5334 | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
5335 if (op == BC_FUNCC) {
5336 | ld CFUNCADDR, CFUNC:RB->f
5337 } else {
5338 | ld CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
5339 }
5340 | daddu TMP1, RA, NARGS8:RC
5341 | ld TMP2, L->maxstack
5342 | daddu RC, BASE, NARGS8:RC
5343 | sd BASE, L->base
5344 | sltu AT, TMP2, TMP1
5345 | sd RC, L->top
5346 | li_vmstate C
5347 if (op == BC_FUNCCW) {
5348 | ld CARG2, CFUNC:RB->f
5349 }
5350 | bnez AT, ->vm_growstack_c // Need to grow stack.
5351 |. move CARG1, L
5352 | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
5353 |. st_vmstate
5354 | // Returns nresults.
5355 | ld BASE, L->base
5356 | sll RD, CRET1, 3
5357 | ld TMP1, L->top
5358 | li_vmstate INTERP
5359 | ld PC, FRAME_PC(BASE) // Fetch PC of caller.
5360 | dsubu RA, TMP1, RD // RA = L->top - nresults*8
5361 | sd L, DISPATCH_GL(cur_L)(DISPATCH)
5362 | b ->vm_returnc
5363 |. st_vmstate
5364 break;
5365
5366 /* ---------------------------------------------------------------------- */
5367
5368 default:
5369 fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
5370 exit(2);
5371 break;
5372 }
5373}
5374
/* Emit the machine code for the whole VM backend: helper subroutines first,
** then one code fragment per bytecode instruction. Returns the number of
** bytecode ops emitted (BC__MAX), which the caller uses to size/build the
** dispatch table.
*/
5375static int build_backend(BuildCtx *ctx)
5376{
5377  int op;
5378
  /* Reserve one DynASM pc-label per bytecode op (the =>BC_* branch targets
  ** used throughout the fragments above).
  */
5379  dasm_growpc(Dst, BC__MAX);
5380
  /* Subroutines/fast functions must be emitted before the per-op fragments. */
5381  build_subroutines(ctx);
5382
  /* Switch to the bytecode-op code section for the dispatch fragments. */
5383  |.code_op
5384  for (op = 0; op < BC__MAX; op++)
5385    build_ins(ctx, (BCOp)op, op);
5386
5387  return BC__MAX;
5388}
5389
5390/* Emit pseudo frame-info for all assembler functions. */
/* Writes a DWARF .debug_frame section (one CIE plus FDEs) describing the
** stack frames of the generated interpreter code so debuggers can unwind
** through it. Only the ELF assembler output mode emits anything; all other
** build modes fall through to the empty default case.
*/
5391static void emit_asm_debug(BuildCtx *ctx)
5392{
  /* Offset of lj_vm_ffi_call from the start of the code, i.e. the size of
  ** the main interpreter region covered by the first FDE.
  */
5393  int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
5394  int i;
5395  switch (ctx->mode) {
5396  case BUILD_elfasm:
5397    fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
    /* CIE: id 0xffffffff (.debug_frame), version 1, empty augmentation,
    ** code align 1, data align -4, return-address register 31 (MIPS $ra);
    ** initial rule: CFA = register 29 ($sp) + 0.
    */
5398    fprintf(ctx->fp,
5399	".Lframe0:\n"
5400	"\t.4byte .LECIE0-.LSCIE0\n"
5401	".LSCIE0:\n"
5402	"\t.4byte 0xffffffff\n"
5403	"\t.byte 0x1\n"
5404	"\t.string \"\"\n"
5405	"\t.uleb128 0x1\n"
5406	"\t.sleb128 -4\n"
5407	"\t.byte 31\n"
5408	"\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
5409	"\t.align 2\n"
5410	".LECIE0:\n\n");
    /* FDE for the interpreter proper: covers fcofs bytes from .Lbegin.
    ** Sets CFA offset to CFRAME_SIZE and records save slots for regs 31/30
    ** (0x80|31, 0x80|30 = DW_CFA_offset for $ra and $fp/$s8 — NOTE(review):
    ** offsets are in data-alignment units; confirm against the frame layout).
    */
5411    fprintf(ctx->fp,
5412	".LSFDE0:\n"
5413	"\t.4byte .LEFDE0-.LASFDE0\n"
5414	".LASFDE0:\n"
5415	"\t.4byte .Lframe0\n"
5416	"\t.8byte .Lbegin\n"
5417	"\t.8byte %d\n"
5418	"\t.byte 0xe\n\t.uleb128 %d\n"
5419	"\t.byte 0x9f\n\t.sleb128 2*5\n"
5420	"\t.byte 0x9e\n\t.sleb128 2*6\n",
5421	fcofs, CFRAME_SIZE);
    /* Save slots for the callee-saved GPRs r16..r23 ($s0-$s7). */
5422    for (i = 23; i >= 16; i--)
5423      fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2*(30-i));
5424#if !LJ_SOFTFP
    /* Hard-float only: save slots for FPRs f24..f31 (DWARF regs 32+24..). */
5425    for (i = 31; i >= 24; i--)
5426      fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 2*(46-i));
5427#endif
5428    fprintf(ctx->fp,
5429	"\t.align 2\n"
5430	".LEFDE0:\n\n");
5431#if LJ_HASFFI
    /* Second FDE covering lj_vm_ffi_call (the remainder of the code). */
5432    fprintf(ctx->fp,
5433	".LSFDE1:\n"
5434	"\t.4byte .LEFDE1-.LASFDE1\n"
5435	".LASFDE1:\n"
5436	"\t.4byte .Lframe0\n"
5437	"\t.4byte lj_vm_ffi_call\n"
5438	"\t.4byte %d\n"
5439	"\t.byte 0x9f\n\t.uleb128 2*1\n"
5440	"\t.byte 0x90\n\t.uleb128 2*2\n"
5441	"\t.byte 0xd\n\t.uleb128 0x10\n"
5442	"\t.align 2\n"
5443	".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
5444#endif
5445#if !LJ_NO_UNWIND
5446  /* NYI */
5447#endif
5448    break;
5449  default:
    /* Non-ELF targets: no frame-info emitted. */
5450    break;
5451  }
5452}
5453
diff --git a/src/vm_ppc.dasc b/src/vm_ppc.dasc
index de44027b..3070d86e 100644
--- a/src/vm_ppc.dasc
+++ b/src/vm_ppc.dasc
@@ -1,4 +1,4 @@
1|// Low-level VM code for PowerPC CPUs. 1|// Low-level VM code for PowerPC 32 bit or 32on64 bit mode.
2|// Bytecode interpreter, fast functions and helper functions. 2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h 3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4| 4|
@@ -18,7 +18,6 @@
18|// DynASM defines used by the PPC port: 18|// DynASM defines used by the PPC port:
19|// 19|//
20|// P64 64 bit pointers (only for GPR64 testing). 20|// P64 64 bit pointers (only for GPR64 testing).
21|// Note: a full PPC64 _LP64 port is not planned.
22|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3). 21|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3).
23|// Affects reg saves, stack layout, carry/overflow/dot flags etc. 22|// Affects reg saves, stack layout, carry/overflow/dot flags etc.
24|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360). 23|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360).
@@ -103,6 +102,18 @@
103|// Fixed register assignments for the interpreter. 102|// Fixed register assignments for the interpreter.
104|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA) 103|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA)
105| 104|
105|.macro .FPU, a, b
106|.if FPU
107| a, b
108|.endif
109|.endmacro
110|
111|.macro .FPU, a, b, c
112|.if FPU
113| a, b, c
114|.endif
115|.endmacro
116|
106|// The following must be C callee-save (but BASE is often refetched). 117|// The following must be C callee-save (but BASE is often refetched).
107|.define BASE, r14 // Base of current Lua stack frame. 118|.define BASE, r14 // Base of current Lua stack frame.
108|.define KBASE, r15 // Constants of current Lua function. 119|.define KBASE, r15 // Constants of current Lua function.
@@ -116,8 +127,10 @@
116|.define TISNUM, r22 127|.define TISNUM, r22
117|.define TISNIL, r23 128|.define TISNIL, r23
118|.define ZERO, r24 129|.define ZERO, r24
130|.if FPU
119|.define TOBIT, f30 // 2^52 + 2^51. 131|.define TOBIT, f30 // 2^52 + 2^51.
120|.define TONUM, f31 // 2^52 + 2^51 + 2^31. 132|.define TONUM, f31 // 2^52 + 2^51 + 2^31.
133|.endif
121| 134|
122|// The following temporaries are not saved across C calls, except for RA. 135|// The following temporaries are not saved across C calls, except for RA.
123|.define RA, r20 // Callee-save. 136|.define RA, r20 // Callee-save.
@@ -133,6 +146,7 @@
133| 146|
134|// Saved temporaries. 147|// Saved temporaries.
135|.define SAVE0, r21 148|.define SAVE0, r21
149|.define SAVE1, r25
136| 150|
137|// Calling conventions. 151|// Calling conventions.
138|.define CARG1, r3 152|.define CARG1, r3
@@ -141,8 +155,10 @@
141|.define CARG4, r6 // Overlaps TMP3. 155|.define CARG4, r6 // Overlaps TMP3.
142|.define CARG5, r7 // Overlaps INS. 156|.define CARG5, r7 // Overlaps INS.
143| 157|
158|.if FPU
144|.define FARG1, f1 159|.define FARG1, f1
145|.define FARG2, f2 160|.define FARG2, f2
161|.endif
146| 162|
147|.define CRET1, r3 163|.define CRET1, r3
148|.define CRET2, r4 164|.define CRET2, r4
@@ -213,10 +229,16 @@
213|.endif 229|.endif
214|.else 230|.else
215| 231|
232|.if FPU
216|.define SAVE_LR, 276(sp) 233|.define SAVE_LR, 276(sp)
217|.define CFRAME_SPACE, 272 // Delta for sp. 234|.define CFRAME_SPACE, 272 // Delta for sp.
218|// Back chain for sp: 272(sp) <-- sp entering interpreter 235|// Back chain for sp: 272(sp) <-- sp entering interpreter
219|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves. 236|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves.
237|.else
238|.define SAVE_LR, 132(sp)
239|.define CFRAME_SPACE, 128 // Delta for sp.
240|// Back chain for sp: 128(sp) <-- sp entering interpreter
241|.endif
220|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves. 242|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves.
221|.define SAVE_CR, 52(sp) // 32 bit CR save. 243|.define SAVE_CR, 52(sp) // 32 bit CR save.
222|.define SAVE_ERRF, 48(sp) // 32 bit C frame info. 244|.define SAVE_ERRF, 48(sp) // 32 bit C frame info.
@@ -226,16 +248,25 @@
226|.define SAVE_PC, 32(sp) 248|.define SAVE_PC, 32(sp)
227|.define SAVE_MULTRES, 28(sp) 249|.define SAVE_MULTRES, 28(sp)
228|.define UNUSED1, 24(sp) 250|.define UNUSED1, 24(sp)
251|.if FPU
229|.define TMPD_LO, 20(sp) 252|.define TMPD_LO, 20(sp)
230|.define TMPD_HI, 16(sp) 253|.define TMPD_HI, 16(sp)
231|.define TONUM_LO, 12(sp) 254|.define TONUM_LO, 12(sp)
232|.define TONUM_HI, 8(sp) 255|.define TONUM_HI, 8(sp)
256|.else
257|.define SFSAVE_4, 20(sp)
258|.define SFSAVE_3, 16(sp)
259|.define SFSAVE_2, 12(sp)
260|.define SFSAVE_1, 8(sp)
261|.endif
233|// Next frame lr: 4(sp) 262|// Next frame lr: 4(sp)
234|// Back chain for sp: 0(sp) <-- sp while in interpreter 263|// Back chain for sp: 0(sp) <-- sp while in interpreter
235| 264|
265|.if FPU
236|.define TMPD_BLO, 23(sp) 266|.define TMPD_BLO, 23(sp)
237|.define TMPD, TMPD_HI 267|.define TMPD, TMPD_HI
238|.define TONUM_D, TONUM_HI 268|.define TONUM_D, TONUM_HI
269|.endif
239| 270|
240|.endif 271|.endif
241| 272|
@@ -245,7 +276,7 @@
245|.else 276|.else
246| stw r..reg, SAVE_GPR_+(reg-14)*4(sp) 277| stw r..reg, SAVE_GPR_+(reg-14)*4(sp)
247|.endif 278|.endif
248| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp) 279| .FPU stfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
249|.endmacro 280|.endmacro
250|.macro rest_, reg 281|.macro rest_, reg
251|.if GPR64 282|.if GPR64
@@ -253,7 +284,7 @@
253|.else 284|.else
254| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp) 285| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp)
255|.endif 286|.endif
256| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp) 287| .FPU lfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
257|.endmacro 288|.endmacro
258| 289|
259|.macro saveregs 290|.macro saveregs
@@ -316,19 +347,14 @@
316|.type NODE, Node 347|.type NODE, Node
317|.type NARGS8, int 348|.type NARGS8, int
318|.type TRACE, GCtrace 349|.type TRACE, GCtrace
350|.type SBUF, SBuf
319| 351|
320|//----------------------------------------------------------------------- 352|//-----------------------------------------------------------------------
321| 353|
322|// These basic macros should really be part of DynASM.
323|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
324|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
325|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
326|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
327|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
328|
329|// Trap for not-yet-implemented parts. 354|// Trap for not-yet-implemented parts.
330|.macro NYI; tw 4, sp, sp; .endmacro 355|.macro NYI; tw 4, sp, sp; .endmacro
331| 356|
357|.if FPU
332|// int/FP conversions. 358|// int/FP conversions.
333|.macro tonum_i, freg, reg 359|.macro tonum_i, freg, reg
334| xoris reg, reg, 0x8000 360| xoris reg, reg, 0x8000
@@ -352,6 +378,7 @@
352|.macro toint, reg, freg 378|.macro toint, reg, freg
353| toint reg, freg, freg 379| toint reg, freg, freg
354|.endmacro 380|.endmacro
381|.endif
355| 382|
356|//----------------------------------------------------------------------- 383|//-----------------------------------------------------------------------
357| 384|
@@ -539,9 +566,19 @@ static void build_subroutines(BuildCtx *ctx)
539 | beq >2 566 | beq >2
540 |1: 567 |1:
541 | addic. TMP1, TMP1, -8 568 | addic. TMP1, TMP1, -8
569 |.if FPU
542 | lfd f0, 0(RA) 570 | lfd f0, 0(RA)
571 |.else
572 | lwz CARG1, 0(RA)
573 | lwz CARG2, 4(RA)
574 |.endif
543 | addi RA, RA, 8 575 | addi RA, RA, 8
576 |.if FPU
544 | stfd f0, 0(BASE) 577 | stfd f0, 0(BASE)
578 |.else
579 | stw CARG1, 0(BASE)
580 | stw CARG2, 4(BASE)
581 |.endif
545 | addi BASE, BASE, 8 582 | addi BASE, BASE, 8
546 | bney <1 583 | bney <1
547 | 584 |
@@ -619,23 +656,23 @@ static void build_subroutines(BuildCtx *ctx)
619 | .toc ld TOCREG, SAVE_TOC 656 | .toc ld TOCREG, SAVE_TOC
620 | li TISNUM, LJ_TISNUM // Setup type comparison constants. 657 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
621 | lp BASE, L->base 658 | lp BASE, L->base
622 | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 659 | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
623 | lwz DISPATCH, L->glref // Setup pointer to dispatch table. 660 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
624 | li ZERO, 0 661 | li ZERO, 0
625 | stw TMP3, TMPD 662 | .FPU stw TMP3, TMPD
626 | li TMP1, LJ_TFALSE 663 | li TMP1, LJ_TFALSE
627 | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 664 | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
628 | li TISNIL, LJ_TNIL 665 | li TISNIL, LJ_TNIL
629 | li_vmstate INTERP 666 | li_vmstate INTERP
630 | lfs TOBIT, TMPD 667 | .FPU lfs TOBIT, TMPD
631 | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame. 668 | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
632 | la RA, -8(BASE) // Results start at BASE-8. 669 | la RA, -8(BASE) // Results start at BASE-8.
633 | stw TMP3, TMPD 670 | .FPU stw TMP3, TMPD
634 | addi DISPATCH, DISPATCH, GG_G2DISP 671 | addi DISPATCH, DISPATCH, GG_G2DISP
635 | stw TMP1, 0(RA) // Prepend false to error message. 672 | stw TMP1, 0(RA) // Prepend false to error message.
636 | li RD, 16 // 2 results: false + error message. 673 | li RD, 16 // 2 results: false + error message.
637 | st_vmstate 674 | st_vmstate
638 | lfs TONUM, TMPD 675 | .FPU lfs TONUM, TMPD
639 | b ->vm_returnc 676 | b ->vm_returnc
640 | 677 |
641 |//----------------------------------------------------------------------- 678 |//-----------------------------------------------------------------------
@@ -684,33 +721,34 @@ static void build_subroutines(BuildCtx *ctx)
684 | stw CARG3, SAVE_NRES 721 | stw CARG3, SAVE_NRES
685 | cmplwi TMP1, 0 722 | cmplwi TMP1, 0
686 | stw CARG3, SAVE_ERRF 723 | stw CARG3, SAVE_ERRF
687 | stp TMP0, L->cframe
688 | stp CARG3, SAVE_CFRAME 724 | stp CARG3, SAVE_CFRAME
689 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. 725 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
726 | stp TMP0, L->cframe
690 | beq >3 727 | beq >3
691 | 728 |
692 | // Resume after yield (like a return). 729 | // Resume after yield (like a return).
730 | stw L, DISPATCH_GL(cur_L)(DISPATCH)
693 | mr RA, BASE 731 | mr RA, BASE
694 | lp BASE, L->base 732 | lp BASE, L->base
695 | li TISNUM, LJ_TISNUM // Setup type comparison constants. 733 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
696 | lp TMP1, L->top 734 | lp TMP1, L->top
697 | lwz PC, FRAME_PC(BASE) 735 | lwz PC, FRAME_PC(BASE)
698 | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 736 | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
699 | stb CARG3, L->status 737 | stb CARG3, L->status
700 | stw TMP3, TMPD 738 | .FPU stw TMP3, TMPD
701 | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 739 | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
702 | lfs TOBIT, TMPD 740 | .FPU lfs TOBIT, TMPD
703 | sub RD, TMP1, BASE 741 | sub RD, TMP1, BASE
704 | stw TMP3, TMPD 742 | .FPU stw TMP3, TMPD
705 | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) 743 | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
706 | addi RD, RD, 8 744 | addi RD, RD, 8
707 | stw TMP0, TONUM_HI 745 | .FPU stw TMP0, TONUM_HI
708 | li_vmstate INTERP 746 | li_vmstate INTERP
709 | li ZERO, 0 747 | li ZERO, 0
710 | st_vmstate 748 | st_vmstate
711 | andix. TMP0, PC, FRAME_TYPE 749 | andix. TMP0, PC, FRAME_TYPE
712 | mr MULTRES, RD 750 | mr MULTRES, RD
713 | lfs TONUM, TMPD 751 | .FPU lfs TONUM, TMPD
714 | li TISNIL, LJ_TNIL 752 | li TISNIL, LJ_TNIL
715 | beq ->BC_RET_Z 753 | beq ->BC_RET_Z
716 | b ->vm_return 754 | b ->vm_return
@@ -729,33 +767,34 @@ static void build_subroutines(BuildCtx *ctx)
729 | 767 |
730 |1: // Entry point for vm_pcall above (PC = ftype). 768 |1: // Entry point for vm_pcall above (PC = ftype).
731 | lp TMP1, L:CARG1->cframe 769 | lp TMP1, L:CARG1->cframe
732 | stw CARG3, SAVE_NRES
733 | mr L, CARG1 770 | mr L, CARG1
734 | stw CARG1, SAVE_L 771 | stw CARG3, SAVE_NRES
735 | mr BASE, CARG2
736 | stp sp, L->cframe // Add our C frame to cframe chain.
737 | lwz DISPATCH, L->glref // Setup pointer to dispatch table. 772 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
773 | stw CARG1, SAVE_L
774 | mr BASE, CARG2
775 | addi DISPATCH, DISPATCH, GG_G2DISP
738 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. 776 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
739 | stp TMP1, SAVE_CFRAME 777 | stp TMP1, SAVE_CFRAME
740 | addi DISPATCH, DISPATCH, GG_G2DISP 778 | stp sp, L->cframe // Add our C frame to cframe chain.
741 | 779 |
742 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype). 780 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
781 | stw L, DISPATCH_GL(cur_L)(DISPATCH)
743 | lp TMP2, L->base // TMP2 = old base (used in vmeta_call). 782 | lp TMP2, L->base // TMP2 = old base (used in vmeta_call).
744 | li TISNUM, LJ_TISNUM // Setup type comparison constants. 783 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
745 | lp TMP1, L->top 784 | lp TMP1, L->top
746 | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 785 | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
747 | add PC, PC, BASE 786 | add PC, PC, BASE
748 | stw TMP3, TMPD 787 | .FPU stw TMP3, TMPD
749 | li ZERO, 0 788 | li ZERO, 0
750 | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 789 | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
751 | lfs TOBIT, TMPD 790 | .FPU lfs TOBIT, TMPD
752 | sub PC, PC, TMP2 // PC = frame delta + frame type 791 | sub PC, PC, TMP2 // PC = frame delta + frame type
753 | stw TMP3, TMPD 792 | .FPU stw TMP3, TMPD
754 | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) 793 | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
755 | sub NARGS8:RC, TMP1, BASE 794 | sub NARGS8:RC, TMP1, BASE
756 | stw TMP0, TONUM_HI 795 | .FPU stw TMP0, TONUM_HI
757 | li_vmstate INTERP 796 | li_vmstate INTERP
758 | lfs TONUM, TMPD 797 | .FPU lfs TONUM, TMPD
759 | li TISNIL, LJ_TNIL 798 | li TISNIL, LJ_TNIL
760 | st_vmstate 799 | st_vmstate
761 | 800 |
@@ -776,15 +815,18 @@ static void build_subroutines(BuildCtx *ctx)
776 | lwz TMP0, L:CARG1->stack 815 | lwz TMP0, L:CARG1->stack
777 | stw CARG1, SAVE_L 816 | stw CARG1, SAVE_L
778 | lp TMP1, L->top 817 | lp TMP1, L->top
818 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
779 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok. 819 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
780 | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top). 820 | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
781 | lp TMP1, L->cframe 821 | lp TMP1, L->cframe
782 | stp sp, L->cframe // Add our C frame to cframe chain. 822 | addi DISPATCH, DISPATCH, GG_G2DISP
783 | .toc lp CARG4, 0(CARG4) 823 | .toc lp CARG4, 0(CARG4)
784 | li TMP2, 0 824 | li TMP2, 0
785 | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame. 825 | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
786 | stw TMP2, SAVE_ERRF // No error function. 826 | stw TMP2, SAVE_ERRF // No error function.
787 | stp TMP1, SAVE_CFRAME 827 | stp TMP1, SAVE_CFRAME
828 | stp sp, L->cframe // Add our C frame to cframe chain.
829 | stw L, DISPATCH_GL(cur_L)(DISPATCH)
788 | mtctr CARG4 830 | mtctr CARG4
789 | bctrl // (lua_State *L, lua_CFunction func, void *ud) 831 | bctrl // (lua_State *L, lua_CFunction func, void *ud)
790 |.if PPE 832 |.if PPE
@@ -793,9 +835,7 @@ static void build_subroutines(BuildCtx *ctx)
793 |.else 835 |.else
794 | mr. BASE, CRET1 836 | mr. BASE, CRET1
795 |.endif 837 |.endif
796 | lwz DISPATCH, L->glref // Setup pointer to dispatch table. 838 | li PC, FRAME_CP
797 | li PC, FRAME_CP
798 | addi DISPATCH, DISPATCH, GG_G2DISP
799 | bne <3 // Else continue with the call. 839 | bne <3 // Else continue with the call.
800 | b ->vm_leave_cp // No base? Just remove C frame. 840 | b ->vm_leave_cp // No base? Just remove C frame.
801 | 841 |
@@ -842,15 +882,30 @@ static void build_subroutines(BuildCtx *ctx)
842 | lwz INS, -4(PC) 882 | lwz INS, -4(PC)
843 | subi CARG2, RB, 16 883 | subi CARG2, RB, 16
844 | decode_RB8 SAVE0, INS 884 | decode_RB8 SAVE0, INS
885 |.if FPU
845 | lfd f0, 0(RA) 886 | lfd f0, 0(RA)
887 |.else
888 | lwz TMP2, 0(RA)
889 | lwz TMP3, 4(RA)
890 |.endif
846 | add TMP1, BASE, SAVE0 891 | add TMP1, BASE, SAVE0
847 | stp BASE, L->base 892 | stp BASE, L->base
848 | cmplw TMP1, CARG2 893 | cmplw TMP1, CARG2
849 | sub CARG3, CARG2, TMP1 894 | sub CARG3, CARG2, TMP1
850 | decode_RA8 RA, INS 895 | decode_RA8 RA, INS
896 |.if FPU
851 | stfd f0, 0(CARG2) 897 | stfd f0, 0(CARG2)
898 |.else
899 | stw TMP2, 0(CARG2)
900 | stw TMP3, 4(CARG2)
901 |.endif
852 | bney ->BC_CAT_Z 902 | bney ->BC_CAT_Z
903 |.if FPU
853 | stfdx f0, BASE, RA 904 | stfdx f0, BASE, RA
905 |.else
906 | stwux TMP2, RA, BASE
907 | stw TMP3, 4(RA)
908 |.endif
854 | b ->cont_nop 909 | b ->cont_nop
855 | 910 |
856 |//-- Table indexing metamethods ----------------------------------------- 911 |//-- Table indexing metamethods -----------------------------------------
@@ -903,9 +958,19 @@ static void build_subroutines(BuildCtx *ctx)
903 | // Returns TValue * (finished) or NULL (metamethod). 958 | // Returns TValue * (finished) or NULL (metamethod).
904 | cmplwi CRET1, 0 959 | cmplwi CRET1, 0
905 | beq >3 960 | beq >3
961 |.if FPU
906 | lfd f0, 0(CRET1) 962 | lfd f0, 0(CRET1)
963 |.else
964 | lwz TMP0, 0(CRET1)
965 | lwz TMP1, 4(CRET1)
966 |.endif
907 | ins_next1 967 | ins_next1
968 |.if FPU
908 | stfdx f0, BASE, RA 969 | stfdx f0, BASE, RA
970 |.else
971 | stwux TMP0, RA, BASE
972 | stw TMP1, 4(RA)
973 |.endif
909 | ins_next2 974 | ins_next2
910 | 975 |
911 |3: // Call __index metamethod. 976 |3: // Call __index metamethod.
@@ -918,6 +983,22 @@ static void build_subroutines(BuildCtx *ctx)
918 | li NARGS8:RC, 16 // 2 args for func(t, k). 983 | li NARGS8:RC, 16 // 2 args for func(t, k).
919 | b ->vm_call_dispatch_f 984 | b ->vm_call_dispatch_f
920 | 985 |
986 |->vmeta_tgetr:
987 | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
988 | // Returns cTValue * or NULL.
989 | cmplwi CRET1, 0
990 | beq >1
991 |.if FPU
992 | lfd f14, 0(CRET1)
993 |.else
994 | lwz SAVE0, 0(CRET1)
995 | lwz SAVE1, 4(CRET1)
996 |.endif
997 | b ->BC_TGETR_Z
998 |1:
999 | stwx TISNIL, BASE, RA
1000 | b ->cont_nop
1001 |
921 |//----------------------------------------------------------------------- 1002 |//-----------------------------------------------------------------------
922 | 1003 |
923 |->vmeta_tsets1: 1004 |->vmeta_tsets1:
@@ -967,11 +1048,21 @@ static void build_subroutines(BuildCtx *ctx)
967 | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k) 1048 | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
968 | // Returns TValue * (finished) or NULL (metamethod). 1049 | // Returns TValue * (finished) or NULL (metamethod).
969 | cmplwi CRET1, 0 1050 | cmplwi CRET1, 0
1051 |.if FPU
970 | lfdx f0, BASE, RA 1052 | lfdx f0, BASE, RA
1053 |.else
1054 | lwzux TMP2, RA, BASE
1055 | lwz TMP3, 4(RA)
1056 |.endif
971 | beq >3 1057 | beq >3
972 | // NOBARRIER: lj_meta_tset ensures the table is not black. 1058 | // NOBARRIER: lj_meta_tset ensures the table is not black.
973 | ins_next1 1059 | ins_next1
1060 |.if FPU
974 | stfd f0, 0(CRET1) 1061 | stfd f0, 0(CRET1)
1062 |.else
1063 | stw TMP2, 0(CRET1)
1064 | stw TMP3, 4(CRET1)
1065 |.endif
975 | ins_next2 1066 | ins_next2
976 | 1067 |
977 |3: // Call __newindex metamethod. 1068 |3: // Call __newindex metamethod.
@@ -982,9 +1073,28 @@ static void build_subroutines(BuildCtx *ctx)
982 | add PC, TMP1, BASE 1073 | add PC, TMP1, BASE
983 | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here. 1074 | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
984 | li NARGS8:RC, 24 // 3 args for func(t, k, v) 1075 | li NARGS8:RC, 24 // 3 args for func(t, k, v)
1076 |.if FPU
985 | stfd f0, 16(BASE) // Copy value to third argument. 1077 | stfd f0, 16(BASE) // Copy value to third argument.
1078 |.else
1079 | stw TMP2, 16(BASE)
1080 | stw TMP3, 20(BASE)
1081 |.endif
986 | b ->vm_call_dispatch_f 1082 | b ->vm_call_dispatch_f
987 | 1083 |
1084 |->vmeta_tsetr:
1085 | stp BASE, L->base
1086 | mr CARG1, L
1087 | stw PC, SAVE_PC
1088 | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
1089 | // Returns TValue *.
1090 |.if FPU
1091 | stfd f14, 0(CRET1)
1092 |.else
1093 | stw SAVE0, 0(CRET1)
1094 | stw SAVE1, 4(CRET1)
1095 |.endif
1096 | b ->cont_nop
1097 |
988 |//-- Comparison metamethods --------------------------------------------- 1098 |//-- Comparison metamethods ---------------------------------------------
989 | 1099 |
990 |->vmeta_comp: 1100 |->vmeta_comp:
@@ -1021,9 +1131,19 @@ static void build_subroutines(BuildCtx *ctx)
1021 | 1131 |
1022 |->cont_ra: // RA = resultptr 1132 |->cont_ra: // RA = resultptr
1023 | lwz INS, -4(PC) 1133 | lwz INS, -4(PC)
1134 |.if FPU
1024 | lfd f0, 0(RA) 1135 | lfd f0, 0(RA)
1136 |.else
1137 | lwz CARG1, 0(RA)
1138 | lwz CARG2, 4(RA)
1139 |.endif
1025 | decode_RA8 TMP1, INS 1140 | decode_RA8 TMP1, INS
1141 |.if FPU
1026 | stfdx f0, BASE, TMP1 1142 | stfdx f0, BASE, TMP1
1143 |.else
1144 | stwux CARG1, TMP1, BASE
1145 | stw CARG2, 4(TMP1)
1146 |.endif
1027 | b ->cont_nop 1147 | b ->cont_nop
1028 | 1148 |
1029 |->cont_condt: // RA = resultptr 1149 |->cont_condt: // RA = resultptr
@@ -1063,6 +1183,16 @@ static void build_subroutines(BuildCtx *ctx)
1063 | b <3 1183 | b <3
1064 |.endif 1184 |.endif
1065 | 1185 |
1186 |->vmeta_istype:
1187 | subi PC, PC, 4
1188 | stp BASE, L->base
1189 | srwi CARG2, RA, 3
1190 | mr CARG1, L
1191 | srwi CARG3, RD, 3
1192 | stw PC, SAVE_PC
1193 | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
1194 | b ->cont_nop
1195 |
1066 |//-- Arithmetic metamethods --------------------------------------------- 1196 |//-- Arithmetic metamethods ---------------------------------------------
1067 | 1197 |
1068 |->vmeta_arith_nv: 1198 |->vmeta_arith_nv:
@@ -1219,22 +1349,32 @@ static void build_subroutines(BuildCtx *ctx)
1219 |.macro .ffunc_n, name 1349 |.macro .ffunc_n, name
1220 |->ff_ .. name: 1350 |->ff_ .. name:
1221 | cmplwi NARGS8:RC, 8 1351 | cmplwi NARGS8:RC, 8
1222 | lwz CARG3, 0(BASE) 1352 | lwz CARG1, 0(BASE)
1353 |.if FPU
1223 | lfd FARG1, 0(BASE) 1354 | lfd FARG1, 0(BASE)
1355 |.else
1356 | lwz CARG2, 4(BASE)
1357 |.endif
1224 | blt ->fff_fallback 1358 | blt ->fff_fallback
1225 | checknum CARG3; bge ->fff_fallback 1359 | checknum CARG1; bge ->fff_fallback
1226 |.endmacro 1360 |.endmacro
1227 | 1361 |
1228 |.macro .ffunc_nn, name 1362 |.macro .ffunc_nn, name
1229 |->ff_ .. name: 1363 |->ff_ .. name:
1230 | cmplwi NARGS8:RC, 16 1364 | cmplwi NARGS8:RC, 16
1231 | lwz CARG3, 0(BASE) 1365 | lwz CARG1, 0(BASE)
1366 |.if FPU
1232 | lfd FARG1, 0(BASE) 1367 | lfd FARG1, 0(BASE)
1233 | lwz CARG4, 8(BASE) 1368 | lwz CARG3, 8(BASE)
1234 | lfd FARG2, 8(BASE) 1369 | lfd FARG2, 8(BASE)
1370 |.else
1371 | lwz CARG2, 4(BASE)
1372 | lwz CARG3, 8(BASE)
1373 | lwz CARG4, 12(BASE)
1374 |.endif
1235 | blt ->fff_fallback 1375 | blt ->fff_fallback
1376 | checknum CARG1; bge ->fff_fallback
1236 | checknum CARG3; bge ->fff_fallback 1377 | checknum CARG3; bge ->fff_fallback
1237 | checknum CARG4; bge ->fff_fallback
1238 |.endmacro 1378 |.endmacro
1239 | 1379 |
1240 |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1. 1380 |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
@@ -1255,14 +1395,21 @@ static void build_subroutines(BuildCtx *ctx)
1255 | bge cr1, ->fff_fallback 1395 | bge cr1, ->fff_fallback
1256 | stw CARG3, 0(RA) 1396 | stw CARG3, 0(RA)
1257 | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8. 1397 | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
1398 | addi TMP1, BASE, 8
1399 | add TMP2, RA, NARGS8:RC
1258 | stw CARG1, 4(RA) 1400 | stw CARG1, 4(RA)
1259 | beq ->fff_res // Done if exactly 1 argument. 1401 | beq ->fff_res // Done if exactly 1 argument.
1260 | li TMP1, 8
1261 | subi RC, RC, 8
1262 |1: 1402 |1:
1263 | cmplw TMP1, RC 1403 | cmplw TMP1, TMP2
1264 | lfdx f0, BASE, TMP1 1404 |.if FPU
1265 | stfdx f0, RA, TMP1 1405 | lfd f0, 0(TMP1)
1406 | stfd f0, 0(TMP1)
1407 |.else
1408 | lwz CARG1, 0(TMP1)
1409 | lwz CARG2, 4(TMP1)
1410 | stw CARG1, -8(TMP1)
1411 | stw CARG2, -4(TMP1)
1412 |.endif
1266 | addi TMP1, TMP1, 8 1413 | addi TMP1, TMP1, 8
1267 | bney <1 1414 | bney <1
1268 | b ->fff_res 1415 | b ->fff_res
@@ -1277,8 +1424,14 @@ static void build_subroutines(BuildCtx *ctx)
1277 | orc TMP1, TMP2, TMP0 1424 | orc TMP1, TMP2, TMP0
1278 | addi TMP1, TMP1, ~LJ_TISNUM+1 1425 | addi TMP1, TMP1, ~LJ_TISNUM+1
1279 | slwi TMP1, TMP1, 3 1426 | slwi TMP1, TMP1, 3
1427 |.if FPU
1280 | la TMP2, CFUNC:RB->upvalue 1428 | la TMP2, CFUNC:RB->upvalue
1281 | lfdx FARG1, TMP2, TMP1 1429 | lfdx FARG1, TMP2, TMP1
1430 |.else
1431 | add TMP1, CFUNC:RB, TMP1
1432 | lwz CARG1, CFUNC:TMP1->upvalue[0].u32.hi
1433 | lwz CARG2, CFUNC:TMP1->upvalue[0].u32.lo
1434 |.endif
1282 | b ->fff_resn 1435 | b ->fff_resn
1283 | 1436 |
1284 |//-- Base library: getters and setters --------------------------------- 1437 |//-- Base library: getters and setters ---------------------------------
@@ -1294,9 +1447,9 @@ static void build_subroutines(BuildCtx *ctx)
1294 | beq ->fff_restv 1447 | beq ->fff_restv
1295 | lwz TMP0, TAB:CARG1->hmask 1448 | lwz TMP0, TAB:CARG1->hmask
1296 | li CARG3, LJ_TTAB // Use metatable as default result. 1449 | li CARG3, LJ_TTAB // Use metatable as default result.
1297 | lwz TMP1, STR:RC->hash 1450 | lwz TMP1, STR:RC->sid
1298 | lwz NODE:TMP2, TAB:CARG1->node 1451 | lwz NODE:TMP2, TAB:CARG1->node
1299 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 1452 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
1300 | slwi TMP0, TMP1, 5 1453 | slwi TMP0, TMP1, 5
1301 | slwi TMP1, TMP1, 3 1454 | slwi TMP1, TMP1, 3
1302 | sub TMP1, TMP0, TMP1 1455 | sub TMP1, TMP0, TMP1
@@ -1356,7 +1509,12 @@ static void build_subroutines(BuildCtx *ctx)
1356 | mr CARG1, L 1509 | mr CARG1, L
1357 | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key) 1510 | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1358 | // Returns cTValue *. 1511 | // Returns cTValue *.
1512 |.if FPU
1359 | lfd FARG1, 0(CRET1) 1513 | lfd FARG1, 0(CRET1)
1514 |.else
1515 | lwz CARG2, 4(CRET1)
1516 | lwz CARG1, 0(CRET1) // Caveat: CARG1 == CRET1.
1517 |.endif
1360 | b ->fff_resn 1518 | b ->fff_resn
1361 | 1519 |
1362 |//-- Base library: conversions ------------------------------------------ 1520 |//-- Base library: conversions ------------------------------------------
@@ -1365,7 +1523,11 @@ static void build_subroutines(BuildCtx *ctx)
1365 | // Only handles the number case inline (without a base argument). 1523 | // Only handles the number case inline (without a base argument).
1366 | cmplwi NARGS8:RC, 8 1524 | cmplwi NARGS8:RC, 8
1367 | lwz CARG1, 0(BASE) 1525 | lwz CARG1, 0(BASE)
1526 |.if FPU
1368 | lfd FARG1, 0(BASE) 1527 | lfd FARG1, 0(BASE)
1528 |.else
1529 | lwz CARG2, 4(BASE)
1530 |.endif
1369 | bne ->fff_fallback // Exactly one argument. 1531 | bne ->fff_fallback // Exactly one argument.
1370 | checknum CARG1; bgt ->fff_fallback 1532 | checknum CARG1; bgt ->fff_fallback
1371 | b ->fff_resn 1533 | b ->fff_resn
@@ -1387,9 +1549,9 @@ static void build_subroutines(BuildCtx *ctx)
1387 | mr CARG1, L 1549 | mr CARG1, L
1388 | mr CARG2, BASE 1550 | mr CARG2, BASE
1389 |.if DUALNUM 1551 |.if DUALNUM
1390 | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o) 1552 | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
1391 |.else 1553 |.else
1392 | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np) 1554 | bl extern lj_strfmt_num // (lua_State *L, lua_Number *np)
1393 |.endif 1555 |.endif
1394 | // Returns GCstr *. 1556 | // Returns GCstr *.
1395 | li CARG3, LJ_TSTR 1557 | li CARG3, LJ_TSTR
@@ -1416,12 +1578,23 @@ static void build_subroutines(BuildCtx *ctx)
1416 | cmplwi CRET1, 0 1578 | cmplwi CRET1, 0
1417 | li CARG3, LJ_TNIL 1579 | li CARG3, LJ_TNIL
1418 | beq ->fff_restv // End of traversal: return nil. 1580 | beq ->fff_restv // End of traversal: return nil.
1419 | lfd f0, 8(BASE) // Copy key and value to results.
1420 | la RA, -8(BASE) 1581 | la RA, -8(BASE)
1582 |.if FPU
1583 | lfd f0, 8(BASE) // Copy key and value to results.
1421 | lfd f1, 16(BASE) 1584 | lfd f1, 16(BASE)
1422 | stfd f0, 0(RA) 1585 | stfd f0, 0(RA)
1423 | li RD, (2+1)*8
1424 | stfd f1, 8(RA) 1586 | stfd f1, 8(RA)
1587 |.else
1588 | lwz CARG1, 8(BASE)
1589 | lwz CARG2, 12(BASE)
1590 | lwz CARG3, 16(BASE)
1591 | lwz CARG4, 20(BASE)
1592 | stw CARG1, 0(RA)
1593 | stw CARG2, 4(RA)
1594 | stw CARG3, 8(RA)
1595 | stw CARG4, 12(RA)
1596 |.endif
1597 | li RD, (2+1)*8
1425 | b ->fff_res 1598 | b ->fff_res
1426 | 1599 |
1427 |.ffunc_1 pairs 1600 |.ffunc_1 pairs
@@ -1430,17 +1603,32 @@ static void build_subroutines(BuildCtx *ctx)
1430 | bne ->fff_fallback 1603 | bne ->fff_fallback
1431#if LJ_52 1604#if LJ_52
1432 | lwz TAB:TMP2, TAB:CARG1->metatable 1605 | lwz TAB:TMP2, TAB:CARG1->metatable
1606 |.if FPU
1433 | lfd f0, CFUNC:RB->upvalue[0] 1607 | lfd f0, CFUNC:RB->upvalue[0]
1608 |.else
1609 | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
1610 | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
1611 |.endif
1434 | cmplwi TAB:TMP2, 0 1612 | cmplwi TAB:TMP2, 0
1435 | la RA, -8(BASE) 1613 | la RA, -8(BASE)
1436 | bne ->fff_fallback 1614 | bne ->fff_fallback
1437#else 1615#else
1616 |.if FPU
1438 | lfd f0, CFUNC:RB->upvalue[0] 1617 | lfd f0, CFUNC:RB->upvalue[0]
1618 |.else
1619 | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
1620 | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
1621 |.endif
1439 | la RA, -8(BASE) 1622 | la RA, -8(BASE)
1440#endif 1623#endif
1441 | stw TISNIL, 8(BASE) 1624 | stw TISNIL, 8(BASE)
1442 | li RD, (3+1)*8 1625 | li RD, (3+1)*8
1626 |.if FPU
1443 | stfd f0, 0(RA) 1627 | stfd f0, 0(RA)
1628 |.else
1629 | stw TMP0, 0(RA)
1630 | stw TMP1, 4(RA)
1631 |.endif
1444 | b ->fff_res 1632 | b ->fff_res
1445 | 1633 |
1446 |.ffunc ipairs_aux 1634 |.ffunc ipairs_aux
@@ -1486,14 +1674,24 @@ static void build_subroutines(BuildCtx *ctx)
1486 | stfd FARG2, 0(RA) 1674 | stfd FARG2, 0(RA)
1487 |.endif 1675 |.endif
1488 | ble >2 // Not in array part? 1676 | ble >2 // Not in array part?
1677 |.if FPU
1489 | lwzx TMP2, TMP1, TMP3 1678 | lwzx TMP2, TMP1, TMP3
1490 | lfdx f0, TMP1, TMP3 1679 | lfdx f0, TMP1, TMP3
1680 |.else
1681 | lwzux TMP2, TMP1, TMP3
1682 | lwz TMP3, 4(TMP1)
1683 |.endif
1491 |1: 1684 |1:
1492 | checknil TMP2 1685 | checknil TMP2
1493 | li RD, (0+1)*8 1686 | li RD, (0+1)*8
1494 | beq ->fff_res // End of iteration, return 0 results. 1687 | beq ->fff_res // End of iteration, return 0 results.
1495 | li RD, (2+1)*8 1688 | li RD, (2+1)*8
1689 |.if FPU
1496 | stfd f0, 8(RA) 1690 | stfd f0, 8(RA)
1691 |.else
1692 | stw TMP2, 8(RA)
1693 | stw TMP3, 12(RA)
1694 |.endif
1497 | b ->fff_res 1695 | b ->fff_res
1498 |2: // Check for empty hash part first. Otherwise call C function. 1696 |2: // Check for empty hash part first. Otherwise call C function.
1499 | lwz TMP0, TAB:CARG1->hmask 1697 | lwz TMP0, TAB:CARG1->hmask
@@ -1507,7 +1705,11 @@ static void build_subroutines(BuildCtx *ctx)
1507 | li RD, (0+1)*8 1705 | li RD, (0+1)*8
1508 | beq ->fff_res 1706 | beq ->fff_res
1509 | lwz TMP2, 0(CRET1) 1707 | lwz TMP2, 0(CRET1)
1708 |.if FPU
1510 | lfd f0, 0(CRET1) 1709 | lfd f0, 0(CRET1)
1710 |.else
1711 | lwz TMP3, 4(CRET1)
1712 |.endif
1511 | b <1 1713 | b <1
1512 | 1714 |
1513 |.ffunc_1 ipairs 1715 |.ffunc_1 ipairs
@@ -1516,12 +1718,22 @@ static void build_subroutines(BuildCtx *ctx)
1516 | bne ->fff_fallback 1718 | bne ->fff_fallback
1517#if LJ_52 1719#if LJ_52
1518 | lwz TAB:TMP2, TAB:CARG1->metatable 1720 | lwz TAB:TMP2, TAB:CARG1->metatable
1721 |.if FPU
1519 | lfd f0, CFUNC:RB->upvalue[0] 1722 | lfd f0, CFUNC:RB->upvalue[0]
1723 |.else
1724 | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
1725 | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
1726 |.endif
1520 | cmplwi TAB:TMP2, 0 1727 | cmplwi TAB:TMP2, 0
1521 | la RA, -8(BASE) 1728 | la RA, -8(BASE)
1522 | bne ->fff_fallback 1729 | bne ->fff_fallback
1523#else 1730#else
1731 |.if FPU
1524 | lfd f0, CFUNC:RB->upvalue[0] 1732 | lfd f0, CFUNC:RB->upvalue[0]
1733 |.else
1734 | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
1735 | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
1736 |.endif
1525 | la RA, -8(BASE) 1737 | la RA, -8(BASE)
1526#endif 1738#endif
1527 |.if DUALNUM 1739 |.if DUALNUM
@@ -1531,7 +1743,12 @@ static void build_subroutines(BuildCtx *ctx)
1531 |.endif 1743 |.endif
1532 | stw ZERO, 12(BASE) 1744 | stw ZERO, 12(BASE)
1533 | li RD, (3+1)*8 1745 | li RD, (3+1)*8
1746 |.if FPU
1534 | stfd f0, 0(RA) 1747 | stfd f0, 0(RA)
1748 |.else
1749 | stw TMP0, 0(RA)
1750 | stw TMP1, 4(RA)
1751 |.endif
1535 | b ->fff_res 1752 | b ->fff_res
1536 | 1753 |
1537 |//-- Base library: catch errors ---------------------------------------- 1754 |//-- Base library: catch errors ----------------------------------------
@@ -1550,19 +1767,32 @@ static void build_subroutines(BuildCtx *ctx)
1550 | 1767 |
1551 |.ffunc xpcall 1768 |.ffunc xpcall
1552 | cmplwi NARGS8:RC, 16 1769 | cmplwi NARGS8:RC, 16
1553 | lwz CARG4, 8(BASE) 1770 | lwz CARG3, 8(BASE)
1771 |.if FPU
1554 | lfd FARG2, 8(BASE) 1772 | lfd FARG2, 8(BASE)
1555 | lfd FARG1, 0(BASE) 1773 | lfd FARG1, 0(BASE)
1774 |.else
1775 | lwz CARG1, 0(BASE)
1776 | lwz CARG2, 4(BASE)
1777 | lwz CARG4, 12(BASE)
1778 |.endif
1556 | blt ->fff_fallback 1779 | blt ->fff_fallback
1557 | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH) 1780 | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
1558 | mr TMP2, BASE 1781 | mr TMP2, BASE
1559 | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function. 1782 | checkfunc CARG3; bne ->fff_fallback // Traceback must be a function.
1560 | la BASE, 16(BASE) 1783 | la BASE, 16(BASE)
1561 | // Remember active hook before pcall. 1784 | // Remember active hook before pcall.
1562 | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31 1785 | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31
1786 |.if FPU
1563 | stfd FARG2, 0(TMP2) // Swap function and traceback. 1787 | stfd FARG2, 0(TMP2) // Swap function and traceback.
1564 | subi NARGS8:RC, NARGS8:RC, 16
1565 | stfd FARG1, 8(TMP2) 1788 | stfd FARG1, 8(TMP2)
1789 |.else
1790 | stw CARG3, 0(TMP2)
1791 | stw CARG4, 4(TMP2)
1792 | stw CARG1, 8(TMP2)
1793 | stw CARG2, 12(TMP2)
1794 |.endif
1795 | subi NARGS8:RC, NARGS8:RC, 16
1566 | addi PC, TMP1, 16+FRAME_PCALL 1796 | addi PC, TMP1, 16+FRAME_PCALL
1567 | b ->vm_call_dispatch 1797 | b ->vm_call_dispatch
1568 | 1798 |
@@ -1605,9 +1835,21 @@ static void build_subroutines(BuildCtx *ctx)
1605 | stp BASE, L->top 1835 | stp BASE, L->top
1606 |2: // Move args to coroutine. 1836 |2: // Move args to coroutine.
1607 | cmpw TMP1, NARGS8:RC 1837 | cmpw TMP1, NARGS8:RC
1838 |.if FPU
1608 | lfdx f0, BASE, TMP1 1839 | lfdx f0, BASE, TMP1
1840 |.else
1841 | add CARG3, BASE, TMP1
1842 | lwz TMP2, 0(CARG3)
1843 | lwz TMP3, 4(CARG3)
1844 |.endif
1609 | beq >3 1845 | beq >3
1846 |.if FPU
1610 | stfdx f0, CARG2, TMP1 1847 | stfdx f0, CARG2, TMP1
1848 |.else
1849 | add CARG3, CARG2, TMP1
1850 | stw TMP2, 0(CARG3)
1851 | stw TMP3, 4(CARG3)
1852 |.endif
1611 | addi TMP1, TMP1, 8 1853 | addi TMP1, TMP1, 8
1612 | b <2 1854 | b <2
1613 |3: 1855 |3:
@@ -1622,6 +1864,7 @@ static void build_subroutines(BuildCtx *ctx)
1622 | lp TMP3, L:SAVE0->top 1864 | lp TMP3, L:SAVE0->top
1623 | li_vmstate INTERP 1865 | li_vmstate INTERP
1624 | lp BASE, L->base 1866 | lp BASE, L->base
1867 | stw L, DISPATCH_GL(cur_L)(DISPATCH)
1625 | st_vmstate 1868 | st_vmstate
1626 | bgt >8 1869 | bgt >8
1627 | sub RD, TMP3, TMP2 1870 | sub RD, TMP3, TMP2
@@ -1637,8 +1880,17 @@ static void build_subroutines(BuildCtx *ctx)
1637 | stp TMP2, L:SAVE0->top // Clear coroutine stack. 1880 | stp TMP2, L:SAVE0->top // Clear coroutine stack.
1638 |5: // Move results from coroutine. 1881 |5: // Move results from coroutine.
1639 | cmplw TMP1, TMP3 1882 | cmplw TMP1, TMP3
1883 |.if FPU
1640 | lfdx f0, TMP2, TMP1 1884 | lfdx f0, TMP2, TMP1
1641 | stfdx f0, BASE, TMP1 1885 | stfdx f0, BASE, TMP1
1886 |.else
1887 | add CARG3, TMP2, TMP1
1888 | lwz CARG1, 0(CARG3)
1889 | lwz CARG2, 4(CARG3)
1890 | add CARG3, BASE, TMP1
1891 | stw CARG1, 0(CARG3)
1892 | stw CARG2, 4(CARG3)
1893 |.endif
1642 | addi TMP1, TMP1, 8 1894 | addi TMP1, TMP1, 8
1643 | bne <5 1895 | bne <5
1644 |6: 1896 |6:
@@ -1663,12 +1915,22 @@ static void build_subroutines(BuildCtx *ctx)
1663 | andix. TMP0, PC, FRAME_TYPE 1915 | andix. TMP0, PC, FRAME_TYPE
1664 | la TMP3, -8(TMP3) 1916 | la TMP3, -8(TMP3)
1665 | li TMP1, LJ_TFALSE 1917 | li TMP1, LJ_TFALSE
1918 |.if FPU
1666 | lfd f0, 0(TMP3) 1919 | lfd f0, 0(TMP3)
1920 |.else
1921 | lwz CARG1, 0(TMP3)
1922 | lwz CARG2, 4(TMP3)
1923 |.endif
1667 | stp TMP3, L:SAVE0->top // Remove error from coroutine stack. 1924 | stp TMP3, L:SAVE0->top // Remove error from coroutine stack.
1668 | li RD, (2+1)*8 1925 | li RD, (2+1)*8
1669 | stw TMP1, -8(BASE) // Prepend false to results. 1926 | stw TMP1, -8(BASE) // Prepend false to results.
1670 | la RA, -8(BASE) 1927 | la RA, -8(BASE)
1928 |.if FPU
1671 | stfd f0, 0(BASE) // Copy error message. 1929 | stfd f0, 0(BASE) // Copy error message.
1930 |.else
1931 | stw CARG1, 0(BASE) // Copy error message.
1932 | stw CARG2, 4(BASE)
1933 |.endif
1672 | b <7 1934 | b <7
1673 |.else 1935 |.else
1674 | mr CARG1, L 1936 | mr CARG1, L
@@ -1847,7 +2109,12 @@ static void build_subroutines(BuildCtx *ctx)
1847 | lus CARG1, 0x8000 // -(2^31). 2109 | lus CARG1, 0x8000 // -(2^31).
1848 | beqy ->fff_resi 2110 | beqy ->fff_resi
1849 |5: 2111 |5:
2112 |.if FPU
1850 | lfd FARG1, 0(BASE) 2113 | lfd FARG1, 0(BASE)
2114 |.else
2115 | lwz CARG1, 0(BASE)
2116 | lwz CARG2, 4(BASE)
2117 |.endif
1851 | blex func 2118 | blex func
1852 | b ->fff_resn 2119 | b ->fff_resn
1853 |.endmacro 2120 |.endmacro
@@ -1871,10 +2138,14 @@ static void build_subroutines(BuildCtx *ctx)
1871 | 2138 |
1872 |.ffunc math_log 2139 |.ffunc math_log
1873 | cmplwi NARGS8:RC, 8 2140 | cmplwi NARGS8:RC, 8
1874 | lwz CARG3, 0(BASE) 2141 | lwz CARG1, 0(BASE)
1875 | lfd FARG1, 0(BASE)
1876 | bne ->fff_fallback // Need exactly 1 argument. 2142 | bne ->fff_fallback // Need exactly 1 argument.
1877 | checknum CARG3; bge ->fff_fallback 2143 | checknum CARG1; bge ->fff_fallback
2144 |.if FPU
2145 | lfd FARG1, 0(BASE)
2146 |.else
2147 | lwz CARG2, 4(BASE)
2148 |.endif
1878 | blex log 2149 | blex log
1879 | b ->fff_resn 2150 | b ->fff_resn
1880 | 2151 |
@@ -1893,26 +2164,27 @@ static void build_subroutines(BuildCtx *ctx)
1893 | math_extern2 atan2 2164 | math_extern2 atan2
1894 | math_extern2 fmod 2165 | math_extern2 fmod
1895 | 2166 |
1896 |->ff_math_deg:
1897 |.ffunc_n math_rad
1898 | lfd FARG2, CFUNC:RB->upvalue[0]
1899 | fmul FARG1, FARG1, FARG2
1900 | b ->fff_resn
1901 |
1902 |.if DUALNUM 2167 |.if DUALNUM
1903 |.ffunc math_ldexp 2168 |.ffunc math_ldexp
1904 | cmplwi NARGS8:RC, 16 2169 | cmplwi NARGS8:RC, 16
1905 | lwz CARG3, 0(BASE) 2170 | lwz TMP0, 0(BASE)
2171 |.if FPU
1906 | lfd FARG1, 0(BASE) 2172 | lfd FARG1, 0(BASE)
1907 | lwz CARG4, 8(BASE) 2173 |.else
2174 | lwz CARG1, 0(BASE)
2175 | lwz CARG2, 4(BASE)
2176 |.endif
2177 | lwz TMP1, 8(BASE)
1908 |.if GPR64 2178 |.if GPR64
1909 | lwz CARG2, 12(BASE) 2179 | lwz CARG2, 12(BASE)
1910 |.else 2180 |.elif FPU
1911 | lwz CARG1, 12(BASE) 2181 | lwz CARG1, 12(BASE)
2182 |.else
2183 | lwz CARG3, 12(BASE)
1912 |.endif 2184 |.endif
1913 | blt ->fff_fallback 2185 | blt ->fff_fallback
1914 | checknum CARG3; bge ->fff_fallback 2186 | checknum TMP0; bge ->fff_fallback
1915 | checknum CARG4; bne ->fff_fallback 2187 | checknum TMP1; bne ->fff_fallback
1916 |.else 2188 |.else
1917 |.ffunc_nn math_ldexp 2189 |.ffunc_nn math_ldexp
1918 |.if GPR64 2190 |.if GPR64
@@ -1927,8 +2199,10 @@ static void build_subroutines(BuildCtx *ctx)
1927 |.ffunc_n math_frexp 2199 |.ffunc_n math_frexp
1928 |.if GPR64 2200 |.if GPR64
1929 | la CARG2, DISPATCH_GL(tmptv)(DISPATCH) 2201 | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
1930 |.else 2202 |.elif FPU
1931 | la CARG1, DISPATCH_GL(tmptv)(DISPATCH) 2203 | la CARG1, DISPATCH_GL(tmptv)(DISPATCH)
2204 |.else
2205 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
1932 |.endif 2206 |.endif
1933 | lwz PC, FRAME_PC(BASE) 2207 | lwz PC, FRAME_PC(BASE)
1934 | blex frexp 2208 | blex frexp
@@ -1937,7 +2211,12 @@ static void build_subroutines(BuildCtx *ctx)
1937 |.if not DUALNUM 2211 |.if not DUALNUM
1938 | tonum_i FARG2, TMP1 2212 | tonum_i FARG2, TMP1
1939 |.endif 2213 |.endif
2214 |.if FPU
1940 | stfd FARG1, 0(RA) 2215 | stfd FARG1, 0(RA)
2216 |.else
2217 | stw CRET1, 0(RA)
2218 | stw CRET2, 4(RA)
2219 |.endif
1941 | li RD, (2+1)*8 2220 | li RD, (2+1)*8
1942 |.if DUALNUM 2221 |.if DUALNUM
1943 | stw TISNUM, 8(RA) 2222 | stw TISNUM, 8(RA)
@@ -1950,13 +2229,20 @@ static void build_subroutines(BuildCtx *ctx)
1950 |.ffunc_n math_modf 2229 |.ffunc_n math_modf
1951 |.if GPR64 2230 |.if GPR64
1952 | la CARG2, -8(BASE) 2231 | la CARG2, -8(BASE)
1953 |.else 2232 |.elif FPU
1954 | la CARG1, -8(BASE) 2233 | la CARG1, -8(BASE)
2234 |.else
2235 | la CARG3, -8(BASE)
1955 |.endif 2236 |.endif
1956 | lwz PC, FRAME_PC(BASE) 2237 | lwz PC, FRAME_PC(BASE)
1957 | blex modf 2238 | blex modf
1958 | la RA, -8(BASE) 2239 | la RA, -8(BASE)
2240 |.if FPU
1959 | stfd FARG1, 0(BASE) 2241 | stfd FARG1, 0(BASE)
2242 |.else
2243 | stw CRET1, 0(BASE)
2244 | stw CRET2, 4(BASE)
2245 |.endif
1960 | li RD, (2+1)*8 2246 | li RD, (2+1)*8
1961 | b ->fff_res 2247 | b ->fff_res
1962 | 2248 |
@@ -1964,13 +2250,13 @@ static void build_subroutines(BuildCtx *ctx)
1964 |.if DUALNUM 2250 |.if DUALNUM
1965 | .ffunc_1 name 2251 | .ffunc_1 name
1966 | checknum CARG3 2252 | checknum CARG3
1967 | addi TMP1, BASE, 8 2253 | addi SAVE0, BASE, 8
1968 | add TMP2, BASE, NARGS8:RC 2254 | add SAVE1, BASE, NARGS8:RC
1969 | bne >4 2255 | bne >4
1970 |1: // Handle integers. 2256 |1: // Handle integers.
1971 | lwz CARG4, 0(TMP1) 2257 | lwz CARG4, 0(SAVE0)
1972 | cmplw cr1, TMP1, TMP2 2258 | cmplw cr1, SAVE0, SAVE1
1973 | lwz CARG2, 4(TMP1) 2259 | lwz CARG2, 4(SAVE0)
1974 | bge cr1, ->fff_resi 2260 | bge cr1, ->fff_resi
1975 | checknum CARG4 2261 | checknum CARG4
1976 | xoris TMP0, CARG1, 0x8000 2262 | xoris TMP0, CARG1, 0x8000
@@ -1987,36 +2273,76 @@ static void build_subroutines(BuildCtx *ctx)
1987 |.if GPR64 2273 |.if GPR64
1988 | rldicl CARG1, CARG1, 0, 32 2274 | rldicl CARG1, CARG1, 0, 32
1989 |.endif 2275 |.endif
1990 | addi TMP1, TMP1, 8 2276 | addi SAVE0, SAVE0, 8
1991 | b <1 2277 | b <1
1992 |3: 2278 |3:
1993 | bge ->fff_fallback 2279 | bge ->fff_fallback
1994 | // Convert intermediate result to number and continue below. 2280 | // Convert intermediate result to number and continue below.
2281 |.if FPU
1995 | tonum_i FARG1, CARG1 2282 | tonum_i FARG1, CARG1
1996 | lfd FARG2, 0(TMP1) 2283 | lfd FARG2, 0(SAVE0)
2284 |.else
2285 | mr CARG2, CARG1
2286 | bl ->vm_sfi2d_1
2287 | lwz CARG3, 0(SAVE0)
2288 | lwz CARG4, 4(SAVE0)
2289 |.endif
1997 | b >6 2290 | b >6
1998 |4: 2291 |4:
2292 |.if FPU
1999 | lfd FARG1, 0(BASE) 2293 | lfd FARG1, 0(BASE)
2294 |.else
2295 | lwz CARG1, 0(BASE)
2296 | lwz CARG2, 4(BASE)
2297 |.endif
2000 | bge ->fff_fallback 2298 | bge ->fff_fallback
2001 |5: // Handle numbers. 2299 |5: // Handle numbers.
2002 | lwz CARG4, 0(TMP1) 2300 | lwz CARG3, 0(SAVE0)
2003 | cmplw cr1, TMP1, TMP2 2301 | cmplw cr1, SAVE0, SAVE1
2004 | lfd FARG2, 0(TMP1) 2302 |.if FPU
2303 | lfd FARG2, 0(SAVE0)
2304 |.else
2305 | lwz CARG4, 4(SAVE0)
2306 |.endif
2005 | bge cr1, ->fff_resn 2307 | bge cr1, ->fff_resn
2006 | checknum CARG4; bge >7 2308 | checknum CARG3; bge >7
2007 |6: 2309 |6:
2008 | fsub f0, FARG1, FARG2 2310 | addi SAVE0, SAVE0, 8
2009 | addi TMP1, TMP1, 8 2311 |.if FPU
2010 |.if ismax 2312 |.if ismax
2313 | fsub f0, FARG1, FARG2
2314 |.else
2315 | fsub f0, FARG2, FARG1
2316 |.endif
2011 | fsel FARG1, f0, FARG1, FARG2 2317 | fsel FARG1, f0, FARG1, FARG2
2012 |.else 2318 |.else
2013 | fsel FARG1, f0, FARG2, FARG1 2319 | stw CARG1, SFSAVE_1
2320 | stw CARG2, SFSAVE_2
2321 | stw CARG3, SFSAVE_3
2322 | stw CARG4, SFSAVE_4
2323 | blex __ledf2
2324 | cmpwi CRET1, 0
2325 |.if ismax
2326 | blt >8
2327 |.else
2328 | bge >8
2329 |.endif
2330 | lwz CARG1, SFSAVE_1
2331 | lwz CARG2, SFSAVE_2
2332 | b <5
2333 |8:
2334 | lwz CARG1, SFSAVE_3
2335 | lwz CARG2, SFSAVE_4
2014 |.endif 2336 |.endif
2015 | b <5 2337 | b <5
2016 |7: // Convert integer to number and continue above. 2338 |7: // Convert integer to number and continue above.
2017 | lwz CARG2, 4(TMP1) 2339 | lwz CARG3, 4(SAVE0)
2018 | bne ->fff_fallback 2340 | bne ->fff_fallback
2019 | tonum_i FARG2, CARG2 2341 |.if FPU
2342 | tonum_i FARG2, CARG3
2343 |.else
2344 | bl ->vm_sfi2d_2
2345 |.endif
2020 | b <6 2346 | b <6
2021 |.else 2347 |.else
2022 | .ffunc_n name 2348 | .ffunc_n name
@@ -2028,13 +2354,13 @@ static void build_subroutines(BuildCtx *ctx)
2028 | checknum CARG2 2354 | checknum CARG2
2029 | bge cr1, ->fff_resn 2355 | bge cr1, ->fff_resn
2030 | bge ->fff_fallback 2356 | bge ->fff_fallback
2031 | fsub f0, FARG1, FARG2
2032 | addi TMP1, TMP1, 8
2033 |.if ismax 2357 |.if ismax
2034 | fsel FARG1, f0, FARG1, FARG2 2358 | fsub f0, FARG1, FARG2
2035 |.else 2359 |.else
2036 | fsel FARG1, f0, FARG2, FARG1 2360 | fsub f0, FARG2, FARG1
2037 |.endif 2361 |.endif
2362 | addi TMP1, TMP1, 8
2363 | fsel FARG1, f0, FARG1, FARG2
2038 | b <1 2364 | b <1
2039 |.endif 2365 |.endif
2040 |.endmacro 2366 |.endmacro
@@ -2044,11 +2370,6 @@ static void build_subroutines(BuildCtx *ctx)
2044 | 2370 |
2045 |//-- String library ----------------------------------------------------- 2371 |//-- String library -----------------------------------------------------
2046 | 2372 |
2047 |.ffunc_1 string_len
2048 | checkstr CARG3; bne ->fff_fallback
2049 | lwz CRET1, STR:CARG1->len
2050 | b ->fff_resi
2051 |
2052 |.ffunc string_byte // Only handle the 1-arg case here. 2373 |.ffunc string_byte // Only handle the 1-arg case here.
2053 | cmplwi NARGS8:RC, 8 2374 | cmplwi NARGS8:RC, 8
2054 | lwz CARG3, 0(BASE) 2375 | lwz CARG3, 0(BASE)
@@ -2103,6 +2424,7 @@ static void build_subroutines(BuildCtx *ctx)
2103 | stp BASE, L->base 2424 | stp BASE, L->base
2104 | stw PC, SAVE_PC 2425 | stw PC, SAVE_PC
2105 | bl extern lj_str_new // (lua_State *L, char *str, size_t l) 2426 | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
2427 |->fff_resstr:
2106 | // Returns GCstr *. 2428 | // Returns GCstr *.
2107 | lp BASE, L->base 2429 | lp BASE, L->base
2108 | li CARG3, LJ_TSTR 2430 | li CARG3, LJ_TSTR
@@ -2180,114 +2502,29 @@ static void build_subroutines(BuildCtx *ctx)
2180 | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0) 2502 | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
2181 | b <3 2503 | b <3
2182 | 2504 |
2183 |.ffunc string_rep // Only handle the 1-char case inline. 2505 |.macro ffstring_op, name
2184 | ffgccheck 2506 | .ffunc string_ .. name
2185 | cmplwi NARGS8:RC, 16
2186 | lwz TMP0, 0(BASE)
2187 | lwz STR:CARG1, 4(BASE)
2188 | lwz CARG4, 8(BASE)
2189 |.if DUALNUM
2190 | lwz CARG3, 12(BASE)
2191 |.else
2192 | lfd FARG2, 8(BASE)
2193 |.endif
2194 | bne ->fff_fallback // Exactly 2 arguments.
2195 | checkstr TMP0; bne ->fff_fallback
2196 |.if DUALNUM
2197 | checknum CARG4; bne ->fff_fallback
2198 |.else
2199 | checknum CARG4; bge ->fff_fallback
2200 | toint CARG3, FARG2
2201 |.endif
2202 | lwz TMP0, STR:CARG1->len
2203 | cmpwi CARG3, 0
2204 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
2205 | ble >2 // Count <= 0? (or non-int)
2206 | cmplwi TMP0, 1
2207 | subi TMP2, CARG3, 1
2208 | blt >2 // Zero length string?
2209 | cmplw cr1, TMP1, CARG3
2210 | bne ->fff_fallback // Fallback for > 1-char strings.
2211 | lbz TMP0, STR:CARG1[1]
2212 | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
2213 | blt cr1, ->fff_fallback
2214 |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
2215 | cmplwi TMP2, 0
2216 | stbx TMP0, CARG2, TMP2
2217 | subi TMP2, TMP2, 1
2218 | bne <1
2219 | b ->fff_newstr
2220 |2: // Return empty string.
2221 | la STR:CARG1, DISPATCH_GL(strempty)(DISPATCH)
2222 | li CARG3, LJ_TSTR
2223 | b ->fff_restv
2224 |
2225 |.ffunc string_reverse
2226 | ffgccheck 2507 | ffgccheck
2227 | cmplwi NARGS8:RC, 8 2508 | cmplwi NARGS8:RC, 8
2228 | lwz CARG3, 0(BASE) 2509 | lwz CARG3, 0(BASE)
2229 | lwz STR:CARG1, 4(BASE) 2510 | lwz STR:CARG2, 4(BASE)
2230 | blt ->fff_fallback 2511 | blt ->fff_fallback
2231 | checkstr CARG3 2512 | checkstr CARG3
2232 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH) 2513 | la SBUF:CARG1, DISPATCH_GL(tmpbuf)(DISPATCH)
2233 | bne ->fff_fallback 2514 | bne ->fff_fallback
2234 | lwz CARG3, STR:CARG1->len 2515 | lwz TMP0, SBUF:CARG1->b
2235 | la CARG1, #STR(STR:CARG1) 2516 | stw L, SBUF:CARG1->L
2236 | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH) 2517 | stp BASE, L->base
2237 | li TMP2, 0 2518 | stw PC, SAVE_PC
2238 | cmplw TMP1, CARG3 2519 | stw TMP0, SBUF:CARG1->p
2239 | subi TMP3, CARG3, 1 2520 | bl extern lj_buf_putstr_ .. name
2240 | blt ->fff_fallback 2521 | bl extern lj_buf_tostr
2241 |1: // Reverse string copy. 2522 | b ->fff_resstr
2242 | cmpwi TMP3, 0
2243 | lbzx TMP1, CARG1, TMP2
2244 | blty ->fff_newstr
2245 | stbx TMP1, CARG2, TMP3
2246 | subi TMP3, TMP3, 1
2247 | addi TMP2, TMP2, 1
2248 | b <1
2249 |
2250 |.macro ffstring_case, name, lo
2251 | .ffunc name
2252 | ffgccheck
2253 | cmplwi NARGS8:RC, 8
2254 | lwz CARG3, 0(BASE)
2255 | lwz STR:CARG1, 4(BASE)
2256 | blt ->fff_fallback
2257 | checkstr CARG3
2258 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
2259 | bne ->fff_fallback
2260 | lwz CARG3, STR:CARG1->len
2261 | la CARG1, #STR(STR:CARG1)
2262 | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
2263 | cmplw TMP1, CARG3
2264 | li TMP2, 0
2265 | blt ->fff_fallback
2266 |1: // ASCII case conversion.
2267 | cmplw TMP2, CARG3
2268 | lbzx TMP1, CARG1, TMP2
2269 | bgey ->fff_newstr
2270 | subi TMP0, TMP1, lo
2271 | xori TMP3, TMP1, 0x20
2272 | addic TMP0, TMP0, -26
2273 | subfe TMP3, TMP3, TMP3
2274 | rlwinm TMP3, TMP3, 0, 26, 26 // x &= 0x20.
2275 | xor TMP1, TMP1, TMP3
2276 | stbx TMP1, CARG2, TMP2
2277 | addi TMP2, TMP2, 1
2278 | b <1
2279 |.endmacro 2523 |.endmacro
2280 | 2524 |
2281 |ffstring_case string_lower, 65 2525 |ffstring_op reverse
2282 |ffstring_case string_upper, 97 2526 |ffstring_op lower
2283 | 2527 |ffstring_op upper
2284 |//-- Table library ------------------------------------------------------
2285 |
2286 |.ffunc_1 table_getn
2287 | checktab CARG3; bne ->fff_fallback
2288 | bl extern lj_tab_len // (GCtab *t)
2289 | // Returns uint32_t (but less than 2^31).
2290 | b ->fff_resi
2291 | 2528 |
2292 |//-- Bit library -------------------------------------------------------- 2529 |//-- Bit library --------------------------------------------------------
2293 | 2530 |
@@ -2305,28 +2542,37 @@ static void build_subroutines(BuildCtx *ctx)
2305 | 2542 |
2306 |.macro .ffunc_bit_op, name, ins 2543 |.macro .ffunc_bit_op, name, ins
2307 | .ffunc_bit name 2544 | .ffunc_bit name
2308 | addi TMP1, BASE, 8 2545 | addi SAVE0, BASE, 8
2309 | add TMP2, BASE, NARGS8:RC 2546 | add SAVE1, BASE, NARGS8:RC
2310 |1: 2547 |1:
2311 | lwz CARG4, 0(TMP1) 2548 | lwz CARG4, 0(SAVE0)
2312 | cmplw cr1, TMP1, TMP2 2549 | cmplw cr1, SAVE0, SAVE1
2313 |.if DUALNUM 2550 |.if DUALNUM
2314 | lwz CARG2, 4(TMP1) 2551 | lwz CARG2, 4(SAVE0)
2315 |.else 2552 |.else
2316 | lfd FARG1, 0(TMP1) 2553 | lfd FARG1, 0(SAVE0)
2317 |.endif 2554 |.endif
2318 | bgey cr1, ->fff_resi 2555 | bgey cr1, ->fff_resi
2319 | checknum CARG4 2556 | checknum CARG4
2320 |.if DUALNUM 2557 |.if DUALNUM
2558 |.if FPU
2321 | bnel ->fff_bitop_fb 2559 | bnel ->fff_bitop_fb
2322 |.else 2560 |.else
2561 | beq >3
2562 | stw CARG1, SFSAVE_1
2563 | bl ->fff_bitop_fb
2564 | mr CARG2, CARG1
2565 | lwz CARG1, SFSAVE_1
2566 |3:
2567 |.endif
2568 |.else
2323 | fadd FARG1, FARG1, TOBIT 2569 | fadd FARG1, FARG1, TOBIT
2324 | bge ->fff_fallback 2570 | bge ->fff_fallback
2325 | stfd FARG1, TMPD 2571 | stfd FARG1, TMPD
2326 | lwz CARG2, TMPD_LO 2572 | lwz CARG2, TMPD_LO
2327 |.endif 2573 |.endif
2328 | ins CARG1, CARG1, CARG2 2574 | ins CARG1, CARG1, CARG2
2329 | addi TMP1, TMP1, 8 2575 | addi SAVE0, SAVE0, 8
2330 | b <1 2576 | b <1
2331 |.endmacro 2577 |.endmacro
2332 | 2578 |
@@ -2348,7 +2594,14 @@ static void build_subroutines(BuildCtx *ctx)
2348 |.macro .ffunc_bit_sh, name, ins, shmod 2594 |.macro .ffunc_bit_sh, name, ins, shmod
2349 |.if DUALNUM 2595 |.if DUALNUM
2350 | .ffunc_2 bit_..name 2596 | .ffunc_2 bit_..name
2597 |.if FPU
2351 | checknum CARG3; bnel ->fff_tobit_fb 2598 | checknum CARG3; bnel ->fff_tobit_fb
2599 |.else
2600 | checknum CARG3; beq >1
2601 | bl ->fff_tobit_fb
2602 | lwz CARG2, 12(BASE) // Conversion polluted CARG2.
2603 |1:
2604 |.endif
2352 | // Note: no inline conversion from number for 2nd argument! 2605 | // Note: no inline conversion from number for 2nd argument!
2353 | checknum CARG4; bne ->fff_fallback 2606 | checknum CARG4; bne ->fff_fallback
2354 |.else 2607 |.else
@@ -2385,27 +2638,77 @@ static void build_subroutines(BuildCtx *ctx)
2385 |->fff_resn: 2638 |->fff_resn:
2386 | lwz PC, FRAME_PC(BASE) 2639 | lwz PC, FRAME_PC(BASE)
2387 | la RA, -8(BASE) 2640 | la RA, -8(BASE)
2641 |.if FPU
2388 | stfd FARG1, -8(BASE) 2642 | stfd FARG1, -8(BASE)
2643 |.else
2644 | stw CARG1, -8(BASE)
2645 | stw CARG2, -4(BASE)
2646 |.endif
2389 | b ->fff_res1 2647 | b ->fff_res1
2390 | 2648 |
2391 |// Fallback FP number to bit conversion. 2649 |// Fallback FP number to bit conversion.
2392 |->fff_tobit_fb: 2650 |->fff_tobit_fb:
2393 |.if DUALNUM 2651 |.if DUALNUM
2652 |.if FPU
2394 | lfd FARG1, 0(BASE) 2653 | lfd FARG1, 0(BASE)
2395 | bgt ->fff_fallback 2654 | bgt ->fff_fallback
2396 | fadd FARG1, FARG1, TOBIT 2655 | fadd FARG1, FARG1, TOBIT
2397 | stfd FARG1, TMPD 2656 | stfd FARG1, TMPD
2398 | lwz CARG1, TMPD_LO 2657 | lwz CARG1, TMPD_LO
2399 | blr 2658 | blr
2659 |.else
2660 | bgt ->fff_fallback
2661 | mr CARG2, CARG1
2662 | mr CARG1, CARG3
2663 |// Modifies: CARG1, CARG2, TMP0, TMP1, TMP2.
2664 |->vm_tobit:
2665 | slwi TMP2, CARG1, 1
2666 | addis TMP2, TMP2, 0x0020
2667 | cmpwi TMP2, 0
2668 | bge >2
2669 | li TMP1, 0x3e0
2670 | srawi TMP2, TMP2, 21
2671 | not TMP1, TMP1
2672 | sub. TMP2, TMP1, TMP2
2673 | cmpwi cr7, CARG1, 0
2674 | blt >1
2675 | slwi TMP1, CARG1, 11
2676 | srwi TMP0, CARG2, 21
2677 | oris TMP1, TMP1, 0x8000
2678 | or TMP1, TMP1, TMP0
2679 | srw CARG1, TMP1, TMP2
2680 | bclr 4, 28 // Return if cr7[lt] == 0, no hint.
2681 | neg CARG1, CARG1
2682 | blr
2683 |1:
2684 | addi TMP2, TMP2, 21
2685 | srw TMP1, CARG2, TMP2
2686 | slwi CARG2, CARG1, 12
2687 | subfic TMP2, TMP2, 20
2688 | slw TMP0, CARG2, TMP2
2689 | or CARG1, TMP1, TMP0
2690 | bclr 4, 28 // Return if cr7[lt] == 0, no hint.
2691 | neg CARG1, CARG1
2692 | blr
2693 |2:
2694 | li CARG1, 0
2695 | blr
2696 |.endif
2400 |.endif 2697 |.endif
2401 |->fff_bitop_fb: 2698 |->fff_bitop_fb:
2402 |.if DUALNUM 2699 |.if DUALNUM
2403 | lfd FARG1, 0(TMP1) 2700 |.if FPU
2701 | lfd FARG1, 0(SAVE0)
2404 | bgt ->fff_fallback 2702 | bgt ->fff_fallback
2405 | fadd FARG1, FARG1, TOBIT 2703 | fadd FARG1, FARG1, TOBIT
2406 | stfd FARG1, TMPD 2704 | stfd FARG1, TMPD
2407 | lwz CARG2, TMPD_LO 2705 | lwz CARG2, TMPD_LO
2408 | blr 2706 | blr
2707 |.else
2708 | bgt ->fff_fallback
2709 | mr CARG1, CARG4
2710 | b ->vm_tobit
2711 |.endif
2409 |.endif 2712 |.endif
2410 | 2713 |
2411 |//----------------------------------------------------------------------- 2714 |//-----------------------------------------------------------------------
@@ -2589,15 +2892,88 @@ static void build_subroutines(BuildCtx *ctx)
2589 | mtctr CRET1 2892 | mtctr CRET1
2590 | bctr 2893 | bctr
2591 | 2894 |
2895 |->cont_stitch: // Trace stitching.
2896 |.if JIT
2897 | // RA = resultptr, RB = meta base
2898 | lwz INS, -4(PC)
2899 | lwz TRACE:TMP2, -20(RB) // Save previous trace.
2900 | addic. TMP1, MULTRES, -8
2901 | decode_RA8 RC, INS // Call base.
2902 | beq >2
2903 |1: // Move results down.
2904 |.if FPU
2905 | lfd f0, 0(RA)
2906 |.else
2907 | lwz CARG1, 0(RA)
2908 | lwz CARG2, 4(RA)
2909 |.endif
2910 | addic. TMP1, TMP1, -8
2911 | addi RA, RA, 8
2912 |.if FPU
2913 | stfdx f0, BASE, RC
2914 |.else
2915 | add CARG3, BASE, RC
2916 | stw CARG1, 0(CARG3)
2917 | stw CARG2, 4(CARG3)
2918 |.endif
2919 | addi RC, RC, 8
2920 | bne <1
2921 |2:
2922 | decode_RA8 RA, INS
2923 | decode_RB8 RB, INS
2924 | add RA, RA, RB
2925 |3:
2926 | cmplw RA, RC
2927 | bgt >9 // More results wanted?
2928 |
2929 | lhz TMP3, TRACE:TMP2->traceno
2930 | lhz RD, TRACE:TMP2->link
2931 | cmpw RD, TMP3
2932 | cmpwi cr1, RD, 0
2933 | beq ->cont_nop // Blacklisted.
2934 | slwi RD, RD, 3
2935 | bne cr1, =>BC_JLOOP // Jump to stitched trace.
2936 |
2937 | // Stitch a new trace to the previous trace.
2938 | stw TMP3, DISPATCH_J(exitno)(DISPATCH)
2939 | stp L, DISPATCH_J(L)(DISPATCH)
2940 | stp BASE, L->base
2941 | addi CARG1, DISPATCH, GG_DISP2J
2942 | mr CARG2, PC
2943 | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
2944 | lp BASE, L->base
2945 | b ->cont_nop
2946 |
2947 |9:
2948 | stwx TISNIL, BASE, RC
2949 | addi RC, RC, 8
2950 | b <3
2951 |.endif
2952 |
2953 |->vm_profhook: // Dispatch target for profiler hook.
2954#if LJ_HASPROFILE
2955 | mr CARG1, L
2956 | stw MULTRES, SAVE_MULTRES
2957 | mr CARG2, PC
2958 | stp BASE, L->base
2959 | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
2960 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2961 | lp BASE, L->base
2962 | subi PC, PC, 4
2963 | b ->cont_nop
2964#endif
2965 |
2592 |//----------------------------------------------------------------------- 2966 |//-----------------------------------------------------------------------
2593 |//-- Trace exit handler ------------------------------------------------- 2967 |//-- Trace exit handler -------------------------------------------------
2594 |//----------------------------------------------------------------------- 2968 |//-----------------------------------------------------------------------
2595 | 2969 |
2596 |.macro savex_, a, b, c, d 2970 |.macro savex_, a, b, c, d
2971 |.if FPU
2597 | stfd f..a, 16+a*8(sp) 2972 | stfd f..a, 16+a*8(sp)
2598 | stfd f..b, 16+b*8(sp) 2973 | stfd f..b, 16+b*8(sp)
2599 | stfd f..c, 16+c*8(sp) 2974 | stfd f..c, 16+c*8(sp)
2600 | stfd f..d, 16+d*8(sp) 2975 | stfd f..d, 16+d*8(sp)
2976 |.endif
2601 |.endmacro 2977 |.endmacro
2602 | 2978 |
2603 |->vm_exit_handler: 2979 |->vm_exit_handler:
@@ -2623,16 +2999,16 @@ static void build_subroutines(BuildCtx *ctx)
2623 | savex_ 20,21,22,23 2999 | savex_ 20,21,22,23
2624 | lhz CARG4, 2(CARG3) // Load trace number. 3000 | lhz CARG4, 2(CARG3) // Load trace number.
2625 | savex_ 24,25,26,27 3001 | savex_ 24,25,26,27
2626 | lwz L, DISPATCH_GL(jit_L)(DISPATCH) 3002 | lwz L, DISPATCH_GL(cur_L)(DISPATCH)
2627 | savex_ 28,29,30,31 3003 | savex_ 28,29,30,31
2628 | sub CARG3, TMP0, CARG3 // Compute exit number. 3004 | sub CARG3, TMP0, CARG3 // Compute exit number.
2629 | lp BASE, DISPATCH_GL(jit_base)(DISPATCH) 3005 | lp BASE, DISPATCH_GL(jit_base)(DISPATCH)
2630 | srwi CARG3, CARG3, 2 3006 | srwi CARG3, CARG3, 2
2631 | stw L, DISPATCH_J(L)(DISPATCH) 3007 | stp L, DISPATCH_J(L)(DISPATCH)
2632 | subi CARG3, CARG3, 2 3008 | subi CARG3, CARG3, 2
2633 | stw TMP1, DISPATCH_GL(jit_L)(DISPATCH)
2634 | stw CARG4, DISPATCH_J(parent)(DISPATCH)
2635 | stp BASE, L->base 3009 | stp BASE, L->base
3010 | stw CARG4, DISPATCH_J(parent)(DISPATCH)
3011 | stw TMP1, DISPATCH_GL(jit_base)(DISPATCH)
2636 | addi CARG1, DISPATCH, GG_DISP2J 3012 | addi CARG1, DISPATCH, GG_DISP2J
2637 | stw CARG3, DISPATCH_J(exitno)(DISPATCH) 3013 | stw CARG3, DISPATCH_J(exitno)(DISPATCH)
2638 | addi CARG2, sp, 16 3014 | addi CARG2, sp, 16
@@ -2656,28 +3032,29 @@ static void build_subroutines(BuildCtx *ctx)
2656 | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set. 3032 | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set.
2657 | lwz L, SAVE_L 3033 | lwz L, SAVE_L
2658 | addi DISPATCH, JGL, -GG_DISP2G-32768 3034 | addi DISPATCH, JGL, -GG_DISP2G-32768
3035 | stp BASE, L->base
2659 |1: 3036 |1:
2660 | cmpwi CARG1, 0 3037 | cmpwi CARG1, 0
2661 | blt >3 // Check for error from exit. 3038 | blt >9 // Check for error from exit.
2662 | lwz LFUNC:TMP1, FRAME_FUNC(BASE) 3039 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2663 | slwi MULTRES, CARG1, 3 3040 | slwi MULTRES, CARG1, 3
2664 | li TMP2, 0 3041 | li TMP2, 0
2665 | stw MULTRES, SAVE_MULTRES 3042 | stw MULTRES, SAVE_MULTRES
2666 | lwz TMP1, LFUNC:TMP1->pc 3043 | lwz TMP1, LFUNC:RB->pc
2667 | stw TMP2, DISPATCH_GL(jit_L)(DISPATCH) 3044 | stw TMP2, DISPATCH_GL(jit_base)(DISPATCH)
2668 | lwz KBASE, PC2PROTO(k)(TMP1) 3045 | lwz KBASE, PC2PROTO(k)(TMP1)
2669 | // Setup type comparison constants. 3046 | // Setup type comparison constants.
2670 | li TISNUM, LJ_TISNUM 3047 | li TISNUM, LJ_TISNUM
2671 | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 3048 | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2672 | stw TMP3, TMPD 3049 | .FPU stw TMP3, TMPD
2673 | li ZERO, 0 3050 | li ZERO, 0
2674 | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 3051 | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
2675 | lfs TOBIT, TMPD 3052 | .FPU lfs TOBIT, TMPD
2676 | stw TMP3, TMPD 3053 | .FPU stw TMP3, TMPD
2677 | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) 3054 | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
2678 | li TISNIL, LJ_TNIL 3055 | li TISNIL, LJ_TNIL
2679 | stw TMP0, TONUM_HI 3056 | .FPU stw TMP0, TONUM_HI
2680 | lfs TONUM, TMPD 3057 | .FPU lfs TONUM, TMPD
2681 | // Modified copy of ins_next which handles function header dispatch, too. 3058 | // Modified copy of ins_next which handles function header dispatch, too.
2682 | lwz INS, 0(PC) 3059 | lwz INS, 0(PC)
2683 | addi PC, PC, 4 3060 | addi PC, PC, 4
@@ -2694,11 +3071,25 @@ static void build_subroutines(BuildCtx *ctx)
2694 | decode_RC8 RC, INS 3071 | decode_RC8 RC, INS
2695 | bctr 3072 | bctr
2696 |2: 3073 |2:
3074 | cmplwi TMP1, (BC_FUNCC+2)*4 // Fast function?
3075 | blt >3
3076 | // Check frame below fast function.
3077 | lwz TMP1, FRAME_PC(BASE)
3078 | andix. TMP0, TMP1, FRAME_TYPE
3079 | bney >3 // Trace stitching continuation?
3080 | // Otherwise set KBASE for Lua function below fast function.
3081 | lwz TMP2, -4(TMP1)
3082 | decode_RA8 TMP0, TMP2
3083 | sub TMP1, BASE, TMP0
3084 | lwz LFUNC:TMP2, -12(TMP1)
3085 | lwz TMP1, LFUNC:TMP2->pc
3086 | lwz KBASE, PC2PROTO(k)(TMP1)
3087 |3:
2697 | subi RC, MULTRES, 8 3088 | subi RC, MULTRES, 8
2698 | add RA, RA, BASE 3089 | add RA, RA, BASE
2699 | bctr 3090 | bctr
2700 | 3091 |
2701 |3: // Rethrow error from the right C frame. 3092 |9: // Rethrow error from the right C frame.
2702 | mr CARG1, L 3093 | mr CARG1, L
2703 | bl extern lj_err_run // (lua_State *L) 3094 | bl extern lj_err_run // (lua_State *L)
2704 |.endif 3095 |.endif
@@ -2707,7 +3098,35 @@ static void build_subroutines(BuildCtx *ctx)
2707 |//-- Math helper functions ---------------------------------------------- 3098 |//-- Math helper functions ----------------------------------------------
2708 |//----------------------------------------------------------------------- 3099 |//-----------------------------------------------------------------------
2709 | 3100 |
2710 |// NYI: Use internal implementations of floor, ceil, trunc. 3101 |// NYI: Use internal implementations of floor, ceil, trunc, sfcmp.
3102 |
3103 |.macro sfi2d, AHI, ALO
3104 |.if not FPU
3105 | mr. AHI, ALO
3106 | bclr 12, 2 // Handle zero first.
3107 | srawi TMP0, ALO, 31
3108 | xor TMP1, ALO, TMP0
3109 | sub TMP1, TMP1, TMP0 // Absolute value in TMP1.
3110 | cntlzw AHI, TMP1
3111 | andix. TMP0, TMP0, 0x800 // Mask sign bit.
3112 | slw TMP1, TMP1, AHI // Align mantissa left with leading 1.
3113 | subfic AHI, AHI, 0x3ff+31-1 // Exponent -1 in AHI.
3114 | slwi ALO, TMP1, 21
3115 | or AHI, AHI, TMP0 // Sign | Exponent.
3116 | srwi TMP1, TMP1, 11
3117 | slwi AHI, AHI, 20 // Align left.
3118 | add AHI, AHI, TMP1 // Add mantissa, increment exponent.
3119 | blr
3120 |.endif
3121 |.endmacro
3122 |
3123 |// Input: CARG2. Output: CARG1, CARG2. Temporaries: TMP0, TMP1.
3124 |->vm_sfi2d_1:
3125 | sfi2d CARG1, CARG2
3126 |
3127 |// Input: CARG4. Output: CARG3, CARG4. Temporaries: TMP0, TMP1.
3128 |->vm_sfi2d_2:
3129 | sfi2d CARG3, CARG4
2711 | 3130 |
2712 |->vm_modi: 3131 |->vm_modi:
2713 | divwo. TMP0, CARG1, CARG2 3132 | divwo. TMP0, CARG1, CARG2
@@ -2775,21 +3194,21 @@ static void build_subroutines(BuildCtx *ctx)
2775 | addi DISPATCH, r12, GG_G2DISP 3194 | addi DISPATCH, r12, GG_G2DISP
2776 | stw r11, CTSTATE->cb.slot 3195 | stw r11, CTSTATE->cb.slot
2777 | stw r3, CTSTATE->cb.gpr[0] 3196 | stw r3, CTSTATE->cb.gpr[0]
2778 | stfd f1, CTSTATE->cb.fpr[0] 3197 | .FPU stfd f1, CTSTATE->cb.fpr[0]
2779 | stw r4, CTSTATE->cb.gpr[1] 3198 | stw r4, CTSTATE->cb.gpr[1]
2780 | stfd f2, CTSTATE->cb.fpr[1] 3199 | .FPU stfd f2, CTSTATE->cb.fpr[1]
2781 | stw r5, CTSTATE->cb.gpr[2] 3200 | stw r5, CTSTATE->cb.gpr[2]
2782 | stfd f3, CTSTATE->cb.fpr[2] 3201 | .FPU stfd f3, CTSTATE->cb.fpr[2]
2783 | stw r6, CTSTATE->cb.gpr[3] 3202 | stw r6, CTSTATE->cb.gpr[3]
2784 | stfd f4, CTSTATE->cb.fpr[3] 3203 | .FPU stfd f4, CTSTATE->cb.fpr[3]
2785 | stw r7, CTSTATE->cb.gpr[4] 3204 | stw r7, CTSTATE->cb.gpr[4]
2786 | stfd f5, CTSTATE->cb.fpr[4] 3205 | .FPU stfd f5, CTSTATE->cb.fpr[4]
2787 | stw r8, CTSTATE->cb.gpr[5] 3206 | stw r8, CTSTATE->cb.gpr[5]
2788 | stfd f6, CTSTATE->cb.fpr[5] 3207 | .FPU stfd f6, CTSTATE->cb.fpr[5]
2789 | stw r9, CTSTATE->cb.gpr[6] 3208 | stw r9, CTSTATE->cb.gpr[6]
2790 | stfd f7, CTSTATE->cb.fpr[6] 3209 | .FPU stfd f7, CTSTATE->cb.fpr[6]
2791 | stw r10, CTSTATE->cb.gpr[7] 3210 | stw r10, CTSTATE->cb.gpr[7]
2792 | stfd f8, CTSTATE->cb.fpr[7] 3211 | .FPU stfd f8, CTSTATE->cb.fpr[7]
2793 | addi TMP0, sp, CFRAME_SPACE+8 3212 | addi TMP0, sp, CFRAME_SPACE+8
2794 | stw TMP0, CTSTATE->cb.stack 3213 | stw TMP0, CTSTATE->cb.stack
2795 | mr CARG1, CTSTATE 3214 | mr CARG1, CTSTATE
@@ -2800,21 +3219,21 @@ static void build_subroutines(BuildCtx *ctx)
2800 | lp BASE, L:CRET1->base 3219 | lp BASE, L:CRET1->base
2801 | li TISNUM, LJ_TISNUM // Setup type comparison constants. 3220 | li TISNUM, LJ_TISNUM // Setup type comparison constants.
2802 | lp RC, L:CRET1->top 3221 | lp RC, L:CRET1->top
2803 | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float). 3222 | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
2804 | li ZERO, 0 3223 | li ZERO, 0
2805 | mr L, CRET1 3224 | mr L, CRET1
2806 | stw TMP3, TMPD 3225 | .FPU stw TMP3, TMPD
2807 | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double) 3226 | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
2808 | lwz LFUNC:RB, FRAME_FUNC(BASE) 3227 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2809 | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float). 3228 | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
2810 | stw TMP0, TONUM_HI 3229 | .FPU stw TMP0, TONUM_HI
2811 | li TISNIL, LJ_TNIL 3230 | li TISNIL, LJ_TNIL
2812 | li_vmstate INTERP 3231 | li_vmstate INTERP
2813 | lfs TOBIT, TMPD 3232 | .FPU lfs TOBIT, TMPD
2814 | stw TMP3, TMPD 3233 | .FPU stw TMP3, TMPD
2815 | sub RC, RC, BASE 3234 | sub RC, RC, BASE
2816 | st_vmstate 3235 | st_vmstate
2817 | lfs TONUM, TMPD 3236 | .FPU lfs TONUM, TMPD
2818 | ins_callt 3237 | ins_callt
2819 |.endif 3238 |.endif
2820 | 3239 |
@@ -2828,7 +3247,7 @@ static void build_subroutines(BuildCtx *ctx)
2828 | mr CARG2, RA 3247 | mr CARG2, RA
2829 | bl extern lj_ccallback_leave // (CTState *cts, TValue *o) 3248 | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
2830 | lwz CRET1, CTSTATE->cb.gpr[0] 3249 | lwz CRET1, CTSTATE->cb.gpr[0]
2831 | lfd FARG1, CTSTATE->cb.fpr[0] 3250 | .FPU lfd FARG1, CTSTATE->cb.fpr[0]
2832 | lwz CRET2, CTSTATE->cb.gpr[1] 3251 | lwz CRET2, CTSTATE->cb.gpr[1]
2833 | b ->vm_leave_unw 3252 | b ->vm_leave_unw
2834 |.endif 3253 |.endif
@@ -2862,14 +3281,14 @@ static void build_subroutines(BuildCtx *ctx)
2862 | bge <1 3281 | bge <1
2863 |2: 3282 |2:
2864 | bney cr1, >3 3283 | bney cr1, >3
2865 | lfd f1, CCSTATE->fpr[0] 3284 | .FPU lfd f1, CCSTATE->fpr[0]
2866 | lfd f2, CCSTATE->fpr[1] 3285 | .FPU lfd f2, CCSTATE->fpr[1]
2867 | lfd f3, CCSTATE->fpr[2] 3286 | .FPU lfd f3, CCSTATE->fpr[2]
2868 | lfd f4, CCSTATE->fpr[3] 3287 | .FPU lfd f4, CCSTATE->fpr[3]
2869 | lfd f5, CCSTATE->fpr[4] 3288 | .FPU lfd f5, CCSTATE->fpr[4]
2870 | lfd f6, CCSTATE->fpr[5] 3289 | .FPU lfd f6, CCSTATE->fpr[5]
2871 | lfd f7, CCSTATE->fpr[6] 3290 | .FPU lfd f7, CCSTATE->fpr[6]
2872 | lfd f8, CCSTATE->fpr[7] 3291 | .FPU lfd f8, CCSTATE->fpr[7]
2873 |3: 3292 |3:
2874 | lp TMP0, CCSTATE->func 3293 | lp TMP0, CCSTATE->func
2875 | lwz CARG2, CCSTATE->gpr[1] 3294 | lwz CARG2, CCSTATE->gpr[1]
@@ -2886,7 +3305,7 @@ static void build_subroutines(BuildCtx *ctx)
2886 | lwz TMP2, -4(r14) 3305 | lwz TMP2, -4(r14)
2887 | lwz TMP0, 4(r14) 3306 | lwz TMP0, 4(r14)
2888 | stw CARG1, CCSTATE:TMP1->gpr[0] 3307 | stw CARG1, CCSTATE:TMP1->gpr[0]
2889 | stfd FARG1, CCSTATE:TMP1->fpr[0] 3308 | .FPU stfd FARG1, CCSTATE:TMP1->fpr[0]
2890 | stw CARG2, CCSTATE:TMP1->gpr[1] 3309 | stw CARG2, CCSTATE:TMP1->gpr[1]
2891 | mtlr TMP0 3310 | mtlr TMP0
2892 | stw CARG3, CCSTATE:TMP1->gpr[2] 3311 | stw CARG3, CCSTATE:TMP1->gpr[2]
@@ -2915,19 +3334,19 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2915 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT: 3334 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
2916 | // RA = src1*8, RD = src2*8, JMP with RD = target 3335 | // RA = src1*8, RD = src2*8, JMP with RD = target
2917 |.if DUALNUM 3336 |.if DUALNUM
2918 | lwzux TMP0, RA, BASE 3337 | lwzux CARG1, RA, BASE
2919 | addi PC, PC, 4 3338 | addi PC, PC, 4
2920 | lwz CARG2, 4(RA) 3339 | lwz CARG2, 4(RA)
2921 | lwzux TMP1, RD, BASE 3340 | lwzux CARG3, RD, BASE
2922 | lwz TMP2, -4(PC) 3341 | lwz TMP2, -4(PC)
2923 | checknum cr0, TMP0 3342 | checknum cr0, CARG1
2924 | lwz CARG3, 4(RD) 3343 | lwz CARG4, 4(RD)
2925 | decode_RD4 TMP2, TMP2 3344 | decode_RD4 TMP2, TMP2
2926 | checknum cr1, TMP1 3345 | checknum cr1, CARG3
2927 | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) 3346 | addis SAVE0, TMP2, -(BCBIAS_J*4 >> 16)
2928 | bne cr0, >7 3347 | bne cr0, >7
2929 | bne cr1, >8 3348 | bne cr1, >8
2930 | cmpw CARG2, CARG3 3349 | cmpw CARG2, CARG4
2931 if (op == BC_ISLT) { 3350 if (op == BC_ISLT) {
2932 | bge >2 3351 | bge >2
2933 } else if (op == BC_ISGE) { 3352 } else if (op == BC_ISGE) {
@@ -2938,28 +3357,41 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2938 | ble >2 3357 | ble >2
2939 } 3358 }
2940 |1: 3359 |1:
2941 | add PC, PC, TMP2 3360 | add PC, PC, SAVE0
2942 |2: 3361 |2:
2943 | ins_next 3362 | ins_next
2944 | 3363 |
2945 |7: // RA is not an integer. 3364 |7: // RA is not an integer.
2946 | bgt cr0, ->vmeta_comp 3365 | bgt cr0, ->vmeta_comp
2947 | // RA is a number. 3366 | // RA is a number.
2948 | lfd f0, 0(RA) 3367 | .FPU lfd f0, 0(RA)
2949 | bgt cr1, ->vmeta_comp 3368 | bgt cr1, ->vmeta_comp
2950 | blt cr1, >4 3369 | blt cr1, >4
2951 | // RA is a number, RD is an integer. 3370 | // RA is a number, RD is an integer.
2952 | tonum_i f1, CARG3 3371 |.if FPU
3372 | tonum_i f1, CARG4
3373 |.else
3374 | bl ->vm_sfi2d_2
3375 |.endif
2953 | b >5 3376 | b >5
2954 | 3377 |
2955 |8: // RA is an integer, RD is not an integer. 3378 |8: // RA is an integer, RD is not an integer.
2956 | bgt cr1, ->vmeta_comp 3379 | bgt cr1, ->vmeta_comp
2957 | // RA is an integer, RD is a number. 3380 | // RA is an integer, RD is a number.
3381 |.if FPU
2958 | tonum_i f0, CARG2 3382 | tonum_i f0, CARG2
3383 |.else
3384 | bl ->vm_sfi2d_1
3385 |.endif
2959 |4: 3386 |4:
2960 | lfd f1, 0(RD) 3387 | .FPU lfd f1, 0(RD)
2961 |5: 3388 |5:
3389 |.if FPU
2962 | fcmpu cr0, f0, f1 3390 | fcmpu cr0, f0, f1
3391 |.else
3392 | blex __ledf2
3393 | cmpwi CRET1, 0
3394 |.endif
2963 if (op == BC_ISLT) { 3395 if (op == BC_ISLT) {
2964 | bge <2 3396 | bge <2
2965 } else if (op == BC_ISGE) { 3397 } else if (op == BC_ISGE) {
@@ -3007,42 +3439,42 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3007 vk = op == BC_ISEQV; 3439 vk = op == BC_ISEQV;
3008 | // RA = src1*8, RD = src2*8, JMP with RD = target 3440 | // RA = src1*8, RD = src2*8, JMP with RD = target
3009 |.if DUALNUM 3441 |.if DUALNUM
3010 | lwzux TMP0, RA, BASE 3442 | lwzux CARG1, RA, BASE
3011 | addi PC, PC, 4 3443 | addi PC, PC, 4
3012 | lwz CARG2, 4(RA) 3444 | lwz CARG2, 4(RA)
3013 | lwzux TMP1, RD, BASE 3445 | lwzux CARG3, RD, BASE
3014 | checknum cr0, TMP0 3446 | checknum cr0, CARG1
3015 | lwz TMP2, -4(PC) 3447 | lwz SAVE0, -4(PC)
3016 | checknum cr1, TMP1 3448 | checknum cr1, CARG3
3017 | decode_RD4 TMP2, TMP2 3449 | decode_RD4 SAVE0, SAVE0
3018 | lwz CARG3, 4(RD) 3450 | lwz CARG4, 4(RD)
3019 | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt 3451 | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt
3020 | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) 3452 | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
3021 if (vk) { 3453 if (vk) {
3022 | ble cr7, ->BC_ISEQN_Z 3454 | ble cr7, ->BC_ISEQN_Z
3023 } else { 3455 } else {
3024 | ble cr7, ->BC_ISNEN_Z 3456 | ble cr7, ->BC_ISNEN_Z
3025 } 3457 }
3026 |.else 3458 |.else
3027 | lwzux TMP0, RA, BASE 3459 | lwzux CARG1, RA, BASE
3028 | lwz TMP2, 0(PC) 3460 | lwz SAVE0, 0(PC)
3029 | lfd f0, 0(RA) 3461 | lfd f0, 0(RA)
3030 | addi PC, PC, 4 3462 | addi PC, PC, 4
3031 | lwzux TMP1, RD, BASE 3463 | lwzux CARG3, RD, BASE
3032 | checknum cr0, TMP0 3464 | checknum cr0, CARG1
3033 | decode_RD4 TMP2, TMP2 3465 | decode_RD4 SAVE0, SAVE0
3034 | lfd f1, 0(RD) 3466 | lfd f1, 0(RD)
3035 | checknum cr1, TMP1 3467 | checknum cr1, CARG3
3036 | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) 3468 | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
3037 | bge cr0, >5 3469 | bge cr0, >5
3038 | bge cr1, >5 3470 | bge cr1, >5
3039 | fcmpu cr0, f0, f1 3471 | fcmpu cr0, f0, f1
3040 if (vk) { 3472 if (vk) {
3041 | bne >1 3473 | bne >1
3042 | add PC, PC, TMP2 3474 | add PC, PC, SAVE0
3043 } else { 3475 } else {
3044 | beq >1 3476 | beq >1
3045 | add PC, PC, TMP2 3477 | add PC, PC, SAVE0
3046 } 3478 }
3047 |1: 3479 |1:
3048 | ins_next 3480 | ins_next
@@ -3050,36 +3482,36 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3050 |5: // Either or both types are not numbers. 3482 |5: // Either or both types are not numbers.
3051 |.if not DUALNUM 3483 |.if not DUALNUM
3052 | lwz CARG2, 4(RA) 3484 | lwz CARG2, 4(RA)
3053 | lwz CARG3, 4(RD) 3485 | lwz CARG4, 4(RD)
3054 |.endif 3486 |.endif
3055 |.if FFI 3487 |.if FFI
3056 | cmpwi cr7, TMP0, LJ_TCDATA 3488 | cmpwi cr7, CARG1, LJ_TCDATA
3057 | cmpwi cr5, TMP1, LJ_TCDATA 3489 | cmpwi cr5, CARG3, LJ_TCDATA
3058 |.endif 3490 |.endif
3059 | not TMP3, TMP0 3491 | not TMP2, CARG1
3060 | cmplw TMP0, TMP1 3492 | cmplw CARG1, CARG3
3061 | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive? 3493 | cmplwi cr1, TMP2, ~LJ_TISPRI // Primitive?
3062 |.if FFI 3494 |.if FFI
3063 | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq 3495 | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq
3064 |.endif 3496 |.endif
3065 | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata? 3497 | cmplwi cr6, TMP2, ~LJ_TISTABUD // Table or userdata?
3066 |.if FFI 3498 |.if FFI
3067 | beq cr7, ->vmeta_equal_cd 3499 | beq cr7, ->vmeta_equal_cd
3068 |.endif 3500 |.endif
3069 | cmplw cr5, CARG2, CARG3 3501 | cmplw cr5, CARG2, CARG4
3070 | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive. 3502 | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive.
3071 | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type. 3503 | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type.
3072 | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv. 3504 | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv.
3073 | mr SAVE0, PC 3505 | mr SAVE1, PC
3074 | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2. 3506 | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2.
3075 | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2. 3507 | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2.
3076 if (vk) { 3508 if (vk) {
3077 | bne cr0, >6 3509 | bne cr0, >6
3078 | add PC, PC, TMP2 3510 | add PC, PC, SAVE0
3079 |6: 3511 |6:
3080 } else { 3512 } else {
3081 | beq cr0, >6 3513 | beq cr0, >6
3082 | add PC, PC, TMP2 3514 | add PC, PC, SAVE0
3083 |6: 3515 |6:
3084 } 3516 }
3085 |.if DUALNUM 3517 |.if DUALNUM
@@ -3094,6 +3526,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3094 | 3526 |
3095 | // Different tables or userdatas. Need to check __eq metamethod. 3527 | // Different tables or userdatas. Need to check __eq metamethod.
3096 | // Field metatable must be at same offset for GCtab and GCudata! 3528 | // Field metatable must be at same offset for GCtab and GCudata!
3529 | mr CARG3, CARG4
3097 | lwz TAB:TMP2, TAB:CARG2->metatable 3530 | lwz TAB:TMP2, TAB:CARG2->metatable
3098 | li CARG4, 1-vk // ne = 0 or 1. 3531 | li CARG4, 1-vk // ne = 0 or 1.
3099 | cmplwi TAB:TMP2, 0 3532 | cmplwi TAB:TMP2, 0
@@ -3101,7 +3534,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3101 | lbz TMP2, TAB:TMP2->nomm 3534 | lbz TMP2, TAB:TMP2->nomm
3102 | andix. TMP2, TMP2, 1<<MM_eq 3535 | andix. TMP2, TMP2, 1<<MM_eq
3103 | bne <1 // Or 'no __eq' flag set? 3536 | bne <1 // Or 'no __eq' flag set?
3104 | mr PC, SAVE0 // Restore old PC. 3537 | mr PC, SAVE1 // Restore old PC.
3105 | b ->vmeta_equal // Handle __eq metamethod. 3538 | b ->vmeta_equal // Handle __eq metamethod.
3106 break; 3539 break;
3107 3540
@@ -3142,16 +3575,16 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3142 vk = op == BC_ISEQN; 3575 vk = op == BC_ISEQN;
3143 | // RA = src*8, RD = num_const*8, JMP with RD = target 3576 | // RA = src*8, RD = num_const*8, JMP with RD = target
3144 |.if DUALNUM 3577 |.if DUALNUM
3145 | lwzux TMP0, RA, BASE 3578 | lwzux CARG1, RA, BASE
3146 | addi PC, PC, 4 3579 | addi PC, PC, 4
3147 | lwz CARG2, 4(RA) 3580 | lwz CARG2, 4(RA)
3148 | lwzux TMP1, RD, KBASE 3581 | lwzux CARG3, RD, KBASE
3149 | checknum cr0, TMP0 3582 | checknum cr0, CARG1
3150 | lwz TMP2, -4(PC) 3583 | lwz SAVE0, -4(PC)
3151 | checknum cr1, TMP1 3584 | checknum cr1, CARG3
3152 | decode_RD4 TMP2, TMP2 3585 | decode_RD4 SAVE0, SAVE0
3153 | lwz CARG3, 4(RD) 3586 | lwz CARG4, 4(RD)
3154 | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) 3587 | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
3155 if (vk) { 3588 if (vk) {
3156 |->BC_ISEQN_Z: 3589 |->BC_ISEQN_Z:
3157 } else { 3590 } else {
@@ -3159,7 +3592,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3159 } 3592 }
3160 | bne cr0, >7 3593 | bne cr0, >7
3161 | bne cr1, >8 3594 | bne cr1, >8
3162 | cmpw CARG2, CARG3 3595 | cmpw CARG2, CARG4
3163 |4: 3596 |4:
3164 |.else 3597 |.else
3165 if (vk) { 3598 if (vk) {
@@ -3167,20 +3600,20 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3167 } else { 3600 } else {
3168 |->BC_ISNEN_Z: // Dummy label. 3601 |->BC_ISNEN_Z: // Dummy label.
3169 } 3602 }
3170 | lwzx TMP0, BASE, RA 3603 | lwzx CARG1, BASE, RA
3171 | addi PC, PC, 4 3604 | addi PC, PC, 4
3172 | lfdx f0, BASE, RA 3605 | lfdx f0, BASE, RA
3173 | lwz TMP2, -4(PC) 3606 | lwz SAVE0, -4(PC)
3174 | lfdx f1, KBASE, RD 3607 | lfdx f1, KBASE, RD
3175 | decode_RD4 TMP2, TMP2 3608 | decode_RD4 SAVE0, SAVE0
3176 | checknum TMP0 3609 | checknum CARG1
3177 | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16) 3610 | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
3178 | bge >3 3611 | bge >3
3179 | fcmpu cr0, f0, f1 3612 | fcmpu cr0, f0, f1
3180 |.endif 3613 |.endif
3181 if (vk) { 3614 if (vk) {
3182 | bne >1 3615 | bne >1
3183 | add PC, PC, TMP2 3616 | add PC, PC, SAVE0
3184 |1: 3617 |1:
3185 |.if not FFI 3618 |.if not FFI
3186 |3: 3619 |3:
@@ -3191,13 +3624,13 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3191 |.if not FFI 3624 |.if not FFI
3192 |3: 3625 |3:
3193 |.endif 3626 |.endif
3194 | add PC, PC, TMP2 3627 | add PC, PC, SAVE0
3195 |2: 3628 |2:
3196 } 3629 }
3197 | ins_next 3630 | ins_next
3198 |.if FFI 3631 |.if FFI
3199 |3: 3632 |3:
3200 | cmpwi TMP0, LJ_TCDATA 3633 | cmpwi CARG1, LJ_TCDATA
3201 | beq ->vmeta_equal_cd 3634 | beq ->vmeta_equal_cd
3202 | b <1 3635 | b <1
3203 |.endif 3636 |.endif
@@ -3205,18 +3638,31 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3205 |7: // RA is not an integer. 3638 |7: // RA is not an integer.
3206 | bge cr0, <3 3639 | bge cr0, <3
3207 | // RA is a number. 3640 | // RA is a number.
3208 | lfd f0, 0(RA) 3641 | .FPU lfd f0, 0(RA)
3209 | blt cr1, >1 3642 | blt cr1, >1
3210 | // RA is a number, RD is an integer. 3643 | // RA is a number, RD is an integer.
3211 | tonum_i f1, CARG3 3644 |.if FPU
3645 | tonum_i f1, CARG4
3646 |.else
3647 | bl ->vm_sfi2d_2
3648 |.endif
3212 | b >2 3649 | b >2
3213 | 3650 |
3214 |8: // RA is an integer, RD is a number. 3651 |8: // RA is an integer, RD is a number.
3652 |.if FPU
3215 | tonum_i f0, CARG2 3653 | tonum_i f0, CARG2
3654 |.else
3655 | bl ->vm_sfi2d_1
3656 |.endif
3216 |1: 3657 |1:
3217 | lfd f1, 0(RD) 3658 | .FPU lfd f1, 0(RD)
3218 |2: 3659 |2:
3660 |.if FPU
3219 | fcmpu cr0, f0, f1 3661 | fcmpu cr0, f0, f1
3662 |.else
3663 | blex __ledf2
3664 | cmpwi CRET1, 0
3665 |.endif
3220 | b <4 3666 | b <4
3221 |.endif 3667 |.endif
3222 break; 3668 break;
@@ -3271,7 +3717,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3271 | add PC, PC, TMP2 3717 | add PC, PC, TMP2
3272 } else { 3718 } else {
3273 | li TMP1, LJ_TFALSE 3719 | li TMP1, LJ_TFALSE
3720 |.if FPU
3274 | lfdx f0, BASE, RD 3721 | lfdx f0, BASE, RD
3722 |.else
3723 | lwzux CARG1, RD, BASE
3724 | lwz CARG2, 4(RD)
3725 |.endif
3275 | cmplw TMP0, TMP1 3726 | cmplw TMP0, TMP1
3276 if (op == BC_ISTC) { 3727 if (op == BC_ISTC) {
3277 | bge >1 3728 | bge >1
@@ -3280,20 +3731,55 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3280 } 3731 }
3281 | addis PC, PC, -(BCBIAS_J*4 >> 16) 3732 | addis PC, PC, -(BCBIAS_J*4 >> 16)
3282 | decode_RD4 TMP2, INS 3733 | decode_RD4 TMP2, INS
3734 |.if FPU
3283 | stfdx f0, BASE, RA 3735 | stfdx f0, BASE, RA
3736 |.else
3737 | stwux CARG1, RA, BASE
3738 | stw CARG2, 4(RA)
3739 |.endif
3284 | add PC, PC, TMP2 3740 | add PC, PC, TMP2
3285 |1: 3741 |1:
3286 } 3742 }
3287 | ins_next 3743 | ins_next
3288 break; 3744 break;
3289 3745
3746 case BC_ISTYPE:
3747 | // RA = src*8, RD = -type*8
3748 | lwzx TMP0, BASE, RA
3749 | srwi TMP1, RD, 3
3750 | ins_next1
3751 |.if not PPE and not GPR64
3752 | add. TMP0, TMP0, TMP1
3753 |.else
3754 | neg TMP1, TMP1
3755 | cmpw TMP0, TMP1
3756 |.endif
3757 | bne ->vmeta_istype
3758 | ins_next2
3759 break;
3760 case BC_ISNUM:
3761 | // RA = src*8, RD = -(TISNUM-1)*8
3762 | lwzx TMP0, BASE, RA
3763 | ins_next1
3764 | checknum TMP0
3765 | bge ->vmeta_istype
3766 | ins_next2
3767 break;
3768
3290 /* -- Unary ops --------------------------------------------------------- */ 3769 /* -- Unary ops --------------------------------------------------------- */
3291 3770
3292 case BC_MOV: 3771 case BC_MOV:
3293 | // RA = dst*8, RD = src*8 3772 | // RA = dst*8, RD = src*8
3294 | ins_next1 3773 | ins_next1
3774 |.if FPU
3295 | lfdx f0, BASE, RD 3775 | lfdx f0, BASE, RD
3296 | stfdx f0, BASE, RA 3776 | stfdx f0, BASE, RA
3777 |.else
3778 | lwzux TMP0, RD, BASE
3779 | lwz TMP1, 4(RD)
3780 | stwux TMP0, RA, BASE
3781 | stw TMP1, 4(RA)
3782 |.endif
3297 | ins_next2 3783 | ins_next2
3298 break; 3784 break;
3299 case BC_NOT: 3785 case BC_NOT:
@@ -3395,44 +3881,65 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3395 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); 3881 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
3396 ||switch (vk) { 3882 ||switch (vk) {
3397 ||case 0: 3883 ||case 0:
3398 | lwzx TMP1, BASE, RB 3884 | lwzx CARG1, BASE, RB
3399 | .if DUALNUM 3885 | .if DUALNUM
3400 | lwzx TMP2, KBASE, RC 3886 | lwzx CARG3, KBASE, RC
3401 | .endif 3887 | .endif
3888 | .if FPU
3402 | lfdx f14, BASE, RB 3889 | lfdx f14, BASE, RB
3403 | lfdx f15, KBASE, RC 3890 | lfdx f15, KBASE, RC
3891 | .else
3892 | add TMP1, BASE, RB
3893 | add TMP2, KBASE, RC
3894 | lwz CARG2, 4(TMP1)
3895 | lwz CARG4, 4(TMP2)
3896 | .endif
3404 | .if DUALNUM 3897 | .if DUALNUM
3405 | checknum cr0, TMP1 3898 | checknum cr0, CARG1
3406 | checknum cr1, TMP2 3899 | checknum cr1, CARG3
3407 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt 3900 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3408 | bge ->vmeta_arith_vn 3901 | bge ->vmeta_arith_vn
3409 | .else 3902 | .else
3410 | checknum TMP1; bge ->vmeta_arith_vn 3903 | checknum CARG1; bge ->vmeta_arith_vn
3411 | .endif 3904 | .endif
3412 || break; 3905 || break;
3413 ||case 1: 3906 ||case 1:
3414 | lwzx TMP1, BASE, RB 3907 | lwzx CARG1, BASE, RB
3415 | .if DUALNUM 3908 | .if DUALNUM
3416 | lwzx TMP2, KBASE, RC 3909 | lwzx CARG3, KBASE, RC
3417 | .endif 3910 | .endif
3911 | .if FPU
3418 | lfdx f15, BASE, RB 3912 | lfdx f15, BASE, RB
3419 | lfdx f14, KBASE, RC 3913 | lfdx f14, KBASE, RC
3914 | .else
3915 | add TMP1, BASE, RB
3916 | add TMP2, KBASE, RC
3917 | lwz CARG2, 4(TMP1)
3918 | lwz CARG4, 4(TMP2)
3919 | .endif
3420 | .if DUALNUM 3920 | .if DUALNUM
3421 | checknum cr0, TMP1 3921 | checknum cr0, CARG1
3422 | checknum cr1, TMP2 3922 | checknum cr1, CARG3
3423 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt 3923 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3424 | bge ->vmeta_arith_nv 3924 | bge ->vmeta_arith_nv
3425 | .else 3925 | .else
3426 | checknum TMP1; bge ->vmeta_arith_nv 3926 | checknum CARG1; bge ->vmeta_arith_nv
3427 | .endif 3927 | .endif
3428 || break; 3928 || break;
3429 ||default: 3929 ||default:
3430 | lwzx TMP1, BASE, RB 3930 | lwzx CARG1, BASE, RB
3431 | lwzx TMP2, BASE, RC 3931 | lwzx CARG3, BASE, RC
3932 | .if FPU
3432 | lfdx f14, BASE, RB 3933 | lfdx f14, BASE, RB
3433 | lfdx f15, BASE, RC 3934 | lfdx f15, BASE, RC
3434 | checknum cr0, TMP1 3935 | .else
3435 | checknum cr1, TMP2 3936 | add TMP1, BASE, RB
3937 | add TMP2, BASE, RC
3938 | lwz CARG2, 4(TMP1)
3939 | lwz CARG4, 4(TMP2)
3940 | .endif
3941 | checknum cr0, CARG1
3942 | checknum cr1, CARG3
3436 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt 3943 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3437 | bge ->vmeta_arith_vv 3944 | bge ->vmeta_arith_vv
3438 || break; 3945 || break;
@@ -3466,48 +3973,78 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3466 | fsub a, b, a // b - floor(b/c)*c 3973 | fsub a, b, a // b - floor(b/c)*c
3467 |.endmacro 3974 |.endmacro
3468 | 3975 |
3976 |.macro sfpmod
3977 |->BC_MODVN_Z:
3978 | stw CARG1, SFSAVE_1
3979 | stw CARG2, SFSAVE_2
3980 | mr SAVE0, CARG3
3981 | mr SAVE1, CARG4
3982 | blex __divdf3
3983 | blex floor
3984 | mr CARG3, SAVE0
3985 | mr CARG4, SAVE1
3986 | blex __muldf3
3987 | mr CARG3, CRET1
3988 | mr CARG4, CRET2
3989 | lwz CARG1, SFSAVE_1
3990 | lwz CARG2, SFSAVE_2
3991 | blex __subdf3
3992 |.endmacro
3993 |
3469 |.macro ins_arithfp, fpins 3994 |.macro ins_arithfp, fpins
3470 | ins_arithpre 3995 | ins_arithpre
3471 |.if "fpins" == "fpmod_" 3996 |.if "fpins" == "fpmod_"
3472 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 3997 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
3473 |.else 3998 |.elif FPU
3474 | fpins f0, f14, f15 3999 | fpins f0, f14, f15
3475 | ins_next1 4000 | ins_next1
3476 | stfdx f0, BASE, RA 4001 | stfdx f0, BASE, RA
3477 | ins_next2 4002 | ins_next2
4003 |.else
4004 | blex __divdf3 // Only soft-float div uses this macro.
4005 | ins_next1
4006 | stwux CRET1, RA, BASE
4007 | stw CRET2, 4(RA)
4008 | ins_next2
3478 |.endif 4009 |.endif
3479 |.endmacro 4010 |.endmacro
3480 | 4011 |
3481 |.macro ins_arithdn, intins, fpins 4012 |.macro ins_arithdn, intins, fpins, fpcall
3482 | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8 4013 | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
3483 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); 4014 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
3484 ||switch (vk) { 4015 ||switch (vk) {
3485 ||case 0: 4016 ||case 0:
3486 | lwzux TMP1, RB, BASE 4017 | lwzux CARG1, RB, BASE
3487 | lwzux TMP2, RC, KBASE 4018 | lwzux CARG3, RC, KBASE
3488 | lwz CARG1, 4(RB) 4019 | lwz CARG2, 4(RB)
3489 | checknum cr0, TMP1 4020 | checknum cr0, CARG1
3490 | lwz CARG2, 4(RC) 4021 | lwz CARG4, 4(RC)
4022 | checknum cr1, CARG3
3491 || break; 4023 || break;
3492 ||case 1: 4024 ||case 1:
3493 | lwzux TMP1, RB, BASE 4025 | lwzux CARG3, RB, BASE
3494 | lwzux TMP2, RC, KBASE 4026 | lwzux CARG1, RC, KBASE
3495 | lwz CARG2, 4(RB) 4027 | lwz CARG4, 4(RB)
3496 | checknum cr0, TMP1 4028 | checknum cr0, CARG3
3497 | lwz CARG1, 4(RC) 4029 | lwz CARG2, 4(RC)
4030 | checknum cr1, CARG1
3498 || break; 4031 || break;
3499 ||default: 4032 ||default:
3500 | lwzux TMP1, RB, BASE 4033 | lwzux CARG1, RB, BASE
3501 | lwzux TMP2, RC, BASE 4034 | lwzux CARG3, RC, BASE
3502 | lwz CARG1, 4(RB) 4035 | lwz CARG2, 4(RB)
3503 | checknum cr0, TMP1 4036 | checknum cr0, CARG1
3504 | lwz CARG2, 4(RC) 4037 | lwz CARG4, 4(RC)
4038 | checknum cr1, CARG3
3505 || break; 4039 || break;
3506 ||} 4040 ||}
3507 | checknum cr1, TMP2
3508 | bne >5 4041 | bne >5
3509 | bne cr1, >5 4042 | bne cr1, >5
3510 | intins CARG1, CARG1, CARG2 4043 |.if "intins" == "intmod"
4044 | mr CARG1, CARG2
4045 | mr CARG2, CARG4
4046 |.endif
4047 | intins CARG1, CARG2, CARG4
3511 | bso >4 4048 | bso >4
3512 |1: 4049 |1:
3513 | ins_next1 4050 | ins_next1
@@ -3519,29 +4056,40 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3519 | checkov TMP0, <1 // Ignore unrelated overflow. 4056 | checkov TMP0, <1 // Ignore unrelated overflow.
3520 | ins_arithfallback b 4057 | ins_arithfallback b
3521 |5: // FP variant. 4058 |5: // FP variant.
4059 |.if FPU
3522 ||if (vk == 1) { 4060 ||if (vk == 1) {
3523 | lfd f15, 0(RB) 4061 | lfd f15, 0(RB)
3524 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3525 | lfd f14, 0(RC) 4062 | lfd f14, 0(RC)
3526 ||} else { 4063 ||} else {
3527 | lfd f14, 0(RB) 4064 | lfd f14, 0(RB)
3528 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3529 | lfd f15, 0(RC) 4065 | lfd f15, 0(RC)
3530 ||} 4066 ||}
4067 |.endif
4068 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3531 | ins_arithfallback bge 4069 | ins_arithfallback bge
3532 |.if "fpins" == "fpmod_" 4070 |.if "fpins" == "fpmod_"
3533 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 4071 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
3534 |.else 4072 |.else
4073 |.if FPU
3535 | fpins f0, f14, f15 4074 | fpins f0, f14, f15
3536 | ins_next1
3537 | stfdx f0, BASE, RA 4075 | stfdx f0, BASE, RA
4076 |.else
4077 |.if "fpcall" == "sfpmod"
4078 | sfpmod
4079 |.else
4080 | blex fpcall
4081 |.endif
4082 | stwux CRET1, RA, BASE
4083 | stw CRET2, 4(RA)
4084 |.endif
4085 | ins_next1
3538 | b <2 4086 | b <2
3539 |.endif 4087 |.endif
3540 |.endmacro 4088 |.endmacro
3541 | 4089 |
3542 |.macro ins_arith, intins, fpins 4090 |.macro ins_arith, intins, fpins, fpcall
3543 |.if DUALNUM 4091 |.if DUALNUM
3544 | ins_arithdn intins, fpins 4092 | ins_arithdn intins, fpins, fpcall
3545 |.else 4093 |.else
3546 | ins_arithfp fpins 4094 | ins_arithfp fpins
3547 |.endif 4095 |.endif
@@ -3556,9 +4104,9 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3556 | addo. TMP0, TMP0, TMP3 4104 | addo. TMP0, TMP0, TMP3
3557 | add y, a, b 4105 | add y, a, b
3558 |.endmacro 4106 |.endmacro
3559 | ins_arith addo32., fadd 4107 | ins_arith addo32., fadd, __adddf3
3560 |.else 4108 |.else
3561 | ins_arith addo., fadd 4109 | ins_arith addo., fadd, __adddf3
3562 |.endif 4110 |.endif
3563 break; 4111 break;
3564 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: 4112 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
@@ -3570,36 +4118,48 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3570 | subo. TMP0, TMP0, TMP3 4118 | subo. TMP0, TMP0, TMP3
3571 | sub y, a, b 4119 | sub y, a, b
3572 |.endmacro 4120 |.endmacro
3573 | ins_arith subo32., fsub 4121 | ins_arith subo32., fsub, __subdf3
3574 |.else 4122 |.else
3575 | ins_arith subo., fsub 4123 | ins_arith subo., fsub, __subdf3
3576 |.endif 4124 |.endif
3577 break; 4125 break;
3578 case BC_MULVN: case BC_MULNV: case BC_MULVV: 4126 case BC_MULVN: case BC_MULNV: case BC_MULVV:
3579 | ins_arith mullwo., fmul 4127 | ins_arith mullwo., fmul, __muldf3
3580 break; 4128 break;
3581 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: 4129 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
3582 | ins_arithfp fdiv 4130 | ins_arithfp fdiv
3583 break; 4131 break;
3584 case BC_MODVN: 4132 case BC_MODVN:
3585 | ins_arith intmod, fpmod 4133 | ins_arith intmod, fpmod, sfpmod
3586 break; 4134 break;
3587 case BC_MODNV: case BC_MODVV: 4135 case BC_MODNV: case BC_MODVV:
3588 | ins_arith intmod, fpmod_ 4136 | ins_arith intmod, fpmod_, sfpmod
3589 break; 4137 break;
3590 case BC_POW: 4138 case BC_POW:
3591 | // NYI: (partial) integer arithmetic. 4139 | // NYI: (partial) integer arithmetic.
3592 | lwzx TMP1, BASE, RB 4140 | lwzx CARG1, BASE, RB
4141 | lwzx CARG3, BASE, RC
4142 |.if FPU
3593 | lfdx FARG1, BASE, RB 4143 | lfdx FARG1, BASE, RB
3594 | lwzx TMP2, BASE, RC
3595 | lfdx FARG2, BASE, RC 4144 | lfdx FARG2, BASE, RC
3596 | checknum cr0, TMP1 4145 |.else
3597 | checknum cr1, TMP2 4146 | add TMP1, BASE, RB
4147 | add TMP2, BASE, RC
4148 | lwz CARG2, 4(TMP1)
4149 | lwz CARG4, 4(TMP2)
4150 |.endif
4151 | checknum cr0, CARG1
4152 | checknum cr1, CARG3
3598 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt 4153 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3599 | bge ->vmeta_arith_vv 4154 | bge ->vmeta_arith_vv
3600 | blex pow 4155 | blex pow
3601 | ins_next1 4156 | ins_next1
4157 |.if FPU
3602 | stfdx FARG1, BASE, RA 4158 | stfdx FARG1, BASE, RA
4159 |.else
4160 | stwux CARG1, RA, BASE
4161 | stw CARG2, 4(RA)
4162 |.endif
3603 | ins_next2 4163 | ins_next2
3604 break; 4164 break;
3605 4165
@@ -3619,8 +4179,15 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3619 | lp BASE, L->base 4179 | lp BASE, L->base
3620 | bne ->vmeta_binop 4180 | bne ->vmeta_binop
3621 | ins_next1 4181 | ins_next1
4182 |.if FPU
3622 | lfdx f0, BASE, SAVE0 // Copy result from RB to RA. 4183 | lfdx f0, BASE, SAVE0 // Copy result from RB to RA.
3623 | stfdx f0, BASE, RA 4184 | stfdx f0, BASE, RA
4185 |.else
4186 | lwzux TMP0, SAVE0, BASE
4187 | lwz TMP1, 4(SAVE0)
4188 | stwux TMP0, RA, BASE
4189 | stw TMP1, 4(RA)
4190 |.endif
3624 | ins_next2 4191 | ins_next2
3625 break; 4192 break;
3626 4193
@@ -3683,8 +4250,15 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3683 case BC_KNUM: 4250 case BC_KNUM:
3684 | // RA = dst*8, RD = num_const*8 4251 | // RA = dst*8, RD = num_const*8
3685 | ins_next1 4252 | ins_next1
4253 |.if FPU
3686 | lfdx f0, KBASE, RD 4254 | lfdx f0, KBASE, RD
3687 | stfdx f0, BASE, RA 4255 | stfdx f0, BASE, RA
4256 |.else
4257 | lwzux TMP0, RD, KBASE
4258 | lwz TMP1, 4(RD)
4259 | stwux TMP0, RA, BASE
4260 | stw TMP1, 4(RA)
4261 |.endif
3688 | ins_next2 4262 | ins_next2
3689 break; 4263 break;
3690 case BC_KPRI: 4264 case BC_KPRI:
@@ -3717,8 +4291,15 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3717 | lwzx UPVAL:RB, LFUNC:RB, RD 4291 | lwzx UPVAL:RB, LFUNC:RB, RD
3718 | ins_next1 4292 | ins_next1
3719 | lwz TMP1, UPVAL:RB->v 4293 | lwz TMP1, UPVAL:RB->v
4294 |.if FPU
3720 | lfd f0, 0(TMP1) 4295 | lfd f0, 0(TMP1)
3721 | stfdx f0, BASE, RA 4296 | stfdx f0, BASE, RA
4297 |.else
4298 | lwz TMP2, 0(TMP1)
4299 | lwz TMP3, 4(TMP1)
4300 | stwux TMP2, RA, BASE
4301 | stw TMP3, 4(RA)
4302 |.endif
3722 | ins_next2 4303 | ins_next2
3723 break; 4304 break;
3724 case BC_USETV: 4305 case BC_USETV:
@@ -3726,14 +4307,24 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3726 | lwz LFUNC:RB, FRAME_FUNC(BASE) 4307 | lwz LFUNC:RB, FRAME_FUNC(BASE)
3727 | srwi RA, RA, 1 4308 | srwi RA, RA, 1
3728 | addi RA, RA, offsetof(GCfuncL, uvptr) 4309 | addi RA, RA, offsetof(GCfuncL, uvptr)
4310 |.if FPU
3729 | lfdux f0, RD, BASE 4311 | lfdux f0, RD, BASE
4312 |.else
4313 | lwzux CARG1, RD, BASE
4314 | lwz CARG3, 4(RD)
4315 |.endif
3730 | lwzx UPVAL:RB, LFUNC:RB, RA 4316 | lwzx UPVAL:RB, LFUNC:RB, RA
3731 | lbz TMP3, UPVAL:RB->marked 4317 | lbz TMP3, UPVAL:RB->marked
3732 | lwz CARG2, UPVAL:RB->v 4318 | lwz CARG2, UPVAL:RB->v
3733 | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv) 4319 | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
3734 | lbz TMP0, UPVAL:RB->closed 4320 | lbz TMP0, UPVAL:RB->closed
3735 | lwz TMP2, 0(RD) 4321 | lwz TMP2, 0(RD)
4322 |.if FPU
3736 | stfd f0, 0(CARG2) 4323 | stfd f0, 0(CARG2)
4324 |.else
4325 | stw CARG1, 0(CARG2)
4326 | stw CARG3, 4(CARG2)
4327 |.endif
3737 | cmplwi cr1, TMP0, 0 4328 | cmplwi cr1, TMP0, 0
3738 | lwz TMP1, 4(RD) 4329 | lwz TMP1, 4(RD)
3739 | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq 4330 | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
@@ -3789,11 +4380,21 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3789 | lwz LFUNC:RB, FRAME_FUNC(BASE) 4380 | lwz LFUNC:RB, FRAME_FUNC(BASE)
3790 | srwi RA, RA, 1 4381 | srwi RA, RA, 1
3791 | addi RA, RA, offsetof(GCfuncL, uvptr) 4382 | addi RA, RA, offsetof(GCfuncL, uvptr)
4383 |.if FPU
3792 | lfdx f0, KBASE, RD 4384 | lfdx f0, KBASE, RD
4385 |.else
4386 | lwzux TMP2, RD, KBASE
4387 | lwz TMP3, 4(RD)
4388 |.endif
3793 | lwzx UPVAL:RB, LFUNC:RB, RA 4389 | lwzx UPVAL:RB, LFUNC:RB, RA
3794 | ins_next1 4390 | ins_next1
3795 | lwz TMP1, UPVAL:RB->v 4391 | lwz TMP1, UPVAL:RB->v
4392 |.if FPU
3796 | stfd f0, 0(TMP1) 4393 | stfd f0, 0(TMP1)
4394 |.else
4395 | stw TMP2, 0(TMP1)
4396 | stw TMP3, 4(TMP1)
4397 |.endif
3797 | ins_next2 4398 | ins_next2
3798 break; 4399 break;
3799 case BC_USETP: 4400 case BC_USETP:
@@ -3941,11 +4542,21 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3941 |.endif 4542 |.endif
3942 | ble ->vmeta_tgetv // Integer key and in array part? 4543 | ble ->vmeta_tgetv // Integer key and in array part?
3943 | lwzx TMP0, TMP1, TMP2 4544 | lwzx TMP0, TMP1, TMP2
4545 |.if FPU
3944 | lfdx f14, TMP1, TMP2 4546 | lfdx f14, TMP1, TMP2
4547 |.else
4548 | lwzux SAVE0, TMP1, TMP2
4549 | lwz SAVE1, 4(TMP1)
4550 |.endif
3945 | checknil TMP0; beq >2 4551 | checknil TMP0; beq >2
3946 |1: 4552 |1:
3947 | ins_next1 4553 | ins_next1
4554 |.if FPU
3948 | stfdx f14, BASE, RA 4555 | stfdx f14, BASE, RA
4556 |.else
4557 | stwux SAVE0, RA, BASE
4558 | stw SAVE1, 4(RA)
4559 |.endif
3949 | ins_next2 4560 | ins_next2
3950 | 4561 |
3951 |2: // Check for __index if table value is nil. 4562 |2: // Check for __index if table value is nil.
@@ -3976,9 +4587,9 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3976 |->BC_TGETS_Z: 4587 |->BC_TGETS_Z:
3977 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8 4588 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
3978 | lwz TMP0, TAB:RB->hmask 4589 | lwz TMP0, TAB:RB->hmask
3979 | lwz TMP1, STR:RC->hash 4590 | lwz TMP1, STR:RC->sid
3980 | lwz NODE:TMP2, TAB:RB->node 4591 | lwz NODE:TMP2, TAB:RB->node
3981 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 4592 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
3982 | slwi TMP0, TMP1, 5 4593 | slwi TMP0, TMP1, 5
3983 | slwi TMP1, TMP1, 3 4594 | slwi TMP1, TMP1, 3
3984 | sub TMP1, TMP0, TMP1 4595 | sub TMP1, TMP0, TMP1
@@ -4021,12 +4632,22 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4021 | lwz TMP1, TAB:RB->asize 4632 | lwz TMP1, TAB:RB->asize
4022 | lwz TMP2, TAB:RB->array 4633 | lwz TMP2, TAB:RB->array
4023 | cmplw TMP0, TMP1; bge ->vmeta_tgetb 4634 | cmplw TMP0, TMP1; bge ->vmeta_tgetb
4635 |.if FPU
4024 | lwzx TMP1, TMP2, RC 4636 | lwzx TMP1, TMP2, RC
4025 | lfdx f0, TMP2, RC 4637 | lfdx f0, TMP2, RC
4638 |.else
4639 | lwzux TMP1, TMP2, RC
4640 | lwz TMP3, 4(TMP2)
4641 |.endif
4026 | checknil TMP1; beq >5 4642 | checknil TMP1; beq >5
4027 |1: 4643 |1:
4028 | ins_next1 4644 | ins_next1
4645 |.if FPU
4029 | stfdx f0, BASE, RA 4646 | stfdx f0, BASE, RA
4647 |.else
4648 | stwux TMP1, RA, BASE
4649 | stw TMP3, 4(RA)
4650 |.endif
4030 | ins_next2 4651 | ins_next2
4031 | 4652 |
4032 |5: // Check for __index if table value is nil. 4653 |5: // Check for __index if table value is nil.
@@ -4038,6 +4659,40 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4038 | bne <1 // 'no __index' flag set: done. 4659 | bne <1 // 'no __index' flag set: done.
4039 | b ->vmeta_tgetb // Caveat: preserve TMP0! 4660 | b ->vmeta_tgetb // Caveat: preserve TMP0!
4040 break; 4661 break;
4662 case BC_TGETR:
4663 | // RA = dst*8, RB = table*8, RC = key*8
4664 | add RB, BASE, RB
4665 | lwz TAB:CARG1, 4(RB)
4666 |.if DUALNUM
4667 | add RC, BASE, RC
4668 | lwz TMP0, TAB:CARG1->asize
4669 | lwz CARG2, 4(RC)
4670 | lwz TMP1, TAB:CARG1->array
4671 |.else
4672 | lfdx f0, BASE, RC
4673 | lwz TMP0, TAB:CARG1->asize
4674 | toint CARG2, f0
4675 | lwz TMP1, TAB:CARG1->array
4676 |.endif
4677 | cmplw TMP0, CARG2
4678 | slwi TMP2, CARG2, 3
4679 | ble ->vmeta_tgetr // In array part?
4680 |.if FPU
4681 | lfdx f14, TMP1, TMP2
4682 |.else
4683 | lwzux SAVE0, TMP2, TMP1
4684 | lwz SAVE1, 4(TMP2)
4685 |.endif
4686 |->BC_TGETR_Z:
4687 | ins_next1
4688 |.if FPU
4689 | stfdx f14, BASE, RA
4690 |.else
4691 | stwux SAVE0, RA, BASE
4692 | stw SAVE1, 4(RA)
4693 |.endif
4694 | ins_next2
4695 break;
4041 4696
4042 case BC_TSETV: 4697 case BC_TSETV:
4043 | // RA = src*8, RB = table*8, RC = key*8 4698 | // RA = src*8, RB = table*8, RC = key*8
@@ -4076,11 +4731,22 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4076 | ble ->vmeta_tsetv // Integer key and in array part? 4731 | ble ->vmeta_tsetv // Integer key and in array part?
4077 | lwzx TMP2, TMP1, TMP0 4732 | lwzx TMP2, TMP1, TMP0
4078 | lbz TMP3, TAB:RB->marked 4733 | lbz TMP3, TAB:RB->marked
4734 |.if FPU
4079 | lfdx f14, BASE, RA 4735 | lfdx f14, BASE, RA
4736 |.else
4737 | add SAVE1, BASE, RA
4738 | lwz SAVE0, 0(SAVE1)
4739 | lwz SAVE1, 4(SAVE1)
4740 |.endif
4080 | checknil TMP2; beq >3 4741 | checknil TMP2; beq >3
4081 |1: 4742 |1:
4082 | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table) 4743 | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
4744 |.if FPU
4083 | stfdx f14, TMP1, TMP0 4745 | stfdx f14, TMP1, TMP0
4746 |.else
4747 | stwux SAVE0, TMP1, TMP0
4748 | stw SAVE1, 4(TMP1)
4749 |.endif
4084 | bne >7 4750 | bne >7
4085 |2: 4751 |2:
4086 | ins_next 4752 | ins_next
@@ -4117,11 +4783,17 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4117 |->BC_TSETS_Z: 4783 |->BC_TSETS_Z:
4118 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8 4784 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
4119 | lwz TMP0, TAB:RB->hmask 4785 | lwz TMP0, TAB:RB->hmask
4120 | lwz TMP1, STR:RC->hash 4786 | lwz TMP1, STR:RC->sid
4121 | lwz NODE:TMP2, TAB:RB->node 4787 | lwz NODE:TMP2, TAB:RB->node
4122 | stb ZERO, TAB:RB->nomm // Clear metamethod cache. 4788 | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
4123 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask 4789 | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
4790 |.if FPU
4124 | lfdx f14, BASE, RA 4791 | lfdx f14, BASE, RA
4792 |.else
4793 | add CARG2, BASE, RA
4794 | lwz SAVE0, 0(CARG2)
4795 | lwz SAVE1, 4(CARG2)
4796 |.endif
4125 | slwi TMP0, TMP1, 5 4797 | slwi TMP0, TMP1, 5
4126 | slwi TMP1, TMP1, 3 4798 | slwi TMP1, TMP1, 3
4127 | sub TMP1, TMP0, TMP1 4799 | sub TMP1, TMP0, TMP1
@@ -4137,7 +4809,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4137 | checknil CARG2; beq >4 // Key found, but nil value? 4809 | checknil CARG2; beq >4 // Key found, but nil value?
4138 |2: 4810 |2:
4139 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) 4811 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
4812 |.if FPU
4140 | stfd f14, NODE:TMP2->val 4813 | stfd f14, NODE:TMP2->val
4814 |.else
4815 | stw SAVE0, NODE:TMP2->val.u32.hi
4816 | stw SAVE1, NODE:TMP2->val.u32.lo
4817 |.endif
4141 | bne >7 4818 | bne >7
4142 |3: 4819 |3:
4143 | ins_next 4820 | ins_next
@@ -4176,7 +4853,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4176 | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k) 4853 | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
4177 | // Returns TValue *. 4854 | // Returns TValue *.
4178 | lp BASE, L->base 4855 | lp BASE, L->base
4856 |.if FPU
4179 | stfd f14, 0(CRET1) 4857 | stfd f14, 0(CRET1)
4858 |.else
4859 | stw SAVE0, 0(CRET1)
4860 | stw SAVE1, 4(CRET1)
4861 |.endif
4180 | b <3 // No 2nd write barrier needed. 4862 | b <3 // No 2nd write barrier needed.
4181 | 4863 |
4182 |7: // Possible table write barrier for the value. Skip valiswhite check. 4864 |7: // Possible table write barrier for the value. Skip valiswhite check.
@@ -4193,13 +4875,24 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4193 | lwz TMP2, TAB:RB->array 4875 | lwz TMP2, TAB:RB->array
4194 | lbz TMP3, TAB:RB->marked 4876 | lbz TMP3, TAB:RB->marked
4195 | cmplw TMP0, TMP1 4877 | cmplw TMP0, TMP1
4878 |.if FPU
4196 | lfdx f14, BASE, RA 4879 | lfdx f14, BASE, RA
4880 |.else
4881 | add CARG2, BASE, RA
4882 | lwz SAVE0, 0(CARG2)
4883 | lwz SAVE1, 4(CARG2)
4884 |.endif
4197 | bge ->vmeta_tsetb 4885 | bge ->vmeta_tsetb
4198 | lwzx TMP1, TMP2, RC 4886 | lwzx TMP1, TMP2, RC
4199 | checknil TMP1; beq >5 4887 | checknil TMP1; beq >5
4200 |1: 4888 |1:
4201 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) 4889 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
4890 |.if FPU
4202 | stfdx f14, TMP2, RC 4891 | stfdx f14, TMP2, RC
4892 |.else
4893 | stwux SAVE0, RC, TMP2
4894 | stw SAVE1, 4(RC)
4895 |.endif
4203 | bne >7 4896 | bne >7
4204 |2: 4897 |2:
4205 | ins_next 4898 | ins_next
@@ -4217,6 +4910,49 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4217 | barrierback TAB:RB, TMP3, TMP0 4910 | barrierback TAB:RB, TMP3, TMP0
4218 | b <2 4911 | b <2
4219 break; 4912 break;
4913 case BC_TSETR:
4914 | // RA = dst*8, RB = table*8, RC = key*8
4915 | add RB, BASE, RB
4916 | lwz TAB:CARG2, 4(RB)
4917 |.if DUALNUM
4918 | add RC, BASE, RC
4919 | lbz TMP3, TAB:CARG2->marked
4920 | lwz TMP0, TAB:CARG2->asize
4921 | lwz CARG3, 4(RC)
4922 | lwz TMP1, TAB:CARG2->array
4923 |.else
4924 | lfdx f0, BASE, RC
4925 | lbz TMP3, TAB:CARG2->marked
4926 | lwz TMP0, TAB:CARG2->asize
4927 | toint CARG3, f0
4928 | lwz TMP1, TAB:CARG2->array
4929 |.endif
4930 | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
4931 | bne >7
4932 |2:
4933 | cmplw TMP0, CARG3
4934 | slwi TMP2, CARG3, 3
4935 |.if FPU
4936 | lfdx f14, BASE, RA
4937 |.else
4938 | lwzux SAVE0, RA, BASE
4939 | lwz SAVE1, 4(RA)
4940 |.endif
4941 | ble ->vmeta_tsetr // In array part?
4942 | ins_next1
4943 |.if FPU
4944 | stfdx f14, TMP1, TMP2
4945 |.else
4946 | stwux SAVE0, TMP1, TMP2
4947 | stw SAVE1, 4(TMP1)
4948 |.endif
4949 | ins_next2
4950 |
4951 |7: // Possible table write barrier for the value. Skip valiswhite check.
4952 | barrierback TAB:CARG2, TMP3, TMP2
4953 | b <2
4954 break;
4955
4220 4956
4221 case BC_TSETM: 4957 case BC_TSETM:
4222 | // RA = base*8 (table at base-1), RD = num_const*8 (start index) 4958 | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
@@ -4239,10 +4975,20 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4239 | add TMP1, TMP1, TMP0 4975 | add TMP1, TMP1, TMP0
4240 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table) 4976 | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
4241 |3: // Copy result slots to table. 4977 |3: // Copy result slots to table.
4978 |.if FPU
4242 | lfd f0, 0(RA) 4979 | lfd f0, 0(RA)
4980 |.else
4981 | lwz SAVE0, 0(RA)
4982 | lwz SAVE1, 4(RA)
4983 |.endif
4243 | addi RA, RA, 8 4984 | addi RA, RA, 8
4244 | cmpw cr1, RA, TMP2 4985 | cmpw cr1, RA, TMP2
4986 |.if FPU
4245 | stfd f0, 0(TMP1) 4987 | stfd f0, 0(TMP1)
4988 |.else
4989 | stw SAVE0, 0(TMP1)
4990 | stw SAVE1, 4(TMP1)
4991 |.endif
4246 | addi TMP1, TMP1, 8 4992 | addi TMP1, TMP1, 8
4247 | blt cr1, <3 4993 | blt cr1, <3
4248 | bne >7 4994 | bne >7
@@ -4309,9 +5055,20 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4309 | beq cr1, >3 5055 | beq cr1, >3
4310 |2: 5056 |2:
4311 | addi TMP3, TMP2, 8 5057 | addi TMP3, TMP2, 8
5058 |.if FPU
4312 | lfdx f0, RA, TMP2 5059 | lfdx f0, RA, TMP2
5060 |.else
5061 | add CARG3, RA, TMP2
5062 | lwz CARG1, 0(CARG3)
5063 | lwz CARG2, 4(CARG3)
5064 |.endif
4313 | cmplw cr1, TMP3, NARGS8:RC 5065 | cmplw cr1, TMP3, NARGS8:RC
5066 |.if FPU
4314 | stfdx f0, BASE, TMP2 5067 | stfdx f0, BASE, TMP2
5068 |.else
5069 | stwux CARG1, TMP2, BASE
5070 | stw CARG2, 4(TMP2)
5071 |.endif
4315 | mr TMP2, TMP3 5072 | mr TMP2, TMP3
4316 | bne cr1, <2 5073 | bne cr1, <2
4317 |3: 5074 |3:
@@ -4344,14 +5101,28 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4344 | add BASE, BASE, RA 5101 | add BASE, BASE, RA
4345 | lwz TMP1, -24(BASE) 5102 | lwz TMP1, -24(BASE)
4346 | lwz LFUNC:RB, -20(BASE) 5103 | lwz LFUNC:RB, -20(BASE)
5104 |.if FPU
4347 | lfd f1, -8(BASE) 5105 | lfd f1, -8(BASE)
4348 | lfd f0, -16(BASE) 5106 | lfd f0, -16(BASE)
5107 |.else
5108 | lwz CARG1, -8(BASE)
5109 | lwz CARG2, -4(BASE)
5110 | lwz CARG3, -16(BASE)
5111 | lwz CARG4, -12(BASE)
5112 |.endif
4349 | stw TMP1, 0(BASE) // Copy callable. 5113 | stw TMP1, 0(BASE) // Copy callable.
4350 | stw LFUNC:RB, 4(BASE) 5114 | stw LFUNC:RB, 4(BASE)
4351 | checkfunc TMP1 5115 | checkfunc TMP1
4352 | stfd f1, 16(BASE) // Copy control var.
4353 | li NARGS8:RC, 16 // Iterators get 2 arguments. 5116 | li NARGS8:RC, 16 // Iterators get 2 arguments.
5117 |.if FPU
5118 | stfd f1, 16(BASE) // Copy control var.
4354 | stfdu f0, 8(BASE) // Copy state. 5119 | stfdu f0, 8(BASE) // Copy state.
5120 |.else
5121 | stw CARG1, 16(BASE) // Copy control var.
5122 | stw CARG2, 20(BASE)
5123 | stwu CARG3, 8(BASE) // Copy state.
5124 | stw CARG4, 4(BASE)
5125 |.endif
4355 | bne ->vmeta_call 5126 | bne ->vmeta_call
4356 | ins_call 5127 | ins_call
4357 break; 5128 break;
@@ -4372,7 +5143,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4372 | slwi TMP3, RC, 3 5143 | slwi TMP3, RC, 3
4373 | bge >5 // Index points after array part? 5144 | bge >5 // Index points after array part?
4374 | lwzx TMP2, TMP1, TMP3 5145 | lwzx TMP2, TMP1, TMP3
5146 |.if FPU
4375 | lfdx f0, TMP1, TMP3 5147 | lfdx f0, TMP1, TMP3
5148 |.else
5149 | lwzux CARG1, TMP3, TMP1
5150 | lwz CARG2, 4(TMP3)
5151 |.endif
4376 | checknil TMP2 5152 | checknil TMP2
4377 | lwz INS, -4(PC) 5153 | lwz INS, -4(PC)
4378 | beq >4 5154 | beq >4
@@ -4384,7 +5160,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4384 |.endif 5160 |.endif
4385 | addi RC, RC, 1 5161 | addi RC, RC, 1
4386 | addis TMP3, PC, -(BCBIAS_J*4 >> 16) 5162 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
5163 |.if FPU
4387 | stfd f0, 8(RA) 5164 | stfd f0, 8(RA)
5165 |.else
5166 | stw CARG1, 8(RA)
5167 | stw CARG2, 12(RA)
5168 |.endif
4388 | decode_RD4 TMP1, INS 5169 | decode_RD4 TMP1, INS
4389 | stw RC, -4(RA) // Update control var. 5170 | stw RC, -4(RA) // Update control var.
4390 | add PC, TMP1, TMP3 5171 | add PC, TMP1, TMP3
@@ -4409,17 +5190,38 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4409 | slwi RB, RC, 3 5190 | slwi RB, RC, 3
4410 | sub TMP3, TMP3, RB 5191 | sub TMP3, TMP3, RB
4411 | lwzx RB, TMP2, TMP3 5192 | lwzx RB, TMP2, TMP3
5193 |.if FPU
4412 | lfdx f0, TMP2, TMP3 5194 | lfdx f0, TMP2, TMP3
5195 |.else
5196 | add CARG3, TMP2, TMP3
5197 | lwz CARG1, 0(CARG3)
5198 | lwz CARG2, 4(CARG3)
5199 |.endif
4413 | add NODE:TMP3, TMP2, TMP3 5200 | add NODE:TMP3, TMP2, TMP3
4414 | checknil RB 5201 | checknil RB
4415 | lwz INS, -4(PC) 5202 | lwz INS, -4(PC)
4416 | beq >7 5203 | beq >7
5204 |.if FPU
4417 | lfd f1, NODE:TMP3->key 5205 | lfd f1, NODE:TMP3->key
5206 |.else
5207 | lwz CARG3, NODE:TMP3->key.u32.hi
5208 | lwz CARG4, NODE:TMP3->key.u32.lo
5209 |.endif
4418 | addis TMP2, PC, -(BCBIAS_J*4 >> 16) 5210 | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
5211 |.if FPU
4419 | stfd f0, 8(RA) 5212 | stfd f0, 8(RA)
5213 |.else
5214 | stw CARG1, 8(RA)
5215 | stw CARG2, 12(RA)
5216 |.endif
4420 | add RC, RC, TMP0 5217 | add RC, RC, TMP0
4421 | decode_RD4 TMP1, INS 5218 | decode_RD4 TMP1, INS
5219 |.if FPU
4422 | stfd f1, 0(RA) 5220 | stfd f1, 0(RA)
5221 |.else
5222 | stw CARG3, 0(RA)
5223 | stw CARG4, 4(RA)
5224 |.endif
4423 | addi RC, RC, 1 5225 | addi RC, RC, 1
4424 | add PC, TMP1, TMP2 5226 | add PC, TMP1, TMP2
4425 | stw RC, -4(RA) // Update control var. 5227 | stw RC, -4(RA) // Update control var.
@@ -4485,9 +5287,19 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4485 | subi TMP2, TMP2, 16 5287 | subi TMP2, TMP2, 16
4486 | ble >2 // No vararg slots? 5288 | ble >2 // No vararg slots?
4487 |1: // Copy vararg slots to destination slots. 5289 |1: // Copy vararg slots to destination slots.
5290 |.if FPU
4488 | lfd f0, 0(RC) 5291 | lfd f0, 0(RC)
5292 |.else
5293 | lwz CARG1, 0(RC)
5294 | lwz CARG2, 4(RC)
5295 |.endif
4489 | addi RC, RC, 8 5296 | addi RC, RC, 8
5297 |.if FPU
4490 | stfd f0, 0(RA) 5298 | stfd f0, 0(RA)
5299 |.else
5300 | stw CARG1, 0(RA)
5301 | stw CARG2, 4(RA)
5302 |.endif
4491 | cmplw RA, TMP2 5303 | cmplw RA, TMP2
4492 | cmplw cr1, RC, TMP3 5304 | cmplw cr1, RC, TMP3
4493 | bge >3 // All destination slots filled? 5305 | bge >3 // All destination slots filled?
@@ -4510,9 +5322,19 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4510 | addi MULTRES, TMP1, 8 5322 | addi MULTRES, TMP1, 8
4511 | bgt >7 5323 | bgt >7
4512 |6: 5324 |6:
5325 |.if FPU
4513 | lfd f0, 0(RC) 5326 | lfd f0, 0(RC)
5327 |.else
5328 | lwz CARG1, 0(RC)
5329 | lwz CARG2, 4(RC)
5330 |.endif
4514 | addi RC, RC, 8 5331 | addi RC, RC, 8
5332 |.if FPU
4515 | stfd f0, 0(RA) 5333 | stfd f0, 0(RA)
5334 |.else
5335 | stw CARG1, 0(RA)
5336 | stw CARG2, 4(RA)
5337 |.endif
4516 | cmplw RC, TMP3 5338 | cmplw RC, TMP3
4517 | addi RA, RA, 8 5339 | addi RA, RA, 8
4518 | blt <6 // More vararg slots? 5340 | blt <6 // More vararg slots?
@@ -4563,14 +5385,38 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4563 | li TMP1, 0 5385 | li TMP1, 0
4564 |2: 5386 |2:
4565 | addi TMP3, TMP1, 8 5387 | addi TMP3, TMP1, 8
5388 |.if FPU
4566 | lfdx f0, RA, TMP1 5389 | lfdx f0, RA, TMP1
5390 |.else
5391 | add CARG3, RA, TMP1
5392 | lwz CARG1, 0(CARG3)
5393 | lwz CARG2, 4(CARG3)
5394 |.endif
4567 | cmpw TMP3, RC 5395 | cmpw TMP3, RC
5396 |.if FPU
4568 | stfdx f0, TMP2, TMP1 5397 | stfdx f0, TMP2, TMP1
5398 |.else
5399 | add CARG3, TMP2, TMP1
5400 | stw CARG1, 0(CARG3)
5401 | stw CARG2, 4(CARG3)
5402 |.endif
4569 | beq >3 5403 | beq >3
4570 | addi TMP1, TMP3, 8 5404 | addi TMP1, TMP3, 8
5405 |.if FPU
4571 | lfdx f1, RA, TMP3 5406 | lfdx f1, RA, TMP3
5407 |.else
5408 | add CARG3, RA, TMP3
5409 | lwz CARG1, 0(CARG3)
5410 | lwz CARG2, 4(CARG3)
5411 |.endif
4572 | cmpw TMP1, RC 5412 | cmpw TMP1, RC
5413 |.if FPU
4573 | stfdx f1, TMP2, TMP3 5414 | stfdx f1, TMP2, TMP3
5415 |.else
5416 | add CARG3, TMP2, TMP3
5417 | stw CARG1, 0(CARG3)
5418 | stw CARG2, 4(CARG3)
5419 |.endif
4574 | bne <2 5420 | bne <2
4575 |3: 5421 |3:
4576 |5: 5422 |5:
@@ -4612,8 +5458,15 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4612 | subi TMP2, BASE, 8 5458 | subi TMP2, BASE, 8
4613 | decode_RB8 RB, INS 5459 | decode_RB8 RB, INS
4614 if (op == BC_RET1) { 5460 if (op == BC_RET1) {
5461 |.if FPU
4615 | lfd f0, 0(RA) 5462 | lfd f0, 0(RA)
4616 | stfd f0, 0(TMP2) 5463 | stfd f0, 0(TMP2)
5464 |.else
5465 | lwz CARG1, 0(RA)
5466 | lwz CARG2, 4(RA)
5467 | stw CARG1, 0(TMP2)
5468 | stw CARG2, 4(TMP2)
5469 |.endif
4617 } 5470 }
4618 |5: 5471 |5:
4619 | cmplw RB, RD 5472 | cmplw RB, RD
@@ -4674,11 +5527,11 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4674 |4: 5527 |4:
4675 | stw CARG1, FORL_IDX*8+4(RA) 5528 | stw CARG1, FORL_IDX*8+4(RA)
4676 } else { 5529 } else {
4677 | lwz TMP3, FORL_STEP*8(RA) 5530 | lwz SAVE0, FORL_STEP*8(RA)
4678 | lwz CARG3, FORL_STEP*8+4(RA) 5531 | lwz CARG3, FORL_STEP*8+4(RA)
4679 | lwz TMP2, FORL_STOP*8(RA) 5532 | lwz TMP2, FORL_STOP*8(RA)
4680 | lwz CARG2, FORL_STOP*8+4(RA) 5533 | lwz CARG2, FORL_STOP*8+4(RA)
4681 | cmplw cr7, TMP3, TISNUM 5534 | cmplw cr7, SAVE0, TISNUM
4682 | cmplw cr1, TMP2, TISNUM 5535 | cmplw cr1, TMP2, TISNUM
4683 | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq 5536 | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
4684 | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq 5537 | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
@@ -4721,41 +5574,80 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4721 if (vk) { 5574 if (vk) {
4722 |.if DUALNUM 5575 |.if DUALNUM
4723 |9: // FP loop. 5576 |9: // FP loop.
5577 |.if FPU
4724 | lfd f1, FORL_IDX*8(RA) 5578 | lfd f1, FORL_IDX*8(RA)
4725 |.else 5579 |.else
5580 | lwz CARG1, FORL_IDX*8(RA)
5581 | lwz CARG2, FORL_IDX*8+4(RA)
5582 |.endif
5583 |.else
4726 | lfdux f1, RA, BASE 5584 | lfdux f1, RA, BASE
4727 |.endif 5585 |.endif
5586 |.if FPU
4728 | lfd f3, FORL_STEP*8(RA) 5587 | lfd f3, FORL_STEP*8(RA)
4729 | lfd f2, FORL_STOP*8(RA) 5588 | lfd f2, FORL_STOP*8(RA)
4730 | lwz TMP3, FORL_STEP*8(RA)
4731 | fadd f1, f1, f3 5589 | fadd f1, f1, f3
4732 | stfd f1, FORL_IDX*8(RA) 5590 | stfd f1, FORL_IDX*8(RA)
5591 |.else
5592 | lwz CARG3, FORL_STEP*8(RA)
5593 | lwz CARG4, FORL_STEP*8+4(RA)
5594 | mr SAVE1, RD
5595 | blex __adddf3
5596 | mr RD, SAVE1
5597 | stw CRET1, FORL_IDX*8(RA)
5598 | stw CRET2, FORL_IDX*8+4(RA)
5599 | lwz CARG3, FORL_STOP*8(RA)
5600 | lwz CARG4, FORL_STOP*8+4(RA)
5601 |.endif
5602 | lwz SAVE0, FORL_STEP*8(RA)
4733 } else { 5603 } else {
4734 |.if DUALNUM 5604 |.if DUALNUM
4735 |9: // FP loop. 5605 |9: // FP loop.
4736 |.else 5606 |.else
4737 | lwzux TMP1, RA, BASE 5607 | lwzux TMP1, RA, BASE
4738 | lwz TMP3, FORL_STEP*8(RA) 5608 | lwz SAVE0, FORL_STEP*8(RA)
4739 | lwz TMP2, FORL_STOP*8(RA) 5609 | lwz TMP2, FORL_STOP*8(RA)
4740 | cmplw cr0, TMP1, TISNUM 5610 | cmplw cr0, TMP1, TISNUM
4741 | cmplw cr7, TMP3, TISNUM 5611 | cmplw cr7, SAVE0, TISNUM
4742 | cmplw cr1, TMP2, TISNUM 5612 | cmplw cr1, TMP2, TISNUM
4743 |.endif 5613 |.endif
5614 |.if FPU
4744 | lfd f1, FORL_IDX*8(RA) 5615 | lfd f1, FORL_IDX*8(RA)
5616 |.else
5617 | lwz CARG1, FORL_IDX*8(RA)
5618 | lwz CARG2, FORL_IDX*8+4(RA)
5619 |.endif
4745 | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt 5620 | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
4746 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt 5621 | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
5622 |.if FPU
4747 | lfd f2, FORL_STOP*8(RA) 5623 | lfd f2, FORL_STOP*8(RA)
5624 |.else
5625 | lwz CARG3, FORL_STOP*8(RA)
5626 | lwz CARG4, FORL_STOP*8+4(RA)
5627 |.endif
4748 | bge ->vmeta_for 5628 | bge ->vmeta_for
4749 } 5629 }
4750 | cmpwi cr6, TMP3, 0 5630 | cmpwi cr6, SAVE0, 0
4751 if (op != BC_JFORL) { 5631 if (op != BC_JFORL) {
4752 | srwi RD, RD, 1 5632 | srwi RD, RD, 1
4753 } 5633 }
5634 |.if FPU
4754 | stfd f1, FORL_EXT*8(RA) 5635 | stfd f1, FORL_EXT*8(RA)
5636 |.else
5637 | stw CARG1, FORL_EXT*8(RA)
5638 | stw CARG2, FORL_EXT*8+4(RA)
5639 |.endif
4755 if (op != BC_JFORL) { 5640 if (op != BC_JFORL) {
4756 | add RD, PC, RD 5641 | add RD, PC, RD
4757 } 5642 }
5643 |.if FPU
4758 | fcmpu cr0, f1, f2 5644 | fcmpu cr0, f1, f2
5645 |.else
5646 | mr SAVE1, RD
5647 | blex __ledf2
5648 | cmpwi CRET1, 0
5649 | mr RD, SAVE1
5650 |.endif
4759 if (op == BC_JFORI) { 5651 if (op == BC_JFORI) {
4760 | addis PC, RD, -(BCBIAS_J*4 >> 16) 5652 | addis PC, RD, -(BCBIAS_J*4 >> 16)
4761 } 5653 }
@@ -4858,8 +5750,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4858 | lp TMP2, TRACE:TMP2->mcode 5750 | lp TMP2, TRACE:TMP2->mcode
4859 | stw BASE, DISPATCH_GL(jit_base)(DISPATCH) 5751 | stw BASE, DISPATCH_GL(jit_base)(DISPATCH)
4860 | mtctr TMP2 5752 | mtctr TMP2
4861 | stw L, DISPATCH_GL(jit_L)(DISPATCH)
4862 | addi JGL, DISPATCH, GG_DISP2G+32768 5753 | addi JGL, DISPATCH, GG_DISP2G+32768
5754 | stw L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
4863 | bctr 5755 | bctr
4864 |.endif 5756 |.endif
4865 break; 5757 break;
@@ -4994,6 +5886,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4994 | lp TMP1, L->top 5886 | lp TMP1, L->top
4995 | li_vmstate INTERP 5887 | li_vmstate INTERP
4996 | lwz PC, FRAME_PC(BASE) // Fetch PC of caller. 5888 | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
5889 | stw L, DISPATCH_GL(cur_L)(DISPATCH)
4997 | sub RA, TMP1, RD // RA = L->top - nresults*8 5890 | sub RA, TMP1, RD // RA = L->top - nresults*8
4998 | st_vmstate 5891 | st_vmstate
4999 | b ->vm_returnc 5892 | b ->vm_returnc
diff --git a/src/vm_ppcspe.dasc b/src/vm_ppcspe.dasc
deleted file mode 100644
index 5542f8a2..00000000
--- a/src/vm_ppcspe.dasc
+++ /dev/null
@@ -1,3691 +0,0 @@
1|// Low-level VM code for PowerPC/e500 CPUs.
2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4|
5|.arch ppc
6|.section code_op, code_sub
7|
8|.actionlist build_actionlist
9|.globals GLOB_
10|.globalnames globnames
11|.externnames extnames
12|
13|// Note: The ragged indentation of the instructions is intentional.
14|// The starting columns indicate data dependencies.
15|
16|//-----------------------------------------------------------------------
17|
18|// Fixed register assignments for the interpreter.
19|// Don't use: r1 = sp, r2 and r13 = reserved and/or small data area ptr
20|
21|// The following must be C callee-save (but BASE is often refetched).
22|.define BASE, r14 // Base of current Lua stack frame.
23|.define KBASE, r15 // Constants of current Lua function.
24|.define PC, r16 // Next PC.
25|.define DISPATCH, r17 // Opcode dispatch table.
26|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
27|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
28|
29|// Constants for vectorized type-comparisons (hi+low GPR). C callee-save.
30|.define TISNUM, r22
31|.define TISSTR, r23
32|.define TISTAB, r24
33|.define TISFUNC, r25
34|.define TISNIL, r26
35|.define TOBIT, r27
36|.define ZERO, TOBIT // Zero in lo word.
37|
38|// The following temporaries are not saved across C calls, except for RA.
39|.define RA, r20 // Callee-save.
40|.define RB, r10
41|.define RC, r11
42|.define RD, r12
43|.define INS, r7 // Overlaps CARG5.
44|
45|.define TMP0, r0
46|.define TMP1, r8
47|.define TMP2, r9
48|.define TMP3, r6 // Overlaps CARG4.
49|
50|// Saved temporaries.
51|.define SAVE0, r21
52|
53|// Calling conventions.
54|.define CARG1, r3
55|.define CARG2, r4
56|.define CARG3, r5
57|.define CARG4, r6 // Overlaps TMP3.
58|.define CARG5, r7 // Overlaps INS.
59|
60|.define CRET1, r3
61|.define CRET2, r4
62|
63|// Stack layout while in interpreter. Must match with lj_frame.h.
64|.define SAVE_LR, 188(sp)
65|.define CFRAME_SPACE, 184 // Delta for sp.
66|// Back chain for sp: 184(sp) <-- sp entering interpreter
67|.define SAVE_r31, 176(sp) // 64 bit register saves.
68|.define SAVE_r30, 168(sp)
69|.define SAVE_r29, 160(sp)
70|.define SAVE_r28, 152(sp)
71|.define SAVE_r27, 144(sp)
72|.define SAVE_r26, 136(sp)
73|.define SAVE_r25, 128(sp)
74|.define SAVE_r24, 120(sp)
75|.define SAVE_r23, 112(sp)
76|.define SAVE_r22, 104(sp)
77|.define SAVE_r21, 96(sp)
78|.define SAVE_r20, 88(sp)
79|.define SAVE_r19, 80(sp)
80|.define SAVE_r18, 72(sp)
81|.define SAVE_r17, 64(sp)
82|.define SAVE_r16, 56(sp)
83|.define SAVE_r15, 48(sp)
84|.define SAVE_r14, 40(sp)
85|.define SAVE_CR, 36(sp)
86|.define UNUSED1, 32(sp)
87|.define SAVE_ERRF, 28(sp) // 32 bit C frame info.
88|.define SAVE_NRES, 24(sp)
89|.define SAVE_CFRAME, 20(sp)
90|.define SAVE_L, 16(sp)
91|.define SAVE_PC, 12(sp)
92|.define SAVE_MULTRES, 8(sp)
93|// Next frame lr: 4(sp)
94|// Back chain for sp: 0(sp) <-- sp while in interpreter
95|
96|.macro save_, reg; evstdd reg, SAVE_..reg; .endmacro
97|.macro rest_, reg; evldd reg, SAVE_..reg; .endmacro
98|
99|.macro saveregs
100| stwu sp, -CFRAME_SPACE(sp)
101| save_ r14; save_ r15; save_ r16; save_ r17; save_ r18; save_ r19
102| mflr r0; mfcr r12
103| save_ r20; save_ r21; save_ r22; save_ r23; save_ r24; save_ r25
104| stw r0, SAVE_LR; stw r12, SAVE_CR
105| save_ r26; save_ r27; save_ r28; save_ r29; save_ r30; save_ r31
106|.endmacro
107|
108|.macro restoreregs
109| lwz r0, SAVE_LR; lwz r12, SAVE_CR
110| rest_ r14; rest_ r15; rest_ r16; rest_ r17; rest_ r18; rest_ r19
111| mtlr r0; mtcrf 0x38, r12
112| rest_ r20; rest_ r21; rest_ r22; rest_ r23; rest_ r24; rest_ r25
113| rest_ r26; rest_ r27; rest_ r28; rest_ r29; rest_ r30; rest_ r31
114| addi sp, sp, CFRAME_SPACE
115|.endmacro
116|
117|// Type definitions. Some of these are only used for documentation.
118|.type L, lua_State, LREG
119|.type GL, global_State
120|.type TVALUE, TValue
121|.type GCOBJ, GCobj
122|.type STR, GCstr
123|.type TAB, GCtab
124|.type LFUNC, GCfuncL
125|.type CFUNC, GCfuncC
126|.type PROTO, GCproto
127|.type UPVAL, GCupval
128|.type NODE, Node
129|.type NARGS8, int
130|.type TRACE, GCtrace
131|
132|//-----------------------------------------------------------------------
133|
134|// These basic macros should really be part of DynASM.
135|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
136|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
137|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
138|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
139|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
140|
141|// Trap for not-yet-implemented parts.
142|.macro NYI; tw 4, sp, sp; .endmacro
143|
144|//-----------------------------------------------------------------------
145|
146|// Access to frame relative to BASE.
147|.define FRAME_PC, -8
148|.define FRAME_FUNC, -4
149|
150|// Instruction decode.
151|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
152|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
153|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
154|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
155|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
156|
157|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
158|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
159|
160|// Instruction fetch.
161|.macro ins_NEXT1
162| lwz INS, 0(PC)
163| addi PC, PC, 4
164|.endmacro
165|// Instruction decode+dispatch.
166|.macro ins_NEXT2
167| decode_OP4 TMP1, INS
168| decode_RB8 RB, INS
169| decode_RD8 RD, INS
170| lwzx TMP0, DISPATCH, TMP1
171| decode_RA8 RA, INS
172| decode_RC8 RC, INS
173| mtctr TMP0
174| bctr
175|.endmacro
176|.macro ins_NEXT
177| ins_NEXT1
178| ins_NEXT2
179|.endmacro
180|
181|// Instruction footer.
182|.if 1
183| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
184| .define ins_next, ins_NEXT
185| .define ins_next_, ins_NEXT
186| .define ins_next1, ins_NEXT1
187| .define ins_next2, ins_NEXT2
188|.else
189| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
190| // Affects only certain kinds of benchmarks (and only with -j off).
191| .macro ins_next
192| b ->ins_next
193| .endmacro
194| .macro ins_next1
195| .endmacro
196| .macro ins_next2
197| b ->ins_next
198| .endmacro
199| .macro ins_next_
200| ->ins_next:
201| ins_NEXT
202| .endmacro
203|.endif
204|
205|// Call decode and dispatch.
206|.macro ins_callt
207| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
208| lwz PC, LFUNC:RB->pc
209| lwz INS, 0(PC)
210| addi PC, PC, 4
211| decode_OP4 TMP1, INS
212| decode_RA8 RA, INS
213| lwzx TMP0, DISPATCH, TMP1
214| add RA, RA, BASE
215| mtctr TMP0
216| bctr
217|.endmacro
218|
219|.macro ins_call
220| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
221| stw PC, FRAME_PC(BASE)
222| ins_callt
223|.endmacro
224|
225|//-----------------------------------------------------------------------
226|
227|// Macros to test operand types.
228|.macro checknum, reg; evcmpltu reg, TISNUM; .endmacro
229|.macro checkstr, reg; evcmpeq reg, TISSTR; .endmacro
230|.macro checktab, reg; evcmpeq reg, TISTAB; .endmacro
231|.macro checkfunc, reg; evcmpeq reg, TISFUNC; .endmacro
232|.macro checknil, reg; evcmpeq reg, TISNIL; .endmacro
233|.macro checkok, label; blt label; .endmacro
234|.macro checkfail, label; bge label; .endmacro
235|.macro checkanyfail, label; bns label; .endmacro
236|.macro checkallok, label; bso label; .endmacro
237|
238|.macro branch_RD
239| srwi TMP0, RD, 1
240| add PC, PC, TMP0
241| addis PC, PC, -(BCBIAS_J*4 >> 16)
242|.endmacro
243|
244|// Assumes DISPATCH is relative to GL.
245#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
246#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
247|
248#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
249|
250|.macro hotloop
251| NYI
252|.endmacro
253|
254|.macro hotcall
255| NYI
256|.endmacro
257|
258|// Set current VM state. Uses TMP0.
259|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
260|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
261|
262|// Move table write barrier back. Overwrites mark and tmp.
263|.macro barrierback, tab, mark, tmp
264| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
265| // Assumes LJ_GC_BLACK is 0x04.
266| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
267| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
268| stb mark, tab->marked
269| stw tmp, tab->gclist
270|.endmacro
271|
272|//-----------------------------------------------------------------------
273
274/* Generate subroutines used by opcodes and other parts of the VM. */
275/* The .code_sub section should be last to help static branch prediction. */
276static void build_subroutines(BuildCtx *ctx)
277{
278 |.code_sub
279 |
280 |//-----------------------------------------------------------------------
281 |//-- Return handling ----------------------------------------------------
282 |//-----------------------------------------------------------------------
283 |
284 |->vm_returnp:
285 | // See vm_return. Also: TMP2 = previous base.
286 | andi. TMP0, PC, FRAME_P
287 | evsplati TMP1, LJ_TTRUE
288 | beq ->cont_dispatch
289 |
290 | // Return from pcall or xpcall fast func.
291 | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
292 | mr BASE, TMP2 // Restore caller base.
293 | // Prepending may overwrite the pcall frame, so do it at the end.
294 | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
295 |
296 |->vm_returnc:
297 | addi RD, RD, 8 // RD = (nresults+1)*8.
298 | andi. TMP0, PC, FRAME_TYPE
299 | cmpwi cr1, RD, 0
300 | li CRET1, LUA_YIELD
301 | beq cr1, ->vm_unwind_c_eh
302 | mr MULTRES, RD
303 | beq ->BC_RET_Z // Handle regular return to Lua.
304 |
305 |->vm_return:
306 | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
307 | // TMP0 = PC & FRAME_TYPE
308 | cmpwi TMP0, FRAME_C
309 | rlwinm TMP2, PC, 0, 0, 28
310 | li_vmstate C
311 | sub TMP2, BASE, TMP2 // TMP2 = previous base.
312 | bne ->vm_returnp
313 |
314 | addic. TMP1, RD, -8
315 | stw TMP2, L->base
316 | lwz TMP2, SAVE_NRES
317 | subi BASE, BASE, 8
318 | st_vmstate
319 | slwi TMP2, TMP2, 3
320 | beq >2
321 |1:
322 | addic. TMP1, TMP1, -8
323 | evldd TMP0, 0(RA)
324 | addi RA, RA, 8
325 | evstdd TMP0, 0(BASE)
326 | addi BASE, BASE, 8
327 | bne <1
328 |
329 |2:
330 | cmpw TMP2, RD // More/less results wanted?
331 | bne >6
332 |3:
333 | stw BASE, L->top // Store new top.
334 |
335 |->vm_leave_cp:
336 | lwz TMP0, SAVE_CFRAME // Restore previous C frame.
337 | li CRET1, 0 // Ok return status for vm_pcall.
338 | stw TMP0, L->cframe
339 |
340 |->vm_leave_unw:
341 | restoreregs
342 | blr
343 |
344 |6:
345 | ble >7 // Less results wanted?
346 | // More results wanted. Check stack size and fill up results with nil.
347 | lwz TMP1, L->maxstack
348 | cmplw BASE, TMP1
349 | bge >8
350 | evstdd TISNIL, 0(BASE)
351 | addi RD, RD, 8
352 | addi BASE, BASE, 8
353 | b <2
354 |
355 |7: // Less results wanted.
356 | sub TMP0, RD, TMP2
357 | cmpwi TMP2, 0 // LUA_MULTRET+1 case?
358 | sub TMP0, BASE, TMP0 // Subtract the difference.
359 | iseleq BASE, BASE, TMP0 // Either keep top or shrink it.
360 | b <3
361 |
362 |8: // Corner case: need to grow stack for filling up results.
363 | // This can happen if:
364 | // - A C function grows the stack (a lot).
365 | // - The GC shrinks the stack in between.
366 | // - A return back from a lua_call() with (high) nresults adjustment.
367 | stw BASE, L->top // Save current top held in BASE (yes).
368 | mr SAVE0, RD
369 | mr CARG2, TMP2
370 | mr CARG1, L
371 | bl extern lj_state_growstack // (lua_State *L, int n)
372 | lwz TMP2, SAVE_NRES
373 | mr RD, SAVE0
374 | slwi TMP2, TMP2, 3
375 | lwz BASE, L->top // Need the (realloced) L->top in BASE.
376 | b <2
377 |
378 |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
379 | // (void *cframe, int errcode)
380 | mr sp, CARG1
381 | mr CRET1, CARG2
382 |->vm_unwind_c_eh: // Landing pad for external unwinder.
383 | lwz L, SAVE_L
384 | li TMP0, ~LJ_VMST_C
385 | lwz GL:TMP1, L->glref
386 | stw TMP0, GL:TMP1->vmstate
387 | b ->vm_leave_unw
388 |
389 |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
390 | // (void *cframe)
391 | rlwinm sp, CARG1, 0, 0, 29
392 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
393 | lwz L, SAVE_L
394 | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
395 | evsplati TISFUNC, LJ_TFUNC
396 | lus TOBIT, 0x4338
397 | evsplati TISTAB, LJ_TTAB
398 | li TMP0, 0
399 | lwz BASE, L->base
400 | evmergelo TOBIT, TOBIT, TMP0
401 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
402 | evsplati TISSTR, LJ_TSTR
403 | li TMP1, LJ_TFALSE
404 | evsplati TISNIL, LJ_TNIL
405 | li_vmstate INTERP
406 | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
407 | la RA, -8(BASE) // Results start at BASE-8.
408 | addi DISPATCH, DISPATCH, GG_G2DISP
409 | stw TMP1, 0(RA) // Prepend false to error message.
410 | li RD, 16 // 2 results: false + error message.
411 | st_vmstate
412 | b ->vm_returnc
413 |
414 |//-----------------------------------------------------------------------
415 |//-- Grow stack for calls -----------------------------------------------
416 |//-----------------------------------------------------------------------
417 |
418 |->vm_growstack_c: // Grow stack for C function.
419 | li CARG2, LUA_MINSTACK
420 | b >2
421 |
422 |->vm_growstack_l: // Grow stack for Lua function.
423 | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
424 | add RC, BASE, RC
425 | sub RA, RA, BASE
426 | stw BASE, L->base
427 | addi PC, PC, 4 // Must point after first instruction.
428 | stw RC, L->top
429 | srwi CARG2, RA, 3
430 |2:
431 | // L->base = new base, L->top = top
432 | stw PC, SAVE_PC
433 | mr CARG1, L
434 | bl extern lj_state_growstack // (lua_State *L, int n)
435 | lwz BASE, L->base
436 | lwz RC, L->top
437 | lwz LFUNC:RB, FRAME_FUNC(BASE)
438 | sub RC, RC, BASE
439 | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
440 | ins_callt // Just retry the call.
441 |
442 |//-----------------------------------------------------------------------
443 |//-- Entry points into the assembler VM ---------------------------------
444 |//-----------------------------------------------------------------------
445 |
446 |->vm_resume: // Setup C frame and resume thread.
447 | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
448 | saveregs
449 | mr L, CARG1
450 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
451 | mr BASE, CARG2
452 | lbz TMP1, L->status
453 | stw L, SAVE_L
454 | li PC, FRAME_CP
455 | addi TMP0, sp, CFRAME_RESUME
456 | addi DISPATCH, DISPATCH, GG_G2DISP
457 | stw CARG3, SAVE_NRES
458 | cmplwi TMP1, 0
459 | stw CARG3, SAVE_ERRF
460 | stw TMP0, L->cframe
461 | stw CARG3, SAVE_CFRAME
462 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
463 | beq >3
464 |
465 | // Resume after yield (like a return).
466 | mr RA, BASE
467 | lwz BASE, L->base
468 | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
469 | lwz TMP1, L->top
470 | evsplati TISFUNC, LJ_TFUNC
471 | lus TOBIT, 0x4338
472 | evsplati TISTAB, LJ_TTAB
473 | lwz PC, FRAME_PC(BASE)
474 | li TMP2, 0
475 | evsplati TISSTR, LJ_TSTR
476 | sub RD, TMP1, BASE
477 | evmergelo TOBIT, TOBIT, TMP2
478 | stb CARG3, L->status
479 | andi. TMP0, PC, FRAME_TYPE
480 | li_vmstate INTERP
481 | addi RD, RD, 8
482 | evsplati TISNIL, LJ_TNIL
483 | mr MULTRES, RD
484 | st_vmstate
485 | beq ->BC_RET_Z
486 | b ->vm_return
487 |
488 |->vm_pcall: // Setup protected C frame and enter VM.
489 | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
490 | saveregs
491 | li PC, FRAME_CP
492 | stw CARG4, SAVE_ERRF
493 | b >1
494 |
495 |->vm_call: // Setup C frame and enter VM.
496 | // (lua_State *L, TValue *base, int nres1)
497 | saveregs
498 | li PC, FRAME_C
499 |
500 |1: // Entry point for vm_pcall above (PC = ftype).
501 | lwz TMP1, L:CARG1->cframe
502 | stw CARG3, SAVE_NRES
503 | mr L, CARG1
504 | stw CARG1, SAVE_L
505 | mr BASE, CARG2
506 | stw sp, L->cframe // Add our C frame to cframe chain.
507 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
508 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
509 | stw TMP1, SAVE_CFRAME
510 | addi DISPATCH, DISPATCH, GG_G2DISP
511 |
512 |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
513 | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call).
514 | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
515 | lwz TMP1, L->top
516 | evsplati TISFUNC, LJ_TFUNC
517 | add PC, PC, BASE
518 | evsplati TISTAB, LJ_TTAB
519 | lus TOBIT, 0x4338
520 | li TMP0, 0
521 | sub PC, PC, TMP2 // PC = frame delta + frame type
522 | evsplati TISSTR, LJ_TSTR
523 | sub NARGS8:RC, TMP1, BASE
524 | evmergelo TOBIT, TOBIT, TMP0
525 | li_vmstate INTERP
526 | evsplati TISNIL, LJ_TNIL
527 | st_vmstate
528 |
529 |->vm_call_dispatch:
530 | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
531 | li TMP0, -8
532 | evlddx LFUNC:RB, BASE, TMP0
533 | checkfunc LFUNC:RB
534 | checkfail ->vmeta_call
535 |
536 |->vm_call_dispatch_f:
537 | ins_call
538 | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
539 |
540 |->vm_cpcall: // Setup protected C frame, call C.
541 | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
542 | saveregs
543 | mr L, CARG1
544 | lwz TMP0, L:CARG1->stack
545 | stw CARG1, SAVE_L
546 | lwz TMP1, L->top
547 | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
548 | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
549 | lwz TMP1, L->cframe
550 | stw sp, L->cframe // Add our C frame to cframe chain.
551 | li TMP2, 0
552 | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
553 | stw TMP2, SAVE_ERRF // No error function.
554 | stw TMP1, SAVE_CFRAME
555 | mtctr CARG4
556 | bctrl // (lua_State *L, lua_CFunction func, void *ud)
557 | mr. BASE, CRET1
558 | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
559 | li PC, FRAME_CP
560 | addi DISPATCH, DISPATCH, GG_G2DISP
561 | bne <3 // Else continue with the call.
562 | b ->vm_leave_cp // No base? Just remove C frame.
563 |
564 |//-----------------------------------------------------------------------
565 |//-- Metamethod handling ------------------------------------------------
566 |//-----------------------------------------------------------------------
567 |
568 |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
569 |// stack, so BASE doesn't need to be reloaded across these calls.
570 |
571 |//-- Continuation dispatch ----------------------------------------------
572 |
573 |->cont_dispatch:
574 | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
575 | lwz TMP0, -12(BASE) // Continuation.
576 | mr RB, BASE
577 | mr BASE, TMP2 // Restore caller BASE.
578 | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
579 | cmplwi TMP0, 0
580 | lwz PC, -16(RB) // Restore PC from [cont|PC].
581 | beq >1
582 | subi TMP2, RD, 8
583 | lwz TMP1, LFUNC:TMP1->pc
584 | evstddx TISNIL, RA, TMP2 // Ensure one valid arg.
585 | lwz KBASE, PC2PROTO(k)(TMP1)
586 | // BASE = base, RA = resultptr, RB = meta base
587 | mtctr TMP0
588 | bctr // Jump to continuation.
589 |
590 |1: // Tail call from C function.
591 | subi TMP1, RB, 16
592 | sub RC, TMP1, BASE
593 | b ->vm_call_tail
594 |
595 |->cont_cat: // RA = resultptr, RB = meta base
596 | lwz INS, -4(PC)
597 | subi CARG2, RB, 16
598 | decode_RB8 SAVE0, INS
599 | evldd TMP0, 0(RA)
600 | add TMP1, BASE, SAVE0
601 | stw BASE, L->base
602 | cmplw TMP1, CARG2
603 | sub CARG3, CARG2, TMP1
604 | decode_RA8 RA, INS
605 | evstdd TMP0, 0(CARG2)
606 | bne ->BC_CAT_Z
607 | evstddx TMP0, BASE, RA
608 | b ->cont_nop
609 |
610 |//-- Table indexing metamethods -----------------------------------------
611 |
612 |->vmeta_tgets1:
613 | evmergelo STR:RC, TISSTR, STR:RC
614 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
615 | decode_RB8 RB, INS
616 | evstdd STR:RC, 0(CARG3)
617 | add CARG2, BASE, RB
618 | b >1
619 |
620 |->vmeta_tgets:
621 | evmergelo TAB:RB, TISTAB, TAB:RB
622 | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
623 | evmergelo STR:RC, TISSTR, STR:RC
624 | evstdd TAB:RB, 0(CARG2)
625 | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
626 | evstdd STR:RC, 0(CARG3)
627 | b >1
628 |
629 |->vmeta_tgetb: // TMP0 = index
630 | efdcfsi TMP0, TMP0
631 | decode_RB8 RB, INS
632 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
633 | add CARG2, BASE, RB
634 | evstdd TMP0, 0(CARG3)
635 | b >1
636 |
637 |->vmeta_tgetv:
638 | decode_RB8 RB, INS
639 | decode_RC8 RC, INS
640 | add CARG2, BASE, RB
641 | add CARG3, BASE, RC
642 |1:
643 | stw BASE, L->base
644 | mr CARG1, L
645 | stw PC, SAVE_PC
646 | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
647 | // Returns TValue * (finished) or NULL (metamethod).
648 | cmplwi CRET1, 0
649 | beq >3
650 | evldd TMP0, 0(CRET1)
651 | evstddx TMP0, BASE, RA
652 | ins_next
653 |
654 |3: // Call __index metamethod.
655 | // BASE = base, L->top = new base, stack = cont/func/t/k
656 | subfic TMP1, BASE, FRAME_CONT
657 | lwz BASE, L->top
658 | stw PC, -16(BASE) // [cont|PC]
659 | add PC, TMP1, BASE
660 | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
661 | li NARGS8:RC, 16 // 2 args for func(t, k).
662 | b ->vm_call_dispatch_f
663 |
664 |//-----------------------------------------------------------------------
665 |
666 |->vmeta_tsets1:
667 | evmergelo STR:RC, TISSTR, STR:RC
668 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
669 | decode_RB8 RB, INS
670 | evstdd STR:RC, 0(CARG3)
671 | add CARG2, BASE, RB
672 | b >1
673 |
674 |->vmeta_tsets:
675 | evmergelo TAB:RB, TISTAB, TAB:RB
676 | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
677 | evmergelo STR:RC, TISSTR, STR:RC
678 | evstdd TAB:RB, 0(CARG2)
679 | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
680 | evstdd STR:RC, 0(CARG3)
681 | b >1
682 |
683 |->vmeta_tsetb: // TMP0 = index
684 | efdcfsi TMP0, TMP0
685 | decode_RB8 RB, INS
686 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
687 | add CARG2, BASE, RB
688 | evstdd TMP0, 0(CARG3)
689 | b >1
690 |
691 |->vmeta_tsetv:
692 | decode_RB8 RB, INS
693 | decode_RC8 RC, INS
694 | add CARG2, BASE, RB
695 | add CARG3, BASE, RC
696 |1:
697 | stw BASE, L->base
698 | mr CARG1, L
699 | stw PC, SAVE_PC
700 | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
701 | // Returns TValue * (finished) or NULL (metamethod).
702 | cmplwi CRET1, 0
703 | evlddx TMP0, BASE, RA
704 | beq >3
705 | // NOBARRIER: lj_meta_tset ensures the table is not black.
706 | evstdd TMP0, 0(CRET1)
707 | ins_next
708 |
709 |3: // Call __newindex metamethod.
710 | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
711 | subfic TMP1, BASE, FRAME_CONT
712 | lwz BASE, L->top
713 | stw PC, -16(BASE) // [cont|PC]
714 | add PC, TMP1, BASE
715 | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
716 | li NARGS8:RC, 24 // 3 args for func(t, k, v)
717 | evstdd TMP0, 16(BASE) // Copy value to third argument.
718 | b ->vm_call_dispatch_f
719 |
720 |//-- Comparison metamethods ---------------------------------------------
721 |
722 |->vmeta_comp:
723 | mr CARG1, L
724 | subi PC, PC, 4
725 | add CARG2, BASE, RA
726 | stw PC, SAVE_PC
727 | add CARG3, BASE, RD
728 | stw BASE, L->base
729 | decode_OP1 CARG4, INS
730 | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
731 | // Returns 0/1 or TValue * (metamethod).
732 |3:
733 | cmplwi CRET1, 1
734 | bgt ->vmeta_binop
735 |4:
736 | lwz INS, 0(PC)
737 | addi PC, PC, 4
738 | decode_RD4 TMP2, INS
739 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
740 | add TMP2, TMP2, TMP3
741 | isellt PC, PC, TMP2
742 |->cont_nop:
743 | ins_next
744 |
745 |->cont_ra: // RA = resultptr
746 | lwz INS, -4(PC)
747 | evldd TMP0, 0(RA)
748 | decode_RA8 TMP1, INS
749 | evstddx TMP0, BASE, TMP1
750 | b ->cont_nop
751 |
752 |->cont_condt: // RA = resultptr
753 | lwz TMP0, 0(RA)
754 | li TMP1, LJ_TTRUE
755 | cmplw TMP1, TMP0 // Branch if result is true.
756 | b <4
757 |
758 |->cont_condf: // RA = resultptr
759 | lwz TMP0, 0(RA)
760 | li TMP1, LJ_TFALSE
761 | cmplw TMP0, TMP1 // Branch if result is false.
762 | b <4
763 |
764 |->vmeta_equal:
765 | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
766 | subi PC, PC, 4
767 | stw BASE, L->base
768 | mr CARG1, L
769 | stw PC, SAVE_PC
770 | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
771 | // Returns 0/1 or TValue * (metamethod).
772 | b <3
773 |
774 |//-- Arithmetic metamethods ---------------------------------------------
775 |
776 |->vmeta_arith_vn:
777 | add CARG3, BASE, RB
778 | add CARG4, KBASE, RC
779 | b >1
780 |
781 |->vmeta_arith_nv:
782 | add CARG3, KBASE, RC
783 | add CARG4, BASE, RB
784 | b >1
785 |
786 |->vmeta_unm:
787 | add CARG3, BASE, RD
788 | mr CARG4, CARG3
789 | b >1
790 |
791 |->vmeta_arith_vv:
792 | add CARG3, BASE, RB
793 | add CARG4, BASE, RC
794 |1:
795 | add CARG2, BASE, RA
796 | stw BASE, L->base
797 | mr CARG1, L
798 | stw PC, SAVE_PC
799 | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
800 | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
801 | // Returns NULL (finished) or TValue * (metamethod).
802 | cmplwi CRET1, 0
803 | beq ->cont_nop
804 |
805 | // Call metamethod for binary op.
806 |->vmeta_binop:
807 | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
808 | sub TMP1, CRET1, BASE
809 | stw PC, -16(CRET1) // [cont|PC]
810 | mr TMP2, BASE
811 | addi PC, TMP1, FRAME_CONT
812 | mr BASE, CRET1
813 | li NARGS8:RC, 16 // 2 args for func(o1, o2).
814 | b ->vm_call_dispatch
815 |
816 |->vmeta_len:
817#if LJ_52
818 | mr SAVE0, CARG1
819#endif
820 | add CARG2, BASE, RD
821 | stw BASE, L->base
822 | mr CARG1, L
823 | stw PC, SAVE_PC
824 | bl extern lj_meta_len // (lua_State *L, TValue *o)
825 | // Returns NULL (retry) or TValue * (metamethod base).
826#if LJ_52
827 | cmplwi CRET1, 0
828 | bne ->vmeta_binop // Binop call for compatibility.
829 | mr CARG1, SAVE0
830 | b ->BC_LEN_Z
831#else
832 | b ->vmeta_binop // Binop call for compatibility.
833#endif
834 |
835 |//-- Call metamethod ----------------------------------------------------
836 |
837 |->vmeta_call: // Resolve and call __call metamethod.
838 | // TMP2 = old base, BASE = new base, RC = nargs*8
839 | mr CARG1, L
840 | stw TMP2, L->base // This is the callers base!
841 | subi CARG2, BASE, 8
842 | stw PC, SAVE_PC
843 | add CARG3, BASE, RC
844 | mr SAVE0, NARGS8:RC
845 | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
846 | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
847 | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
848 | ins_call
849 |
850 |->vmeta_callt: // Resolve __call for BC_CALLT.
851 | // BASE = old base, RA = new base, RC = nargs*8
852 | mr CARG1, L
853 | stw BASE, L->base
854 | subi CARG2, RA, 8
855 | stw PC, SAVE_PC
856 | add CARG3, RA, RC
857 | mr SAVE0, NARGS8:RC
858 | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
859 | lwz TMP1, FRAME_PC(BASE)
860 | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
861 | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
862 | b ->BC_CALLT_Z
863 |
864 |//-- Argument coercion for 'for' statement ------------------------------
865 |
866 |->vmeta_for:
867 | mr CARG1, L
868 | stw BASE, L->base
869 | mr CARG2, RA
870 | stw PC, SAVE_PC
871 | mr SAVE0, INS
872 | bl extern lj_meta_for // (lua_State *L, TValue *base)
873 |.if JIT
874 | decode_OP1 TMP0, SAVE0
875 |.endif
876 | decode_RA8 RA, SAVE0
877 |.if JIT
878 | cmpwi TMP0, BC_JFORI
879 |.endif
880 | decode_RD8 RD, SAVE0
881 |.if JIT
882 | beq =>BC_JFORI
883 |.endif
884 | b =>BC_FORI
885 |
886 |//-----------------------------------------------------------------------
887 |//-- Fast functions -----------------------------------------------------
888 |//-----------------------------------------------------------------------
889 |
890 |.macro .ffunc, name
891 |->ff_ .. name:
892 |.endmacro
893 |
894 |.macro .ffunc_1, name
895 |->ff_ .. name:
896 | cmplwi NARGS8:RC, 8
897 | evldd CARG1, 0(BASE)
898 | blt ->fff_fallback
899 |.endmacro
900 |
901 |.macro .ffunc_2, name
902 |->ff_ .. name:
903 | cmplwi NARGS8:RC, 16
904 | evldd CARG1, 0(BASE)
905 | evldd CARG2, 8(BASE)
906 | blt ->fff_fallback
907 |.endmacro
908 |
909 |.macro .ffunc_n, name
910 | .ffunc_1 name
911 | checknum CARG1
912 | checkfail ->fff_fallback
913 |.endmacro
914 |
915 |.macro .ffunc_nn, name
916 | .ffunc_2 name
917 | evmergehi TMP0, CARG1, CARG2
918 | checknum TMP0
919 | checkanyfail ->fff_fallback
920 |.endmacro
921 |
922 |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
923 |.macro ffgccheck
924 | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
925 | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
926 | cmplw TMP0, TMP1
927 | bgel ->fff_gcstep
928 |.endmacro
929 |
930 |//-- Base library: checks -----------------------------------------------
931 |
932 |.ffunc assert
933 | cmplwi NARGS8:RC, 8
934 | evldd TMP0, 0(BASE)
935 | blt ->fff_fallback
936 | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
937 | la RA, -8(BASE)
938 | evcmpltu cr1, TMP0, TMP1
939 | lwz PC, FRAME_PC(BASE)
940 | bge cr1, ->fff_fallback
941 | evstdd TMP0, 0(RA)
942 | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
943 | beq ->fff_res // Done if exactly 1 argument.
944 | li TMP1, 8
945 | subi RC, RC, 8
946 |1:
947 | cmplw TMP1, RC
948 | evlddx TMP0, BASE, TMP1
949 | evstddx TMP0, RA, TMP1
950 | addi TMP1, TMP1, 8
951 | bne <1
952 | b ->fff_res
953 |
954 |.ffunc type
955 | cmplwi NARGS8:RC, 8
956 | lwz CARG1, 0(BASE)
957 | blt ->fff_fallback
958 | li TMP2, ~LJ_TNUMX
959 | cmplw CARG1, TISNUM
960 | not TMP1, CARG1
961 | isellt TMP1, TMP2, TMP1
962 | slwi TMP1, TMP1, 3
963 | la TMP2, CFUNC:RB->upvalue
964 | evlddx STR:CRET1, TMP2, TMP1
965 | b ->fff_restv
966 |
967 |//-- Base library: getters and setters ---------------------------------
968 |
|// getmetatable(o): returns mt.__metatable if present, else the metatable
|// itself, else nil. Handles tables/userdata inline plus basemt lookup.
969 |.ffunc_1 getmetatable
970 | checktab CARG1
971 | evmergehi TMP1, CARG1, CARG1
972 | checkfail >6
973 |1: // Field metatable must be at same offset for GCtab and GCudata!
974 | lwz TAB:RB, TAB:CARG1->metatable
975 |2:
976 | evmr CRET1, TISNIL
977 | cmplwi TAB:RB, 0
978 | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
979 | beq ->fff_restv // No metatable: return nil.
980 | lwz TMP0, TAB:RB->hmask
981 | evmergelo CRET1, TISTAB, TAB:RB // Use metatable as default result.
982 | lwz TMP1, STR:RC->hash
983 | lwz NODE:TMP2, TAB:RB->node
984 | evmergelo STR:RC, TISSTR, STR:RC
985 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
986 | slwi TMP0, TMP1, 5
987 | slwi TMP1, TMP1, 3
988 | sub TMP1, TMP0, TMP1
989 | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
990 |3: // Rearranged logic, because we expect _not_ to find the key.
991 | evldd TMP0, NODE:TMP2->key
992 | evldd TMP1, NODE:TMP2->val
993 | evcmpeq TMP0, STR:RC
994 | lwz NODE:TMP2, NODE:TMP2->next
995 | checkallok >5
996 | cmplwi NODE:TMP2, 0
997 | beq ->fff_restv // Not found, keep default result.
998 | b <3
999 |5: // Found __metatable key in the hash part.
1000 | checknil TMP1
1001 | checkok ->fff_restv // Ditto for nil value.
1002 | evmr CRET1, TMP1 // Return value of mt.__metatable.
1003 | b ->fff_restv
1004 |
1005 |6: // Not a table: try userdata, else use per-type base metatable.
1006 | cmpwi TMP1, LJ_TUDATA
1007 | not TMP1, TMP1
1008 | beq <1
1009 | checknum CARG1
1010 | slwi TMP1, TMP1, 2
1011 | li TMP2, 4*~LJ_TNUMX
1012 | isellt TMP1, TMP2, TMP1 // Numbers share one basemt slot.
1013 | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
1014 | lwzx TAB:RB, TMP2, TMP1
1015 | b <2
1016 |
|// setmetatable(t, mt): inline only when t has no metatable yet and mt
|// is a table (__metatable protection forces the fallback).
1017 |.ffunc_2 setmetatable
1018 | // Fast path: no mt for table yet and not clearing the mt.
1019 | evmergehi TMP0, TAB:CARG1, TAB:CARG2
1020 | checktab TMP0
1021 | checkanyfail ->fff_fallback
1022 | lwz TAB:TMP1, TAB:CARG1->metatable
1023 | cmplwi TAB:TMP1, 0
1024 | lbz TMP3, TAB:CARG1->marked
1025 | bne ->fff_fallback
1026 | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
1027 | stw TAB:CARG2, TAB:CARG1->metatable
1028 | beq ->fff_restv
|// Black table now references a (possibly white) mt: GC write barrier.
1029 | barrierback TAB:CARG1, TMP3, TMP0
1030 | b ->fff_restv
1031 |
|// rawget(t, k): direct table lookup via lj_tab_get, no metamethods.
1032 |.ffunc rawget
1033 | cmplwi NARGS8:RC, 16
1034 | evldd CARG2, 0(BASE)
1035 | blt ->fff_fallback
1036 | checktab CARG2
1037 | la CARG3, 8(BASE)
1038 | checkfail ->fff_fallback
1039 | mr CARG1, L
1040 | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1041 | // Returns cTValue *.
1042 | evldd CRET1, 0(CRET1)
1043 | b ->fff_restv
1044 |
1045 |//-- Base library: conversions ------------------------------------------
1046 |
1047 |.ffunc tonumber
1048 | // Only handles the number case inline (without a base argument).
1049 | cmplwi NARGS8:RC, 8
1050 | evldd CARG1, 0(BASE)
1051 | bne ->fff_fallback // Exactly one argument.
1052 | checknum CARG1
1053 | checkok ->fff_restv // Number: return it unchanged.
1054 | b ->fff_fallback
1055 |
1056 |.ffunc_1 tostring
1057 | // Only handles the string or number case inline.
1058 | checkstr CARG1
1059 | // A __tostring method in the string base metatable is ignored.
1060 | checkok ->fff_restv // String key?
1061 | // Handle numbers inline, unless a number base metatable is present.
1062 | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
1063 | checknum CARG1
1064 | cmplwi cr1, TMP0, 0
1065 | stw BASE, L->base // Add frame since C call can throw.
|// eq := (is a number) && (no number base metatable).
1066 | crand 4*cr0+eq, 4*cr0+lt, 4*cr1+eq
1067 | stw PC, SAVE_PC // Redundant (but a defined value).
1068 | bne ->fff_fallback
1069 | ffgccheck // May allocate a string: keep GC in bounds.
1070 | mr CARG1, L
1071 | mr CARG2, BASE
1072 | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
1073 | // Returns GCstr *.
1074 | evmergelo STR:CRET1, TISSTR, STR:CRET1
1075 | b ->fff_restv
1076 |
1077 |//-- Base library: iterators -------------------------------------------
1078 |
|// next(t [,k]): delegate traversal to lj_tab_next; missing key means
|// "start of traversal" (nil).
1079 |.ffunc next
1080 | cmplwi NARGS8:RC, 8
1081 | evldd CARG2, 0(BASE)
1082 | blt ->fff_fallback
1083 | evstddx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
1084 | checktab TAB:CARG2
1085 | lwz PC, FRAME_PC(BASE)
1086 | checkfail ->fff_fallback
1087 | stw BASE, L->base // Add frame since C call can throw.
1088 | mr CARG1, L
1089 | stw BASE, L->top // Dummy frame length is ok.
1090 | la CARG3, 8(BASE)
1091 | stw PC, SAVE_PC
1092 | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
1093 | // Returns 0 at end of traversal.
1094 | cmplwi CRET1, 0
1095 | evmr CRET1, TISNIL
1096 | beq ->fff_restv // End of traversal: return nil.
1097 | evldd TMP0, 8(BASE) // Copy key and value to results.
1098 | la RA, -8(BASE)
1099 | evldd TMP1, 16(BASE)
1100 | evstdd TMP0, 0(RA)
1101 | li RD, (2+1)*8
1102 | evstdd TMP1, 8(RA)
1103 | b ->fff_res
1104 |
|// pairs(t): returns next (from upvalue[0]), t, nil.
|// With LJ_52 semantics a __pairs metamethod forces the fallback.
1105 |.ffunc_1 pairs
1106 | checktab TAB:CARG1
1107 | lwz PC, FRAME_PC(BASE)
1108 | checkfail ->fff_fallback
1109#if LJ_52
1110 | lwz TAB:TMP2, TAB:CARG1->metatable
1111 | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
1112 | cmplwi TAB:TMP2, 0
1113 | la RA, -8(BASE)
1114 | bne ->fff_fallback
1115#else
1116 | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
1117 | la RA, -8(BASE)
1118#endif
1119 | evstdd TISNIL, 8(BASE)
1120 | li RD, (3+1)*8
1121 | evstdd CFUNC:TMP0, 0(RA)
1122 | b ->fff_res
1123 |
|// Iterator function produced by ipairs(): (t, i) -> i+1, t[i+1] or no
|// results at the end. Array part is handled inline, hash part via C.
1124 |.ffunc_2 ipairs_aux
1125 | checktab TAB:CARG1
1126 | lwz PC, FRAME_PC(BASE)
1127 | checkfail ->fff_fallback
1128 | checknum CARG2
1129 | lus TMP3, 0x3ff0 // Double 1.0 (high word).
1130 | checkfail ->fff_fallback
1131 | efdctsi TMP2, CARG2
1132 | lwz TMP0, TAB:CARG1->asize
1133 | evmergelo TMP3, TMP3, ZERO
1134 | lwz TMP1, TAB:CARG1->array
1135 | efdadd CARG2, CARG2, TMP3 // Next index as a double: i+1.0.
1136 | addi TMP2, TMP2, 1
1137 | la RA, -8(BASE)
1138 | cmplw TMP0, TMP2
1139 | slwi TMP3, TMP2, 3
1140 | evstdd CARG2, 0(RA)
1141 | ble >2 // Not in array part?
1142 | evlddx TMP1, TMP1, TMP3
1143 |1:
1144 | checknil TMP1
1145 | li RD, (0+1)*8
1146 | checkok ->fff_res // End of iteration, return 0 results.
1147 | li RD, (2+1)*8
1148 | evstdd TMP1, 8(RA)
1149 | b ->fff_res
1150 |2: // Check for empty hash part first. Otherwise call C function.
1151 | lwz TMP0, TAB:CARG1->hmask
1152 | cmplwi TMP0, 0
1153 | li RD, (0+1)*8
1154 | beq ->fff_res
1155 | mr CARG2, TMP2
1156 | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
1157 | // Returns cTValue * or NULL.
1158 | cmplwi CRET1, 0
1159 | li RD, (0+1)*8
1160 | beq ->fff_res
1161 | evldd TMP1, 0(CRET1)
1162 | b <1
1163 |
|// ipairs(t): returns ipairs_aux (from upvalue[0]), t, 0.
1164 |.ffunc_1 ipairs
1165 | checktab TAB:CARG1
1166 | lwz PC, FRAME_PC(BASE)
1167 | checkfail ->fff_fallback
1168#if LJ_52
1169 | lwz TAB:TMP2, TAB:CARG1->metatable
1170 | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
1171 | cmplwi TAB:TMP2, 0
1172 | la RA, -8(BASE)
1173 | bne ->fff_fallback
1174#else
1175 | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
1176 | la RA, -8(BASE)
1177#endif
1178 | evsplati TMP1, 0 // Number 0 as initial index.
1179 | li RD, (3+1)*8
1180 | evstdd TMP1, 8(BASE)
1181 | evstdd CFUNC:TMP0, 0(RA)
1182 | b ->fff_res
1183 |
1184 |//-- Base library: catch errors ----------------------------------------
1185 |
|// pcall(f, ...): shift args down and re-dispatch the call of f with a
|// FRAME_PCALL frame; PC's low bit records the active-hook state.
1186 |.ffunc pcall
1187 | cmplwi NARGS8:RC, 8
1188 | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
1189 | blt ->fff_fallback
1190 | mr TMP2, BASE
1191 | la BASE, 8(BASE)
1192 | // Remember active hook before pcall.
1193 | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
1194 | subi NARGS8:RC, NARGS8:RC, 8
1195 | addi PC, TMP3, 8+FRAME_PCALL
1196 | b ->vm_call_dispatch
1197 |
|// xpcall(f, handler, ...): like pcall, but swaps f and the traceback
|// handler on the stack so the handler sits below the called frame.
1198 |.ffunc_2 xpcall
1199 | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
1200 | mr TMP2, BASE
1201 | checkfunc CARG2 // Traceback must be a function.
1202 | checkfail ->fff_fallback
1203 | la BASE, 16(BASE)
1204 | // Remember active hook before pcall.
1205 | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
1206 | evstdd CARG2, 0(TMP2) // Swap function and traceback.
1207 | subi NARGS8:RC, NARGS8:RC, 16
1208 | evstdd CARG1, 8(TMP2)
1209 | addi PC, TMP3, 16+FRAME_PCALL
1210 | b ->vm_call_dispatch
1211 |
1212 |//-- Coroutine library --------------------------------------------------
1213 |
|// Shared implementation of coroutine.resume (resume=1, explicit thread
|// arg) and coroutine.wrap's aux function (resume=0, thread in upvalue).
1214 |.macro coroutine_resume_wrap, resume
1215 |.if resume
1216 |.ffunc_1 coroutine_resume
1217 | evmergehi TMP0, L:CARG1, L:CARG1
1218 |.else
1219 |.ffunc coroutine_wrap_aux
1220 | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
1221 |.endif
1222 |.if resume
1223 | cmpwi TMP0, LJ_TTHREAD
1224 | bne ->fff_fallback
1225 |.endif
|// Validate the coroutine state entirely in condition registers:
|// resumable iff not (st>LUA_YIELD || cframe!=0 ||
|// (base==top && st!=LUA_YIELD)) and no stack overflow.
1226 | lbz TMP0, L:CARG1->status
1227 | lwz TMP1, L:CARG1->cframe
1228 | lwz CARG2, L:CARG1->top
1229 | cmplwi cr0, TMP0, LUA_YIELD
1230 | lwz TMP2, L:CARG1->base
1231 | cmplwi cr1, TMP1, 0
1232 | lwz TMP0, L:CARG1->maxstack
1233 | cmplw cr7, CARG2, TMP2
1234 | lwz PC, FRAME_PC(BASE)
1235 | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
1236 | add TMP2, CARG2, NARGS8:RC
1237 | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
1238 | cmplw cr1, TMP2, TMP0
1239 | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
1240 | stw PC, SAVE_PC
1241 | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
1242 | stw BASE, L->base
1243 | blt cr6, ->fff_fallback
1244 |1:
1245 |.if resume
1246 | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
1247 | subi NARGS8:RC, NARGS8:RC, 8
1248 | subi TMP2, TMP2, 8
1249 |.endif
1250 | stw TMP2, L:CARG1->top
1251 | li TMP1, 0
1252 | stw BASE, L->top
1253 |2: // Move args to coroutine.
1254 | cmpw TMP1, NARGS8:RC
1255 | evlddx TMP0, BASE, TMP1
1256 | beq >3
1257 | evstddx TMP0, CARG2, TMP1
1258 | addi TMP1, TMP1, 8
1259 | b <2
1260 |3:
1261 | li CARG3, 0
1262 | mr L:SAVE0, L:CARG1 // Keep coroutine across the call.
1263 | li CARG4, 0
1264 | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
1265 | // Returns thread status.
1266 |4:
1267 | lwz TMP2, L:SAVE0->base
1268 | cmplwi CRET1, LUA_YIELD
1269 | lwz TMP3, L:SAVE0->top
1270 | li_vmstate INTERP
1271 | lwz BASE, L->base
1272 | st_vmstate
1273 | bgt >8 // Status > LUA_YIELD: error in coroutine.
1274 | sub RD, TMP3, TMP2
1275 | lwz TMP0, L->maxstack
1276 | cmplwi RD, 0
1277 | add TMP1, BASE, RD
1278 | beq >6 // No results?
1279 | cmplw TMP1, TMP0
1280 | li TMP1, 0
1281 | bgt >9 // Need to grow stack?
1282 |
1283 | subi TMP3, RD, 8
1284 | stw TMP2, L:SAVE0->top // Clear coroutine stack.
1285 |5: // Move results from coroutine.
1286 | cmplw TMP1, TMP3
1287 | evlddx TMP0, TMP2, TMP1
1288 | evstddx TMP0, BASE, TMP1
1289 | addi TMP1, TMP1, 8
1290 | bne <5
1291 |6:
1292 | andi. TMP0, PC, FRAME_TYPE
1293 |.if resume
1294 | li TMP1, LJ_TTRUE
1295 | la RA, -8(BASE)
1296 | stw TMP1, -8(BASE) // Prepend true to results.
1297 | addi RD, RD, 16
1298 |.else
1299 | mr RA, BASE
1300 | addi RD, RD, 8
1301 |.endif
1302 |7:
1303 | stw PC, SAVE_PC
1304 | mr MULTRES, RD
1305 | beq ->BC_RET_Z
1306 | b ->vm_return
1307 |
1308 |8: // Coroutine returned with error (at co->top-1).
1309 |.if resume
1310 | andi. TMP0, PC, FRAME_TYPE
1311 | la TMP3, -8(TMP3)
1312 | li TMP1, LJ_TFALSE
1313 | evldd TMP0, 0(TMP3)
1314 | stw TMP3, L:SAVE0->top // Remove error from coroutine stack.
1315 | li RD, (2+1)*8
1316 | stw TMP1, -8(BASE) // Prepend false to results.
1317 | la RA, -8(BASE)
1318 | evstdd TMP0, 0(BASE) // Copy error message.
1319 | b <7
1320 |.else
1321 | mr CARG1, L
1322 | mr CARG2, L:SAVE0
|// wrap: rethrow the coroutine error in the caller (does not return).
1323 | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
1324 |.endif
1325 |
1326 |9: // Handle stack expansion on return from yield.
1327 | mr CARG1, L
1328 | srwi CARG2, RD, 3
1329 | bl extern lj_state_growstack // (lua_State *L, int n)
1330 | li CRET1, 0 // Retry as if resume returned LUA_OK.
1331 | b <4
1332 |.endmacro
1333 |
1334 | coroutine_resume_wrap 1 // coroutine.resume
1335 | coroutine_resume_wrap 0 // coroutine.wrap
1336 |
|// coroutine.yield(...): only valid when resumed from C (CFRAME_RESUME);
|// stores status and unwinds back to the vm_resume caller.
1337 |.ffunc coroutine_yield
1338 | lwz TMP0, L->cframe
1339 | add TMP1, BASE, NARGS8:RC
1340 | stw BASE, L->base
1341 | andi. TMP0, TMP0, CFRAME_RESUME
1342 | stw TMP1, L->top
1343 | li CRET1, LUA_YIELD
1344 | beq ->fff_fallback
1345 | stw ZERO, L->cframe
1346 | stb CRET1, L->status
1347 | b ->vm_leave_unw
1348 |
1349 |//-- Math library -------------------------------------------------------
1350 |
1351 |.ffunc_n math_abs
1352 | efdabs CRET1, CARG1
1353 | // Fallthrough.
1354 |
|// Common fast-function return paths. All fast functions funnel through
|// here to store results and re-dispatch or return.
1355 |->fff_restv:
1356 | // CRET1 = TValue result.
1357 | lwz PC, FRAME_PC(BASE)
1358 | la RA, -8(BASE)
1359 | evstdd CRET1, 0(RA)
1360 |->fff_res1:
1361 | // RA = results, PC = return.
1362 | li RD, (1+1)*8
1363 |->fff_res:
1364 | // RA = results, RD = (nresults+1)*8, PC = return.
1365 | andi. TMP0, PC, FRAME_TYPE
1366 | mr MULTRES, RD
1367 | bne ->vm_return // Non-Lua frame: generic return.
1368 | lwz INS, -4(PC)
1369 | decode_RB8 RB, INS
1370 |5:
1371 | cmplw RB, RD // More results expected?
1372 | decode_RA8 TMP0, INS
1373 | bgt >6
1374 | ins_next1
1375 | // Adjust BASE. KBASE is assumed to be set for the calling frame.
1376 | sub BASE, RA, TMP0
1377 | ins_next2
1378 |
1379 |6: // Fill up results with nil.
1380 | subi TMP1, RD, 8
1381 | addi RD, RD, 8
1382 | evstddx TISNIL, RA, TMP1
1383 | b <5
1384 |
|// One-argument math function via the C library (result in CRET1/CRET2).
1385 |.macro math_extern, func
1386 | .ffunc math_ .. func
1387 | cmplwi NARGS8:RC, 8
1388 | evldd CARG2, 0(BASE)
1389 | blt ->fff_fallback
1390 | checknum CARG2
1391 | evmergehi CARG1, CARG2, CARG2 // Split double into GPR pair.
1392 | checkfail ->fff_fallback
1393 | bl extern func@plt
1394 | evmergelo CRET1, CRET1, CRET2 // Recombine double result.
1395 | b ->fff_restv
1396 |.endmacro
1397 |
|// Two-argument math function via the C library.
1398 |.macro math_extern2, func
1399 | .ffunc math_ .. func
1400 | cmplwi NARGS8:RC, 16
1401 | evldd CARG2, 0(BASE)
1402 | evldd CARG4, 8(BASE)
1403 | blt ->fff_fallback
1404 | evmergehi CARG1, CARG4, CARG2
1405 | checknum CARG1
1406 | evmergehi CARG3, CARG4, CARG4
1407 | checkanyfail ->fff_fallback
1408 | bl extern func@plt
1409 | evmergelo CRET1, CRET1, CRET2
1410 | b ->fff_restv
1411 |.endmacro
1412 |
|// math.floor/math.ceil via the soft-float rounding helpers below
|// (->vm_floor_hilo / ->vm_ceil_hilo).
1413 |.macro math_round, func
1414 | .ffunc math_ .. func
1415 | cmplwi NARGS8:RC, 8
1416 | evldd CARG2, 0(BASE)
1417 | blt ->fff_fallback
1418 | checknum CARG2
1419 | evmergehi CARG1, CARG2, CARG2
1420 | checkfail ->fff_fallback
1421 | lwz PC, FRAME_PC(BASE)
1422 | bl ->vm_..func.._hilo;
1423 | la RA, -8(BASE)
1424 | evstdd CRET2, 0(RA)
1425 | b ->fff_res1
1426 |.endmacro
1427 |
1428 | math_round floor
1429 | math_round ceil
1430 |
1431 | math_extern sqrt
1432 |
|// math.log: open-coded instead of math_extern because it requires
|// exactly one argument (no inline handling of the base argument).
1433 |.ffunc math_log
1434 | cmplwi NARGS8:RC, 8
1435 | evldd CARG2, 0(BASE)
1436 | bne ->fff_fallback // Need exactly 1 argument.
1437 | checknum CARG2
1438 | evmergehi CARG1, CARG2, CARG2
1439 | checkfail ->fff_fallback
1440 | bl extern log@plt
1441 | evmergelo CRET1, CRET1, CRET2
1442 | b ->fff_restv
1443 |
1444 | math_extern log10
1445 | math_extern exp
1446 | math_extern sin
1447 | math_extern cos
1448 | math_extern tan
1449 | math_extern asin
1450 | math_extern acos
1451 | math_extern atan
1452 | math_extern sinh
1453 | math_extern cosh
1454 | math_extern tanh
1455 | math_extern2 pow
1456 | math_extern2 atan2
1457 | math_extern2 fmod
1458 |
|// math.deg and math.rad share this code: both multiply by a constant
|// stored in upvalue[0] (180/pi resp. pi/180).
1459 |->ff_math_deg:
1460 |.ffunc_n math_rad
1461 | evldd CARG2, CFUNC:RB->upvalue[0]
1462 | efdmul CRET1, CARG1, CARG2
1463 | b ->fff_restv
1464 |
1465 |.ffunc math_ldexp
1466 | cmplwi NARGS8:RC, 16
1467 | evldd CARG2, 0(BASE)
1468 | evldd CARG4, 8(BASE)
1469 | blt ->fff_fallback
1470 | evmergehi CARG1, CARG4, CARG2
1471 | checknum CARG1
1472 | checkanyfail ->fff_fallback
1473 | efdctsi CARG3, CARG4 // Exponent as int argument.
1474 | bl extern ldexp@plt
1475 | evmergelo CRET1, CRET1, CRET2
1476 | b ->fff_restv
1477 |
|// math.frexp: C writes the exponent through a pointer into the global
|// scratch TValue (tmptv); returns mantissa and exponent.
1478 |.ffunc math_frexp
1479 | cmplwi NARGS8:RC, 8
1480 | evldd CARG2, 0(BASE)
1481 | blt ->fff_fallback
1482 | checknum CARG2
1483 | evmergehi CARG1, CARG2, CARG2
1484 | checkfail ->fff_fallback
1485 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
1486 | lwz PC, FRAME_PC(BASE)
1487 | bl extern frexp@plt
1488 | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
1489 | evmergelo CRET1, CRET1, CRET2
1490 | efdcfsi CRET2, TMP1 // Convert int exponent to double.
1491 | la RA, -8(BASE)
1492 | evstdd CRET1, 0(RA)
1493 | li RD, (2+1)*8
1494 | evstdd CRET2, 8(RA)
1495 | b ->fff_res
1496 |
|// math.modf: C writes the integer part directly to the first result
|// slot (-8(BASE) == 0(RA)); the fraction goes to the second slot.
1497 |.ffunc math_modf
1498 | cmplwi NARGS8:RC, 8
1499 | evldd CARG2, 0(BASE)
1500 | blt ->fff_fallback
1501 | checknum CARG2
1502 | evmergehi CARG1, CARG2, CARG2
1503 | checkfail ->fff_fallback
1504 | la CARG3, -8(BASE)
1505 | lwz PC, FRAME_PC(BASE)
1506 | bl extern modf@plt
1507 | evmergelo CRET1, CRET1, CRET2
1508 | la RA, -8(BASE)
1509 | evstdd CRET1, 0(BASE) // 0(BASE) is the 2nd result slot (RA+8).
1510 | li RD, (2+1)*8
1511 | b ->fff_res
1512 |
|// math.min/math.max: fold over all numeric arguments with the given
|// SPE compare (efdtstlt/efdtstgt) and evsel to pick the winner.
1513 |.macro math_minmax, name, cmpop
1514 | .ffunc_1 name
1515 | checknum CARG1
1516 | li TMP1, 8
1517 | checkfail ->fff_fallback
1518 |1:
1519 | evlddx CARG2, BASE, TMP1
1520 | cmplw cr1, TMP1, NARGS8:RC
1521 | checknum CARG2
1522 | bge cr1, ->fff_restv // Ok, since CRET1 = CARG1.
1523 | checkfail ->fff_fallback
1524 | cmpop CARG2, CARG1
1525 | addi TMP1, TMP1, 8
1526 | crmove 4*cr0+lt, 4*cr0+gt
1527 | evsel CARG1, CARG2, CARG1
1528 | b <1
1529 |.endmacro
1530 |
1531 | math_minmax math_min, efdtstlt
1532 | math_minmax math_max, efdtstgt
1533 |
1534 |//-- String library -----------------------------------------------------
1535 |
1536 |.ffunc_1 string_len
1537 | checkstr STR:CARG1
1538 | checkfail ->fff_fallback
1539 | lwz TMP0, STR:CARG1->len
1540 | efdcfsi CRET1, TMP0 // Length as a double result.
1541 | b ->fff_restv
1542 |
1543 |.ffunc string_byte // Only handle the 1-arg case here.
1544 | cmplwi NARGS8:RC, 8
1545 | evldd STR:CARG1, 0(BASE)
1546 | bne ->fff_fallback // Need exactly 1 argument.
1547 | checkstr STR:CARG1
1548 | la RA, -8(BASE)
1549 | checkfail ->fff_fallback
1550 | lwz TMP0, STR:CARG1->len
1551 | li RD, (0+1)*8 // 0 results for the empty string.
1552 | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
1553 | li TMP2, (1+1)*8
1554 | cmplwi TMP0, 0
1555 | lwz PC, FRAME_PC(BASE)
1556 | efdcfsi CRET1, TMP1
1557 | iseleq RD, RD, TMP2 // Select 0 or 1 results by length.
1558 | evstdd CRET1, 0(RA)
1559 | b ->fff_res
1560 |
1561 |.ffunc string_char // Only handle the 1-arg case here.
1562 | ffgccheck
1563 | cmplwi NARGS8:RC, 8
1564 | evldd CARG1, 0(BASE)
1565 | bne ->fff_fallback // Exactly 1 argument.
1566 | checknum CARG1
1567 | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
1568 | checkfail ->fff_fallback
1569 | efdctsiz TMP0, CARG1
1570 | li CARG3, 1
1571 | cmplwi TMP0, 255
1572 | stb TMP0, 0(CARG2) // Single byte in the scratch TValue.
1573 | bgt ->fff_fallback // Out of byte range: error via fallback.
|// Shared tail: intern CARG2/CARG3 (buf/len) as a GCstr and return it.
|// Callers must have run ffgccheck and loaded PC.
1574 |->fff_newstr:
1575 | mr CARG1, L
1576 | stw BASE, L->base
1577 | stw PC, SAVE_PC
1578 | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
1579 | // Returns GCstr *.
1580 | lwz BASE, L->base
1581 | evmergelo STR:CRET1, TISSTR, STR:CRET1
1582 | b ->fff_restv
1583 |
|// string.sub(s, i [,j]): normalize negative/overflowing indices, then
|// intern the substring via ->fff_newstr.
1584 |.ffunc string_sub
1585 | ffgccheck
1586 | cmplwi NARGS8:RC, 16
1587 | evldd CARG3, 16(BASE)
1588 | evldd STR:CARG1, 0(BASE)
1589 | blt ->fff_fallback
1590 | evldd CARG2, 8(BASE)
1591 | li TMP2, -1 // Default end index: -1.
1592 | beq >1 // Exactly 2 args: skip 3rd arg check.
1593 | checknum CARG3
1594 | checkfail ->fff_fallback
1595 | efdctsiz TMP2, CARG3
1596 |1:
1597 | checknum CARG2
1598 | checkfail ->fff_fallback
1599 | checkstr STR:CARG1
1600 | efdctsiz TMP1, CARG2
1601 | checkfail ->fff_fallback
1602 | lwz TMP0, STR:CARG1->len
1603 | cmplw TMP0, TMP2 // len < end? (unsigned compare)
1604 | add TMP3, TMP2, TMP0
1605 | blt >5
1606 |2:
1607 | cmpwi TMP1, 0 // start <= 0?
1608 | add TMP3, TMP1, TMP0
1609 | ble >7
1610 |3:
1611 | sub. CARG3, TMP2, TMP1
1612 | addi CARG2, STR:CARG1, #STR-1 // Pointer to s[start-1].
1613 | addi CARG3, CARG3, 1
1614 | add CARG2, CARG2, TMP1
1615 | isellt CARG3, r0, CARG3 // Clamp negative length to 0.
1616 | b ->fff_newstr
1617 |
1618 |5: // Negative end or overflow.
1619 | cmpw TMP0, TMP2
1620 | addi TMP3, TMP3, 1
1621 | iselgt TMP2, TMP3, TMP0 // end = end > len ? len : end+len+1
1622 | b <2
1623 |
1624 |7: // Negative start or underflow.
1625 | cmpwi cr1, TMP3, 0
1626 | iseleq TMP1, r0, TMP3
1627 | isel TMP1, r0, TMP1, 4*cr1+lt
1628 | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
1629 | b <3
1630 |
|// string.rep(s, n): inline only when #s == 1; fills the global tmpbuf
|// with the char and interns it. Longer strings go to the C fallback.
1631 |.ffunc string_rep // Only handle the 1-char case inline.
1632 | ffgccheck
1633 | cmplwi NARGS8:RC, 16
1634 | evldd CARG1, 0(BASE)
1635 | evldd CARG2, 8(BASE)
1636 | bne ->fff_fallback // Exactly 2 arguments.
1637 | checknum CARG2
1638 | checkfail ->fff_fallback
1639 | checkstr STR:CARG1
1640 | efdctsiz CARG3, CARG2
1641 | checkfail ->fff_fallback
1642 | lwz TMP0, STR:CARG1->len
1643 | cmpwi CARG3, 0
1644 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
1645 | ble >2 // Count <= 0? (or non-int)
1646 | cmplwi TMP0, 1
1647 | subi TMP2, CARG3, 1
1648 | blt >2 // Zero length string?
1649 | cmplw cr1, TMP1, CARG3 // Buffer big enough for n bytes?
1650 | bne ->fff_fallback // Fallback for > 1-char strings.
1651 | lbz TMP0, STR:CARG1[1]
1652 | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
1653 | blt cr1, ->fff_fallback
1654 |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
1655 | cmplwi TMP2, 0
1656 | stbx TMP0, CARG2, TMP2
1657 | subi TMP2, TMP2, 1
1658 | bne <1
1659 | b ->fff_newstr
1660 |2: // Return empty string.
1661 | la STR:CRET1, DISPATCH_GL(strempty)(DISPATCH)
1662 | evmergelo CRET1, TISSTR, STR:CRET1
1663 | b ->fff_restv
1664 |
|// string.reverse(s): byte-reverse into the global tmpbuf, then intern.
1665 |.ffunc string_reverse
1666 | ffgccheck
1667 | cmplwi NARGS8:RC, 8
1668 | evldd CARG1, 0(BASE)
1669 | blt ->fff_fallback
1670 | checkstr STR:CARG1
1671 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
1672 | checkfail ->fff_fallback
1673 | lwz CARG3, STR:CARG1->len
1674 | la CARG1, #STR(STR:CARG1) // Pointer to string data.
1675 | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
1676 | li TMP2, 0
1677 | cmplw TMP1, CARG3
1678 | subi TMP3, CARG3, 1
1679 | blt ->fff_fallback // Buffer too small.
1680 |1: // Reverse string copy.
1681 | cmpwi TMP3, 0
1682 | lbzx TMP1, CARG1, TMP2
1683 | blt ->fff_newstr
1684 | stbx TMP1, CARG2, TMP3
1685 | subi TMP3, TMP3, 1
1686 | addi TMP2, TMP2, 1
1687 | b <1
1688 |
|// string.lower/string.upper: bytes in [lo, lo+25] get bit 0x20 toggled
|// (ASCII case flip); everything else copies through unchanged.
1689 |.macro ffstring_case, name, lo
1690 | .ffunc name
1691 | ffgccheck
1692 | cmplwi NARGS8:RC, 8
1693 | evldd CARG1, 0(BASE)
1694 | blt ->fff_fallback
1695 | checkstr STR:CARG1
1696 | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
1697 | checkfail ->fff_fallback
1698 | lwz CARG3, STR:CARG1->len
1699 | la CARG1, #STR(STR:CARG1)
1700 | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
1701 | cmplw TMP1, CARG3
1702 | li TMP2, 0
1703 | blt ->fff_fallback
1704 |1: // ASCII case conversion.
1705 | cmplw TMP2, CARG3
1706 | lbzx TMP1, CARG1, TMP2
1707 | bge ->fff_newstr
1708 | subi TMP0, TMP1, lo
1709 | xori TMP3, TMP1, 0x20
1710 | cmplwi TMP0, 26
1711 | isellt TMP1, TMP3, TMP1
1712 | stbx TMP1, CARG2, TMP2
1713 | addi TMP2, TMP2, 1
1714 | b <1
1715 |.endmacro
1716 |
1717 |ffstring_case string_lower, 65
1718 |ffstring_case string_upper, 97
1719 |
1720 |//-- Table library ------------------------------------------------------
1721 |
1722 |.ffunc_1 table_getn
1723 | checktab CARG1
1724 | checkfail ->fff_fallback
1725 | bl extern lj_tab_len // (GCtab *t)
1726 | // Returns uint32_t (but less than 2^31).
1727 | efdcfsi CRET1, CRET1
1728 | b ->fff_restv
1729 |
1730 |//-- Bit library --------------------------------------------------------
1731 |
|// Common prologue for bit.* functions: check the number arg and add
|// the TOBIT bias (2^52+2^51) so the int32 value lands in the lo word.
1732 |.macro .ffunc_bit, name
1733 | .ffunc_n bit_..name
1734 | efdadd CARG1, CARG1, TOBIT
1735 |.endmacro
1736 |
1737 |.ffunc_bit tobit
|// Shared return: convert the int32 in CARG1 back to a double result.
1738 |->fff_resbit:
1739 | efdcfsi CRET1, CARG1
1740 | b ->fff_restv
1741 |
|// Variadic bitwise op: fold ins (and/or/xor) over all arguments.
1742 |.macro .ffunc_bit_op, name, ins
1743 | .ffunc_bit name
1744 | li TMP1, 8
1745 |1:
1746 | evlddx CARG2, BASE, TMP1
1747 | cmplw cr1, TMP1, NARGS8:RC
1748 | checknum CARG2
1749 | bge cr1, ->fff_resbit // All args consumed: return.
1750 | checkfail ->fff_fallback
1751 | efdadd CARG2, CARG2, TOBIT
1752 | ins CARG1, CARG1, CARG2
1753 | addi TMP1, TMP1, 8
1754 | b <1
1755 |.endmacro
1756 |
1757 |.ffunc_bit_op band, and
1758 |.ffunc_bit_op bor, or
1759 |.ffunc_bit_op bxor, xor
1760 |
1761 |.ffunc_bit bswap
|// Byte-swap via rotate+insert (classic 3-instruction PPC idiom).
1762 | rotlwi TMP0, CARG1, 8
1763 | rlwimi TMP0, CARG1, 24, 0, 7
1764 | rlwimi TMP0, CARG1, 24, 16, 23
1765 | efdcfsi CRET1, TMP0
1766 | b ->fff_restv
1767 |
1768 |.ffunc_bit bnot
1769 | not TMP0, CARG1
1770 | efdcfsi CRET1, TMP0
1771 | b ->fff_restv
1772 |
|// Shift/rotate ops. shmod==1 masks the shift count to 0..31;
|// shmod==2 negates it (ror implemented as rotlw by -n).
1773 |.macro .ffunc_bit_sh, name, ins, shmod
1774 | .ffunc_nn bit_..name
1775 | efdadd CARG2, CARG2, TOBIT
1776 | efdadd CARG1, CARG1, TOBIT
1777 |.if shmod == 1
1778 | rlwinm CARG2, CARG2, 0, 27, 31
1779 |.elif shmod == 2
1780 | neg CARG2, CARG2
1781 |.endif
1782 | ins TMP0, CARG1, CARG2
1783 | efdcfsi CRET1, TMP0
1784 | b ->fff_restv
1785 |.endmacro
1786 |
1787 |.ffunc_bit_sh lshift, slw, 1
1788 |.ffunc_bit_sh rshift, srw, 1
1789 |.ffunc_bit_sh arshift, sraw, 1
1790 |.ffunc_bit_sh rol, rotlw, 0
1791 |.ffunc_bit_sh ror, rotlw, 2
1792 |
1793 |//-----------------------------------------------------------------------
1794 |
1795 |->fff_fallback: // Call fast function fallback handler.
1796 | // BASE = new base, RB = CFUNC, RC = nargs*8
1797 | lwz TMP3, CFUNC:RB->f
1798 | add TMP1, BASE, NARGS8:RC
1799 | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
1800 | addi TMP0, TMP1, 8*LUA_MINSTACK
1801 | lwz TMP2, L->maxstack
1802 | stw PC, SAVE_PC // Redundant (but a defined value).
1803 | cmplw TMP0, TMP2
1804 | stw BASE, L->base
1805 | stw TMP1, L->top
1806 | mr CARG1, L
1807 | bgt >5 // Need to grow stack.
1808 | mtctr TMP3
1809 | bctrl // (lua_State *L)
1810 | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
1811 | lwz BASE, L->base
1812 | cmpwi CRET1, 0
1813 | slwi RD, CRET1, 3
1814 | la RA, -8(BASE)
1815 | bgt ->fff_res // Returned nresults+1?
1816 |1: // Returned 0 or -1: retry fast path.
1817 | lwz TMP0, L->top
1818 | lwz LFUNC:RB, FRAME_FUNC(BASE)
1819 | sub NARGS8:RC, TMP0, BASE
1820 | bne ->vm_call_tail // Returned -1?
1821 | ins_callt // Returned 0: retry fast path.
1822 |
1823 |// Reconstruct previous base for vmeta_call during tailcall.
1824 |->vm_call_tail:
1825 | andi. TMP0, PC, FRAME_TYPE
1826 | rlwinm TMP1, PC, 0, 0, 28 // Mask frame type bits from delta.
1827 | bne >3
1828 | lwz INS, -4(PC)
1829 | decode_RA8 TMP1, INS
1830 | addi TMP1, TMP1, 8
1831 |3:
1832 | sub TMP2, BASE, TMP1
1833 | b ->vm_call_dispatch // Resolve again for tailcall.
1834 |
1835 |5: // Grow stack for fallback handler.
1836 | li CARG2, LUA_MINSTACK
1837 | bl extern lj_state_growstack // (lua_State *L, int n)
1838 | lwz BASE, L->base
1839 | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
1840 | b <1
1841 |
1842 |->fff_gcstep: // Call GC step function.
1843 | // BASE = new base, RC = nargs*8
1844 | mflr SAVE0 // Preserve return address across the C call.
1845 | stw BASE, L->base
1846 | add TMP0, BASE, NARGS8:RC
1847 | stw PC, SAVE_PC // Redundant (but a defined value).
1848 | stw TMP0, L->top
1849 | mr CARG1, L
1850 | bl extern lj_gc_step // (lua_State *L)
1851 | lwz BASE, L->base // GC may have moved the stack.
1852 | mtlr SAVE0
1853 | lwz TMP0, L->top
1854 | sub NARGS8:RC, TMP0, BASE
1855 | lwz CFUNC:RB, FRAME_FUNC(BASE)
1856 | blr
1857 |
1858 |//-----------------------------------------------------------------------
1859 |//-- Special dispatch targets -------------------------------------------
1860 |//-----------------------------------------------------------------------
1861 |
1862 |->vm_record: // Dispatch target for recording phase.
1863 |.if JIT
1864 | NYI // JIT compilation not implemented for the SPE port.
1865 |.endif
1866 |
1867 |->vm_rethook: // Dispatch target for return hooks.
1868 | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
1869 | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
1870 | beq >1
1871 |5: // Re-dispatch to static ins.
1872 | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS.
1873 | lwzx TMP0, DISPATCH, TMP1
1874 | mtctr TMP0
1875 | bctr
1876 |
1877 |->vm_inshook: // Dispatch target for instr/line hooks.
1878 | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
1879 | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
1880 | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
1881 | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
1882 | bne <5
1883 |
1884 | cmpwi cr1, TMP0, 0 // Line hook enabled?
1885 | addic. TMP2, TMP2, -1 // Count hook due?
1886 | beq cr1, <5
1887 | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
1888 | beq >1
1889 | bge cr1, <5
1890 |1: // Call the hook dispatcher.
1891 | mr CARG1, L
1892 | stw MULTRES, SAVE_MULTRES
1893 | mr CARG2, PC
1894 | stw BASE, L->base
1895 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
1896 | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
1897 |3:
1898 | lwz BASE, L->base
1899 |4: // Re-dispatch to static ins.
1900 | lwz INS, -4(PC)
1901 | decode_OP4 TMP1, INS
1902 | decode_RB8 RB, INS
1903 | addi TMP1, TMP1, GG_DISP2STATIC
1904 | decode_RD8 RD, INS
1905 | lwzx TMP0, DISPATCH, TMP1
1906 | decode_RA8 RA, INS
1907 | decode_RC8 RC, INS
1908 | mtctr TMP0
1909 | bctr
1910 |
1911 |->cont_hook: // Continue from hook yield.
1912 | addi PC, PC, 4
1913 | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
1914 | b <4
1915 |
1916 |->vm_hotloop: // Hot loop counter underflow.
1917 |.if JIT
1918 | NYI
1919 |.endif
1920 |
1921 |->vm_callhook: // Dispatch target for call hooks.
1922 | mr CARG2, PC
1923 |.if JIT
1924 | b >1
1925 |.endif
1926 |
1927 |->vm_hotcall: // Hot call counter underflow.
1928 |.if JIT
1929 | ori CARG2, PC, 1 // Low bit of PC marks a hot call.
1930 |1:
1931 |.endif
1932 | add TMP0, BASE, RC
1933 | stw PC, SAVE_PC
1934 | mr CARG1, L
1935 | stw BASE, L->base
1936 | sub RA, RA, BASE // Save RA as an offset: C call may realloc.
1937 | stw TMP0, L->top
1938 | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
1939 | // Returns ASMFunction.
1940 | lwz BASE, L->base
1941 | lwz TMP0, L->top
1942 | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
1943 | sub NARGS8:RC, TMP0, BASE
1944 | add RA, BASE, RA
1945 | lwz LFUNC:RB, FRAME_FUNC(BASE)
1946 | mtctr CRET1
1947 | bctr
1948 |
1949 |//-----------------------------------------------------------------------
1950 |//-- Trace exit handler -------------------------------------------------
1951 |//-----------------------------------------------------------------------
1952 |
1953 |->vm_exit_handler:
1954 |.if JIT
1955 | NYI
1956 |.endif
1957 |->vm_exit_interp:
1958 |.if JIT
1959 | NYI
1960 |.endif
1961 |
1962 |//-----------------------------------------------------------------------
1963 |//-- Math helper functions ----------------------------------------------
1964 |//-----------------------------------------------------------------------
1965 |
1966 |// FP value rounding. Called by math.floor/math.ceil fast functions
1967 |// and from JIT code.
1968 |//
1969 |// This can be inlined if the CPU has the frin/friz/frip/frim instructions.
1970 |// The alternative hard-float approaches have a deep dependency chain.
1971 |// The resulting latency is at least 3x-7x the double-precision FP latency
1972 |// (e500v2: 6cy, e600: 5cy, Cell: 10cy) or around 20-70 cycles.
1973 |//
1974 |// The soft-float approach is tedious, but much faster (e500v2: ~11cy/~6cy).
1975 |// However it relies on a fast way to transfer the FP value to GPRs
1976 |// (e500v2: 0cy for lo-word, 1cy for hi-word).
1977 |//
|// mode: 0 = floor, 1 = ceil, 2 = trunc. Works by masking off the
|// fractional mantissa bits and (for floor/ceil) conditionally stepping
|// away from zero when any fraction bits were set on the rounding side.
1978 |.macro vm_round, name, mode
1979 | // Used temporaries: TMP0, TMP1, TMP2, TMP3.
1980 |->name.._efd: // Input: CARG2, output: CRET2
1981 | evmergehi CARG1, CARG2, CARG2
1982 |->name.._hilo:
1983 | // Input: CARG1 (hi), CARG2 (hi, lo), output: CRET2
1984 | rlwinm TMP2, CARG1, 12, 21, 31
1985 | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
1986 | li TMP1, -1
1987 | cmplwi cr1, TMP2, 51 // 0 <= exp <= 51?
1988 | subfic TMP0, TMP2, 52
1989 | bgt cr1, >1
1990 | lus TMP3, 0xfff0
1991 | slw TMP0, TMP1, TMP0 // lomask = -1 << (52-exp)
1992 | sraw TMP1, TMP3, TMP2 // himask = (int32_t)0xfff00000 >> exp
1993 |.if mode == 2 // trunc(x):
1994 | evmergelo TMP0, TMP1, TMP0
1995 | evand CRET2, CARG2, TMP0 // hi &= himask, lo &= lomask
1996 |.else
1997 | andc TMP2, CARG2, TMP0
1998 | andc TMP3, CARG1, TMP1
1999 | or TMP2, TMP2, TMP3 // ztest = (hi&~himask) | (lo&~lomask)
2000 | srawi TMP3, CARG1, 31 // signmask = (int32_t)hi >> 31
2001 |.if mode == 0 // floor(x):
2002 | and. TMP2, TMP2, TMP3 // iszero = ((ztest & signmask) == 0)
2003 |.else // ceil(x):
2004 | andc. TMP2, TMP2, TMP3 // iszero = ((ztest & ~signmask) == 0)
2005 |.endif
2006 | and CARG2, CARG2, TMP0 // lo &= lomask
2007 | and CARG1, CARG1, TMP1 // hi &= himask
2008 | subc TMP0, CARG2, TMP0
2009 | iseleq TMP0, CARG2, TMP0 // lo = iszero ? lo : lo-lomask
2010 | sube TMP1, CARG1, TMP1
2011 | iseleq TMP1, CARG1, TMP1 // hi = iszero ? hi : hi-himask+carry
2012 | evmergelo CRET2, TMP1, TMP0
2013 |.endif
2014 | blr
2015 |1: // exp < 0 (|x| < 1) or exp > 51 (already integral, inf or nan).
2016 | bgtlr // Already done if >=2^52, +-inf or nan.
2017 |.if mode == 2 // trunc(x):
2018 | rlwinm TMP1, CARG1, 0, 0, 0 // hi = sign(x)
2019 | li TMP0, 0
2020 | evmergelo CRET2, TMP1, TMP0
2021 |.else
2022 | rlwinm TMP2, CARG1, 0, 1, 31
2023 | srawi TMP0, CARG1, 31 // signmask = (int32_t)hi >> 31
2024 | or TMP2, TMP2, CARG2 // ztest = abs(hi) | lo
2025 | lus TMP1, 0x3ff0 // Double 1.0 (high word).
2026 |.if mode == 0 // floor(x):
2027 | and. TMP2, TMP2, TMP0 // iszero = ((ztest & signmask) == 0)
2028 |.else // ceil(x):
2029 | andc. TMP2, TMP2, TMP0 // iszero = ((ztest & ~signmask) == 0)
2030 |.endif
2031 | li TMP0, 0
2032 | iseleq TMP1, r0, TMP1
2033 | rlwimi CARG1, TMP1, 0, 1, 31 // hi = sign(x) | (iszero ? 0.0 : 1.0)
2034 | evmergelo CRET2, CARG1, TMP0
2035 |.endif
2036 | blr
2037 |.endmacro
2038 |
|// Hard-float entry for floor: repack the GPR pair, call the soft-float
|// helper and return the high word in CRET1.
2039 |->vm_floor:
2040 | mflr CARG3
2041 | evmergelo CARG2, CARG1, CARG2
2042 | bl ->vm_floor_hilo
2043 | mtlr CARG3
2044 | evmergehi CRET1, CRET2, CRET2
2045 | blr
2046 |
2047 | vm_round vm_floor, 0
2048 | vm_round vm_ceil, 1
2049 |.if JIT
2050 | vm_round vm_trunc, 2
2051 |.else
2052 |->vm_trunc_efd: // Only needed by the JIT; empty stubs otherwise.
2053 |->vm_trunc_hilo:
2054 |.endif
2055 |
2056 |//-----------------------------------------------------------------------
2057 |//-- Miscellaneous functions --------------------------------------------
2058 |//-----------------------------------------------------------------------
2059 |
2060 |//-----------------------------------------------------------------------
2061 |//-- FFI helper functions -----------------------------------------------
2062 |//-----------------------------------------------------------------------
2063 |
2064 |->vm_ffi_call:
2065 |.if FFI
2066 | NYI // FFI calls not implemented for the SPE port.
2067 |.endif
2068 |
2069 |//-----------------------------------------------------------------------
2070}
2071
2072/* Generate the code for a single instruction. */
2073static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2074{
2075 int vk = 0;
2076 |=>defop:
2077
2078 switch (op) {
2079
2080 /* -- Comparison ops ---------------------------------------------------- */
2081
2082 /* Remember: all ops branch for a true comparison, fall through otherwise. */
2083
  case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
    /* Ordered numeric comparison fused with a conditional jump.
    ** Both operands' hi-words are merged into RB and type-checked in one
    ** step; any non-number operand takes the ->vmeta_comp fallback.
    ** The branch target is decoded from the JMP ins at -4(PC) and
    ** de-biased via the addis/add pair (BCBIAS_J). */
2085 | // RA = src1*8, RD = src2*8, JMP with RD = target
2086 | evlddx TMP0, BASE, RA
2087 | addi PC, PC, 4
2088 | evlddx TMP1, BASE, RD
2089 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2090 | lwz TMP2, -4(PC)
2091 | evmergehi RB, TMP0, TMP1
2092 | decode_RD4 TMP2, TMP2
2093 | checknum RB
2094 | add TMP2, TMP2, TMP3
2095 | checkanyfail ->vmeta_comp
2096 | efdcmplt TMP0, TMP1
2097     if (op == BC_ISLE || op == BC_ISGT) {
    /* <= / > also need equality: fold cr1's eq result into cr0+gt. */
2098 | efdcmpeq cr1, TMP0, TMP1
2099 | cror 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
2100     }
2101     if (op == BC_ISLT || op == BC_ISLE) {
2102 | iselgt PC, TMP2, PC
2103     } else {
2104 | iselgt PC, PC, TMP2
2105     }
2106 | ins_next
2107     break;
2108
  case BC_ISEQV: case BC_ISNEV:
    /* Generic (in)equality of two slots.  Fast path: both are numbers
    ** (efdcmpeq).  Slow path at |5 handles non-numbers via raw 64-bit
    ** compare plus type classification, and may dispatch __eq for
    ** tables/userdata.  vk selects the EQ vs NE branch sense. */
    vk = op == BC_ISEQV;
2111 | // RA = src1*8, RD = src2*8, JMP with RD = target
2112 | evlddx CARG2, BASE, RA
2113 | addi PC, PC, 4
2114 | evlddx CARG3, BASE, RD
2115 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2116 | lwz TMP2, -4(PC)
2117 | evmergehi RB, CARG2, CARG3
2118 | decode_RD4 TMP2, TMP2
2119 | checknum RB
2120 | add TMP2, TMP2, TMP3
2121 | checkanyfail >5
2122 | efdcmpeq CARG2, CARG3
2123     if (vk) {
2124 | iselgt PC, TMP2, PC
2125     } else {
2126 | iselgt PC, PC, TMP2
2127     }
2128 |1:
2129 | ins_next
2130 |
2131 |5: // Either or both types are not numbers.
    | // evcmpeq sets cr0+so on full 64-bit (type+payload) equality.
2132 | evcmpeq CARG2, CARG3
2133 | not TMP3, RB
2134 | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
2135 | crorc 4*cr7+lt, 4*cr0+so, 4*cr0+lt // 1: Same tv or different type.
2136 | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
2137 | crandc 4*cr7+gt, 4*cr0+lt, 4*cr1+gt // 2: Same type and primitive.
2138 | mr SAVE0, PC
2139     if (vk) {
2140 | isel PC, TMP2, PC, 4*cr7+gt
2141     } else {
2142 | isel TMP2, PC, TMP2, 4*cr7+gt
2143     }
2144 | cror 4*cr7+lt, 4*cr7+lt, 4*cr7+gt // 1 or 2.
2145     if (vk) {
2146 | isel PC, TMP2, PC, 4*cr0+so
2147     } else {
2148 | isel PC, PC, TMP2, 4*cr0+so
2149     }
2150 | blt cr7, <1 // Done if 1 or 2.
2151 | blt cr6, <1 // Done if not tab/ud.
2152 |
2153 | // Different tables or userdatas. Need to check __eq metamethod.
2154 | // Field metatable must be at same offset for GCtab and GCudata!
2155 | lwz TAB:TMP2, TAB:CARG2->metatable
2156 | li CARG4, 1-vk // ne = 0 or 1.
2157 | cmplwi TAB:TMP2, 0
2158 | beq <1 // No metatable?
2159 | lbz TMP2, TAB:TMP2->nomm
2160 | andi. TMP2, TMP2, 1<<MM_eq
2161 | bne <1 // Or 'no __eq' flag set?
2162 | mr PC, SAVE0 // Restore old PC.
2163 | b ->vmeta_equal // Handle __eq metamethod.
2164     break;
2165
  case BC_ISEQS: case BC_ISNES:
    /* Compare a slot against a string constant.  The constant is fetched
    ** from below KBASE (negated index), tagged with TISSTR via evmergelo,
    ** then compared as a full 64-bit value — an interned-string pointer
    ** compare plus type check in one evcmpeq. */
    vk = op == BC_ISEQS;
2168 | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
2169 | evlddx TMP0, BASE, RA
2170 | srwi RD, RD, 1
2171 | lwz INS, 0(PC)
2172 | subfic RD, RD, -4
2173 | addi PC, PC, 4
2174 | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
2175 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2176 | decode_RD4 TMP2, INS
2177 | evmergelo STR:TMP1, TISSTR, STR:TMP1
2178 | add TMP2, TMP2, TMP3
2179 | evcmpeq TMP0, STR:TMP1
2180     if (vk) {
2181 | isel PC, TMP2, PC, 4*cr0+so
2182     } else {
2183 | isel PC, PC, TMP2, 4*cr0+so
2184     }
2185 | ins_next
2186     break;
2187
  case BC_ISEQN: case BC_ISNEN:
    /* Compare a slot against a number constant.  A non-number slot jumps
    ** to |5: for EQ that means "not equal, fall through" (label placed so
    ** the iselgt is skipped); for NE it recomputes the target and takes
    ** the jump.  Note the |5 label is emitted in a different place
    ** depending on vk. */
    vk = op == BC_ISEQN;
2190 | // RA = src*8, RD = num_const*8, JMP with RD = target
2191 | evlddx TMP0, BASE, RA
2192 | addi PC, PC, 4
2193 | evlddx TMP1, KBASE, RD
2194 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2195 | lwz INS, -4(PC)
2196 | checknum TMP0
2197 | checkfail >5
2198 | efdcmpeq TMP0, TMP1
2199 |1:
2200 | decode_RD4 TMP2, INS
2201 | add TMP2, TMP2, TMP3
2202     if (vk) {
2203 | iselgt PC, TMP2, PC
2204 |5:
2205     } else {
2206 | iselgt PC, PC, TMP2
2207     }
2208 |3:
2209 | ins_next
2210     if (!vk) {
2211 |5:
    | // Non-number vs. number constant is always unequal: take the jump.
2212 | decode_RD4 TMP2, INS
2213 | add PC, TMP2, TMP3
2214 | b <3
2215     }
2216     break;
2217
  case BC_ISEQP: case BC_ISNEP:
    /* Compare a slot's type tag against a primitive type (nil/false/true).
    ** Only the hi-word (tag) is loaded; the encoded primitive type is
    ** un-scaled (srwi 3) and complemented to the internal tag value. */
    vk = op == BC_ISEQP;
2220 | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
2221 | lwzx TMP0, BASE, RA
2222 | srwi TMP1, RD, 3
2223 | lwz INS, 0(PC)
2224 | addi PC, PC, 4
2225 | not TMP1, TMP1
2226 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2227 | cmplw TMP0, TMP1
2228 | decode_RD4 TMP2, INS
2229 | add TMP2, TMP2, TMP3
2230     if (vk) {
2231 | iseleq PC, TMP2, PC
2232     } else {
2233 | iseleq PC, PC, TMP2
2234     }
2235 | ins_next
2236     break;
2237
2238 /* -- Unary test and copy ops ------------------------------------------- */
2239
  case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
    /* Truthiness test (and for ISTC/ISFC: copy the tested value to RA
    ** before jumping).  Truthy = anything that is not nil or false;
    ** LJ_TFALSE is synthesized as TISNIL+TISNIL so a single unsigned
    ** compare (evcmpltu) classifies the tag. */
2241 | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
2242 | evlddx TMP0, BASE, RD
2243 | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
2244 | lwz INS, 0(PC)
2245 | evcmpltu TMP0, TMP1
2246 | addi PC, PC, 4
2247     if (op == BC_IST || op == BC_ISF) {
2248 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
2249 | decode_RD4 TMP2, INS
2250 | add TMP2, TMP2, TMP3
2251     if (op == BC_IST) {
2252 | isellt PC, TMP2, PC
2253     } else {
2254 | isellt PC, PC, TMP2
2255     }
2256     } else {
    /* Copy variants: on the non-jumping outcome skip both store and jump. */
2257     if (op == BC_ISTC) {
2258 | checkfail >1
2259     } else {
2260 | checkok >1
2261     }
2262 | addis PC, PC, -(BCBIAS_J*4 >> 16)
2263 | decode_RD4 TMP2, INS
2264 | evstddx TMP0, BASE, RA
2265 | add PC, PC, TMP2
2266 |1:
2267     }
2268 | ins_next
2269     break;
2270
2271 /* -- Unary ops --------------------------------------------------------- */
2272
  case BC_MOV:
    /* Plain 64-bit slot copy; ins_next is split (1/2) around the copy to
    ** overlap dispatch with the memory ops. */
2274 | // RA = dst*8, RD = src*8
2275 | ins_next1
2276 | evlddx TMP0, BASE, RD
2277 | evstddx TMP0, BASE, RA
2278 | ins_next2
2279     break;
  case BC_NOT:
    /* Branchless logical not on the type tag: subfic/adde map a truthy
    ** tag to false and nil/false to true; only the hi-word is written. */
2281 | // RA = dst*8, RD = src*8
2282 | ins_next1
2283 | lwzx TMP0, BASE, RD
2284 | subfic TMP1, TMP0, LJ_TTRUE
2285 | adde TMP0, TMP0, TMP1
2286 | stwx TMP0, BASE, RA
2287 | ins_next2
2288     break;
  case BC_UNM:
    /* Arithmetic negation; non-numbers go to the __unm metamethod path. */
2290 | // RA = dst*8, RD = src*8
2291 | evlddx TMP0, BASE, RD
2292 | checknum TMP0
2293 | checkfail ->vmeta_unm
2294 | efdneg TMP0, TMP0
2295 | ins_next1
2296 | evstddx TMP0, BASE, RA
2297 | ins_next2
2298     break;
  case BC_LEN:
    /* Length operator.  Strings read ->len directly; tables call
    ** lj_tab_len; anything else (and, under LJ_52, tables with an
    ** unfiltered metatable) goes to ->vmeta_len for the __len check. */
2300 | // RA = dst*8, RD = src*8
2301 | evlddx CARG1, BASE, RD
2302 | checkstr CARG1
2303 | checkfail >2
2304 | lwz CRET1, STR:CARG1->len
2305 |1:
2306 | ins_next1
    | // Result is an integer; convert to double before storing.
2307 | efdcfsi TMP0, CRET1
2308 | evstddx TMP0, BASE, RA
2309 | ins_next2
2310 |2:
2311 | checktab CARG1
2312 | checkfail ->vmeta_len
2313#if LJ_52
2314 | lwz TAB:TMP2, TAB:CARG1->metatable
2315 | cmplwi TAB:TMP2, 0
2316 | bne >9
2317 |3:
2318#endif
2319 |->BC_LEN_Z:
2320 | bl extern lj_tab_len // (GCtab *t)
2321 | // Returns uint32_t (but less than 2^31).
2322 | b <1
2323#if LJ_52
2324 |9:
2325 | lbz TMP0, TAB:TMP2->nomm
2326 | andi. TMP0, TMP0, 1<<MM_len
2327 | bne <3 // 'no __len' flag set: done.
2328 | b ->vmeta_len
2329#endif
2330     break;
2331
2332 /* -- Binary ops -------------------------------------------------------- */
2333
  |.macro ins_arithpre, t0, t1
  | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
  | // Shared operand-fetch for the arith ops.  vk classifies the variant:
  | // 0 = VN (var op const), 1 = NV (const op var), else VV (var op var).
  | // Constants come from KBASE and need no type check; VV merges both
  | // hi-words so one checknum covers both operands.
  ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
  ||switch (vk) {
  ||case 0:
2339 | evlddx t0, BASE, RB
2340 | checknum t0
2341 | evlddx t1, KBASE, RC
2342 | checkfail ->vmeta_arith_vn
  || break;
  ||case 1:
2345 | evlddx t1, BASE, RB
2346 | checknum t1
2347 | evlddx t0, KBASE, RC
2348 | checkfail ->vmeta_arith_nv
  || break;
  ||default:
2351 | evlddx t0, BASE, RB
2352 | evlddx t1, BASE, RC
2353 | evmergehi TMP2, t0, t1
2354 | checknum TMP2
2355 | checkanyfail ->vmeta_arith_vv
  || break;
  ||}
  |.endmacro
  |
  |.macro ins_arith, ins
  | // Fetch operands, apply one SPE double op, store and dispatch.
2361 | ins_arithpre TMP0, TMP1
2362 | ins_next1
2363 | ins TMP0, TMP0, TMP1
2364 | evstddx TMP0, BASE, RA
2365 | ins_next2
  |.endmacro
2367
  case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
    /* +, -, *, / expand to a single SPE double instruction each via the
    ** ins_arith macro above. */
2369 | ins_arith efdadd
2370     break;
  case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
2372 | ins_arith efdsub
2373     break;
  case BC_MULVN: case BC_MULNV: case BC_MULVV:
2375 | ins_arith efdmul
2376     break;
  case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
2378 | ins_arith efddiv
2379     break;
  case BC_MODVN:
    /* Lua modulo: b - floor(b/c)*c, using the shared vm_floor_efd helper. */
2381 | ins_arithpre RD, SAVE0
2382 |->BC_MODVN_Z:
2383 | efddiv CARG2, RD, SAVE0
2384 | bl ->vm_floor_efd // floor(b/c)
2385 | efdmul TMP0, CRET2, SAVE0
2386 | ins_next1
2387 | efdsub TMP0, RD, TMP0 // b - floor(b/c)*c
2388 | evstddx TMP0, BASE, RA
2389 | ins_next2
2390     break;
  case BC_MODNV: case BC_MODVV:
2392 | ins_arithpre RD, SAVE0
2393 | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
2394     break;
  case BC_POW:
    /* x^y via the C library pow(); operands are split across the CARG
    ** register pairs per the SPE calling convention used here. */
2396 | evlddx CARG2, BASE, RB
2397 | evlddx CARG4, BASE, RC
2398 | evmergehi CARG1, CARG4, CARG2
2399 | checknum CARG1
2400 | evmergehi CARG3, CARG4, CARG4
2401 | checkanyfail ->vmeta_arith_vv
2402 | bl extern pow@plt
2403 | evmergelo CRET2, CRET1, CRET2
2404 | evstddx CRET2, BASE, RA
2405 | ins_next
2406     break;
2407
  case BC_CAT:
    /* Concatenation.  All the work happens in lj_meta_cat; a NULL return
    ** means done (result sits at slot RB, copied to RA), otherwise a
    ** metamethod frame was set up and we continue at ->vmeta_binop. */
2409 | // RA = dst*8, RB = src_start*8, RC = src_end*8
2410 | sub CARG3, RC, RB
2411 | stw BASE, L->base
2412 | add CARG2, BASE, RC
2413 | mr SAVE0, RB
2414 |->BC_CAT_Z:
2415 | stw PC, SAVE_PC
2416 | mr CARG1, L
2417 | srwi CARG3, CARG3, 3
2418 | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
2419 | // Returns NULL (finished) or TValue * (metamethod).
2420 | cmplwi CRET1, 0
2421 | lwz BASE, L->base
2422 | bne ->vmeta_binop
2423 | evlddx TMP0, BASE, SAVE0 // Copy result from RB to RA.
2424 | evstddx TMP0, BASE, RA
2425 | ins_next
2426     break;
2427
2428 /* -- Constant ops ------------------------------------------------------ */
2429
  case BC_KSTR:
    /* Load a string constant: fetch GCstr* from below KBASE and tag it
    ** with TISSTR in the hi-word. */
2431 | // RA = dst*8, RD = str_const*8 (~)
2432 | ins_next1
2433 | srwi TMP1, RD, 1
2434 | subfic TMP1, TMP1, -4
2435 | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
2436 | evmergelo TMP0, TISSTR, TMP0
2437 | evstddx TMP0, BASE, RA
2438 | ins_next2
2439     break;
  case BC_KCDATA:
    /* FFI-only: load a cdata constant, tagged LJ_TCDATA. */
  |.if FFI
2442 | // RA = dst*8, RD = cdata_const*8 (~)
2443 | ins_next1
2444 | srwi TMP1, RD, 1
2445 | subfic TMP1, TMP1, -4
2446 | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
2447 | li TMP2, LJ_TCDATA
2448 | evmergelo TMP0, TMP2, TMP0
2449 | evstddx TMP0, BASE, RA
2450 | ins_next2
  |.endif
2452     break;
  case BC_KSHORT:
    /* Sign-extend the 16-bit literal and convert to double. */
2454 | // RA = dst*8, RD = int16_literal*8
2455 | srwi TMP1, RD, 3
2456 | extsh TMP1, TMP1
2457 | ins_next1
2458 | efdcfsi TMP0, TMP1
2459 | evstddx TMP0, BASE, RA
2460 | ins_next2
2461     break;
  case BC_KNUM:
    /* Load a 64-bit number constant directly from KBASE. */
2463 | // RA = dst*8, RD = num_const*8
2464 | evlddx TMP0, KBASE, RD
2465 | ins_next1
2466 | evstddx TMP0, BASE, RA
2467 | ins_next2
2468     break;
  case BC_KPRI:
    /* Store a primitive: only the tag word (~type) is written. */
2470 | // RA = dst*8, RD = primitive_type*8 (~)
2471 | srwi TMP1, RD, 3
2472 | not TMP0, TMP1
2473 | ins_next1
2474 | stwx TMP0, BASE, RA
2475 | ins_next2
2476     break;
  case BC_KNIL:
    /* Fill slots RA..RD with nil; loop writes at least two slots. */
2478 | // RA = base*8, RD = end*8
2479 | evstddx TISNIL, BASE, RA
2480 | addi RA, RA, 8
2481 |1:
2482 | evstddx TISNIL, BASE, RA
2483 | cmpw RA, RD
2484 | addi RA, RA, 8
2485 | blt <1
2486 | ins_next_
2487     break;
2488
2489 /* -- Upvalue and function ops ------------------------------------------ */
2490
  case BC_UGET:
    /* Read an upvalue: LFUNC -> uvptr[uvnum] -> ->v -> value. */
2492 | // RA = dst*8, RD = uvnum*8
2493 | ins_next1
2494 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2495 | srwi RD, RD, 1
2496 | addi RD, RD, offsetof(GCfuncL, uvptr)
2497 | lwzx UPVAL:RB, LFUNC:RB, RD
2498 | lwz TMP1, UPVAL:RB->v
2499 | evldd TMP0, 0(TMP1)
2500 | evstddx TMP0, BASE, RA
2501 | ins_next2
2502     break;
  case BC_USETV:
    /* Store a TValue into an upvalue, with the GC write barrier: a
    ** closed+black upvalue receiving a white collectable triggers
    ** lj_gc_barrieruv. */
2504 | // RA = uvnum*8, RD = src*8
2505 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2506 | srwi RA, RA, 1
2507 | addi RA, RA, offsetof(GCfuncL, uvptr)
2508 | evlddx TMP1, BASE, RD
2509 | lwzx UPVAL:RB, LFUNC:RB, RA
2510 | lbz TMP3, UPVAL:RB->marked
2511 | lwz CARG2, UPVAL:RB->v
2512 | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
2513 | lbz TMP0, UPVAL:RB->closed
2514 | evmergehi TMP2, TMP1, TMP1
2515 | evstdd TMP1, 0(CARG2)
2516 | cmplwi cr1, TMP0, 0
2517 | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
2518 | subi TMP2, TMP2, (LJ_TISNUM+1)
2519 | bne >2 // Upvalue is closed and black?
2520 |1:
2521 | ins_next
2522 |
2523 |2: // Check if new value is collectable.
2524 | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
2525 | bge <1 // tvisgcv(v)
2526 | lbz TMP3, GCOBJ:TMP1->gch.marked
2527 | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
2528 | la CARG1, GG_DISP2G(DISPATCH)
2529 | // Crossed a write barrier. Move the barrier forward.
2530 | beq <1
2531 | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
2532 | b <1
2533     break;
  case BC_USETS:
    /* Store a string constant into an upvalue; same barrier logic as
    ** USETV but the value is known collectable, so only the string's
    ** whiteness and the upvalue's closed+black state are checked. */
2535 | // RA = uvnum*8, RD = str_const*8 (~)
2536 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2537 | srwi TMP1, RD, 1
2538 | srwi RA, RA, 1
2539 | subfic TMP1, TMP1, -4
2540 | addi RA, RA, offsetof(GCfuncL, uvptr)
2541 | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
2542 | lwzx UPVAL:RB, LFUNC:RB, RA
2543 | evmergelo STR:TMP1, TISSTR, STR:TMP1
2544 | lbz TMP3, UPVAL:RB->marked
2545 | lwz CARG2, UPVAL:RB->v
2546 | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
2547 | lbz TMP3, STR:TMP1->marked
2548 | lbz TMP2, UPVAL:RB->closed
2549 | evstdd STR:TMP1, 0(CARG2)
2550 | bne >2
2551 |1:
2552 | ins_next
2553 |
2554 |2: // Check if string is white and ensure upvalue is closed.
2555 | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
2556 | cmplwi cr1, TMP2, 0
2557 | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
2558 | la CARG1, GG_DISP2G(DISPATCH)
2559 | // Crossed a write barrier. Move the barrier forward.
2560 | beq <1
2561 | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
2562 | b <1
2563     break;
  case BC_USETN:
    /* Store a number constant into an upvalue; numbers are never
    ** collectable, so no barrier is needed. */
2565 | // RA = uvnum*8, RD = num_const*8
2566 | ins_next1
2567 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2568 | srwi RA, RA, 1
2569 | addi RA, RA, offsetof(GCfuncL, uvptr)
2570 | evlddx TMP0, KBASE, RD
2571 | lwzx UPVAL:RB, LFUNC:RB, RA
2572 | lwz TMP1, UPVAL:RB->v
2573 | evstdd TMP0, 0(TMP1)
2574 | ins_next2
2575     break;
  case BC_USETP:
    /* Store a primitive into an upvalue; only the tag word is written. */
2577 | // RA = uvnum*8, RD = primitive_type*8 (~)
2578 | ins_next1
2579 | lwz LFUNC:RB, FRAME_FUNC(BASE)
2580 | srwi RA, RA, 1
2581 | addi RA, RA, offsetof(GCfuncL, uvptr)
2582 | srwi TMP0, RD, 3
2583 | lwzx UPVAL:RB, LFUNC:RB, RA
2584 | not TMP0, TMP0
2585 | lwz TMP1, UPVAL:RB->v
2586 | stw TMP0, 0(TMP1)
2587 | ins_next2
2588     break;
2589
  case BC_UCLO:
    /* Close upvalues at or above the given level, then jump.  The jump
    ** is taken first (branch_RD) because RD would be clobbered by the
    ** call; the call is skipped entirely if no upvalues are open. */
2591 | // RA = level*8, RD = target
2592 | lwz TMP1, L->openupval
2593 | branch_RD // Do this first since RD is not saved.
2594 | stw BASE, L->base
2595 | cmplwi TMP1, 0
2596 | mr CARG1, L
2597 | beq >1
2598 | add CARG2, BASE, RA
2599 | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
2600 | lwz BASE, L->base
2601 |1:
2602 | ins_next
2603     break;
  case BC_FNEW:
    /* Create a closure from a prototype constant; may run a GC step
    ** (lj_func_newL_gc), hence BASE is saved/reloaded around the call. */
2606 | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
2607 | srwi TMP1, RD, 1
2608 | stw BASE, L->base
2609 | subfic TMP1, TMP1, -4
2610 | stw PC, SAVE_PC
2611 | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
2612 | mr CARG1, L
2613 | lwz CARG3, FRAME_FUNC(BASE)
2614 | // (lua_State *L, GCproto *pt, GCfuncL *parent)
2615 | bl extern lj_func_newL_gc
2616 | // Returns GCfuncL *.
2617 | lwz BASE, L->base
2618 | evmergelo LFUNC:CRET1, TISFUNC, LFUNC:CRET1
2619 | evstddx LFUNC:CRET1, BASE, RA
2620 | ins_next
2621     break;
2622
2623 /* -- Table ops --------------------------------------------------------- */
2624
  case BC_TNEW:
  case BC_TDUP:
    /* Create a table.  First check the GC threshold and run a GC step
    ** at |5 if exceeded (preserving RD across the call).  TNEW decodes
    ** asize/hbits from RD; TDUP duplicates a table constant. */
2627 | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
2628 | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
2629 | mr CARG1, L
2630 | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
2631 | stw BASE, L->base
2632 | cmplw TMP0, TMP1
2633 | stw PC, SAVE_PC
2634 | bge >5
2635 |1:
2636     if (op == BC_TNEW) {
2637 | rlwinm CARG2, RD, 29, 21, 31
2638 | rlwinm CARG3, RD, 18, 27, 31
    | // asize == 0x7ff is the escape value meaning 0x801 slots.
2639 | cmpwi CARG2, 0x7ff
2640 | li TMP1, 0x801
2641 | iseleq CARG2, TMP1, CARG2
2642 | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
2643 | // Returns Table *.
2644     } else {
2645 | srwi TMP1, RD, 1
2646 | subfic TMP1, TMP1, -4
2647 | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
2648 | bl extern lj_tab_dup // (lua_State *L, Table *kt)
2649 | // Returns Table *.
2650     }
2651 | lwz BASE, L->base
2652 | evmergelo TAB:CRET1, TISTAB, TAB:CRET1
2653 | evstddx TAB:CRET1, BASE, RA
2654 | ins_next
2655 |5:
2656 | mr SAVE0, RD
2657 | bl extern lj_gc_step_fixtop // (lua_State *L)
2658 | mr RD, SAVE0
2659 | mr CARG1, L
2660 | b <1
2661     break;
  case BC_GGET:
    /* Global get/set: fetch the env table from the current function and
    ** the key string constant, then reuse the TGETS/TSETS tails. */
2664 | // RA = dst*8, RD = str_const*8 (~)
  case BC_GSET:
2666 | // RA = src*8, RD = str_const*8 (~)
2667 | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
2668 | srwi TMP1, RD, 1
2669 | lwz TAB:RB, LFUNC:TMP2->env
2670 | subfic TMP1, TMP1, -4
2671 | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
2672     if (op == BC_GGET) {
2673 | b ->BC_TGETS_Z
2674     } else {
2675 | b ->BC_TSETS_Z
2676     }
2677     break;
2678
  case BC_TGETV:
    /* t[k] with a variable key.  Number keys are truncated to integer and
    ** verified exact (efdctsi/efdcfsi round-trip + efdcmpeq); in-range
    ** integer keys index the array part, string keys branch to the TGETS
    ** tail, everything else (or a nil result with a live __index) goes
    ** through ->vmeta_tgetv. */
2680 | // RA = dst*8, RB = table*8, RC = key*8
2681 | evlddx TAB:RB, BASE, RB
2682 | evlddx RC, BASE, RC
2683 | checktab TAB:RB
2684 | checkfail ->vmeta_tgetv
2685 | checknum RC
2686 | checkfail >5
2687 | // Convert number key to integer
2688 | efdctsi TMP2, RC
2689 | lwz TMP0, TAB:RB->asize
2690 | efdcfsi TMP1, TMP2
2691 | cmplw cr0, TMP0, TMP2
2692 | efdcmpeq cr1, RC, TMP1
2693 | lwz TMP1, TAB:RB->array
2694 | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
2695 | slwi TMP2, TMP2, 3
2696 | ble ->vmeta_tgetv // Integer key and in array part?
2697 | evlddx TMP1, TMP1, TMP2
2698 | checknil TMP1
2699 | checkok >2
2700 |1:
2701 | evstddx TMP1, BASE, RA
2702 | ins_next
2703 |
2704 |2: // Check for __index if table value is nil.
2705 | lwz TAB:TMP2, TAB:RB->metatable
2706 | cmplwi TAB:TMP2, 0
2707 | beq <1 // No metatable: done.
2708 | lbz TMP0, TAB:TMP2->nomm
2709 | andi. TMP0, TMP0, 1<<MM_index
2710 | bne <1 // 'no __index' flag set: done.
2711 | b ->vmeta_tgetv
2712 |
2713 |5:
2714 | checkstr STR:RC // String key?
2715 | checkok ->BC_TGETS_Z
2716 | b ->vmeta_tgetv
2717     break;
  case BC_TGETS:
    /* t[k] with a string-constant key: walk the hash chain.  Nodes are
    ** 24 bytes, hence the idx*32-idx*8 addressing. */
2719 | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
2720 | evlddx TAB:RB, BASE, RB
2721 | srwi TMP1, RC, 1
2722 | checktab TAB:RB
2723 | subfic TMP1, TMP1, -4
2724 | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
2725 | checkfail ->vmeta_tgets1
2726 |->BC_TGETS_Z:
2727 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
2728 | lwz TMP0, TAB:RB->hmask
2729 | lwz TMP1, STR:RC->hash
2730 | lwz NODE:TMP2, TAB:RB->node
2731 | evmergelo STR:RC, TISSTR, STR:RC
2732 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
2733 | slwi TMP0, TMP1, 5
2734 | slwi TMP1, TMP1, 3
2735 | sub TMP1, TMP0, TMP1
2736 | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
2737 |1:
2738 | evldd TMP0, NODE:TMP2->key
2739 | evldd TMP1, NODE:TMP2->val
2740 | evcmpeq TMP0, STR:RC
2741 | checkanyfail >4
2742 | checknil TMP1
2743 | checkok >5 // Key found, but nil value?
2744 |3:
2745 | evstddx TMP1, BASE, RA
2746 | ins_next
2747 |
2748 |4: // Follow hash chain.
2749 | lwz NODE:TMP2, NODE:TMP2->next
2750 | cmplwi NODE:TMP2, 0
2751 | bne <1
2752 | // End of hash chain: key not found, nil result.
2753 | evmr TMP1, TISNIL
2754 |
2755 |5: // Check for __index if table value is nil.
2756 | lwz TAB:TMP2, TAB:RB->metatable
2757 | cmplwi TAB:TMP2, 0
2758 | beq <3 // No metatable: done.
2759 | lbz TMP0, TAB:TMP2->nomm
2760 | andi. TMP0, TMP0, 1<<MM_index
2761 | bne <3 // 'no __index' flag set: done.
2762 | b ->vmeta_tgets
2763     break;
  case BC_TGETB:
    /* t[k] with a small constant integer key: direct array-part access,
    ** out-of-range or nil-with-__index falls back to ->vmeta_tgetb. */
2765 | // RA = dst*8, RB = table*8, RC = index*8
2766 | evlddx TAB:RB, BASE, RB
2767 | srwi TMP0, RC, 3
2768 | checktab TAB:RB
2769 | checkfail ->vmeta_tgetb
2770 | lwz TMP1, TAB:RB->asize
2771 | lwz TMP2, TAB:RB->array
2772 | cmplw TMP0, TMP1
2773 | bge ->vmeta_tgetb
2774 | evlddx TMP1, TMP2, RC
2775 | checknil TMP1
2776 | checkok >5
2777 |1:
2778 | ins_next1
2779 | evstddx TMP1, BASE, RA
2780 | ins_next2
2781 |
2782 |5: // Check for __index if table value is nil.
2783 | lwz TAB:TMP2, TAB:RB->metatable
2784 | cmplwi TAB:TMP2, 0
2785 | beq <1 // No metatable: done.
2786 | lbz TMP2, TAB:TMP2->nomm
2787 | andi. TMP2, TMP2, 1<<MM_index
2788 | bne <1 // 'no __index' flag set: done.
2789 | b ->vmeta_tgetb // Caveat: preserve TMP0!
2790     break;
2791
  case BC_TSETV:
    /* t[k] = v with a variable key.  Mirrors TGETV's key handling; in
    ** addition the previous value is checked for nil (possible
    ** __newindex) and a black table storing a value triggers the
    ** backward write barrier at |7. */
2793 | // RA = src*8, RB = table*8, RC = key*8
2794 | evlddx TAB:RB, BASE, RB
2795 | evlddx RC, BASE, RC
2796 | checktab TAB:RB
2797 | checkfail ->vmeta_tsetv
2798 | checknum RC
2799 | checkfail >5
2800 | // Convert number key to integer
2801 | efdctsi TMP2, RC
2802 | evlddx SAVE0, BASE, RA
2803 | lwz TMP0, TAB:RB->asize
2804 | efdcfsi TMP1, TMP2
2805 | cmplw cr0, TMP0, TMP2
2806 | efdcmpeq cr1, RC, TMP1
2807 | lwz TMP1, TAB:RB->array
2808 | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
2809 | slwi TMP0, TMP2, 3
2810 | ble ->vmeta_tsetv // Integer key and in array part?
2811 | lbz TMP3, TAB:RB->marked
2812 | evlddx TMP2, TMP1, TMP0
2813 | checknil TMP2
2814 | checkok >3
2815 |1:
2816 | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
2817 | evstddx SAVE0, TMP1, TMP0
2818 | bne >7
2819 |2:
2820 | ins_next
2821 |
2822 |3: // Check for __newindex if previous value is nil.
2823 | lwz TAB:TMP2, TAB:RB->metatable
2824 | cmplwi TAB:TMP2, 0
2825 | beq <1 // No metatable: done.
2826 | lbz TMP2, TAB:TMP2->nomm
2827 | andi. TMP2, TMP2, 1<<MM_newindex
2828 | bne <1 // 'no __newindex' flag set: done.
2829 | b ->vmeta_tsetv
2830 |
2831 |5:
2832 | checkstr STR:RC // String key?
2833 | checkok ->BC_TSETS_Z
2834 | b ->vmeta_tsetv
2835 |
2836 |7: // Possible table write barrier for the value. Skip valiswhite check.
2837 | barrierback TAB:RB, TMP3, TMP0
2838 | b <2
2839     break;
  case BC_TSETS:
    /* t[k] = v with a string-constant key.  Walks the hash chain like
    ** TGETS; a miss adds the key via lj_tab_newkey (after an explicit
    ** __newindex check, since the nomm cache is cleared here).  Stores
    ** into a black table take the barrier at |7. */
2841 | // RA = src*8, RB = table*8, RC = str_const*8 (~)
2842 | evlddx TAB:RB, BASE, RB
2843 | srwi TMP1, RC, 1
2844 | checktab TAB:RB
2845 | subfic TMP1, TMP1, -4
2846 | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
2847 | checkfail ->vmeta_tsets1
2848 |->BC_TSETS_Z:
2849 | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
2850 | lwz TMP0, TAB:RB->hmask
2851 | lwz TMP1, STR:RC->hash
2852 | lwz NODE:TMP2, TAB:RB->node
2853 | evmergelo STR:RC, TISSTR, STR:RC
2854 | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
2855 | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
2856 | evlddx SAVE0, BASE, RA
2857 | slwi TMP0, TMP1, 5
2858 | slwi TMP1, TMP1, 3
2859 | sub TMP1, TMP0, TMP1
2860 | lbz TMP3, TAB:RB->marked
2861 | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
2862 |1:
2863 | evldd TMP0, NODE:TMP2->key
2864 | evldd TMP1, NODE:TMP2->val
2865 | evcmpeq TMP0, STR:RC
2866 | checkanyfail >5
2867 | checknil TMP1
2868 | checkok >4 // Key found, but nil value?
2869 |2:
2870 | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
2871 | evstdd SAVE0, NODE:TMP2->val
2872 | bne >7
2873 |3:
2874 | ins_next
2875 |
2876 |4: // Check for __newindex if previous value is nil.
2877 | lwz TAB:TMP1, TAB:RB->metatable
2878 | cmplwi TAB:TMP1, 0
2879 | beq <2 // No metatable: done.
2880 | lbz TMP0, TAB:TMP1->nomm
2881 | andi. TMP0, TMP0, 1<<MM_newindex
2882 | bne <2 // 'no __newindex' flag set: done.
2883 | b ->vmeta_tsets
2884 |
2885 |5: // Follow hash chain.
2886 | lwz NODE:TMP2, NODE:TMP2->next
2887 | cmplwi NODE:TMP2, 0
2888 | bne <1
2889 | // End of hash chain: key not found, add a new one.
2890 |
2891 | // But check for __newindex first.
2892 | lwz TAB:TMP1, TAB:RB->metatable
2893 | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
2894 | stw PC, SAVE_PC
2895 | mr CARG1, L
2896 | cmplwi TAB:TMP1, 0
2897 | stw BASE, L->base
2898 | beq >6 // No metatable: continue.
2899 | lbz TMP0, TAB:TMP1->nomm
2900 | andi. TMP0, TMP0, 1<<MM_newindex
2901 | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
2902 |6:
2903 | mr CARG2, TAB:RB
2904 | evstdd STR:RC, 0(CARG3)
2905 | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
2906 | // Returns TValue *.
2907 | lwz BASE, L->base
2908 | evstdd SAVE0, 0(CRET1)
2909 | b <3 // No 2nd write barrier needed.
2910 |
2911 |7: // Possible table write barrier for the value. Skip valiswhite check.
2912 | barrierback TAB:RB, TMP3, TMP0
2913 | b <3
2914     break;
  case BC_TSETB:
    /* t[k] = v with a small constant integer key: direct array-part
    ** store, with the nil/__newindex check at |5 and the write barrier
    ** at |7 (same pattern as TSETV). */
2916 | // RA = src*8, RB = table*8, RC = index*8
2917 | evlddx TAB:RB, BASE, RB
2918 | srwi TMP0, RC, 3
2919 | checktab TAB:RB
2920 | checkfail ->vmeta_tsetb
2921 | lwz TMP1, TAB:RB->asize
2922 | lwz TMP2, TAB:RB->array
2923 | lbz TMP3, TAB:RB->marked
2924 | cmplw TMP0, TMP1
2925 | evlddx SAVE0, BASE, RA
2926 | bge ->vmeta_tsetb
2927 | evlddx TMP1, TMP2, RC
2928 | checknil TMP1
2929 | checkok >5
2930 |1:
2931 | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
2932 | evstddx SAVE0, TMP2, RC
2933 | bne >7
2934 |2:
2935 | ins_next
2936 |
2937 |5: // Check for __newindex if previous value is nil.
2938 | lwz TAB:TMP1, TAB:RB->metatable
2939 | cmplwi TAB:TMP1, 0
2940 | beq <1 // No metatable: done.
2941 | lbz TMP1, TAB:TMP1->nomm
2942 | andi. TMP1, TMP1, 1<<MM_newindex
2943 | bne <1 // 'no __newindex' flag set: done.
2944 | b ->vmeta_tsetb // Caveat: preserve TMP0!
2945 |
2946 |7: // Possible table write barrier for the value. Skip valiswhite check.
2947 | barrierback TAB:RB, TMP3, TMP0
2948 | b <2
2949     break;
  case BC_TSETM:
    /* Bulk store of MULTRES results into the table at RA-1, starting at
    ** the integer constant index.  Resizes the array part via
    ** lj_tab_reasize at |5 when the destination range doesn't fit; one
    ** barrier check covers the whole copy. */
2952 | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
2953 | add RA, BASE, RA
2954 |1:
2955 | add TMP3, KBASE, RD
2956 | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
2957 | addic. TMP0, MULTRES, -8
2958 | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
2959 | srwi CARG3, TMP0, 3
2960 | beq >4 // Nothing to copy?
2961 | add CARG3, CARG3, TMP3
2962 | lwz TMP2, TAB:CARG2->asize
2963 | slwi TMP1, TMP3, 3
2964 | lbz TMP3, TAB:CARG2->marked
2965 | cmplw CARG3, TMP2
2966 | add TMP2, RA, TMP0
2967 | lwz TMP0, TAB:CARG2->array
2968 | bgt >5
2969 | add TMP1, TMP1, TMP0
2970 | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
2971 |3: // Copy result slots to table.
2972 | evldd TMP0, 0(RA)
2973 | addi RA, RA, 8
2974 | cmpw cr1, RA, TMP2
2975 | evstdd TMP0, 0(TMP1)
2976 | addi TMP1, TMP1, 8
2977 | blt cr1, <3
2978 | bne >7
2979 |4:
2980 | ins_next
2981 |
2982 |5: // Need to resize array part.
2983 | stw BASE, L->base
2984 | mr CARG1, L
2985 | stw PC, SAVE_PC
2986 | mr SAVE0, RD
2987 | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
2988 | // Must not reallocate the stack.
2989 | mr RD, SAVE0
2990 | b <1
2991 |
2992 |7: // Possible table write barrier for any value. Skip valiswhite check.
2993 | barrierback TAB:CARG2, TMP3, TMP0
2994 | b <4
2995     break;
2996
2997 /* -- Calls and vararg handling ----------------------------------------- */
2998
  case BC_CALLM:
    /* Multi-result call: add MULTRES to the arg count, then share the
    ** BC_CALL code (relies on BC_CALL being emitted next). */
3000 | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
3001 | add NARGS8:RC, NARGS8:RC, MULTRES
3002 | // Fall through. Assumes BC_CALL follows.
3003     break;
  case BC_CALL:
    /* Plain call: set up the new BASE past the callee slot; non-function
    ** callees go to ->vmeta_call (__call). */
3005 | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
3006 | evlddx LFUNC:RB, BASE, RA
3007 | mr TMP2, BASE
3008 | add BASE, BASE, RA
3009 | subi NARGS8:RC, NARGS8:RC, 8
3010 | checkfunc LFUNC:RB
3011 | addi BASE, BASE, 8
3012 | checkfail ->vmeta_call
3013 | ins_call
3014     break;
  case BC_CALLMT:
    /* Multi-result tailcall: like CALLM, falls through to BC_CALLT. */
3017 | // RA = base*8, (RB = 0,) RC = extra_nargs*8
3018 | add NARGS8:RC, NARGS8:RC, MULTRES
3019 | // Fall through. Assumes BC_CALLT follows.
3020     break;
  case BC_CALLT:
    /* Tailcall: copy the args down over the current frame, keeping the
    ** caller's PC.  |5 handles a tailcall to a fast function with a Lua
    ** frame below (KBASE must be refetched); |7 first relocates BASE when
    ** tailcalling out of a vararg frame. */
3022 | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
3023 | evlddx LFUNC:RB, BASE, RA
3024 | add RA, BASE, RA
3025 | lwz TMP1, FRAME_PC(BASE)
3026 | subi NARGS8:RC, NARGS8:RC, 8
3027 | checkfunc LFUNC:RB
3028 | addi RA, RA, 8
3029 | checkfail ->vmeta_callt
3030 |->BC_CALLT_Z:
3031 | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
3032 | lbz TMP3, LFUNC:RB->ffid
3033 | xori TMP2, TMP1, FRAME_VARG
3034 | cmplwi cr1, NARGS8:RC, 0
3035 | bne >7
3036 |1:
3037 | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
3038 | li TMP2, 0
3039 | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
3040 | beq cr1, >3
3041 |2:
    | // Argument copy-down loop (8 bytes per slot).
3042 | addi TMP3, TMP2, 8
3043 | evlddx TMP0, RA, TMP2
3044 | cmplw cr1, TMP3, NARGS8:RC
3045 | evstddx TMP0, BASE, TMP2
3046 | mr TMP2, TMP3
3047 | bne cr1, <2
3048 |3:
3049 | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
3050 | beq >5
3051 |4:
3052 | ins_callt
3053 |
3054 |5: // Tailcall to a fast function with a Lua frame below.
3055 | lwz INS, -4(TMP1)
3056 | decode_RA8 RA, INS
3057 | sub TMP1, BASE, RA
3058 | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
3059 | lwz TMP1, LFUNC:TMP1->pc
3060 | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
3061 | b <4
3062 |
3063 |7: // Tailcall from a vararg function.
3064 | andi. TMP0, TMP2, FRAME_TYPEP
3065 | bne <1 // Vararg frame below?
3066 | sub BASE, BASE, TMP2 // Relocate BASE down.
3067 | lwz TMP1, FRAME_PC(BASE)
3068 | andi. TMP0, TMP1, FRAME_TYPE
3069 | b <1
3070     break;
3071
  case BC_ITERC:
    /* Generic-for iterator call: copy callable/state/control below the
    ** new frame and call with 2 fixed args. */
3073 | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
3074 | subi RA, RA, 24 // evldd doesn't support neg. offsets.
3075 | mr TMP2, BASE
3076 | evlddx LFUNC:RB, BASE, RA
3077 | add BASE, BASE, RA
3078 | evldd TMP0, 8(BASE)
3079 | evldd TMP1, 16(BASE)
3080 | evstdd LFUNC:RB, 24(BASE) // Copy callable.
3081 | checkfunc LFUNC:RB
3082 | evstdd TMP0, 32(BASE) // Copy state.
3083 | li NARGS8:RC, 16 // Iterators get 2 arguments.
3084 | evstdd TMP1, 40(BASE) // Copy control var.
3085 | addi BASE, BASE, 32
3086 | checkfail ->vmeta_call
3087 | ins_call
3088     break;
  case BC_ITERN:
    /* Specialized next(): iterate array part (|1) then hash part (|5/|6)
    ** using the integer control var stored at -4(RA), skipping nil holes.
    ** Falling off the end falls through to ITERL+1 (no jump taken). */
3091 | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
  |.if JIT
3093 | // NYI: add hotloop, record BC_ITERN.
  |.endif
3095 | add RA, BASE, RA
3096 | lwz TAB:RB, -12(RA)
3097 | lwz RC, -4(RA) // Get index from control var.
3098 | lwz TMP0, TAB:RB->asize
3099 | lwz TMP1, TAB:RB->array
3100 | addi PC, PC, 4
3101 |1: // Traverse array part.
3102 | cmplw RC, TMP0
3103 | slwi TMP3, RC, 3
3104 | bge >5 // Index points after array part?
3105 | evlddx TMP2, TMP1, TMP3
3106 | checknil TMP2
3107 | lwz INS, -4(PC)
3108 | checkok >4
3109 | efdcfsi TMP0, RC
3110 | addi RC, RC, 1
3111 | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
3112 | evstdd TMP2, 8(RA)
3113 | decode_RD4 TMP1, INS
3114 | stw RC, -4(RA) // Update control var.
3115 | add PC, TMP1, TMP3
3116 | evstdd TMP0, 0(RA)
3117 |3:
3118 | ins_next
3119 |
3120 |4: // Skip holes in array part.
3121 | addi RC, RC, 1
3122 | b <1
3123 |
3124 |5: // Traverse hash part.
3125 | lwz TMP1, TAB:RB->hmask
3126 | sub RC, RC, TMP0
3127 | lwz TMP2, TAB:RB->node
3128 |6:
3129 | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
3130 | slwi TMP3, RC, 5
3131 | bgt <3
3132 | slwi RB, RC, 3
3133 | sub TMP3, TMP3, RB
3134 | evlddx RB, TMP2, TMP3
3135 | add NODE:TMP3, TMP2, TMP3
3136 | checknil RB
3137 | lwz INS, -4(PC)
3138 | checkok >7
3139 | evldd TMP3, NODE:TMP3->key
3140 | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
3141 | evstdd RB, 8(RA)
3142 | add RC, RC, TMP0
3143 | decode_RD4 TMP1, INS
3144 | evstdd TMP3, 0(RA)
3145 | addi RC, RC, 1
3146 | add PC, TMP1, TMP2
3147 | stw RC, -4(RA) // Update control var.
3148 | b <3
3149 |
3150 |7: // Skip holes in hash part.
3151 | addi RC, RC, 1
3152 | b <6
3153     break;
  case BC_ISNEXT:
    /* Verify the pairs() specialization preconditions (builtin next,
    ** table arg, nil control).  On success initialize the control var;
    ** on failure despecialize by patching the opcodes back to
    ** JMP + ITERC. */
3156 | // RA = base*8, RD = target (points to ITERN)
3157 | add RA, BASE, RA
3158 | li TMP2, -24
3159 | evlddx CFUNC:TMP1, RA, TMP2
3160 | lwz TMP2, -16(RA)
3161 | lwz TMP3, -8(RA)
3162 | evmergehi TMP0, CFUNC:TMP1, CFUNC:TMP1
3163 | cmpwi cr0, TMP2, LJ_TTAB
3164 | cmpwi cr1, TMP0, LJ_TFUNC
3165 | cmpwi cr6, TMP3, LJ_TNIL
3166 | bne cr1, >5
3167 | lbz TMP1, CFUNC:TMP1->ffid
3168 | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
3169 | cmpwi cr7, TMP1, FF_next_N
3170 | srwi TMP0, RD, 1
3171 | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
3172 | add TMP3, PC, TMP0
3173 | bne cr0, >5
    | // 0xfffe7fff is the ITERN control-var marker tag written to -8(RA).
3174 | lus TMP1, 0xfffe
3175 | ori TMP1, TMP1, 0x7fff
3176 | stw ZERO, -4(RA) // Initialize control var.
3177 | stw TMP1, -8(RA)
3178 | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
3179 |1:
3180 | ins_next
3181 |5: // Despecialize bytecode if any of the checks fail.
3182 | li TMP0, BC_JMP
3183 | li TMP1, BC_ITERC
3184 | stb TMP0, -1(PC)
3185 | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
3186 | stb TMP1, 3(PC)
3187 | b <1
3188     break;
3189
3190 case BC_VARG:
3191 | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
3192 | lwz TMP0, FRAME_PC(BASE)
3193 | add RC, BASE, RC
3194 | add RA, BASE, RA
3195 | addi RC, RC, FRAME_VARG
3196 | add TMP2, RA, RB
3197 | subi TMP3, BASE, 8 // TMP3 = vtop
3198 | sub RC, RC, TMP0 // RC = vbase
3199 | // Note: RC may now be even _above_ BASE if nargs was < numparams.
3200 | cmplwi cr1, RB, 0
3201 | sub. TMP1, TMP3, RC
3202 | beq cr1, >5 // Copy all varargs?
3203 | subi TMP2, TMP2, 16
3204 | ble >2 // No vararg slots?
3205 |1: // Copy vararg slots to destination slots.
3206 | evldd TMP0, 0(RC)
3207 | addi RC, RC, 8
3208 | evstdd TMP0, 0(RA)
3209 | cmplw RA, TMP2
3210 | cmplw cr1, RC, TMP3
3211 | bge >3 // All destination slots filled?
3212 | addi RA, RA, 8
3213 | blt cr1, <1 // More vararg slots?
3214 |2: // Fill up remainder with nil.
3215 | evstdd TISNIL, 0(RA)
3216 | cmplw RA, TMP2
3217 | addi RA, RA, 8
3218 | blt <2
3219 |3:
3220 | ins_next
3221 |
3222 |5: // Copy all varargs.
3223 | lwz TMP0, L->maxstack
3224 | li MULTRES, 8 // MULTRES = (0+1)*8
3225 | ble <3 // No vararg slots?
3226 | add TMP2, RA, TMP1
3227 | cmplw TMP2, TMP0
3228 | addi MULTRES, TMP1, 8
3229 | bgt >7
3230 |6:
3231 | evldd TMP0, 0(RC)
3232 | addi RC, RC, 8
3233 | evstdd TMP0, 0(RA)
3234 | cmplw RC, TMP3
3235 | addi RA, RA, 8
3236 | blt <6 // More vararg slots?
3237 | b <3
3238 |
3239 |7: // Grow stack for varargs.
3240 | mr CARG1, L
3241 | stw RA, L->top
3242 | sub SAVE0, RC, BASE // Need delta, because BASE may change.
3243 | stw BASE, L->base
3244 | sub RA, RA, BASE
3245 | stw PC, SAVE_PC
3246 | srwi CARG2, TMP1, 3
3247 | bl extern lj_state_growstack // (lua_State *L, int n)
3248 | lwz BASE, L->base
3249 | add RA, BASE, RA
3250 | add RC, BASE, SAVE0
3251 | subi TMP3, BASE, 8
3252 | b <6
3253 break;
3254
3255 /* -- Returns ----------------------------------------------------------- */
3256
3257 case BC_RETM:
3258 | // RA = results*8, RD = extra_nresults*8
3259 | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
3260 | // Fall through. Assumes BC_RET follows.
3261 break;
3262
3263 case BC_RET:
3264 | // RA = results*8, RD = (nresults+1)*8
3265 | lwz PC, FRAME_PC(BASE)
3266 | add RA, BASE, RA
3267 | mr MULTRES, RD
3268 |1:
3269 | andi. TMP0, PC, FRAME_TYPE
3270 | xori TMP1, PC, FRAME_VARG
3271 | bne ->BC_RETV_Z
3272 |
3273 |->BC_RET_Z:
3274 | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
3275 | lwz INS, -4(PC)
3276 | cmpwi RD, 8
3277 | subi TMP2, BASE, 8
3278 | subi RC, RD, 8
3279 | decode_RB8 RB, INS
3280 | beq >3
3281 | li TMP1, 0
3282 |2:
3283 | addi TMP3, TMP1, 8
3284 | evlddx TMP0, RA, TMP1
3285 | cmpw TMP3, RC
3286 | evstddx TMP0, TMP2, TMP1
3287 | beq >3
3288 | addi TMP1, TMP3, 8
3289 | evlddx TMP0, RA, TMP3
3290 | cmpw TMP1, RC
3291 | evstddx TMP0, TMP2, TMP3
3292 | bne <2
3293 |3:
3294 |5:
3295 | cmplw RB, RD
3296 | decode_RA8 RA, INS
3297 | bgt >6
3298 | sub BASE, TMP2, RA
3299 | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
3300 | ins_next1
3301 | lwz TMP1, LFUNC:TMP1->pc
3302 | lwz KBASE, PC2PROTO(k)(TMP1)
3303 | ins_next2
3304 |
3305 |6: // Fill up results with nil.
3306 | subi TMP1, RD, 8
3307 | addi RD, RD, 8
3308 | evstddx TISNIL, TMP2, TMP1
3309 | b <5
3310 |
3311 |->BC_RETV_Z: // Non-standard return case.
3312 | andi. TMP2, TMP1, FRAME_TYPEP
3313 | bne ->vm_return
3314 | // Return from vararg function: relocate BASE down.
3315 | sub BASE, BASE, TMP1
3316 | lwz PC, FRAME_PC(BASE)
3317 | b <1
3318 break;
3319
3320 case BC_RET0: case BC_RET1:
3321 | // RA = results*8, RD = (nresults+1)*8
3322 | lwz PC, FRAME_PC(BASE)
3323 | add RA, BASE, RA
3324 | mr MULTRES, RD
3325 | andi. TMP0, PC, FRAME_TYPE
3326 | xori TMP1, PC, FRAME_VARG
3327 | bne ->BC_RETV_Z
3328 |
3329 | lwz INS, -4(PC)
3330 | subi TMP2, BASE, 8
3331 | decode_RB8 RB, INS
3332 if (op == BC_RET1) {
3333 | evldd TMP0, 0(RA)
3334 | evstdd TMP0, 0(TMP2)
3335 }
3336 |5:
3337 | cmplw RB, RD
3338 | decode_RA8 RA, INS
3339 | bgt >6
3340 | sub BASE, TMP2, RA
3341 | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
3342 | ins_next1
3343 | lwz TMP1, LFUNC:TMP1->pc
3344 | lwz KBASE, PC2PROTO(k)(TMP1)
3345 | ins_next2
3346 |
3347 |6: // Fill up results with nil.
3348 | subi TMP1, RD, 8
3349 | addi RD, RD, 8
3350 | evstddx TISNIL, TMP2, TMP1
3351 | b <5
3352 break;
3353
3354 /* -- Loops and branches ------------------------------------------------ */
3355
3356 case BC_FORL:
3357 |.if JIT
3358 | hotloop
3359 |.endif
3360 | // Fall through. Assumes BC_IFORL follows.
3361 break;
3362
3363 case BC_JFORI:
3364 case BC_JFORL:
3365#if !LJ_HASJIT
3366 break;
3367#endif
3368 case BC_FORI:
3369 case BC_IFORL:
3370 | // RA = base*8, RD = target (after end of loop or start of loop)
3371 vk = (op == BC_IFORL || op == BC_JFORL);
3372 | add RA, BASE, RA
3373 | evldd TMP1, FORL_IDX*8(RA)
3374 | evldd TMP3, FORL_STEP*8(RA)
3375 | evldd TMP2, FORL_STOP*8(RA)
3376 if (!vk) {
3377 | evcmpgtu cr0, TMP1, TISNUM
3378 | evcmpgtu cr7, TMP3, TISNUM
3379 | evcmpgtu cr1, TMP2, TISNUM
3380 | cror 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
3381 | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
3382 | blt ->vmeta_for
3383 }
3384 if (vk) {
3385 | efdadd TMP1, TMP1, TMP3
3386 | evstdd TMP1, FORL_IDX*8(RA)
3387 }
3388 | evcmpgts TMP3, TISNIL
3389 | evstdd TMP1, FORL_EXT*8(RA)
3390 | bge >2
3391 | efdcmpgt TMP1, TMP2
3392 |1:
3393 if (op != BC_JFORL) {
3394 | srwi RD, RD, 1
3395 | add RD, PC, RD
3396 if (op == BC_JFORI) {
3397 | addis PC, RD, -(BCBIAS_J*4 >> 16)
3398 } else {
3399 | addis RD, RD, -(BCBIAS_J*4 >> 16)
3400 }
3401 }
3402 if (op == BC_FORI) {
3403 | iselgt PC, RD, PC
3404 } else if (op == BC_IFORL) {
3405 | iselgt PC, PC, RD
3406 } else {
3407 | ble =>BC_JLOOP
3408 }
3409 | ins_next
3410 |2:
3411 | efdcmpgt TMP2, TMP1
3412 | b <1
3413 break;
3414
3415 case BC_ITERL:
3416 |.if JIT
3417 | hotloop
3418 |.endif
3419 | // Fall through. Assumes BC_IITERL follows.
3420 break;
3421
3422 case BC_JITERL:
3423#if !LJ_HASJIT
3424 break;
3425#endif
3426 case BC_IITERL:
3427 | // RA = base*8, RD = target
3428 | evlddx TMP1, BASE, RA
3429 | subi RA, RA, 8
3430 | checknil TMP1
3431 | checkok >1 // Stop if iterator returned nil.
3432 if (op == BC_JITERL) {
3433 | NYI
3434 } else {
3435 | branch_RD // Otherwise save control var + branch.
3436 | evstddx TMP1, BASE, RA
3437 }
3438 |1:
3439 | ins_next
3440 break;
3441
3442 case BC_LOOP:
3443 | // RA = base*8, RD = target (loop extent)
3444 | // Note: RA/RD is only used by trace recorder to determine scope/extent
3445 | // This opcode does NOT jump, it's only purpose is to detect a hot loop.
3446 |.if JIT
3447 | hotloop
3448 |.endif
3449 | // Fall through. Assumes BC_ILOOP follows.
3450 break;
3451
3452 case BC_ILOOP:
3453 | // RA = base*8, RD = target (loop extent)
3454 | ins_next
3455 break;
3456
3457 case BC_JLOOP:
3458 |.if JIT
3459 | NYI
3460 |.endif
3461 break;
3462
3463 case BC_JMP:
3464 | // RA = base*8 (only used by trace recorder), RD = target
3465 | branch_RD
3466 | ins_next
3467 break;
3468
3469 /* -- Function headers -------------------------------------------------- */
3470
3471 case BC_FUNCF:
3472 |.if JIT
3473 | hotcall
3474 |.endif
3475 case BC_FUNCV: /* NYI: compiled vararg functions. */
3476 | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
3477 break;
3478
3479 case BC_JFUNCF:
3480#if !LJ_HASJIT
3481 break;
3482#endif
3483 case BC_IFUNCF:
3484 | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
3485 | lwz TMP2, L->maxstack
3486 | lbz TMP1, -4+PC2PROTO(numparams)(PC)
3487 | lwz KBASE, -4+PC2PROTO(k)(PC)
3488 | cmplw RA, TMP2
3489 | slwi TMP1, TMP1, 3
3490 | bgt ->vm_growstack_l
3491 | ins_next1
3492 |2:
3493 | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
3494 | ble >3
3495 if (op == BC_JFUNCF) {
3496 | NYI
3497 } else {
3498 | ins_next2
3499 }
3500 |
3501 |3: // Clear missing parameters.
3502 | evstddx TISNIL, BASE, NARGS8:RC
3503 | addi NARGS8:RC, NARGS8:RC, 8
3504 | b <2
3505 break;
3506
3507 case BC_JFUNCV:
3508#if !LJ_HASJIT
3509 break;
3510#endif
3511 | NYI // NYI: compiled vararg functions
3512 break; /* NYI: compiled vararg functions. */
3513
3514 case BC_IFUNCV:
3515 | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
3516 | lwz TMP2, L->maxstack
3517 | add TMP1, BASE, RC
3518 | add TMP0, RA, RC
3519 | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
3520 | addi TMP3, RC, 8+FRAME_VARG
3521 | lwz KBASE, -4+PC2PROTO(k)(PC)
3522 | cmplw TMP0, TMP2
3523 | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
3524 | bge ->vm_growstack_l
3525 | lbz TMP2, -4+PC2PROTO(numparams)(PC)
3526 | mr RA, BASE
3527 | mr RC, TMP1
3528 | ins_next1
3529 | cmpwi TMP2, 0
3530 | addi BASE, TMP1, 8
3531 | beq >3
3532 |1:
3533 | cmplw RA, RC // Less args than parameters?
3534 | evldd TMP0, 0(RA)
3535 | bge >4
3536 | evstdd TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
3537 | addi RA, RA, 8
3538 |2:
3539 | addic. TMP2, TMP2, -1
3540 | evstdd TMP0, 8(TMP1)
3541 | addi TMP1, TMP1, 8
3542 | bne <1
3543 |3:
3544 | ins_next2
3545 |
3546 |4: // Clear missing parameters.
3547 | evmr TMP0, TISNIL
3548 | b <2
3549 break;
3550
3551 case BC_FUNCC:
3552 case BC_FUNCCW:
3553 | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
3554 if (op == BC_FUNCC) {
3555 | lwz TMP3, CFUNC:RB->f
3556 } else {
3557 | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH)
3558 }
3559 | add TMP1, RA, NARGS8:RC
3560 | lwz TMP2, L->maxstack
3561 | add RC, BASE, NARGS8:RC
3562 | stw BASE, L->base
3563 | cmplw TMP1, TMP2
3564 | stw RC, L->top
3565 | li_vmstate C
3566 | mtctr TMP3
3567 if (op == BC_FUNCCW) {
3568 | lwz CARG2, CFUNC:RB->f
3569 }
3570 | mr CARG1, L
3571 | bgt ->vm_growstack_c // Need to grow stack.
3572 | st_vmstate
3573 | bctrl // (lua_State *L [, lua_CFunction f])
3574 | // Returns nresults.
3575 | lwz TMP1, L->top
3576 | slwi RD, CRET1, 3
3577 | lwz BASE, L->base
3578 | li_vmstate INTERP
3579 | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
3580 | sub RA, TMP1, RD // RA = L->top - nresults*8
3581 | st_vmstate
3582 | b ->vm_returnc
3583 break;
3584
3585 /* ---------------------------------------------------------------------- */
3586
3587 default:
3588 fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
3589 exit(2);
3590 break;
3591 }
3592}
3593
/* Generate all interpreter code for this architecture.
** Emits the helper subroutines first, then one handler per bytecode
** opcode into the .code_op section. Returns the number of dynamic
** pclabels used (one per opcode, referenced as =>BC_xxx).
*/
static int build_backend(BuildCtx *ctx)
{
  int op;

  /* Reserve BC__MAX dynamic pclabels so =>BC_xxx targets can be defined. */
  dasm_growpc(Dst, BC__MAX);

  build_subroutines(ctx);  /* Helper code goes into .code_sub (emitted last). */

  |.code_op
  for (op = 0; op < BC__MAX; op++)
    build_ins(ctx, (BCOp)op, op);  /* One handler per opcode, in opcode order. */

  return BC__MAX;
}
3608
/* Emit pseudo frame-info for all assembler functions.
** Writes DWARF call-frame information for the whole VM code blob
** (.Lbegin .. +codesz): a .debug_frame section for debuggers and a
** .eh_frame section for runtime unwinding. Only ELF output is handled;
** all other build modes emit nothing.
** NOTE: the strings are exact assembler text -- do not reflow them.
*/
static void emit_asm_debug(BuildCtx *ctx)
{
  int i;
  switch (ctx->mode) {
  case BUILD_elfasm:
    fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
    /* CIE: version 1, no augmentation, code align 1, data align -4,
    ** return-address column 65 (PPC lr). Initial CFA is r1+0.
    */
    fprintf(ctx->fp,
	".Lframe0:\n"
	"\t.long .LECIE0-.LSCIE0\n"
	".LSCIE0:\n"
	"\t.long 0xffffffff\n"
	"\t.byte 0x1\n"
	"\t.string \"\"\n"
	"\t.uleb128 0x1\n"
	"\t.sleb128 -4\n"
	"\t.byte 65\n"
	"\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
	"\t.align 2\n"
	".LECIE0:\n\n");
    /* FDE covering the entire VM: CFA = r1 + CFRAME_SIZE, lr and reg 70
    ** saved at fixed offsets from the CFA.
    */
    fprintf(ctx->fp,
	".LSFDE0:\n"
	"\t.long .LEFDE0-.LASFDE0\n"
	".LASFDE0:\n"
	"\t.long .Lframe0\n"
	"\t.long .Lbegin\n"
	"\t.long %d\n"
	"\t.byte 0xe\n\t.uleb128 %d\n"
	"\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
	"\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
	(int)ctx->codesz, CFRAME_SIZE);
    /* Saved GPRs r14..r31 plus columns 1200+i for each of them.
    ** NOTE(review): 1200+i presumably addresses the SPE upper 32 bits of
    ** r14..r31 -- confirm against the PowerPC/SPE DWARF register numbering.
    */
    for (i = 14; i <= 31; i++)
      fprintf(ctx->fp,
	"\t.byte %d\n\t.uleb128 %d\n"
	"\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
	0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
    fprintf(ctx->fp,
	"\t.align 2\n"
	".LEFDE0:\n\n");
    /* .eh_frame: same CFI, but with a "zPR" augmentation naming the
    ** personality routine lj_err_unwind_dwarf (pcrel|sdata4 encodings).
    */
    fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
    fprintf(ctx->fp,
	".Lframe1:\n"
	"\t.long .LECIE1-.LSCIE1\n"
	".LSCIE1:\n"
	"\t.long 0\n"
	"\t.byte 0x1\n"
	"\t.string \"zPR\"\n"
	"\t.uleb128 0x1\n"
	"\t.sleb128 -4\n"
	"\t.byte 65\n"
	"\t.uleb128 6\n"			/* augmentation length */
	"\t.byte 0x1b\n"			/* pcrel|sdata4 */
	"\t.long lj_err_unwind_dwarf-.\n"
	"\t.byte 0x1b\n"			/* pcrel|sdata4 */
	"\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
	"\t.align 2\n"
	".LECIE1:\n\n");
    fprintf(ctx->fp,
	".LSFDE1:\n"
	"\t.long .LEFDE1-.LASFDE1\n"
	".LASFDE1:\n"
	"\t.long .LASFDE1-.Lframe1\n"
	"\t.long .Lbegin-.\n"
	"\t.long %d\n"
	"\t.uleb128 0\n"			/* augmentation length */
	"\t.byte 0xe\n\t.uleb128 %d\n"
	"\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
	"\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
	(int)ctx->codesz, CFRAME_SIZE);
    for (i = 14; i <= 31; i++)
      fprintf(ctx->fp,
	"\t.byte %d\n\t.uleb128 %d\n"
	"\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
	0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
    fprintf(ctx->fp,
	"\t.align 2\n"
	".LEFDE1:\n\n");
    break;
  default:
    break;
  }
}
3691
diff --git a/src/vm_x64.dasc b/src/vm_x64.dasc
new file mode 100644
index 00000000..7cace399
--- /dev/null
+++ b/src/vm_x64.dasc
@@ -0,0 +1,4907 @@
1|// Low-level VM code for x64 CPUs in LJ_GC64 mode.
2|// Bytecode interpreter, fast functions and helper functions.
3|// Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h
4|
5|.arch x64
6|.section code_op, code_sub
7|
8|.actionlist build_actionlist
9|.globals GLOB_
10|.globalnames globnames
11|.externnames extnames
12|
13|//-----------------------------------------------------------------------
14|
15|.if WIN
16|.define X64WIN, 1 // Windows/x64 calling conventions.
17|.endif
18|
19|// Fixed register assignments for the interpreter.
20|// This is very fragile and has many dependencies. Caveat emptor.
21|.define BASE, rdx // Not C callee-save, refetched anyway.
22|.if X64WIN
23|.define KBASE, rdi // Must be C callee-save.
24|.define PC, rsi // Must be C callee-save.
25|.define DISPATCH, rbx // Must be C callee-save.
26|.define KBASEd, edi
27|.define PCd, esi
28|.define DISPATCHd, ebx
29|.else
30|.define KBASE, r15 // Must be C callee-save.
31|.define PC, rbx // Must be C callee-save.
32|.define DISPATCH, r14 // Must be C callee-save.
33|.define KBASEd, r15d
34|.define PCd, ebx
35|.define DISPATCHd, r14d
36|.endif
37|
38|.define RA, rcx
39|.define RAd, ecx
40|.define RAH, ch
41|.define RAL, cl
42|.define RB, rbp // Must be rbp (C callee-save).
43|.define RBd, ebp
44|.define RC, rax // Must be rax.
45|.define RCd, eax
46|.define RCW, ax
47|.define RCH, ah
48|.define RCL, al
49|.define OP, RBd
50|.define RD, RC
51|.define RDd, RCd
52|.define RDW, RCW
53|.define RDL, RCL
54|.define TMPR, r10
55|.define TMPRd, r10d
56|.define ITYPE, r11
57|.define ITYPEd, r11d
58|
59|.if X64WIN
60|.define CARG1, rcx // x64/WIN64 C call arguments.
61|.define CARG2, rdx
62|.define CARG3, r8
63|.define CARG4, r9
64|.define CARG1d, ecx
65|.define CARG2d, edx
66|.define CARG3d, r8d
67|.define CARG4d, r9d
68|.else
69|.define CARG1, rdi // x64/POSIX C call arguments.
70|.define CARG2, rsi
71|.define CARG3, rdx
72|.define CARG4, rcx
73|.define CARG5, r8
74|.define CARG6, r9
75|.define CARG1d, edi
76|.define CARG2d, esi
77|.define CARG3d, edx
78|.define CARG4d, ecx
79|.define CARG5d, r8d
80|.define CARG6d, r9d
81|.endif
82|
83|// Type definitions. Some of these are only used for documentation.
84|.type L, lua_State
85|.type GL, global_State
86|.type TVALUE, TValue
87|.type GCOBJ, GCobj
88|.type STR, GCstr
89|.type TAB, GCtab
90|.type LFUNC, GCfuncL
91|.type CFUNC, GCfuncC
92|.type PROTO, GCproto
93|.type UPVAL, GCupval
94|.type NODE, Node
95|.type NARGS, int
96|.type TRACE, GCtrace
97|.type SBUF, SBuf
98|
99|// Stack layout while in interpreter. Must match with lj_frame.h.
100|//-----------------------------------------------------------------------
101|.if X64WIN // x64/Windows stack layout
102|
103|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
104|.macro saveregs_
105| push rdi; push rsi; push rbx
106| sub rsp, CFRAME_SPACE
107|.endmacro
108|.macro saveregs
109| push rbp; saveregs_
110|.endmacro
111|.macro restoreregs
112| add rsp, CFRAME_SPACE
113| pop rbx; pop rsi; pop rdi; pop rbp
114|.endmacro
115|
116|.define SAVE_CFRAME, aword [rsp+aword*13]
117|.define SAVE_PC, aword [rsp+aword*12]
118|.define SAVE_L, aword [rsp+aword*11]
119|.define SAVE_ERRF, dword [rsp+dword*21]
120|.define SAVE_NRES, dword [rsp+dword*20]
121|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
122|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
123|.define SAVE_R4, aword [rsp+aword*8]
124|.define SAVE_R3, aword [rsp+aword*7]
125|.define SAVE_R2, aword [rsp+aword*6]
126|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
127|.define ARG5, aword [rsp+aword*4]
128|.define CSAVE_4, aword [rsp+aword*3]
129|.define CSAVE_3, aword [rsp+aword*2]
130|.define CSAVE_2, aword [rsp+aword*1]
131|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter.
132|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
133|
134|.define ARG5d, dword [rsp+dword*8]
135|.define TMP1, ARG5 // TMP1 overlaps ARG5
136|.define TMP1d, ARG5d
137|.define TMP1hi, dword [rsp+dword*9]
138|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
139|
140|//-----------------------------------------------------------------------
141|.else // x64/POSIX stack layout
142|
143|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
144|.macro saveregs_
145| push rbx; push r15; push r14
146|.if NO_UNWIND
147| push r13; push r12
148|.endif
149| sub rsp, CFRAME_SPACE
150|.endmacro
151|.macro saveregs
152| push rbp; saveregs_
153|.endmacro
154|.macro restoreregs
155| add rsp, CFRAME_SPACE
156|.if NO_UNWIND
157| pop r12; pop r13
158|.endif
159| pop r14; pop r15; pop rbx; pop rbp
160|.endmacro
161|
162|//----- 16 byte aligned,
163|.if NO_UNWIND
164|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter.
165|.define SAVE_R4, aword [rsp+aword*10]
166|.define SAVE_R3, aword [rsp+aword*9]
167|.define SAVE_R2, aword [rsp+aword*8]
168|.define SAVE_R1, aword [rsp+aword*7]
169|.define SAVE_RU2, aword [rsp+aword*6]
170|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves.
171|.else
172|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
173|.define SAVE_R4, aword [rsp+aword*8]
174|.define SAVE_R3, aword [rsp+aword*7]
175|.define SAVE_R2, aword [rsp+aword*6]
176|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
177|.endif
178|.define SAVE_CFRAME, aword [rsp+aword*4]
179|.define SAVE_PC, aword [rsp+aword*3]
180|.define SAVE_L, aword [rsp+aword*2]
181|.define SAVE_ERRF, dword [rsp+dword*3]
182|.define SAVE_NRES, dword [rsp+dword*2]
183|.define TMP1, aword [rsp] //<-- rsp while in interpreter.
184|//----- 16 byte aligned
185|
186|.define TMP1d, dword [rsp]
187|.define TMP1hi, dword [rsp+dword*1]
188|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
189|
190|.endif
191|
192|//-----------------------------------------------------------------------
193|
|// Instruction operand headers. After ins_NEXT, RCd holds the upper 16 bits
|// of the instruction word (the D operand); these macros split it into the
|// separate B (high byte) and C (low byte) operands where a format needs them.
|.macro ins_A; .endmacro
|.macro ins_AD; .endmacro
|.macro ins_AJ; .endmacro
|.macro ins_ABC; movzx RBd, RCH; movzx RCd, RCL; .endmacro
|.macro ins_AB_; movzx RBd, RCH; .endmacro
|.macro ins_A_C; movzx RCd, RCL; .endmacro
|.macro ins_AND; not RD; .endmacro
|
|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
|// Loads the 32 bit instruction at PC, extracts OP (byte 0) and RA (byte 1),
|// leaves the D operand in the top half of RCd, advances PC and jumps
|// through the dispatch table (8-byte slots, indexed by opcode).
|.macro ins_NEXT
|  mov RCd, [PC]
|  movzx RAd, RCH
|  movzx OP, RCL
|  add PC, 4
|  shr RCd, 16
|  jmp aword [DISPATCH+OP*8]
|.endmacro
|
|// Instruction footer.
|.if 1
|  // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
|  .define ins_next, ins_NEXT
|  .define ins_next_, ins_NEXT
|.else
|  // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
|  // Affects only certain kinds of benchmarks (and only with -j off).
|  // Around 10%-30% slower on Core2, a lot more slower on P4.
|  .macro ins_next
|    jmp ->ins_next
|  .endmacro
|  .macro ins_next_
|  ->ins_next:
|    ins_NEXT
|  .endmacro
|.endif
|
|// Call decode and dispatch: fetch the callee's first instruction and
|// dispatch to it. PC ends up pointing after that first instruction.
|.macro ins_callt
|  // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-8] = PC
|  mov PC, LFUNC:RB->pc
|  mov RAd, [PC]
|  movzx OP, RAL
|  movzx RAd, RAH
|  add PC, 4
|  jmp aword [DISPATCH+OP*8]
|.endmacro
|
|// Same, but first saves the caller PC into the frame link slot.
|.macro ins_call
|  // BASE = new base, RB = LFUNC, RD = nargs+1
|  mov [BASE-8], PC
|  ins_callt
|.endmacro
247|
248|//-----------------------------------------------------------------------
249|
|// Macros to clear or set tags. In LJ_GC64 mode a TValue packs the type tag
|// into the top 17 bits (tag at bit 47); the payload is the low 47 bits.
|.macro cleartp, reg; shl reg, 17; shr reg, 17; .endmacro  // Zero the tag bits.
|.macro settp, reg, tp  // OR the tag tp (shifted to bit 47) into reg.
|  mov64 ITYPE, ((uint64_t)tp<<47)
|  or reg, ITYPE
|.endmacro
|.macro settp, dst, reg, tp  // dst = reg with tag tp; reg is not modified.
|  mov64 dst, ((uint64_t)tp<<47)
|  or dst, reg
|.endmacro
|.macro setint, reg
|  settp reg, LJ_TISNUM
|.endmacro
|.macro setint, dst, reg
|  settp dst, reg, LJ_TISNUM
|.endmacro
|
|// Macros to test operand types. The tag is recovered by an arithmetic
|// shift right by 47, which sign-extends it for comparison against the
|// (negative) LJ_T* constants.
|.macro checktp_nc, reg, tp, target  // Check tag only; leave reg intact.
|  mov ITYPE, reg
|  sar ITYPE, 47
|  cmp ITYPEd, tp
|  jne target
|.endmacro
|.macro checktp, reg, tp, target  // Check tag and strip it from reg.
|  mov ITYPE, reg
|  cleartp reg
|  sar ITYPE, 47
|  cmp ITYPEd, tp
|  jne target
|.endmacro
|.macro checktptp, src, tp, target  // Check tag of src without modifying it.
|  mov ITYPE, src
|  sar ITYPE, 47
|  cmp ITYPEd, tp
|  jne target
|.endmacro
|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
|
|// Numeric type checks: compare the tag against LJ_TISNUM and branch with
|// the caller-supplied condition (jne = not int, jae = not number, ...).
|.macro checknumx, reg, target, jump
|  mov ITYPE, reg
|  sar ITYPE, 47
|  cmp ITYPEd, LJ_TISNUM
|  jump target
|.endmacro
|.macro checkint, reg, target; checknumx reg, target, jne; .endmacro
|.macro checkinttp, src, target; checknumx src, target, jne; .endmacro
|.macro checknum, reg, target; checknumx reg, target, jae; .endmacro
|.macro checknumtp, src, target; checknumx src, target, jae; .endmacro
|.macro checknumber, src, target; checknumx src, target, ja; .endmacro
|
|// Materialize the canonical tagged false/true primitives.
|.macro mov_false, reg; mov64 reg, (int64_t)~((uint64_t)1<<47); .endmacro
|.macro mov_true, reg; mov64 reg, (int64_t)~((uint64_t)2<<47); .endmacro
|
|// Operand bytes of the current instruction, addressed relative to the
|// already-advanced PC (ins_NEXT added 4). These operands must be used
|// with movzx.
|.define PC_OP, byte [PC-4]
|.define PC_RA, byte [PC-3]
|.define PC_RB, byte [PC-1]
|.define PC_RC, byte [PC-2]
|.define PC_RD, word [PC-2]
|
|// Add a biased jump offset (reg) to PC: PC += (reg - BCBIAS_J) * 4.
|.macro branchPC, reg
|  lea PC, [PC+reg*4-BCBIAS_J*4]
|.endmacro
316|
317|// Assumes DISPATCH is relative to GL.
318#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
319#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
320|
321#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
322|
|// Decrement hashed hotcount and trigger trace recorder if zero.
|// The PC is hashed (shifted and masked) into the hotcount table embedded
|// in the dispatch area; the sub sets carry on underflow, which jb catches.
|.macro hotloop, reg
|  mov reg, PCd
|  shr reg, 1
|  and reg, HOTCOUNT_PCMASK
|  sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
|  jb ->vm_hotloop
|.endmacro
|
|// Same hashing scheme, but counts down calls instead of loop iterations.
|.macro hotcall, reg
|  mov reg, PCd
|  shr reg, 1
|  and reg, HOTCOUNT_PCMASK
|  sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
|  jb ->vm_hotcall
|.endmacro
|
|// Set current VM state. Stored as the bitwise complement of the state code.
|.macro set_vmstate, st
|  mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
|.endmacro
|
|// Pop st1 from the x87 stack, keeping st0.
|.macro fpop1; fstp st1; .endmacro
|
|// Synthesize SSE FP constants: build the bit pattern in a GPR with mov64,
|// then transfer it to the XMM register with movd.
|.macro sseconst_abs, reg, tmp // Synthesize abs mask.
|  mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
|.endmacro
|
|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const.
|  mov64 tmp, U64x(val,00000000); movd reg, tmp
|.endmacro
|
|.macro sseconst_sign, reg, tmp // Synthesize sign mask.
|  sseconst_hi reg, tmp, 80000000
|.endmacro
|.macro sseconst_1, reg, tmp // Synthesize 1.0.
|  sseconst_hi reg, tmp, 3ff00000
|.endmacro
|.macro sseconst_m1, reg, tmp // Synthesize -1.0.
|  sseconst_hi reg, tmp, bff00000
|.endmacro
|.macro sseconst_2p52, reg, tmp // Synthesize 2^52.
|  sseconst_hi reg, tmp, 43300000
|.endmacro
|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51.
|  sseconst_hi reg, tmp, 43380000
|.endmacro
|
|// Move table write barrier back. Overwrites reg.
|// Clears the table's black mark bit and pushes it onto the GC gray-again
|// list so the propagation phase revisits it.
|.macro barrierback, tab, reg
|  and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab)
|  mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
|  mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
|  mov tab->gclist, reg
|.endmacro
379|
380|//-----------------------------------------------------------------------
381
382/* Generate subroutines used by opcodes and other parts of the VM. */
383/* The .code_sub section should be last to help static branch prediction. */
384static void build_subroutines(BuildCtx *ctx)
385{
386 |.code_sub
387 |
388 |//-----------------------------------------------------------------------
389 |//-- Return handling ----------------------------------------------------
390 |//-----------------------------------------------------------------------
391 |
392 |->vm_returnp:
393 | test PCd, FRAME_P
394 | jz ->cont_dispatch
395 |
396 | // Return from pcall or xpcall fast func.
397 | and PC, -8
398 | sub BASE, PC // Restore caller base.
399 | lea RA, [RA+PC-8] // Rebase RA and prepend one result.
400 | mov PC, [BASE-8] // Fetch PC of previous frame.
401 | // Prepending may overwrite the pcall frame, so do it at the end.
402 | mov_true ITYPE
403 | mov aword [BASE+RA], ITYPE // Prepend true to results.
404 |
405 |->vm_returnc:
406 | add RDd, 1 // RD = nresults+1
407 | jz ->vm_unwind_yield
408 | mov MULTRES, RDd
409 | test PC, FRAME_TYPE
410 | jz ->BC_RET_Z // Handle regular return to Lua.
411 |
412 |->vm_return:
413 | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
414 | xor PC, FRAME_C
415 | test PCd, FRAME_TYPE
416 | jnz ->vm_returnp
417 |
418 | // Return to C.
419 | set_vmstate C
420 | and PC, -8
421 | sub PC, BASE
422 | neg PC // Previous base = BASE - delta.
423 |
424 | sub RDd, 1
425 | jz >2
426 |1: // Move results down.
427 | mov RB, [BASE+RA]
428 | mov [BASE-16], RB
429 | add BASE, 8
430 | sub RDd, 1
431 | jnz <1
432 |2:
433 | mov L:RB, SAVE_L
434 | mov L:RB->base, PC
435 |3:
436 | mov RDd, MULTRES
437 | mov RAd, SAVE_NRES // RA = wanted nresults+1
438 |4:
439 | cmp RAd, RDd
440 | jne >6 // More/less results wanted?
441 |5:
442 | sub BASE, 16
443 | mov L:RB->top, BASE
444 |
445 |->vm_leave_cp:
446 | mov RA, SAVE_CFRAME // Restore previous C frame.
447 | mov L:RB->cframe, RA
448 | xor eax, eax // Ok return status for vm_pcall.
449 |
450 |->vm_leave_unw:
451 | restoreregs
452 | ret
453 |
454 |6:
455 | jb >7 // Less results wanted?
456 | // More results wanted. Check stack size and fill up results with nil.
457 | cmp BASE, L:RB->maxstack
458 | ja >8
459 | mov aword [BASE-16], LJ_TNIL
460 | add BASE, 8
461 | add RDd, 1
462 | jmp <4
463 |
464 |7: // Less results wanted.
465 | test RAd, RAd
466 | jz <5 // But check for LUA_MULTRET+1.
467 | sub RA, RD // Negative result!
468 | lea BASE, [BASE+RA*8] // Correct top.
469 | jmp <5
470 |
471 |8: // Corner case: need to grow stack for filling up results.
472 | // This can happen if:
473 | // - A C function grows the stack (a lot).
474 | // - The GC shrinks the stack in between.
475 | // - A return back from a lua_call() with (high) nresults adjustment.
476 | mov L:RB->top, BASE // Save current top held in BASE (yes).
477 | mov MULTRES, RDd // Need to fill only remainder with nil.
478 | mov CARG2d, RAd
479 | mov CARG1, L:RB
480 | call extern lj_state_growstack // (lua_State *L, int n)
481 | mov BASE, L:RB->top // Need the (realloced) L->top in BASE.
482 | jmp <3
483 |
484 |->vm_unwind_yield:
485 | mov al, LUA_YIELD
486 | jmp ->vm_unwind_c_eh
487 |
488 |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
489 | // (void *cframe, int errcode)
490 | mov eax, CARG2d // Error return status for vm_pcall.
491 | mov rsp, CARG1
492 |->vm_unwind_c_eh: // Landing pad for external unwinder.
493 | mov L:RB, SAVE_L
494 | mov GL:RB, L:RB->glref
495 | mov dword GL:RB->vmstate, ~LJ_VMST_C
496 | jmp ->vm_leave_unw
497 |
498 |->vm_unwind_rethrow:
499 |.if not X64WIN
500 | mov CARG1, SAVE_L
501 | mov CARG2d, eax
502 | restoreregs
503 | jmp extern lj_err_throw // (lua_State *L, int errcode)
504 |.endif
505 |
506 |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
507 | // (void *cframe)
508 | and CARG1, CFRAME_RAWMASK
509 | mov rsp, CARG1
510 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
511 | mov L:RB, SAVE_L
512 | mov RDd, 1+1 // Really 1+2 results, incr. later.
513 | mov BASE, L:RB->base
514 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
515 | add DISPATCH, GG_G2DISP
516 | mov PC, [BASE-8] // Fetch PC of previous frame.
517 | mov_false RA
518 | mov RB, [BASE]
519 | mov [BASE-16], RA // Prepend false to error message.
520 | mov [BASE-8], RB
521 | mov RA, -16 // Results start at BASE+RA = BASE-16.
522 | set_vmstate INTERP
523 | jmp ->vm_returnc // Increments RD/MULTRES and returns.
524 |
525 |//-----------------------------------------------------------------------
526 |//-- Grow stack for calls -----------------------------------------------
527 |//-----------------------------------------------------------------------
528 |
529 |->vm_growstack_c: // Grow stack for C function.
530 | mov CARG2d, LUA_MINSTACK
531 | jmp >2
532 |
533 |->vm_growstack_v: // Grow stack for vararg Lua function.
534 | sub RD, 16 // LJ_FR2
535 | jmp >1
536 |
537 |->vm_growstack_f: // Grow stack for fixarg Lua function.
538 | // BASE = new base, RD = nargs+1, RB = L, PC = first PC
539 | lea RD, [BASE+NARGS:RD*8-8]
540 |1:
541 | movzx RAd, byte [PC-4+PC2PROTO(framesize)]
542 | add PC, 4 // Must point after first instruction.
543 | mov L:RB->base, BASE
544 | mov L:RB->top, RD
545 | mov SAVE_PC, PC
546 | mov CARG2, RA
547 |2:
548 | // RB = L, L->base = new base, L->top = top
549 | mov CARG1, L:RB
550 | call extern lj_state_growstack // (lua_State *L, int n)
551 | mov BASE, L:RB->base
552 | mov RD, L:RB->top
553 | mov LFUNC:RB, [BASE-16]
554 | cleartp LFUNC:RB
555 | sub RD, BASE
556 | shr RDd, 3
557 | add NARGS:RDd, 1
558 | // BASE = new base, RB = LFUNC, RD = nargs+1
559 | ins_callt // Just retry the call.
560 |
561 |//-----------------------------------------------------------------------
562 |//-- Entry points into the assembler VM ---------------------------------
563 |//-----------------------------------------------------------------------
564 |
    |// C API entry points. Each saves the C callee registers (saveregs),
    |// builds a C frame on the host stack, links it into L->cframe and sets
    |// up DISPATCH before entering Lua code. During setup PC temporarily
    |// holds the frame type (FRAME_C/FRAME_CP/FRAME_CONT delta).
565 |->vm_resume: // Setup C frame and resume thread.
566 | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
567 | saveregs
568 | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
569 | mov SAVE_L, CARG1
570 | mov RA, CARG2
571 | mov PCd, FRAME_CP
572 | xor RDd, RDd
    | // RD stays 0 below and doubles as the zero constant for the SAVE_*
    | // slots and the status compare.
573 | lea KBASE, [esp+CFRAME_RESUME]
574 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
575 | add DISPATCH, GG_G2DISP
576 | mov SAVE_PC, RD // Any value outside of bytecode is ok.
577 | mov SAVE_CFRAME, RD
578 | mov SAVE_NRES, RDd
579 | mov SAVE_ERRF, RDd
580 | mov L:RB->cframe, KBASE
581 | cmp byte L:RB->status, RDL
    | // RDL is 0 here: status == 0 means the thread was never started.
582 | je >2 // Initial resume (like a call).
583 |
584 | // Resume after yield (like a return).
585 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
586 | set_vmstate INTERP
587 | mov byte L:RB->status, RDL
588 | mov BASE, L:RB->base
589 | mov RD, L:RB->top
590 | sub RD, RA
591 | shr RDd, 3
592 | add RDd, 1 // RD = nresults+1
593 | sub RA, BASE // RA = resultofs
594 | mov PC, [BASE-8]
595 | mov MULTRES, RDd
596 | test PCd, FRAME_TYPE
597 | jz ->BC_RET_Z
598 | jmp ->vm_return
599 |
600 |->vm_pcall: // Setup protected C frame and enter VM.
601 | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
602 | saveregs
603 | mov PCd, FRAME_CP
604 | mov SAVE_ERRF, CARG4d
605 | jmp >1
606 |
607 |->vm_call: // Setup C frame and enter VM.
608 | // (lua_State *L, TValue *base, int nres1)
609 | saveregs
610 | mov PCd, FRAME_C
611 |
612 |1: // Entry point for vm_pcall above (PC = ftype).
613 | mov SAVE_NRES, CARG3d
614 | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
615 | mov SAVE_L, CARG1
616 | mov RA, CARG2
617 |
618 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
619 | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
620 | mov SAVE_CFRAME, KBASE
621 | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
622 | add DISPATCH, GG_G2DISP
623 | mov L:RB->cframe, rsp
624 |
625 |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype).
626 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
627 | set_vmstate INTERP
628 | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
629 | add PC, RA
630 | sub PC, BASE // PC = frame delta + frame type
631 |
632 | mov RD, L:RB->top
633 | sub RD, RA
634 | shr NARGS:RDd, 3
635 | add NARGS:RDd, 1 // RD = nargs+1
636 |
637 |->vm_call_dispatch:
638 | mov LFUNC:RB, [RA-16]
639 | checkfunc LFUNC:RB, ->vmeta_call // Ensure KBASE defined and != BASE.
640 |
641 |->vm_call_dispatch_f:
642 | mov BASE, RA
643 | ins_call
644 | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
645 |
646 |->vm_cpcall: // Setup protected C frame, call C.
647 | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
648 | saveregs
649 | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
650 | mov SAVE_L, CARG1
651 | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
652 |
653 | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
654 | sub KBASE, L:RB->top
655 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
656 | mov SAVE_ERRF, 0 // No error function.
657 | mov SAVE_NRES, KBASEd // Neg. delta means cframe w/o frame.
658 | add DISPATCH, GG_G2DISP
659 | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
660 |
661 | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
662 | mov SAVE_CFRAME, KBASE
663 | mov L:RB->cframe, rsp
664 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
665 |
666 | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
667 | // TValue * (new base) or NULL returned in eax (RC).
668 | test RC, RC
669 | jz ->vm_leave_cp // No base? Just remove C frame.
670 | mov RA, RC
671 | mov PCd, FRAME_CP
672 | jmp <2 // Else continue with the call.
673 |
674 |//-----------------------------------------------------------------------
675 |//-- Metamethod handling ------------------------------------------------
676 |//-----------------------------------------------------------------------
677 |
678 |//-- Continuation dispatch ----------------------------------------------
679 |
    |// Return dispatch for metamethod continuations. Below the meta base
    |// the stack holds [cont|PC] at -24 and the continuation address at
    |// -32; the continuation is jumped to with BASE/RC/RB set up.
680 |->cont_dispatch:
681 | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
682 | add RA, BASE
683 | and PC, -8
684 | mov RB, BASE
685 | sub BASE, PC // Restore caller BASE.
686 | mov aword [RA+RD*8-8], LJ_TNIL // Ensure one valid arg.
687 | mov RC, RA // ... in [RC]
688 | mov PC, [RB-24] // Restore PC from [cont|PC].
689 | mov RA, qword [RB-32] // May be negative on WIN64 with debug.
690 |.if FFI
691 | cmp RA, 1
    | // Continuation values 0 and 1 are special markers, not addresses.
692 | jbe >1
693 |.endif
694 | mov LFUNC:KBASE, [BASE-16]
695 | cleartp LFUNC:KBASE
696 | mov KBASE, LFUNC:KBASE->pc
697 | mov KBASE, [KBASE+PC2PROTO(k)]
698 | // BASE = base, RC = result, RB = meta base
699 | jmp RA // Jump to continuation.
700 |
701 |.if FFI
702 |1:
703 | je ->cont_ffi_callback // cont = 1: return from FFI callback.
704 | // cont = 0: Tail call from C function.
705 | sub RB, BASE
706 | shr RBd, 3
707 | lea RDd, [RBd-3]
708 | jmp ->vm_call_tail
709 |.endif
710 |
    |// Continuation for BC_CAT: either the whole concatenation is done
    |// (metamethod result lands exactly at the target slot -> cont_ra) or
    |// the result is stored back and BC_CAT_Z is re-entered for the rest.
711 |->cont_cat: // BASE = base, RC = result, RB = mbase
712 | movzx RAd, PC_RB
713 | sub RB, 32
714 | lea RA, [BASE+RA*8]
715 | sub RA, RB
716 | je ->cont_ra
717 | neg RA
718 | shr RAd, 3
719 |.if X64WIN
720 | mov CARG3d, RAd
721 | mov L:CARG1, SAVE_L
722 | mov L:CARG1->base, BASE
723 | mov RC, [RC]
724 | mov [RB], RC
725 | mov CARG2, RB
726 |.else
727 | mov L:CARG1, SAVE_L
728 | mov L:CARG1->base, BASE
729 | mov CARG3d, RAd
730 | mov RA, [RC]
731 | mov [RB], RA
732 | mov CARG2, RB
733 |.endif
734 | jmp ->BC_CAT_Z
735 |
736 |//-- Table indexing metamethods -----------------------------------------
737 |
    |// __index fallbacks. The three entry points only differ in how they
    |// materialize TValue pointers for the table (RB) and key (RC) before
    |// the shared lj_meta_tget call at label 2.
738 |->vmeta_tgets:
739 | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
740 | mov TMP1, STR:RC
741 | lea RC, TMP1
742 | cmp PC_OP, BC_GGET
743 | jne >1
744 | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
745 | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
746 | mov [RB], TAB:RA
747 | jmp >2
748 |
749 |->vmeta_tgetb:
    | // Key is an 8-bit literal from the instruction; box it in TMP1.
750 | movzx RCd, PC_RC
751 |.if DUALNUM
752 | setint RC
753 | mov TMP1, RC
754 |.else
755 | cvtsi2sd xmm0, RCd
756 | movsd TMP1, xmm0
757 |.endif
758 | lea RC, TMP1
759 | jmp >1
760 |
761 |->vmeta_tgetv:
762 | movzx RCd, PC_RC // Reload TValue *k from RC.
763 | lea RC, [BASE+RC*8]
764 |1:
765 | movzx RBd, PC_RB // Reload TValue *t from RB.
766 | lea RB, [BASE+RB*8]
767 |2:
768 | mov L:CARG1, SAVE_L
769 | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
770 | mov CARG2, RB
771 | mov CARG3, RC
772 | mov L:RB, L:CARG1
773 | mov SAVE_PC, PC
774 | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
775 | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
776 | mov BASE, L:RB->base
777 | test RC, RC
778 | jz >3
    |// Generic continuation: copy the single result to slot RA.
779 |->cont_ra: // BASE = base, RC = result
780 | movzx RAd, PC_RA
781 | mov RB, [RC]
782 | mov [BASE+RA*8], RB
783 | ins_next
784 |
785 |3: // Call __index metamethod.
786 | // BASE = base, L->top = new base, stack = cont/func/t/k
787 | mov RA, L:RB->top
788 | mov [RA-24], PC // [cont|PC]
789 | lea PC, [RA+FRAME_CONT]
790 | sub PC, BASE
791 | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
792 | mov NARGS:RDd, 2+1 // 2 args for func(t, k).
793 | cleartp LFUNC:RB
794 | jmp ->vm_call_dispatch_f
795 |
    |// Fallback for BC_TGETR: raw integer-keyed lookup via C helper.
796 |->vmeta_tgetr:
797 | mov CARG1, TAB:RB
798 | mov RB, BASE // Save BASE.
799 | mov CARG2d, RCd // Caveat: CARG2 == BASE
800 | call extern lj_tab_getinth // (GCtab *t, int32_t key)
801 | // cTValue * or NULL returned in eax (RC).
802 | movzx RAd, PC_RA
803 | mov BASE, RB // Restore BASE.
804 | test RC, RC
805 | jnz ->BC_TGETR_Z
806 | mov ITYPE, LJ_TNIL
807 | jmp ->BC_TGETR2_Z
808 |
809 |//-----------------------------------------------------------------------
810 |
    |// __newindex fallbacks. Mirrors the vmeta_tget* entry points above,
    |// but calls lj_meta_tset and stores the value on the fast-return path.
811 |->vmeta_tsets:
812 | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
813 | mov TMP1, STR:RC
814 | lea RC, TMP1
815 | cmp PC_OP, BC_GSET
816 | jne >1
817 | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
818 | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
819 | mov [RB], TAB:RA
820 | jmp >2
821 |
822 |->vmeta_tsetb:
    | // Key is an 8-bit literal from the instruction; box it in TMP1.
823 | movzx RCd, PC_RC
824 |.if DUALNUM
825 | setint RC
826 | mov TMP1, RC
827 |.else
828 | cvtsi2sd xmm0, RCd
829 | movsd TMP1, xmm0
830 |.endif
831 | lea RC, TMP1
832 | jmp >1
833 |
834 |->vmeta_tsetv:
835 | movzx RCd, PC_RC // Reload TValue *k from RC.
836 | lea RC, [BASE+RC*8]
837 |1:
838 | movzx RBd, PC_RB // Reload TValue *t from RB.
839 | lea RB, [BASE+RB*8]
840 |2:
841 | mov L:CARG1, SAVE_L
842 | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
843 | mov CARG2, RB
844 | mov CARG3, RC
845 | mov L:RB, L:CARG1
846 | mov SAVE_PC, PC
847 | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
848 | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
849 | mov BASE, L:RB->base
850 | test RC, RC
851 | jz >3
852 | // NOBARRIER: lj_meta_tset ensures the table is not black.
853 | movzx RAd, PC_RA
854 | mov RB, [BASE+RA*8]
855 | mov [RC], RB
    |// Generic continuation: nothing to copy, just fetch the next ins.
856 |->cont_nop: // BASE = base, (RC = result)
857 | ins_next
858 |
859 |3: // Call __newindex metamethod.
860 | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
861 | mov RA, L:RB->top
862 | mov [RA-24], PC // [cont|PC]
863 | movzx RCd, PC_RA
864 | // Copy value to third argument.
865 | mov RB, [BASE+RC*8]
866 | mov [RA+16], RB
867 | lea PC, [RA+FRAME_CONT]
868 | sub PC, BASE
869 | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
870 | mov NARGS:RDd, 3+1 // 3 args for func(t, k, v).
871 | cleartp LFUNC:RB
872 | jmp ->vm_call_dispatch_f
873 |
    |// Fallback for BC_TSETR: raw integer-keyed store via C helper.
874 |->vmeta_tsetr:
875 |.if X64WIN
876 | mov L:CARG1, SAVE_L
877 | mov CARG3d, RCd
878 | mov L:CARG1->base, BASE
879 | xchg CARG2, TAB:RB // Caveat: CARG2 == BASE.
880 |.else
881 | mov L:CARG1, SAVE_L
882 | mov CARG2, TAB:RB
883 | mov L:CARG1->base, BASE
884 | mov RB, BASE // Save BASE.
885 | mov CARG3d, RCd // Caveat: CARG3 == BASE.
886 |.endif
887 | mov SAVE_PC, PC
888 | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
889 | // TValue * returned in eax (RC).
890 | movzx RAd, PC_RA
891 | mov BASE, RB // Restore BASE.
892 | jmp ->BC_TSETR_Z
893 |
894 |//-- Comparison metamethods ---------------------------------------------
895 |
    |// Comparison metamethod fallbacks. The C helpers return 0/1 for a
    |// decided comparison or a TValue * (> 1) when a metamethod must be
    |// called via vmeta_binop; labels 3-6 dispatch on that result.
896 |->vmeta_comp:
897 | movzx RDd, PC_RD
898 | movzx RAd, PC_RA
899 | mov L:RB, SAVE_L
900 | mov L:RB->base, BASE // Caveat: CARG2/CARG3 == BASE.
901 |.if X64WIN
902 | lea CARG3, [BASE+RD*8]
903 | lea CARG2, [BASE+RA*8]
904 |.else
905 | lea CARG2, [BASE+RA*8]
906 | lea CARG3, [BASE+RD*8]
907 |.endif
908 | mov CARG1, L:RB // Caveat: CARG1/CARG4 == RA.
909 | movzx CARG4d, PC_OP
910 | mov SAVE_PC, PC
911 | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
912 | // 0/1 or TValue * (metamethod) returned in eax (RC).
913 |3:
914 | mov BASE, L:RB->base
915 | cmp RC, 1
    | // CF/ZF from this compare drive both branches below: ja = call
    | // metamethod, jb = false (skip branch), je = true (take branch).
916 | ja ->vmeta_binop
917 |4:
918 | lea PC, [PC+4]
919 | jb >6
920 |5:
921 | movzx RDd, PC_RD
922 | branchPC RD
923 |6:
924 | ins_next
925 |
926 |->cont_condt: // BASE = base, RC = result
927 | add PC, 4
928 | mov ITYPE, [RC]
929 | sar ITYPE, 47
930 | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is true.
931 | jb <5
932 | jmp <6
933 |
934 |->cont_condf: // BASE = base, RC = result
935 | mov ITYPE, [RC]
936 | sar ITYPE, 47
937 | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is false.
938 | jmp <4
939 |
    |// __eq fallback for two objects of the same type (tables/udata).
940 |->vmeta_equal:
941 | cleartp TAB:RD
942 | sub PC, 4
943 |.if X64WIN
944 | mov CARG3, RD
945 | mov CARG4d, RBd
946 | mov L:RB, SAVE_L
947 | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
948 | mov CARG2, RA
949 | mov CARG1, L:RB // Caveat: CARG1 == RA.
950 |.else
951 | mov CARG2, RA
952 | mov CARG4d, RBd // Caveat: CARG4 == RA.
953 | mov L:RB, SAVE_L
954 | mov L:RB->base, BASE // Caveat: CARG3 == BASE.
955 | mov CARG3, RD
956 | mov CARG1, L:RB
957 |.endif
958 | mov SAVE_PC, PC
959 | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
960 | // 0/1 or TValue * (metamethod) returned in eax (RC).
961 | jmp <3
962 |
    |// __eq fallback when at least one operand is a cdata (FFI only).
963 |->vmeta_equal_cd:
964 |.if FFI
965 | sub PC, 4
966 | mov L:RB, SAVE_L
967 | mov L:RB->base, BASE
968 | mov CARG1, L:RB
969 | mov CARG2d, dword [PC-4]
970 | mov SAVE_PC, PC
971 | call extern lj_meta_equal_cd // (lua_State *L, BCIns ins)
972 | // 0/1 or TValue * (metamethod) returned in eax (RC).
973 | jmp <3
974 |.endif
975 |
    |// Fallback for BC_ISTYPE/BC_ISNUM checks; helper throws on mismatch.
976 |->vmeta_istype:
977 | mov L:RB, SAVE_L
978 | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
979 | mov CARG2d, RAd
980 | mov CARG3d, RDd
981 | mov L:CARG1, L:RB
982 | mov SAVE_PC, PC
983 | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
984 | mov BASE, L:RB->base
985 | jmp <6
986 |
987 |//-- Arithmetic metamethods ---------------------------------------------
988 |
    |// Arithmetic metamethod fallbacks. The entry points only differ in
    |// where the two operands live: variable (BASE slot) vs. number
    |// constant (KBASE slot). All converge on the lj_meta_arith call.
989 |->vmeta_arith_vno:
990 |.if DUALNUM
991 | movzx RBd, PC_RB
992 | movzx RCd, PC_RC
993 |.endif
994 |->vmeta_arith_vn:
995 | lea RC, [KBASE+RC*8]
996 | jmp >1
997 |
998 |->vmeta_arith_nvo:
999 |.if DUALNUM
1000 | movzx RBd, PC_RB
1001 | movzx RCd, PC_RC
1002 |.endif
1003 |->vmeta_arith_nv:
    | // Operands are swapped for the *NV variants.
1004 | lea TMPR, [KBASE+RC*8]
1005 | lea RC, [BASE+RB*8]
1006 | mov RB, TMPR
1007 | jmp >2
1008 |
1009 |->vmeta_unm:
    | // Unary minus: both operand pointers reference the same slot.
1010 | lea RC, [BASE+RD*8]
1011 | mov RB, RC
1012 | jmp >2
1013 |
1014 |->vmeta_arith_vvo:
1015 |.if DUALNUM
1016 | movzx RBd, PC_RB
1017 | movzx RCd, PC_RC
1018 |.endif
1019 |->vmeta_arith_vv:
1020 | lea RC, [BASE+RC*8]
1021 |1:
1022 | lea RB, [BASE+RB*8]
1023 |2:
1024 | lea RA, [BASE+RA*8]
1025 |.if X64WIN
1026 | mov CARG3, RB
1027 | mov CARG4, RC
1028 | movzx RCd, PC_OP
1029 | mov ARG5d, RCd
1030 | mov L:RB, SAVE_L
1031 | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
1032 | mov CARG2, RA
1033 | mov CARG1, L:RB // Caveat: CARG1 == RA.
1034 |.else
1035 | movzx CARG5d, PC_OP
1036 | mov CARG2, RA
1037 | mov CARG4, RC // Caveat: CARG4 == RA.
1038 | mov L:CARG1, SAVE_L
1039 | mov L:CARG1->base, BASE // Caveat: CARG3 == BASE.
1040 | mov CARG3, RB
1041 | mov L:RB, L:CARG1
1042 |.endif
1043 | mov SAVE_PC, PC
1044 | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
1045 | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
1046 | mov BASE, L:RB->base
1047 | test RC, RC
1048 | jz ->cont_nop
1049 |
1050 | // Call metamethod for binary op.
1051 |->vmeta_binop:
1052 | // BASE = base, RC = new base, stack = cont/func/o1/o2
1053 | mov RA, RC
1054 | sub RC, BASE
1055 | mov [RA-24], PC // [cont|PC]
1056 | lea PC, [RC+FRAME_CONT]
1057 | mov NARGS:RDd, 2+1 // 2 args for func(o1, o2).
1058 | jmp ->vm_call_dispatch
1059 |
    |// __len fallback. Under LJ_52 a non-NULL return means a real __len
    |// metamethod call; otherwise retry the raw length in BC_LEN_Z.
1060 |->vmeta_len:
1061 | movzx RDd, PC_RD
1062 | mov L:RB, SAVE_L
1063 | mov L:RB->base, BASE
1064 | lea CARG2, [BASE+RD*8] // Caveat: CARG2 == BASE
1065 | mov L:CARG1, L:RB
1066 | mov SAVE_PC, PC
1067 | call extern lj_meta_len // (lua_State *L, TValue *o)
1068 | // NULL (retry) or TValue * (metamethod) returned in eax (RC).
1069 | mov BASE, L:RB->base
1070#if LJ_52
1071 | test RC, RC
1072 | jne ->vmeta_binop // Binop call for compatibility.
1073 | movzx RDd, PC_RD
1074 | mov TAB:CARG1, [BASE+RD*8]
1075 | cleartp TAB:CARG1
1076 | jmp ->BC_LEN_Z
1077#else
1078 | jmp ->vmeta_binop // Binop call for compatibility.
1079#endif
1080 |
1081 |//-- Call metamethod ----------------------------------------------------
1082 |
    |// Resolve __call: lj_meta_call rewrites the stack so the metamethod
    |// becomes the called function; the call is then retried with one
    |// extra argument (the original object).
1083 |->vmeta_call_ra:
1084 | lea RA, [BASE+RA*8+16]
1085 |->vmeta_call: // Resolve and call __call metamethod.
1086 | // BASE = old base, RA = new base, RC = nargs+1, PC = return
1087 | mov TMP1d, NARGS:RDd // Save RA, RC for us.
1088 | mov RB, RA
1089 |.if X64WIN
1090 | mov L:TMPR, SAVE_L
1091 | mov L:TMPR->base, BASE // Caveat: CARG2 is BASE.
1092 | lea CARG2, [RA-16]
1093 | lea CARG3, [RA+NARGS:RD*8-8]
1094 | mov CARG1, L:TMPR // Caveat: CARG1 is RA.
1095 |.else
1096 | mov L:CARG1, SAVE_L
1097 | mov L:CARG1->base, BASE // Caveat: CARG3 is BASE.
1098 | lea CARG2, [RA-16]
1099 | lea CARG3, [RA+NARGS:RD*8-8]
1100 |.endif
1101 | mov SAVE_PC, PC
1102 | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
1103 | mov RA, RB
1104 | mov L:RB, SAVE_L
1105 | mov BASE, L:RB->base
1106 | mov NARGS:RDd, TMP1d
1107 | mov LFUNC:RB, [RA-16]
1108 | add NARGS:RDd, 1
    | // +1 argument: the original callee object is now the first arg.
1109 | // This is fragile. L->base must not move, KBASE must always be defined.
1110 | cmp KBASE, BASE // Continue with CALLT if flag set.
1111 | je ->BC_CALLT_Z
1112 | cleartp LFUNC:RB
1113 | mov BASE, RA
1114 | ins_call // Otherwise call resolved metamethod.
1115 |
1116 |//-- Argument coercion for 'for' statement ------------------------------
1117 |
    |// Coerce/validate the three 'for' control slots, then re-execute the
    |// (possibly JIT-patched) FORI/JFORI via its static dispatch entry.
1118 |->vmeta_for:
1119 | mov L:RB, SAVE_L
1120 | mov L:RB->base, BASE
1121 | mov CARG2, RA // Caveat: CARG2 == BASE
1122 | mov L:CARG1, L:RB // Caveat: CARG1 == RA
1123 | mov SAVE_PC, PC
1124 | call extern lj_meta_for // (lua_State *L, TValue *base)
1125 | mov BASE, L:RB->base
1126 | mov RCd, [PC-4]
1127 | movzx RAd, RCH
1128 | movzx OP, RCL
1129 | shr RCd, 16
1130 | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI.
1131 |
1132 |//-----------------------------------------------------------------------
1133 |//-- Fast functions -----------------------------------------------------
1134 |//-----------------------------------------------------------------------
1135 |
    |// Fast-function prologue macros. Each declares the ->ff_<name> label;
    |// the _1/_2 variants additionally bail out to the generic fallback if
    |// fewer than 1 or 2 arguments were passed (NARGS:RD holds nargs+1).
1136 |.macro .ffunc, name
1137 |->ff_ .. name:
1138 |.endmacro
1139 |
1140 |.macro .ffunc_1, name
1141 |->ff_ .. name:
1142 | cmp NARGS:RDd, 1+1; jb ->fff_fallback
1143 |.endmacro
1144 |
1145 |.macro .ffunc_2, name
1146 |->ff_ .. name:
1147 | cmp NARGS:RDd, 2+1; jb ->fff_fallback
1148 |.endmacro
1149 |
    |// One-number-argument prologue: checks arg 1 is a number and loads it
    |// into xmm0 with the given SSE load/convert op.
1150 |.macro .ffunc_n, name, op
1151 | .ffunc_1 name
1152 | checknumtp [BASE], ->fff_fallback
1153 | op xmm0, qword [BASE]
1154 |.endmacro
1155 |
1156 |.macro .ffunc_n, name
1157 | .ffunc_n name, movsd
1158 |.endmacro
1159 |
    |// Two-number-argument prologue: args 1 and 2 loaded into xmm0/xmm1.
1160 |.macro .ffunc_nn, name
1161 | .ffunc_2 name
1162 | checknumtp [BASE], ->fff_fallback
1163 | checknumtp [BASE+8], ->fff_fallback
1164 | movsd xmm0, qword [BASE]
1165 | movsd xmm1, qword [BASE+8]
1166 |.endmacro
1167 |
1168 |// Inlined GC threshold check. Caveat: uses label 1.
1169 |.macro ffgccheck
1170 | mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
1171 | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
1172 | jb >1
1173 | call ->fff_gcstep
1174 |1:
1175 |.endmacro
1176 |
1177 |//-- Base library: checks -----------------------------------------------
1178 |
    |// assert(v, ...): falls back (raises) unless v is truthy; on success
    |// returns all arguments, shifted down over the function slot.
1179 |.ffunc_1 assert
1180 | mov ITYPE, [BASE]
1181 | mov RB, ITYPE
1182 | sar ITYPE, 47
1183 | cmp ITYPEd, LJ_TISTRUECOND; jae ->fff_fallback
1184 | mov PC, [BASE-8]
1185 | mov MULTRES, RDd
1186 | mov RB, [BASE]
1187 | mov [BASE-16], RB
1188 | sub RDd, 2
1189 | jz >2
1190 | mov RA, BASE
1191 |1:
    | // Copy the remaining args down by two slots, one at a time.
1192 | add RA, 8
1193 | mov RB, [RA]
1194 | mov [RA-16], RB
1195 | sub RDd, 1
1196 | jnz <1
1197 |2:
1198 | mov RDd, MULTRES
1199 | jmp ->fff_res_
1200 |
    |// type(v): maps the internal tag to an index into the closure's
    |// upvalues, which hold the interned type-name strings.
1201 |.ffunc_1 type
1202 | mov RC, [BASE]
1203 | sar RC, 47
1204 | mov RBd, LJ_TISNUM
1205 | cmp RCd, RBd
1206 | cmovb RCd, RBd
    | // Clamp all number tags to LJ_TISNUM, then invert to get the index.
1207 | not RCd
1208 |2:
1209 | mov CFUNC:RB, [BASE-16]
1210 | cleartp CFUNC:RB
1211 | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
1212 | mov PC, [BASE-8]
1213 | settp STR:RC, LJ_TSTR
1214 | mov [BASE-16], STR:RC
1215 | jmp ->fff_res1
1216 |
1217 |//-- Base library: getters and setters ---------------------------------
1218 |
    |// getmetatable(v): returns mt, or mt.__metatable if that key is set.
1219 |.ffunc_1 getmetatable
1220 | mov TAB:RB, [BASE]
1221 | mov PC, [BASE-8]
1222 | checktab TAB:RB, >6
1223 |1: // Field metatable must be at same offset for GCtab and GCudata!
1224 | mov TAB:RB, TAB:RB->metatable
1225 |2:
1226 | test TAB:RB, TAB:RB
1227 | mov aword [BASE-16], LJ_TNIL
1228 | jz ->fff_res1
1229 | settp TAB:RC, TAB:RB, LJ_TTAB
1230 | mov [BASE-16], TAB:RC // Store metatable as default result.
1231 | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+8*(GCROOT_MMNAME+MM_metatable)]
1232 | mov RAd, TAB:RB->hmask
1233 | and RAd, STR:RC->sid
1234 | settp STR:RC, LJ_TSTR
1235 | imul RAd, #NODE
1236 | add NODE:RA, TAB:RB->node
    | // Inlined hash lookup of the "__metatable" key in mt's hash part.
1237 |3: // Rearranged logic, because we expect _not_ to find the key.
1238 | cmp NODE:RA->key, STR:RC
1239 | je >5
1240 |4:
1241 | mov NODE:RA, NODE:RA->next
1242 | test NODE:RA, NODE:RA
1243 | jnz <3
1244 | jmp ->fff_res1 // Not found, keep default result.
1245 |5:
1246 | mov RB, NODE:RA->val
1247 | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value.
1248 | mov [BASE-16], RB // Return value of mt.__metatable.
1249 | jmp ->fff_res1
1250 |
1251 |6:
    | // Non-table argument: use the per-type base metatable from gcroot.
1252 | cmp ITYPEd, LJ_TUDATA; je <1
1253 | cmp ITYPEd, LJ_TISNUM; ja >7
1254 | mov ITYPEd, LJ_TISNUM
1255 |7:
1256 | not ITYPEd
1257 | mov TAB:RB, [DISPATCH+ITYPE*8+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
1258 | jmp <2
1259 |
    |// setmetatable(t, mt): fast path only when t has no metatable yet.
1260 |.ffunc_2 setmetatable
1261 | mov TAB:RB, [BASE]
1262 | mov TAB:TMPR, TAB:RB
1263 | checktab TAB:RB, ->fff_fallback
1264 | // Fast path: no mt for table yet and not clearing the mt.
1265 | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
1266 | mov TAB:RA, [BASE+8]
1267 | checktab TAB:RA, ->fff_fallback
1268 | mov TAB:RB->metatable, TAB:RA
1269 | mov PC, [BASE-8]
1270 | mov [BASE-16], TAB:TMPR // Return original table.
1271 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
1272 | jz >1
1273 | // Possible write barrier. Table is black, but skip iswhite(mt) check.
1274 | barrierback TAB:RB, RC
1275 |1:
1276 | jmp ->fff_res1
1277 |
    |// rawget(t, k): direct lj_tab_get lookup, no metamethods.
1278 |.ffunc_2 rawget
1279 |.if X64WIN
1280 | mov TAB:RA, [BASE]
1281 | checktab TAB:RA, ->fff_fallback
1282 | mov RB, BASE // Save BASE.
1283 | lea CARG3, [BASE+8]
1284 | mov CARG2, TAB:RA // Caveat: CARG2 == BASE.
1285 | mov CARG1, SAVE_L
1286 |.else
1287 | mov TAB:CARG2, [BASE]
1288 | checktab TAB:CARG2, ->fff_fallback
1289 | mov RB, BASE // Save BASE.
1290 | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE.
1291 | mov CARG1, SAVE_L
1292 |.endif
1293 | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
1294 | // cTValue * returned in eax (RD).
1295 | mov BASE, RB // Restore BASE.
1296 | // Copy table slot.
1297 | mov RB, [RD]
1298 | mov PC, [BASE-8]
1299 | mov [BASE-16], RB
1300 | jmp ->fff_res1
1301 |
1302 |//-- Base library: conversions ------------------------------------------
1303 |
    |// tonumber(v): inline fast path only for values that are already
    |// numbers; strings and base arguments go through the fallback.
1304 |.ffunc tonumber
1305 | // Only handles the number case inline (without a base argument).
1306 | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
1307 | mov RB, [BASE]
1308 | checknumber RB, ->fff_fallback
1309 | mov PC, [BASE-8]
1310 | mov [BASE-16], RB
1311 | jmp ->fff_res1
1312 |
    |// tostring(v): strings pass through; numbers are formatted via a C
    |// helper unless the number base metatable is set.
1313 |.ffunc_1 tostring
1314 | // Only handles the string or number case inline.
1315 | mov PC, [BASE-8]
1316 | mov STR:RB, [BASE]
1317 | checktp_nc STR:RB, LJ_TSTR, >3
1318 | // A __tostring method in the string base metatable is ignored.
1319 |2:
1320 | mov [BASE-16], STR:RB
1321 | jmp ->fff_res1
1322 |3: // Handle numbers inline, unless a number base metatable is present.
1323 | cmp ITYPEd, LJ_TISNUM; ja ->fff_fallback_1
1324 | cmp aword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
1325 | jne ->fff_fallback
1326 | ffgccheck // Caveat: uses label 1.
1327 | mov L:RB, SAVE_L
1328 | mov L:RB->base, BASE // Add frame since C call can throw.
1329 | mov SAVE_PC, PC // Redundant (but a defined value).
1330 |.if not X64WIN
1331 | mov CARG2, BASE // Otherwise: CARG2 == BASE
1332 |.endif
1333 | mov L:CARG1, L:RB
1334 |.if DUALNUM
1335 | call extern lj_strfmt_number // (lua_State *L, cTValue *o)
1336 |.else
1337 | call extern lj_strfmt_num // (lua_State *L, lua_Number *np)
1338 |.endif
1339 | // GCstr returned in eax (RD).
1340 | mov BASE, L:RB->base
1341 | settp STR:RB, RD, LJ_TSTR
1342 | jmp <2
1343 |
1344 |//-- Base library: iterators -------------------------------------------
1345 |
    |// next(t [,k]): full table traversal via lj_tab_next. A missing key
    |// argument is treated as nil (label 2).
1346 |.ffunc_1 next
1347 | je >2 // Missing 2nd arg?
1348 |1:
1349 |.if X64WIN
1350 | mov RA, [BASE]
1351 | checktab RA, ->fff_fallback
1352 |.else
1353 | mov CARG2, [BASE]
1354 | checktab CARG2, ->fff_fallback
1355 |.endif
1356 | mov L:RB, SAVE_L
1357 | mov L:RB->base, BASE // Add frame since C call can throw.
1358 | mov L:RB->top, BASE // Dummy frame length is ok.
1359 | mov PC, [BASE-8]
1360 |.if X64WIN
1361 | lea CARG3, [BASE+8]
1362 | mov CARG2, RA // Caveat: CARG2 == BASE.
1363 | mov CARG1, L:RB
1364 |.else
1365 | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE.
1366 | mov CARG1, L:RB
1367 |.endif
1368 | mov SAVE_PC, PC // Needed for ITERN fallback.
1369 | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
1370 | // Flag returned in eax (RD).
1371 | mov BASE, L:RB->base
1372 | test RDd, RDd; jz >3 // End of traversal?
1373 | // Copy key and value to results.
1374 | mov RB, [BASE+8]
1375 | mov RD, [BASE+16]
1376 | mov [BASE-16], RB
1377 | mov [BASE-8], RD
    |// Shared epilogue: return exactly two results.
1378 |->fff_res2:
1379 | mov RDd, 1+2
1380 | jmp ->fff_res
1381 |2: // Set missing 2nd arg to nil.
1382 | mov aword [BASE+8], LJ_TNIL
1383 | jmp <1
1384 |3: // End of traversal: return nil.
1385 | mov aword [BASE-16], LJ_TNIL
1386 | jmp ->fff_res1
1387 |
    |// pairs(t): returns the iterator from upvalue 0 (next), t and nil.
1388 |.ffunc_1 pairs
1389 | mov TAB:RB, [BASE]
1390 | mov TMPR, TAB:RB
1391 | checktab TAB:RB, ->fff_fallback
1392#if LJ_52
1393 | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
1394#endif
1395 | mov CFUNC:RD, [BASE-16]
1396 | cleartp CFUNC:RD
1397 | mov CFUNC:RD, CFUNC:RD->upvalue[0]
1398 | settp CFUNC:RD, LJ_TFUNC
1399 | mov PC, [BASE-8]
1400 | mov [BASE-16], CFUNC:RD
1401 | mov [BASE-8], TMPR
1402 | mov aword [BASE], LJ_TNIL
1403 | mov RDd, 1+3
1404 | jmp ->fff_res
1405 |
    |// ipairs iterator: t[i+1], with an inline array-part fast path and a
    |// C helper (lj_tab_getinth) for keys in the hash part.
1406 |.ffunc_2 ipairs_aux
1407 | mov TAB:RB, [BASE]
1408 | checktab TAB:RB, ->fff_fallback
1409 |.if DUALNUM
1410 | mov RA, [BASE+8]
1411 | checkint RA, ->fff_fallback
1412 |.else
1413 | checknumtp [BASE+8], ->fff_fallback
1414 | movsd xmm0, qword [BASE+8]
1415 |.endif
1416 | mov PC, [BASE-8]
1417 |.if DUALNUM
1418 | add RAd, 1
1419 | setint ITYPE, RA
1420 | mov [BASE-16], ITYPE
1421 |.else
1422 | sseconst_1 xmm1, TMPR
1423 | addsd xmm0, xmm1
1424 | cvttsd2si RAd, xmm0
1425 | movsd qword [BASE-16], xmm0
1426 |.endif
1427 | cmp RAd, TAB:RB->asize; jae >2 // Not in array part?
1428 | mov RD, TAB:RB->array
1429 | lea RD, [RD+RA*8]
1430 |1:
1431 | cmp aword [RD], LJ_TNIL; je ->fff_res0
1432 | // Copy array slot.
1433 | mov RB, [RD]
1434 | mov [BASE-8], RB
1435 | jmp ->fff_res2
1436 |2: // Check for empty hash part first. Otherwise call C function.
1437 | cmp dword TAB:RB->hmask, 0; je ->fff_res0
1438 |.if X64WIN
1439 | mov TMPR, BASE
1440 | mov CARG2d, RAd
1441 | mov CARG1, TAB:RB
1442 | mov RB, TMPR
1443 |.else
1444 | mov CARG1, TAB:RB
1445 | mov RB, BASE // Save BASE.
1446 | mov CARG2d, RAd // Caveat: CARG2 == BASE
1447 |.endif
1448 | call extern lj_tab_getinth // (GCtab *t, int32_t key)
1449 | // cTValue * or NULL returned in eax (RD).
1450 | mov BASE, RB
1451 | test RD, RD
1452 | jnz <1
    |// Shared epilogue: return zero results (end of iteration).
1453 |->fff_res0:
1454 | mov RDd, 1+0
1455 | jmp ->fff_res
1456 |
    |// ipairs(t): returns the iterator from upvalue 0, t and 0.
1457 |.ffunc_1 ipairs
1458 | mov TAB:RB, [BASE]
1459 | mov TMPR, TAB:RB
1460 | checktab TAB:RB, ->fff_fallback
1461#if LJ_52
1462 | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
1463#endif
1464 | mov CFUNC:RD, [BASE-16]
1465 | cleartp CFUNC:RD
1466 | mov CFUNC:RD, CFUNC:RD->upvalue[0]
1467 | settp CFUNC:RD, LJ_TFUNC
1468 | mov PC, [BASE-8]
1469 | mov [BASE-16], CFUNC:RD
1470 | mov [BASE-8], TMPR
1471 |.if DUALNUM
1472 | mov64 RD, ((uint64_t)LJ_TISNUM<<47)
    | // Tagged integer 0 as the initial control value.
1473 | mov [BASE], RD
1474 |.else
1475 | mov qword [BASE], 0
1476 |.endif
1477 | mov RDd, 1+3
1478 | jmp ->fff_res
1479 |
1480 |//-- Base library: catch errors ----------------------------------------
1481 |
    |// pcall(f, ...): builds a PCALL frame whose delta encodes the frame
    |// size; the low bit of PC records whether a hook was active so errors
    |// unwind correctly (see FRAME_PCALL handling elsewhere).
1482 |.ffunc_1 pcall
1483 | lea RA, [BASE+16]
1484 | sub NARGS:RDd, 1
1485 | mov PCd, 16+FRAME_PCALL
1486 |1:
1487 | movzx RBd, byte [DISPATCH+DISPATCH_GL(hookmask)]
1488 | shr RB, HOOK_ACTIVE_SHIFT
1489 | and RB, 1
1490 | add PC, RB // Remember active hook before pcall.
1491 | // Note: this does a (harmless) copy of the function to the PC slot, too.
1492 | mov KBASE, RD
1493 |2:
    | // Copy f and its args up by two slots to make room for the frame.
1494 | mov RB, [RA+KBASE*8-24]
1495 | mov [RA+KBASE*8-16], RB
1496 | sub KBASE, 1
1497 | ja <2
1498 | jmp ->vm_call_dispatch
1499 |
    |// xpcall(f, err, ...): like pcall, but first swaps f and the handler
    |// so the error function sits below the frame.
1500 |.ffunc_2 xpcall
1501 | mov LFUNC:RA, [BASE+8]
1502 | checktp_nc LFUNC:RA, LJ_TFUNC, ->fff_fallback
1503 | mov LFUNC:RB, [BASE] // Swap function and traceback.
1504 | mov [BASE], LFUNC:RA
1505 | mov [BASE+8], LFUNC:RB
1506 | lea RA, [BASE+24]
1507 | sub NARGS:RDd, 2
1508 | mov PCd, 24+FRAME_PCALL
1509 | jmp <1
1510 |
1511 |//-- Coroutine library --------------------------------------------------
1512 |
    |// Shared body for coroutine.resume and the coroutine.wrap closure.
    |// The 'resume' macro parameter selects the differences: where the
    |// target thread comes from (arg 1 vs. upvalue), whether a true/false
    |// status value is prepended to the results, and error handling
    |// (return false+msg vs. rethrow via lj_ffh_coroutine_wrap_err).
1513 |.macro coroutine_resume_wrap, resume
1514 |.if resume
1515 |.ffunc_1 coroutine_resume
1516 | mov L:RB, [BASE]
1517 | cleartp L:RB
1518 |.else
1519 |.ffunc coroutine_wrap_aux
1520 | mov CFUNC:RB, [BASE-16]
1521 | cleartp CFUNC:RB
1522 | mov L:RB, CFUNC:RB->upvalue[0].gcr
1523 | cleartp L:RB
1524 |.endif
1525 | mov PC, [BASE-8]
1526 | mov SAVE_PC, PC
1527 | mov TMP1, L:RB
1528 |.if resume
1529 | checktptp [BASE], LJ_TTHREAD, ->fff_fallback
1530 |.endif
    | // Reject threads that are running (cframe set) or dead (status >
    | // LUA_YIELD, or status == 0 with an empty stack).
1531 | cmp aword L:RB->cframe, 0; jne ->fff_fallback
1532 | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback
1533 | mov RA, L:RB->top
1534 | je >1 // Status != LUA_YIELD (i.e. 0)?
1535 | cmp RA, L:RB->base // Check for presence of initial func.
1536 | je ->fff_fallback
1537 | mov PC, [RA-8] // Move initial function up.
1538 | mov [RA], PC
1539 | add RA, 8
1540 |1:
1541 |.if resume
1542 | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread).
1543 |.else
1544 | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1).
1545 |.endif
1546 | cmp PC, L:RB->maxstack; ja ->fff_fallback
1547 | mov L:RB->top, PC
1548 |
1549 | mov L:RB, SAVE_L
1550 | mov L:RB->base, BASE
1551 |.if resume
1552 | add BASE, 8 // Keep resumed thread in stack for GC.
1553 |.endif
1554 | mov L:RB->top, BASE
1555 |.if resume
1556 | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move.
1557 |.else
1558 | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move.
1559 |.endif
1560 | sub RB, PC // Relative to PC.
1561 |
1562 | cmp PC, RA
1563 | je >3
1564 |2: // Move args to coroutine.
1565 | mov RC, [PC+RB]
1566 | mov [PC-8], RC
1567 | sub PC, 8
1568 | cmp PC, RA
1569 | jne <2
1570 |3:
1571 | mov CARG2, RA
1572 | mov CARG1, TMP1
1573 | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
1574 |
1575 | mov L:RB, SAVE_L
1576 | mov L:PC, TMP1
    | // Note: PC is reused as the resumed coroutine's lua_State * here.
1577 | mov BASE, L:RB->base
1578 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
1579 | set_vmstate INTERP
1580 |
1581 | cmp eax, LUA_YIELD
1582 | ja >8
1583 |4:
1584 | mov RA, L:PC->base
1585 | mov KBASE, L:PC->top
1586 | mov L:PC->top, RA // Clear coroutine stack.
1587 | mov PC, KBASE
1588 | sub PC, RA
1589 | je >6 // No results?
1590 | lea RD, [BASE+PC]
1591 | shr PCd, 3
1592 | cmp RD, L:RB->maxstack
1593 | ja >9 // Need to grow stack?
1594 |
1595 | mov RB, BASE
1596 | sub RB, RA
1597 |5: // Move results from coroutine.
1598 | mov RD, [RA]
1599 | mov [RA+RB], RD
1600 | add RA, 8
1601 | cmp RA, KBASE
1602 | jne <5
1603 |6:
1604 |.if resume
1605 | lea RDd, [PCd+2] // nresults+1 = 1 + true + results.
1606 | mov_true ITYPE // Prepend true to results.
1607 | mov [BASE-8], ITYPE
1608 |.else
1609 | lea RDd, [PCd+1] // nresults+1 = 1 + results.
1610 |.endif
1611 |7:
1612 | mov PC, SAVE_PC
1613 | mov MULTRES, RDd
1614 |.if resume
1615 | mov RA, -8
1616 |.else
1617 | xor RAd, RAd
1618 |.endif
1619 | test PCd, FRAME_TYPE
1620 | jz ->BC_RET_Z
1621 | jmp ->vm_return
1622 |
1623 |8: // Coroutine returned with error (at co->top-1).
1624 |.if resume
1625 | mov_false ITYPE // Prepend false to results.
1626 | mov [BASE-8], ITYPE
1627 | mov RA, L:PC->top
1628 | sub RA, 8
1629 | mov L:PC->top, RA // Clear error from coroutine stack.
1630 | // Copy error message.
1631 | mov RD, [RA]
1632 | mov [BASE], RD
1633 | mov RDd, 1+2 // nresults+1 = 1 + false + error.
1634 | jmp <7
1635 |.else
1636 | mov CARG2, L:PC
1637 | mov CARG1, L:RB
1638 | call extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
1639 | // Error function does not return.
1640 |.endif
1641 |
1642 |9: // Handle stack expansion on return from yield.
1643 | mov L:RA, TMP1
1644 | mov L:RA->top, KBASE // Undo coroutine stack clearing.
1645 | mov CARG2, PC
1646 | mov CARG1, L:RB
1647 | call extern lj_state_growstack // (lua_State *L, int n)
1648 | mov L:PC, TMP1
1649 | mov BASE, L:RB->base
1650 | jmp <4 // Retry the stack move.
1651 |.endmacro
1652 |
1653 | coroutine_resume_wrap 1 // coroutine.resume
1654 | coroutine_resume_wrap 0 // coroutine.wrap
1655 |
    |// coroutine.yield: only valid when resumed through CFRAME_RESUME;
    |// clears cframe, sets status to LUA_YIELD and unwinds to the caller.
1656 |.ffunc coroutine_yield
1657 | mov L:RB, SAVE_L
1658 | test aword L:RB->cframe, CFRAME_RESUME
1659 | jz ->fff_fallback
1660 | mov L:RB->base, BASE
1661 | lea RD, [BASE+NARGS:RD*8-8]
1662 | mov L:RB->top, RD
1663 | xor RDd, RDd
1664 | mov aword L:RB->cframe, RD
1665 | mov al, LUA_YIELD
1666 | mov byte L:RB->status, al
1667 | jmp ->vm_leave_unw
1668 |
1669 |//-- Math library -------------------------------------------------------
1670 |
|// math.abs fast function plus the shared fast-function result epilogues
|// (fff_resi/fff_resRB/fff_resxmm0/fff_res*), which several other ffuncs
|// below jump into.
1671 | .ffunc_1 math_abs
1672 | mov RB, [BASE]
1673 |.if DUALNUM
1674 | checkint RB, >3
1675 | cmp RBd, 0; jns ->fff_resi // Non-negative int: return as-is.
1676 | neg RBd; js >2 // Negation overflowed (INT_MIN)? Return 2^31 as number.
1677 |->fff_resbit:
1678 |->fff_resi:
1679 | setint RB
1680 |->fff_resRB: // Common epilogue: return boxed value in RB.
1681 | mov PC, [BASE-8]
1682 | mov [BASE-16], RB
1683 | jmp ->fff_res1
1684 |2:
1685 | mov64 RB, U64x(41e00000,00000000) // 2^31.
1686 | jmp ->fff_resRB
1687 |3:
1688 | ja ->fff_fallback
1689 |.else
1690 | checknum RB, ->fff_fallback
1691 |.endif
|// Number case: clear bit 63 of the boxed double, i.e. drop the sign bit.
1692 | shl RB, 1
1693 | shr RB, 1
1694 | mov PC, [BASE-8]
1695 | mov [BASE-16], RB
1696 | jmp ->fff_res1
1697 |
1698 |.ffunc_n math_sqrt, sqrtsd
1699 |->fff_resxmm0: // Common epilogue: return FP result in xmm0.
1700 | mov PC, [BASE-8]
1701 | movsd qword [BASE-16], xmm0
1702 | // fallthrough
1703 |
1704 |->fff_res1: // Return exactly one result (nres+1 = 2).
1705 | mov RDd, 1+1
1706 |->fff_res:
1707 | mov MULTRES, RDd
1708 |->fff_res_:
1709 | test PCd, FRAME_TYPE // Standard Lua return frame?
1710 | jnz >7
1711 |5:
1712 | cmp PC_RB, RDL // More results expected?
1713 | ja >6
1714 | // Adjust BASE. KBASE is assumed to be set for the calling frame.
1715 | movzx RAd, PC_RA
1716 | neg RA
1717 | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
1718 | ins_next
1719 |
1720 |6: // Fill up results with nil.
1721 | mov aword [BASE+RD*8-24], LJ_TNIL
1722 | add RD, 1
1723 | jmp <5
1724 |
1725 |7: // Non-standard return case.
1726 | mov RA, -16 // Results start at BASE+RA = BASE-16.
1727 | jmp ->vm_return
1728 |
|// math.floor/math.ceil: round via the vm_floor_sse/vm_ceil_sse helpers,
|// then (DUALNUM only) try to narrow the FP result back to an integer.
1729 |.macro math_round, func
1730 | .ffunc math_ .. func
1731 |.if DUALNUM
1732 | mov RB, [BASE]
1733 | checknumx RB, ->fff_resRB, je // Integer arg: already rounded, return it.
1734 | ja ->fff_fallback
1735 |.else
1736 | checknumtp [BASE], ->fff_fallback
1737 |.endif
1738 | movsd xmm0, qword [BASE]
1739 | call ->vm_ .. func .. _sse
1740 |.if DUALNUM
1741 | cvttsd2si RBd, xmm0
1742 | cmp RBd, 0x80000000 // 0x80000000 = cvttsd2si overflow indicator.
1743 | jne ->fff_resi
1744 | cvtsi2sd xmm1, RBd
1745 | ucomisd xmm0, xmm1
1746 | jp ->fff_resxmm0 // NaN: keep as number.
1747 | je ->fff_resi // Result == INT_MIN exactly: integer is fine.
1748 |.endif
1749 | jmp ->fff_resxmm0
1750 |.endmacro
1751 |
1752 | math_round floor
1753 | math_round ceil
1754 |
|// math.log: call libm log(). RB preserves BASE across the C call
|// (presumably RB is callee-save in this port — same pattern as math_extern).
1755 |.ffunc math_log
1756 | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
1757 | checknumtp [BASE], ->fff_fallback
1758 | movsd xmm0, qword [BASE]
1759 | mov RB, BASE
1760 | call extern log
1761 | mov BASE, RB
1762 | jmp ->fff_resxmm0
1763 |
|// Generic wrappers: 1-arg and 2-arg libm calls. .ffunc_n/.ffunc_nn load the
|// checked number args into xmm0 (and xmm1), matching the C calling convention.
1764 |.macro math_extern, func
1765 | .ffunc_n math_ .. func
1766 | mov RB, BASE
1767 | call extern func
1768 | mov BASE, RB
1769 | jmp ->fff_resxmm0
1770 |.endmacro
1771 |
1772 |.macro math_extern2, func
1773 | .ffunc_nn math_ .. func
1774 | mov RB, BASE
1775 | call extern func
1776 | mov BASE, RB
1777 | jmp ->fff_resxmm0
1778 |.endmacro
1779 |
1780 | math_extern log10
1781 | math_extern exp
1782 | math_extern sin
1783 | math_extern cos
1784 | math_extern tan
1785 | math_extern asin
1786 | math_extern acos
1787 | math_extern atan
1788 | math_extern sinh
1789 | math_extern cosh
1790 | math_extern tanh
1791 | math_extern2 pow
1792 | math_extern2 atan2
1793 | math_extern2 fmod
1794 |
|// math.ldexp: computed with x87 fscale (st0 = st0 * 2^trunc(st1)) instead
|// of a C call; both args are checked as numbers first.
1795 |.ffunc_2 math_ldexp
1796 | checknumtp [BASE], ->fff_fallback
1797 | checknumtp [BASE+8], ->fff_fallback
1798 | fld qword [BASE+8] // st1 = exponent (after next load).
1799 | fld qword [BASE]
1800 | fscale
1801 | fpop1 // Drop the exponent, keep the scaled result.
1802 | mov PC, [BASE-8]
1803 | fstp qword [BASE-16]
1804 | jmp ->fff_res1
1805 |
|// math.frexp: calls C frexp(); mantissa returned in xmm0, the integer
|// exponent is written by frexp through the TMP1 out-pointer.
1806 |.ffunc_n math_frexp
1807 | mov RB, BASE // Preserve BASE across the C call.
1808 |.if X64WIN
1809 | lea CARG2, TMP1 // Caveat: CARG2 == BASE
1810 |.else
1811 | lea CARG1, TMP1
1812 |.endif
1813 | call extern frexp
1814 | mov BASE, RB
1815 | mov RBd, TMP1d // Exponent stored by frexp.
1816 | mov PC, [BASE-8]
1817 | movsd qword [BASE-16], xmm0 // Result 1: mantissa.
1818 |.if DUALNUM
1819 | setint RB
1820 | mov [BASE-8], RB // Result 2: exponent as integer.
1821 |.else
1822 | cvtsi2sd xmm1, RBd
1823 | movsd qword [BASE-8], xmm1 // Result 2: exponent as number.
1824 |.endif
1825 | mov RDd, 1+2 // Two results.
1826 | jmp ->fff_res
1827 |
|// math.modf: calls C modf(); the integral part is written directly into the
|// result slot [BASE-16], the fractional part comes back in xmm0.
1828 |.ffunc_n math_modf
1829 | mov RB, BASE
1830 |.if X64WIN
1831 | lea CARG2, [BASE-16] // Caveat: CARG2 == BASE
1832 |.else
1833 | lea CARG1, [BASE-16]
1834 |.endif
1835 | call extern modf
1836 | mov BASE, RB
1837 | mov PC, [BASE-8]
1838 | movsd qword [BASE-8], xmm0 // Result 2: fractional part.
1839 | mov RDd, 1+2
1840 | jmp ->fff_res
1841 |
|// math.min/math.max over a variable number of args. RA is the 1-based index
|// of the next argument to fold in; integer folding uses cmp+cmov, and falls
|// through to the SSE path (minsd/maxsd) once any argument is a number.
1842 |.macro math_minmax, name, cmovop, sseop
1843 | .ffunc_1 name
1844 | mov RAd, 2 // Start folding at the 2nd argument.
1845 |.if DUALNUM
1846 | mov RB, [BASE]
1847 | checkint RB, >4
1848 |1: // Handle integers.
1849 | cmp RAd, RDd; jae ->fff_resRB // All args consumed: return int in RB.
1850 | mov TMPR, [BASE+RA*8-8]
1851 | checkint TMPR, >3
1852 | cmp RBd, TMPRd
1853 | cmovop RB, TMPR // Keep min (cmovg) or max (cmovl).
1854 | add RAd, 1
1855 | jmp <1
1856 |3:
1857 | ja ->fff_fallback
1858 | // Convert intermediate result to number and continue below.
1859 | cvtsi2sd xmm0, RBd
1860 | jmp >6
1861 |4:
1862 | ja ->fff_fallback
1863 |.else
1864 | checknumtp [BASE], ->fff_fallback
1865 |.endif
1866 |
1867 | movsd xmm0, qword [BASE]
1868 |5: // Handle numbers or integers.
1869 | cmp RAd, RDd; jae ->fff_resxmm0 // All args consumed: return number.
1870 |.if DUALNUM
1871 | mov RB, [BASE+RA*8-8]
1872 | checknumx RB, >6, jb
1873 | ja ->fff_fallback
1874 | cvtsi2sd xmm1, RBd // Integer arg: widen to number.
1875 | jmp >7
1876 |.else
1877 | checknumtp [BASE+RA*8-8], ->fff_fallback
1878 |.endif
1879 |6:
1880 | movsd xmm1, qword [BASE+RA*8-8]
1881 |7:
1882 | sseop xmm0, xmm1 // minsd/maxsd fold step.
1883 | add RAd, 1
1884 | jmp <5
1885 |.endmacro
1886 |
1887 | math_minmax math_min, cmovg, minsd
1888 | math_minmax math_max, cmovl, maxsd
1889 |
1890 |//-- String library -----------------------------------------------------
1891 |
|// string.byte/string.char fast paths (1-arg cases only; everything else
|// goes to the C fallback), plus the shared fff_newstr/fff_resstr epilogue
|// used by all string-producing fast functions.
1892 |.ffunc string_byte // Only handle the 1-arg case here.
1893 | cmp NARGS:RDd, 1+1; jne ->fff_fallback
1894 | mov STR:RB, [BASE]
1895 | checkstr STR:RB, ->fff_fallback
1896 | mov PC, [BASE-8]
1897 | cmp dword STR:RB->len, 1
1898 | jb ->fff_res0 // Return no results for empty string.
1899 | movzx RBd, byte STR:RB[1] // First byte of the string data.
1900 |.if DUALNUM
1901 | jmp ->fff_resi
1902 |.else
1903 | cvtsi2sd xmm0, RBd; jmp ->fff_resxmm0
1904 |.endif
1905 |
1906 |.ffunc string_char // Only handle the 1-arg case here.
1907 | ffgccheck
1908 | cmp NARGS:RDd, 1+1; jne ->fff_fallback // *Exactly* 1 arg.
1909 |.if DUALNUM
1910 | mov RB, [BASE]
1911 | checkint RB, ->fff_fallback
1912 |.else
1913 | checknumtp [BASE], ->fff_fallback
1914 | cvttsd2si RBd, qword [BASE]
1915 |.endif
1916 | cmp RBd, 255; ja ->fff_fallback // Must be a valid byte value.
1917 | mov TMP1d, RBd
1918 | mov TMPRd, 1 // Length 1.
1919 | lea RD, TMP1 // Points to stack. Little-endian.
|// Common tail: intern a string from (RD = data ptr, TMPR = length) and
|// return it as the single result.
1920 |->fff_newstr:
1921 | mov L:RB, SAVE_L
1922 | mov L:RB->base, BASE
1923 | mov CARG3d, TMPRd // Zero-extended to size_t.
1924 | mov CARG2, RD
1925 | mov CARG1, L:RB
1926 | mov SAVE_PC, PC
1927 | call extern lj_str_new // (lua_State *L, char *str, size_t l)
1928 |->fff_resstr:
1929 | // GCstr * returned in eax (RD).
1930 | mov BASE, L:RB->base
1931 | mov PC, [BASE-8]
1932 | settp STR:RD, LJ_TSTR
1933 | mov [BASE-16], STR:RD
1934 | jmp ->fff_res1
1935 |
|// string.sub: normalizes the start (RA) and end (TMPR) indices against the
|// string length RC — negative indices count from the end, out-of-range
|// values are clamped — then hands the byte range to fff_newstr.
1936 |.ffunc string_sub
1937 | ffgccheck
1938 | mov TMPRd, -1 // Default end index: -1 (end of string).
1939 | cmp NARGS:RDd, 1+2; jb ->fff_fallback
1940 | jna >1 // Exactly 2 args: keep the default end.
1941 |.if DUALNUM
1942 | mov TMPR, [BASE+16]
1943 | checkint TMPR, ->fff_fallback
1944 |.else
1945 | checknumtp [BASE+16], ->fff_fallback
1946 | cvttsd2si TMPRd, qword [BASE+16]
1947 |.endif
1948 |1:
1949 | mov STR:RB, [BASE]
1950 | checkstr STR:RB, ->fff_fallback
1951 |.if DUALNUM
1952 | mov ITYPE, [BASE+8]
1953 | mov RAd, ITYPEd // Must clear hiword for lea below.
1954 | sar ITYPE, 47
1955 | cmp ITYPEd, LJ_TISNUM
1956 | jne ->fff_fallback
1957 |.else
1958 | checknumtp [BASE+8], ->fff_fallback
1959 | cvttsd2si RAd, qword [BASE+8]
1960 |.endif
1961 | mov RCd, STR:RB->len
1962 | cmp RCd, TMPRd // len < end? (unsigned compare)
1963 | jb >5
1964 |2:
1965 | test RAd, RAd // start <= 0?
1966 | jle >7
1967 |3:
1968 | sub TMPRd, RAd // start > end?
1969 | jl ->fff_emptystr
1970 | lea RD, [STR:RB+RAd+#STR-1] // Data ptr: string header + (start-1).
1971 | add TMPRd, 1 // Length = end - start + 1.
1972 |4:
1973 | jmp ->fff_newstr
1974 |
1975 |5: // Negative end or overflow.
1976 | jl >6
1977 | lea TMPRd, [TMPRd+RCd+1] // end = end+(len+1)
1978 | jmp <2
1979 |6: // Overflow.
1980 | mov TMPRd, RCd // end = len
1981 | jmp <2
1982 |
1983 |7: // Negative start or underflow.
1984 | je >8
1985 | add RAd, RCd // start = start+(len+1)
1986 | add RAd, 1
1987 | jg <3 // start > 0?
1988 |8: // Underflow.
1989 | mov RAd, 1 // start = 1
1990 | jmp <3
1991 |
1992 |->fff_emptystr: // Range underflow.
1993 | xor TMPRd, TMPRd // Zero length. Any ptr in RD is ok.
1994 | jmp <4
1995 |
|// string.reverse/lower/upper: run lj_buf_putstr_<op> into the global tmpbuf
|// SBUF, then intern the buffer contents via lj_buf_tostr and return through
|// the common fff_resstr epilogue.
1996 |.macro ffstring_op, name
1997 | .ffunc_1 string_ .. name
1998 | ffgccheck
1999 |.if X64WIN
2000 | mov STR:TMPR, [BASE] // Load via TMPR first; CARG2 aliases BASE on Windows.
2001 | checkstr STR:TMPR, ->fff_fallback
2002 |.else
2003 | mov STR:CARG2, [BASE]
2004 | checkstr STR:CARG2, ->fff_fallback
2005 |.endif
2006 | mov L:RB, SAVE_L
2007 | lea SBUF:CARG1, [DISPATCH+DISPATCH_GL(tmpbuf)]
2008 | mov L:RB->base, BASE
2009 |.if X64WIN
2010 | mov STR:CARG2, STR:TMPR // Caveat: CARG2 == BASE
2011 |.endif
2012 | mov RC, SBUF:CARG1->b
2013 | mov SBUF:CARG1->L, L:RB
2014 | mov SBUF:CARG1->p, RC // Reset buffer write pointer to its start.
2015 | mov SAVE_PC, PC
2016 | call extern lj_buf_putstr_ .. name
2017 | mov CARG1, rax // SBuf * returned by the put function.
2018 | call extern lj_buf_tostr
2019 | jmp ->fff_resstr
2020 |.endmacro
2021 |
2022 |ffstring_op reverse
2023 |ffstring_op lower
2024 |ffstring_op upper
2025 |
2026 |//-- Bit library --------------------------------------------------------
2027 |
|// Bit library. The .ffunc_bit prologue macro loads the first argument and
|// converts it to a 32-bit integer in RBd: integers pass through directly
|// (DUALNUM), numbers are normalized via the tobit bias constant in xmm1.
|// 'kind' selects how much of the conversion the caller needs (0 = plain,
|// 1 = shift/unary ops, 2 = n-ary ops which preload xmm1 up front).
2028 |.macro .ffunc_bit, name, kind, fdef
2029 | fdef name
2030 |.if kind == 2
2031 | sseconst_tobit xmm1, RB
2032 |.endif
2033 |.if DUALNUM
2034 | mov RB, [BASE]
2035 | checkint RB, >1
2036 |.if kind > 0
2037 | jmp >2
2038 |.else
2039 | jmp ->fff_resbit
2040 |.endif
2041 |1:
2042 | ja ->fff_fallback
2043 | movd xmm0, RB
2044 |.else
2045 | checknumtp [BASE], ->fff_fallback
2046 | movsd xmm0, qword [BASE]
2047 |.endif
2048 |.if kind < 2
2049 | sseconst_tobit xmm1, RB
2050 |.endif
2051 | addsd xmm0, xmm1 // tobit conversion: bias add exposes the low 32 bits.
2052 | movd RBd, xmm0
2053 |2:
2054 |.endmacro
2055 |
2056 |.macro .ffunc_bit, name, kind
2057 | .ffunc_bit name, kind, .ffunc_1
2058 |.endmacro
2059 |
2060 |.ffunc_bit bit_tobit, 0
2061 | jmp ->fff_resbit
2062 |
|// N-ary bitwise ops (band/bor/bxor): fold all remaining args into RBd,
|// walking RD down from the last argument slot to BASE.
2063 |.macro .ffunc_bit_op, name, ins
2064 | .ffunc_bit name, 2
2065 | mov TMPRd, NARGS:RDd // Save for fallback.
2066 | lea RD, [BASE+NARGS:RD*8-16]
2067 |1:
2068 | cmp RD, BASE
2069 | jbe ->fff_resbit // All arguments folded.
2070 |.if DUALNUM
2071 | mov RA, [RD]
2072 | checkint RA, >2
2073 | ins RBd, RAd
2074 | sub RD, 8
2075 | jmp <1
2076 |2:
2077 | ja ->fff_fallback_bit_op
2078 | movd xmm0, RA
2079 |.else
2080 | checknumtp [RD], ->fff_fallback_bit_op
2081 | movsd xmm0, qword [RD]
2082 |.endif
2083 | addsd xmm0, xmm1 // tobit-convert a number argument.
2084 | movd RAd, xmm0
2085 | ins RBd, RAd
2086 | sub RD, 8
2087 | jmp <1
2088 |.endmacro
2089 |
2090 |.ffunc_bit_op bit_band, and
2091 |.ffunc_bit_op bit_bor, or
2092 |.ffunc_bit_op bit_bxor, xor
2093 |
2094 |.ffunc_bit bit_bswap, 1
2095 | bswap RBd
2096 | jmp ->fff_resbit
2097 |
2098 |.ffunc_bit bit_bnot, 1
2099 | not RBd
2100 |.if DUALNUM
2101 | jmp ->fff_resbit
2102 |.else
|// Non-DUALNUM result epilogue: bit results are returned as numbers.
2103 |->fff_resbit:
2104 | cvtsi2sd xmm0, RBd
2105 | jmp ->fff_resxmm0
2106 |.endif
2107 |
2108 |->fff_fallback_bit_op:
2109 | mov NARGS:RDd, TMPRd // Restore for fallback
2110 | jmp ->fff_fallback
2111 |
|// Shift/rotate ops: the 2nd argument must already be an integer in DUALNUM
|// mode (no inline number conversion); non-DUALNUM tobit-converts both.
2112 |.macro .ffunc_bit_sh, name, ins
2113 |.if DUALNUM
2114 | .ffunc_bit name, 1, .ffunc_2
2115 | // Note: no inline conversion from number for 2nd argument!
2116 | mov RA, [BASE+8]
2117 | checkint RA, ->fff_fallback
2118 |.else
2119 | .ffunc_nn name
2120 | sseconst_tobit xmm2, RB
2121 | addsd xmm0, xmm2
2122 | addsd xmm1, xmm2
2123 | movd RBd, xmm0
2124 | movd RAd, xmm1
2125 |.endif
2126 | ins RBd, cl // Assumes RA is ecx.
2127 | jmp ->fff_resbit
2128 |.endmacro
2129 |
2130 |.ffunc_bit_sh bit_lshift, shl
2131 |.ffunc_bit_sh bit_rshift, shr
2132 |.ffunc_bit_sh bit_arshift, sar
2133 |.ffunc_bit_sh bit_rol, rol
2134 |.ffunc_bit_sh bit_ror, ror
2135 |
2136 |//-----------------------------------------------------------------------
2137 |
|// Fast-function fallback: hand the call over to the C handler stored in the
|// CFUNC. The handler either throws, returns nresults+1 (done), 0 (retry the
|// fast path), or -1 (tailcall resolution needed).
2138 |->fff_fallback_2:
2139 | mov NARGS:RDd, 1+2 // Other args are ignored, anyway.
2140 | jmp ->fff_fallback
2141 |->fff_fallback_1:
2142 | mov NARGS:RDd, 1+1 // Other args are ignored, anyway.
2143 |->fff_fallback: // Call fast function fallback handler.
2144 | // BASE = new base, RD = nargs+1
2145 | mov L:RB, SAVE_L
2146 | mov PC, [BASE-8] // Fallback may overwrite PC.
2147 | mov SAVE_PC, PC // Redundant (but a defined value).
2148 | mov L:RB->base, BASE
2149 | lea RD, [BASE+NARGS:RD*8-8]
2150 | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler.
2151 | mov L:RB->top, RD
2152 | mov CFUNC:RD, [BASE-16]
2153 | cleartp CFUNC:RD
2154 | cmp RA, L:RB->maxstack
2155 | ja >5 // Need to grow stack.
2156 | mov CARG1, L:RB
2157 | call aword CFUNC:RD->f // (lua_State *L)
2158 | mov BASE, L:RB->base
2159 | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
2160 | test RDd, RDd; jg ->fff_res // Returned nresults+1?
2161 |1:
2162 | mov RA, L:RB->top // Recompute nargs+1 from the updated top.
2163 | sub RA, BASE
2164 | shr RAd, 3
2165 | test RDd, RDd
2166 | lea NARGS:RDd, [RAd+1]
2167 | mov LFUNC:RB, [BASE-16]
2168 | jne ->vm_call_tail // Returned -1?
2169 | cleartp LFUNC:RB
2170 | ins_callt // Returned 0: retry fast path.
2171 |
2172 |// Reconstruct previous base for vmeta_call during tailcall.
2173 |->vm_call_tail:
2174 | mov RA, BASE
2175 | test PCd, FRAME_TYPE
2176 | jnz >3
2177 | movzx RBd, PC_RA // Lua frame: decode caller base from the call ins.
2178 | neg RB
2179 | lea BASE, [BASE+RB*8-16] // base = base - (RB+2)*8
2180 | jmp ->vm_call_dispatch // Resolve again for tailcall.
2181 |3:
2182 | mov RB, PC // Non-Lua frame: frame link encodes the delta.
2183 | and RB, -8
2184 | sub BASE, RB
2185 | jmp ->vm_call_dispatch // Resolve again for tailcall.
2186 |
2187 |5: // Grow stack for fallback handler.
2188 | mov CARG2d, LUA_MINSTACK
2189 | mov CARG1, L:RB
2190 | call extern lj_state_growstack // (lua_State *L, int n)
2191 | mov BASE, L:RB->base
2192 | xor RDd, RDd // Simulate a return 0.
2193 | jmp <1 // Dumb retry (goes through ff first).
2194 |
|// GC step helper called from fast-function prologues (ffgccheck). Preserves
|// the caller's return address around the C call by stashing it in TMP1.
2195 |->fff_gcstep: // Call GC step function.
2196 | // BASE = new base, RD = nargs+1
2197 | pop RB // Must keep stack at same level.
2198 | mov TMP1, RB // Save return address
2199 | mov L:RB, SAVE_L
2200 | mov SAVE_PC, PC // Redundant (but a defined value).
2201 | mov L:RB->base, BASE
2202 | lea RD, [BASE+NARGS:RD*8-8]
2203 | mov CARG1, L:RB
2204 | mov L:RB->top, RD
2205 | call extern lj_gc_step // (lua_State *L)
2206 | mov BASE, L:RB->base
2207 | mov RD, L:RB->top // GC may have moved the stack: recompute nargs+1.
2208 | sub RD, BASE
2209 | shr RDd, 3
2210 | add NARGS:RDd, 1
2211 | mov RB, TMP1
2212 | push RB // Restore return address.
2213 | ret
2214 |
2215 |//-----------------------------------------------------------------------
2216 |//-- Special dispatch targets -------------------------------------------
2217 |//-----------------------------------------------------------------------
2218 |
|// Special dispatch targets. All of vm_record/vm_rethook/vm_inshook funnel
|// into the common lj_dispatch_ins call at label 1, then re-dispatch the
|// current instruction through the static dispatch table at label 5.
2219 |->vm_record: // Dispatch target for recording phase.
2220 |.if JIT
2221 | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
2222 | test RDL, HOOK_VMEVENT // No recording while in vmevent.
2223 | jnz >5
2224 | // Decrement the hookcount for consistency, but always do the call.
2225 | test RDL, HOOK_ACTIVE
2226 | jnz >1
2227 | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
2228 | jz >1
2229 | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
2230 | jmp >1
2231 |.endif
2232 |
2233 |->vm_rethook: // Dispatch target for return hooks.
2234 | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
2235 | test RDL, HOOK_ACTIVE // Hook already active?
2236 | jnz >5
2237 | jmp >1
2238 |
2239 |->vm_inshook: // Dispatch target for instr/line hooks.
2240 | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
2241 | test RDL, HOOK_ACTIVE // Hook already active?
2242 | jnz >5
2243 |
2244 | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
2245 | jz >5
2246 | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
2247 | jz >1 // Count hook fires.
2248 | test RDL, LUA_MASKLINE
2249 | jz >5
2250 |1:
2251 | mov L:RB, SAVE_L
2252 | mov L:RB->base, BASE
2253 | mov CARG2, PC // Caveat: CARG2 == BASE
2254 | mov CARG1, L:RB
2255 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
2256 | call extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
2257 |3:
2258 | mov BASE, L:RB->base
2259 |4:
2260 | movzx RAd, PC_RA
2261 |5:
2262 | movzx OP, PC_OP
2263 | movzx RDd, PC_RD
2264 | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins.
2265 |
2266 |->cont_hook: // Continue from hook yield.
2267 | add PC, 4
2268 | mov RA, [RB-40]
2269 | mov MULTRES, RAd // Restore MULTRES for *M ins.
2270 | jmp <4
2271 |
|// Hot loop counter underflow: set L->top to the frame ceiling and invoke
|// the trace recorder, then rejoin the hook path at label 3.
2272 |->vm_hotloop: // Hot loop counter underflow.
2273 |.if JIT
2274 | mov LFUNC:RB, [BASE-16] // Same as curr_topL(L).
2275 | cleartp LFUNC:RB
2276 | mov RB, LFUNC:RB->pc
2277 | movzx RDd, byte [RB+PC2PROTO(framesize)]
2278 | lea RD, [BASE+RD*8]
2279 | mov L:RB, SAVE_L
2280 | mov L:RB->base, BASE
2281 | mov L:RB->top, RD
2282 | mov CARG2, PC
2283 | lea CARG1, [DISPATCH+GG_DISP2J]
2284 | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
2285 | mov SAVE_PC, PC
2286 | call extern lj_trace_hot // (jit_State *J, const BCIns *pc)
2287 | jmp <3
2288 |.endif
2289 |
2290 |->vm_callhook: // Dispatch target for call hooks.
2291 | mov SAVE_PC, PC
2292 |.if JIT
2293 | jmp >1
2294 |.endif
2295 |
|// Hot call counter underflow: shares the lj_dispatch_call tail with
|// vm_callhook; bit 0 of PC marks the hot-call case for the callee.
2296 |->vm_hotcall: // Hot call counter underflow.
2297 |.if JIT
2298 | mov SAVE_PC, PC
2299 | or PC, 1 // Marker for hot call.
2300 |1:
2301 |.endif
2302 | lea RD, [BASE+NARGS:RD*8-8]
2303 | mov L:RB, SAVE_L
2304 | mov L:RB->base, BASE
2305 | mov L:RB->top, RD
2306 | mov CARG2, PC
2307 | mov CARG1, L:RB
2308 | call extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
2309 | // ASMFunction returned in eax/rax (RD).
2310 | mov SAVE_PC, 0 // Invalidate for subsequent line hook.
2311 |.if JIT
2312 | and PC, -2 // Clear the hot-call marker bit again.
2313 |.endif
2314 | mov BASE, L:RB->base
2315 | mov RA, RD
2316 | mov RD, L:RB->top // Recompute nargs+1 (stack may have moved).
2317 | sub RD, BASE
2318 | mov RB, RA
2319 | movzx RAd, PC_RA
2320 | shr RDd, 3
2321 | add NARGS:RDd, 1
2322 | jmp RB // Tailcall the returned ASMFunction.
2323 |
|// Trace stitching continuation: copy the call results into place, pad with
|// nil, then either jump to an already-stitched trace (via BC_JLOOP) or ask
|// lj_dispatch_stitch to start recording a new one.
2324 |->cont_stitch: // Trace stitching.
2325 |.if JIT
2326 | // BASE = base, RC = result, RB = mbase
2327 | mov TRACE:ITYPE, [RB-40] // Save previous trace.
2328 | cleartp TRACE:ITYPE
2329 | mov TMPRd, MULTRES
2330 | movzx RAd, PC_RA
2331 | lea RA, [BASE+RA*8] // Call base.
2332 | sub TMPRd, 1
2333 | jz >2 // No results to move.
2334 |1: // Move results down.
2335 | mov RB, [RC]
2336 | mov [RA], RB
2337 | add RC, 8
2338 | add RA, 8
2339 | sub TMPRd, 1
2340 | jnz <1
2341 |2:
2342 | movzx RCd, PC_RA
2343 | movzx RBd, PC_RB
2344 | add RC, RB
2345 | lea RC, [BASE+RC*8-8] // End of the expected result range.
2346 |3:
2347 | cmp RC, RA
2348 | ja >9 // More results wanted?
2349 |
2350 | test TRACE:ITYPE, TRACE:ITYPE
2351 | jz ->cont_nop // No previous trace: nothing to stitch.
2352 | movzx RBd, word TRACE:ITYPE->traceno
2353 | movzx RDd, word TRACE:ITYPE->link
2354 | cmp RDd, RBd
2355 | je ->cont_nop // Blacklisted.
2356 | test RDd, RDd
2357 | jne =>BC_JLOOP // Jump to stitched trace.
2358 |
2359 | // Stitch a new trace to the previous trace.
2360 | mov [DISPATCH+DISPATCH_J(exitno)], RB
2361 | mov L:RB, SAVE_L
2362 | mov L:RB->base, BASE
2363 | mov CARG2, PC
2364 | lea CARG1, [DISPATCH+GG_DISP2J]
2365 | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
2366 | call extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
2367 | mov BASE, L:RB->base
2368 | jmp ->cont_nop
2369 |
2370 |9: // Fill up results with nil.
2371 | mov aword [RA], LJ_TNIL
2372 | add RA, 8
2373 | jmp <3
2374 |.endif
2375 |
|// Profiler hook: call out to C, then back up PC by one instruction so the
|// interrupted instruction is re-dispatched (dynamically this time).
2376 |->vm_profhook: // Dispatch target for profiler hook.
2377#if LJ_HASPROFILE
2378 | mov L:RB, SAVE_L
2379 | mov L:RB->base, BASE
2380 | mov CARG2, PC // Caveat: CARG2 == BASE
2381 | mov CARG1, L:RB
2382 | call extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
2383 | mov BASE, L:RB->base
2384 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2385 | sub PC, 4
2386 | jmp ->cont_nop
2387#endif
2388 |
2389 |//-----------------------------------------------------------------------
2390 |//-- Trace exit handler -------------------------------------------------
2391 |//-----------------------------------------------------------------------
2392 |
2393 |// Called from an exit stub with the exit number on the stack.
2394 |// The 16 bit exit number is stored with two (sign-extended) push imm8.
|// Spills the full GPR + SSE register state into an ExitState laid out on
|// the C stack, then calls lj_trace_exit to restore the interpreter state.
2395 |->vm_exit_handler:
2396 |.if JIT
2397 | push r13; push r12
2398 | push r11; push r10; push r9; push r8
2399 | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
2400 | push rbx; push rdx; push rcx; push rax
2401 | movzx RCd, byte [rbp-8] // Reconstruct exit number.
2402 | mov RCH, byte [rbp-16]
2403 | mov [rbp-8], r15; mov [rbp-16], r14 // Overwrite the pushed imm8 slots.
2404 | // DISPATCH is preserved on-trace in LJ_GC64 mode.
2405 | mov RAd, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number.
2406 | set_vmstate EXIT
2407 | mov [DISPATCH+DISPATCH_J(exitno)], RCd
2408 | mov [DISPATCH+DISPATCH_J(parent)], RAd
2409 |.if X64WIN
2410 | sub rsp, 16*8+4*8 // Room for SSE regs + save area.
2411 |.else
2412 | sub rsp, 16*8 // Room for SSE regs.
2413 |.endif
2414 | add rbp, -128
2415 | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14
2416 | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12
2417 | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10
2418 | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8
2419 | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6
2420 | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4
2421 | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2
2422 | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0
2423 | // Caveat: RB is rbp.
2424 | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)]
2425 | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
2426 | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
2427 | mov L:RB->base, BASE
2428 |.if X64WIN
2429 | lea CARG2, [rsp+4*8]
2430 |.else
2431 | mov CARG2, rsp
2432 |.endif
2433 | lea CARG1, [DISPATCH+GG_DISP2J]
2434 | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0 // No longer on-trace.
2435 | call extern lj_trace_exit // (jit_State *J, ExitState *ex)
2436 | // MULTRES or negated error code returned in eax (RD).
2437 | mov RA, L:RB->cframe
2438 | and RA, CFRAME_RAWMASK
2439 | mov [RA+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield).
2440 | mov BASE, L:RB->base
2441 | mov PC, [RA+CFRAME_OFS_PC] // Get SAVE_PC.
2442 | jmp >1
2443 |.endif
|// Re-enter the interpreter after a trace exit: restore extra callee-save
|// registers used by compiled code, reposition rsp, then dispatch the
|// bytecode at PC (with special handling for function headers).
2444 |->vm_exit_interp:
2445 | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
2446 |.if JIT
2447 | // Restore additional callee-save registers only used in compiled code.
2448 |.if X64WIN
2449 | lea RA, [rsp+10*16+4*8]
2450 |1:
2451 | movdqa xmm15, [RA-10*16]
2452 | movdqa xmm14, [RA-9*16]
2453 | movdqa xmm13, [RA-8*16]
2454 | movdqa xmm12, [RA-7*16]
2455 | movdqa xmm11, [RA-6*16]
2456 | movdqa xmm10, [RA-5*16]
2457 | movdqa xmm9, [RA-4*16]
2458 | movdqa xmm8, [RA-3*16]
2459 | movdqa xmm7, [RA-2*16]
2460 | mov rsp, RA // Reposition stack to C frame.
2461 | movdqa xmm6, [RA-1*16]
2462 | mov r15, CSAVE_1
2463 | mov r14, CSAVE_2
2464 | mov r13, CSAVE_3
2465 | mov r12, CSAVE_4
2466 |.else
2467 | lea RA, [rsp+16]
2468 |1:
2469 | mov r13, [RA-8]
2470 | mov r12, [RA]
2471 | mov rsp, RA // Reposition stack to C frame.
2472 |.endif
2473 | test RDd, RDd; js >9 // Check for error from exit.
2474 | mov L:RB, SAVE_L
2475 | mov MULTRES, RDd
2476 | mov LFUNC:KBASE, [BASE-16]
2477 | cleartp LFUNC:KBASE
2478 | mov KBASE, LFUNC:KBASE->pc
2479 | mov KBASE, [KBASE+PC2PROTO(k)] // Restore KBASE for the current proto.
2480 | mov L:RB->base, BASE
2481 | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0
2482 | set_vmstate INTERP
2483 | // Modified copy of ins_next which handles function header dispatch, too.
2484 | mov RCd, [PC]
2485 | movzx RAd, RCH
2486 | movzx OP, RCL
2487 | add PC, 4
2488 | shr RCd, 16
2489 | cmp OP, BC_FUNCF // Function header?
2490 | jb >3
2491 | cmp OP, BC_FUNCC+2 // Fast function?
2492 | jae >4
2493 |2:
2494 | mov RCd, MULTRES // RC/RD holds nres+1.
2495 |3:
2496 | jmp aword [DISPATCH+OP*8]
2497 |
2498 |4: // Check frame below fast function.
2499 | mov RC, [BASE-8]
2500 | test RCd, FRAME_TYPE
2501 | jnz <2 // Trace stitching continuation?
2502 | // Otherwise set KBASE for Lua function below fast function.
2503 | movzx RCd, byte [RC-3]
2504 | neg RC
2505 | mov LFUNC:KBASE, [BASE+RC*8-32]
2506 | cleartp LFUNC:KBASE
2507 | mov KBASE, LFUNC:KBASE->pc
2508 | mov KBASE, [KBASE+PC2PROTO(k)]
2509 | jmp <2
2510 |
2511 |9: // Rethrow error from the right C frame.
2512 | mov CARG1, L:RB
2513 | call extern lj_err_run // (lua_State *L)
2514 |.endif
2515 |
2516 |//-----------------------------------------------------------------------
2517 |//-- Math helper functions ----------------------------------------------
2518 |//-----------------------------------------------------------------------
2519 |
2520 |// FP value rounding. Called by math.floor/math.ceil fast functions
2521 |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
|// Rounding trick: (|x| + 2^52) - 2^52 rounds to integer in the current FP
|// mode (round-to-nearest); a compare-and-subtract then corrects toward the
|// requested direction (floor/ceil/trunc), and the sign bit is merged back
|// so -0 and negative values round correctly.
2522 |.macro vm_round, name, mode, cond
2523 |->name:
2524 |->name .. _sse:
2525 | sseconst_abs xmm2, RD
2526 | sseconst_2p52 xmm3, RD
2527 | movaps xmm1, xmm0
2528 | andpd xmm1, xmm2 // |x|
2529 | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|.
2530 | jbe >1
2531 | andnpd xmm2, xmm0 // Isolate sign bit.
2532 |.if mode == 2 // trunc(x)?
2533 | movaps xmm0, xmm1
2534 | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
2535 | subsd xmm1, xmm3
2536 | sseconst_1 xmm3, RD
2537 | cmpsd xmm0, xmm1, 1 // |x| < result?
2538 | andpd xmm0, xmm3
2539 | subsd xmm1, xmm0 // If yes, subtract -1.
2540 | orpd xmm1, xmm2 // Merge sign bit back in.
2541 |.else
2542 | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
2543 | subsd xmm1, xmm3
2544 | orpd xmm1, xmm2 // Merge sign bit back in.
2545 | .if mode == 1 // ceil(x)?
2546 | sseconst_m1 xmm2, RD // Must subtract -1 to preserve -0.
2547 | cmpsd xmm0, xmm1, 6 // x > result?
2548 | .else // floor(x)?
2549 | sseconst_1 xmm2, RD
2550 | cmpsd xmm0, xmm1, 1 // x < result?
2551 | .endif
2552 | andpd xmm0, xmm2
2553 | subsd xmm1, xmm0 // If yes, subtract +-1.
2554 |.endif
2555 | movaps xmm0, xmm1
2556 |1:
2557 | ret
2558 |.endmacro
2559 |
2560 | vm_round vm_floor, 0, 1
2561 | vm_round vm_ceil, 1, JIT
2562 | vm_round vm_trunc, 2, JIT
2563 |
2564 |// FP modulo x%y. Called by BC_MOD* and vm_arith.
|// Computes x - floor(x/y)*y, using the same 2^52 floor trick as vm_round.
2565 |->vm_mod:
2566 |// Args in xmm0/xmm1, return value in xmm0.
2567 |// Caveat: xmm0-xmm5 and RC (eax) modified!
2568 | movaps xmm5, xmm0 // Keep original x for the final subtract.
2569 | divsd xmm0, xmm1
2570 | sseconst_abs xmm2, RD
2571 | sseconst_2p52 xmm3, RD
2572 | movaps xmm4, xmm0
2573 | andpd xmm4, xmm2 // |x/y|
2574 | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|.
2575 | jbe >1
2576 | andnpd xmm2, xmm0 // Isolate sign bit.
2577 | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52
2578 | subsd xmm4, xmm3
2579 | orpd xmm4, xmm2 // Merge sign bit back in.
2580 | sseconst_1 xmm2, RD
2581 | cmpsd xmm0, xmm4, 1 // x/y < result?
2582 | andpd xmm0, xmm2
2583 | subsd xmm4, xmm0 // If yes, subtract 1.0.
2584 | movaps xmm0, xmm5
2585 | mulsd xmm1, xmm4
2586 | subsd xmm0, xmm1 // x - floor(x/y)*y.
2587 | ret
2588 |1:
2589 | mulsd xmm1, xmm0 // Huge quotient: already integral.
2590 | movaps xmm0, xmm5
2591 | subsd xmm0, xmm1
2592 | ret
2593 |
2594 |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified.
|// Integer power x^i by square-and-multiply; negative exponents recurse into
|// the positive case (via call <1) and take the reciprocal.
2595 |->vm_powi_sse:
2596 | cmp eax, 1; jle >6 // i<=1?
2597 | // Now 1 < (unsigned)i <= 0x80000000.
2598 |1: // Handle leading zeros.
2599 | test eax, 1; jnz >2
2600 | mulsd xmm0, xmm0
2601 | shr eax, 1
2602 | jmp <1
2603 |2:
2604 | shr eax, 1; jz >5
2605 | movaps xmm1, xmm0 // xmm1 accumulates the product.
2606 |3: // Handle trailing bits.
2607 | mulsd xmm0, xmm0
2608 | shr eax, 1; jz >4
2609 | jnc <3 // Bit clear: just keep squaring.
2610 | mulsd xmm1, xmm0
2611 | jmp <3
2612 |4:
2613 | mulsd xmm0, xmm1
2614 |5:
2615 | ret
2616 |6:
2617 | je <5 // x^1 ==> x
2618 | jb >7 // x^0 ==> 1
2619 | neg eax // Negative i: compute x^(-i), then invert.
2620 | call <1
2621 | sseconst_1 xmm1, RD
2622 | divsd xmm1, xmm0
2623 | movaps xmm0, xmm1
2624 | ret
2625 |7:
2626 | sseconst_1 xmm0, RD
2627 | ret
2628 |
2629 |//-----------------------------------------------------------------------
2630 |//-- Miscellaneous functions --------------------------------------------
2631 |//-----------------------------------------------------------------------
2632 |
2633 |// int lj_vm_cpuid(uint32_t f, uint32_t res[4])
|// Executes CPUID for leaf f (subleaf ecx=0) and stores eax/ebx/ecx/edx into
|// res[0..3]. rbx is callee-save in both ABIs, so it is saved around CPUID;
|// on Windows the result pointer arrives in CARG2 and is moved into rsi
|// (callee-save there), which must itself be saved/restored.
2634 |->vm_cpuid:
2635 | mov eax, CARG1d
2636 | .if X64WIN; push rsi; mov rsi, CARG2; .endif
2637 | push rbx // CPUID clobbers ebx.
2638 | xor ecx, ecx // Subleaf 0.
2639 | cpuid
2640 | mov [rsi], eax
2641 | mov [rsi+4], ebx
2642 | mov [rsi+8], ecx
2643 | mov [rsi+12], edx
2644 | pop rbx
2645 | .if X64WIN; pop rsi; .endif
2646 | ret
2647 |
2648 |//-----------------------------------------------------------------------
2649 |//-- Assertions ---------------------------------------------------------
2650 |//-----------------------------------------------------------------------
2651 |
|// Trap target for bad for-loop argument types: always breaks into the
|// debugger (int3), doubled under LUA_USE_ASSERT.
2652 |->assert_bad_for_arg_type:
2653#ifdef LUA_USE_ASSERT
2654 | int3
2655#endif
2656 | int3
2657 |
2658 |//-----------------------------------------------------------------------
2659 |//-- FFI helper functions -----------------------------------------------
2660 |//-----------------------------------------------------------------------
2661 |
2662 |// Handler for callback functions. Callback slot number in ah/al.
|// Spills the incoming C arguments (GPRs + FP regs per the active ABI) into
|// CTState->cb, lets lj_ccallback_enter set up a Lua call frame, then enters
|// the interpreter via ins_callt.
2663 |->vm_ffi_callback:
2664 |.if FFI
2665 |.type CTSTATE, CTState, PC
2666 | saveregs_ // ebp/rbp already saved. ebp now holds global_State *.
2667 | lea DISPATCH, [ebp+GG_G2DISP]
2668 | mov CTSTATE, GL:ebp->ctype_state
2669 | movzx eax, ax // Callback slot number from ah/al.
2670 | mov CTSTATE->cb.slot, eax
2671 | mov CTSTATE->cb.gpr[0], CARG1
2672 | mov CTSTATE->cb.gpr[1], CARG2
2673 | mov CTSTATE->cb.gpr[2], CARG3
2674 | mov CTSTATE->cb.gpr[3], CARG4
2675 | movsd qword CTSTATE->cb.fpr[0], xmm0
2676 | movsd qword CTSTATE->cb.fpr[1], xmm1
2677 | movsd qword CTSTATE->cb.fpr[2], xmm2
2678 | movsd qword CTSTATE->cb.fpr[3], xmm3
2679 |.if X64WIN
2680 | lea rax, [rsp+CFRAME_SIZE+4*8] // Skip the 4-slot shadow space too.
2681 |.else
2682 | lea rax, [rsp+CFRAME_SIZE]
2683 | mov CTSTATE->cb.gpr[4], CARG5 // SysV passes 6 GPR / 8 FP args.
2684 | mov CTSTATE->cb.gpr[5], CARG6
2685 | movsd qword CTSTATE->cb.fpr[4], xmm4
2686 | movsd qword CTSTATE->cb.fpr[5], xmm5
2687 | movsd qword CTSTATE->cb.fpr[6], xmm6
2688 | movsd qword CTSTATE->cb.fpr[7], xmm7
2689 |.endif
2690 | mov CTSTATE->cb.stack, rax // Ptr to the caller's stack-passed args.
2691 | mov CARG2, rsp
2692 | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok.
2693 | mov CARG1, CTSTATE
2694 | call extern lj_ccallback_enter // (CTState *cts, void *cf)
2695 | // lua_State * returned in eax (RD).
2696 | set_vmstate INTERP
2697 | mov BASE, L:RD->base
2698 | mov RD, L:RD->top
2699 | sub RD, BASE
2700 | mov LFUNC:RB, [BASE-16]
2701 | cleartp LFUNC:RB
2702 | shr RD, 3
2703 | add RD, 1 // nargs+1.
2704 | ins_callt
2705 |.endif
2706 |
|// Return path of an FFI callback: convert the Lua results back to C values
|// (lj_ccallback_leave fills cb.gpr/cb.fpr), load the C return registers and
|// unwind back to the original C caller.
2707 |->cont_ffi_callback: // Return from FFI callback.
2708 |.if FFI
2709 | mov L:RA, SAVE_L
2710 | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)]
2711 | mov aword CTSTATE->L, L:RA
2712 | mov L:RA->base, BASE
2713 | mov L:RA->top, RB
2714 | mov CARG1, CTSTATE
2715 | mov CARG2, RC
2716 | call extern lj_ccallback_leave // (CTState *cts, TValue *o)
2717 | mov rax, CTSTATE->cb.gpr[0] // C return value(s).
2718 | movsd xmm0, qword CTSTATE->cb.fpr[0]
2719 | jmp ->vm_leave_unw
2720 |.endif
2721 |
2722 |->vm_ffi_call: // Call C function via FFI.
2723 | // Caveat: needs special frame unwinding, see below.
2724 |.if FFI
|// Marshals a prepared CCallState: copies stack slots below rsp, loads the
|// ABI argument registers from ccstate->gpr/fpr, calls the target, and
|// stores the return registers back into the CCallState.
2725 | .type CCSTATE, CCallState, rbx
2726 | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1
2727 |
2728 | // Readjust stack.
2729 | mov eax, CCSTATE->spadj
2730 | sub rsp, rax
2731 |
2732 | // Copy stack slots.
2733 | movzx ecx, byte CCSTATE->nsp
2734 | sub ecx, 1
2735 | js >2 // No stack-passed arguments.
2736 |1:
2737 | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)]
2738 | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax
2739 | sub ecx, 1
2740 | jns <1
2741 |2:
2742 |
2743 | movzx eax, byte CCSTATE->nfpr // Also the AL vararg FP count (SysV).
2744 | mov CARG1, CCSTATE->gpr[0]
2745 | mov CARG2, CCSTATE->gpr[1]
2746 | mov CARG3, CCSTATE->gpr[2]
2747 | mov CARG4, CCSTATE->gpr[3]
2748 |.if not X64WIN
2749 | mov CARG5, CCSTATE->gpr[4]
2750 | mov CARG6, CCSTATE->gpr[5]
2751 |.endif
2752 | test eax, eax; jz >5 // No FP arguments at all.
2753 | movaps xmm0, CCSTATE->fpr[0]
2754 | movaps xmm1, CCSTATE->fpr[1]
2755 | movaps xmm2, CCSTATE->fpr[2]
2756 | movaps xmm3, CCSTATE->fpr[3]
2757 |.if not X64WIN
2758 | cmp eax, 4; jbe >5
2759 | movaps xmm4, CCSTATE->fpr[4]
2760 | movaps xmm5, CCSTATE->fpr[5]
2761 | movaps xmm6, CCSTATE->fpr[6]
2762 | movaps xmm7, CCSTATE->fpr[7]
2763 |.endif
2764 |5:
2765 |
2766 | call aword CCSTATE->func
2767 |
2768 | mov CCSTATE->gpr[0], rax // Save return registers for the caller.
2769 | movaps CCSTATE->fpr[0], xmm0
2770 |.if not X64WIN
2771 | mov CCSTATE->gpr[1], rdx
2772 | movaps CCSTATE->fpr[1], xmm1
2773 |.endif
2774 |
2775 | mov rbx, [rbp-8]; leave; ret
2776 |.endif
2777 |// Note: vm_ffi_call must be the last function in this object file!
2778 |
2779 |//-----------------------------------------------------------------------
2780}
2781
2782/* Generate the code for a single instruction. */
2783static void build_ins(BuildCtx *ctx, BCOp op, int defop)
2784{
2785 int vk = 0;
2786 |// Note: aligning all instructions does not pay off.
2787 |=>defop:
2788
2789 switch (op) {
2790
2791 /* -- Comparison ops ---------------------------------------------------- */
2792
2793 /* Remember: all ops branch for a true comparison, fall through otherwise. */
2794
2795 |.macro jmp_comp, lt, ge, le, gt, target
2796 ||switch (op) {
2797 ||case BC_ISLT:
2798 | lt target
2799 ||break;
2800 ||case BC_ISGE:
2801 | ge target
2802 ||break;
2803 ||case BC_ISLE:
2804 | le target
2805 ||break;
2806 ||case BC_ISGT:
2807 | gt target
2808 ||break;
2809 ||default: break; /* Shut up GCC. */
2810 ||}
2811 |.endmacro
2812
2813 case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
2814 | // RA = src1, RD = src2, JMP with RD = target
2815 | ins_AD
2816 | mov ITYPE, [BASE+RA*8]
2817 | mov RB, [BASE+RD*8]
2818 | mov RA, ITYPE
2819 | mov RD, RB
2820 | sar ITYPE, 47
2821 | sar RB, 47
2822 |.if DUALNUM
2823 | cmp ITYPEd, LJ_TISNUM; jne >7
2824 | cmp RBd, LJ_TISNUM; jne >8
2825 | add PC, 4
2826 | cmp RAd, RDd
2827 | jmp_comp jge, jl, jg, jle, >9
2828 |6:
2829 | movzx RDd, PC_RD
2830 | branchPC RD
2831 |9:
2832 | ins_next
2833 |
2834 |7: // RA is not an integer.
2835 | ja ->vmeta_comp
2836 | // RA is a number.
2837 | cmp RBd, LJ_TISNUM; jb >1; jne ->vmeta_comp
2838 | // RA is a number, RD is an integer.
2839 | cvtsi2sd xmm0, RDd
2840 | jmp >2
2841 |
2842 |8: // RA is an integer, RD is not an integer.
2843 | ja ->vmeta_comp
2844 | // RA is an integer, RD is a number.
2845 | cvtsi2sd xmm1, RAd
2846 | movd xmm0, RD
2847 | jmp >3
2848 |.else
2849 | cmp ITYPEd, LJ_TISNUM; jae ->vmeta_comp
2850 | cmp RBd, LJ_TISNUM; jae ->vmeta_comp
2851 |.endif
2852 |1:
2853 | movd xmm0, RD
2854 |2:
2855 | movd xmm1, RA
2856 |3:
2857 | add PC, 4
2858 | ucomisd xmm0, xmm1
2859 | // Unordered: all of ZF CF PF set, ordered: PF clear.
2860 | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
2861 |.if DUALNUM
2862 | jmp_comp jbe, ja, jb, jae, <9
2863 | jmp <6
2864 |.else
2865 | jmp_comp jbe, ja, jb, jae, >1
2866 | movzx RDd, PC_RD
2867 | branchPC RD
2868 |1:
2869 | ins_next
2870 |.endif
2871 break;
2872
2873 case BC_ISEQV: case BC_ISNEV:
2874 vk = op == BC_ISEQV;
2875 | ins_AD // RA = src1, RD = src2, JMP with RD = target
2876 | mov RB, [BASE+RD*8]
2877 | mov ITYPE, [BASE+RA*8]
2878 | add PC, 4
2879 | mov RD, RB
2880 | mov RA, ITYPE
2881 | sar RB, 47
2882 | sar ITYPE, 47
2883 |.if DUALNUM
2884 | cmp RBd, LJ_TISNUM; jne >7
2885 | cmp ITYPEd, LJ_TISNUM; jne >8
2886 | cmp RDd, RAd
2887 if (vk) {
2888 | jne >9
2889 } else {
2890 | je >9
2891 }
2892 | movzx RDd, PC_RD
2893 | branchPC RD
2894 |9:
2895 | ins_next
2896 |
2897 |7: // RD is not an integer.
2898 | ja >5
2899 | // RD is a number.
2900 | movd xmm1, RD
2901 | cmp ITYPEd, LJ_TISNUM; jb >1; jne >5
2902 | // RD is a number, RA is an integer.
2903 | cvtsi2sd xmm0, RAd
2904 | jmp >2
2905 |
2906 |8: // RD is an integer, RA is not an integer.
2907 | ja >5
2908 | // RD is an integer, RA is a number.
2909 | cvtsi2sd xmm1, RDd
2910 | jmp >1
2911 |
2912 |.else
2913 | cmp RBd, LJ_TISNUM; jae >5
2914 | cmp ITYPEd, LJ_TISNUM; jae >5
2915 | movd xmm1, RD
2916 |.endif
2917 |1:
2918 | movd xmm0, RA
2919 |2:
2920 | ucomisd xmm0, xmm1
2921 |4:
2922 iseqne_fp:
2923 if (vk) {
2924 | jp >2 // Unordered means not equal.
2925 | jne >2
2926 } else {
2927 | jp >2 // Unordered means not equal.
2928 | je >1
2929 }
2930 iseqne_end:
2931 if (vk) {
2932 |1: // EQ: Branch to the target.
2933 | movzx RDd, PC_RD
2934 | branchPC RD
2935 |2: // NE: Fallthrough to next instruction.
2936 |.if not FFI
2937 |3:
2938 |.endif
2939 } else {
2940 |.if not FFI
2941 |3:
2942 |.endif
2943 |2: // NE: Branch to the target.
2944 | movzx RDd, PC_RD
2945 | branchPC RD
2946 |1: // EQ: Fallthrough to next instruction.
2947 }
2948 if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
2949 op == BC_ISEQN || op == BC_ISNEN)) {
2950 | jmp <9
2951 } else {
2952 | ins_next
2953 }
2954 |
2955 if (op == BC_ISEQV || op == BC_ISNEV) {
2956 |5: // Either or both types are not numbers.
2957 |.if FFI
2958 | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
2959 | cmp ITYPEd, LJ_TCDATA; je ->vmeta_equal_cd
2960 |.endif
2961 | cmp RA, RD
2962 | je <1 // Same GCobjs or pvalues?
2963 | cmp RBd, ITYPEd
2964 | jne <2 // Not the same type?
2965 | cmp RBd, LJ_TISTABUD
2966 | ja <2 // Different objects and not table/ud?
2967 |
2968 | // Different tables or userdatas. Need to check __eq metamethod.
2969 | // Field metatable must be at same offset for GCtab and GCudata!
2970 | cleartp TAB:RA
2971 | mov TAB:RB, TAB:RA->metatable
2972 | test TAB:RB, TAB:RB
2973 | jz <2 // No metatable?
2974 | test byte TAB:RB->nomm, 1<<MM_eq
2975 | jnz <2 // Or 'no __eq' flag set?
2976 if (vk) {
2977 | xor RBd, RBd // ne = 0
2978 } else {
2979 | mov RBd, 1 // ne = 1
2980 }
2981 | jmp ->vmeta_equal // Handle __eq metamethod.
2982 } else {
2983 |.if FFI
2984 |3:
2985 | cmp ITYPEd, LJ_TCDATA
2986 if (LJ_DUALNUM && vk) {
2987 | jne <9
2988 } else {
2989 | jne <2
2990 }
2991 | jmp ->vmeta_equal_cd
2992 |.endif
2993 }
2994 break;
2995 case BC_ISEQS: case BC_ISNES:
2996 vk = op == BC_ISEQS;
2997 | ins_AND // RA = src, RD = str const, JMP with RD = target
2998 | mov RB, [BASE+RA*8]
2999 | add PC, 4
3000 | checkstr RB, >3
3001 | cmp RB, [KBASE+RD*8]
3002 iseqne_test:
3003 if (vk) {
3004 | jne >2
3005 } else {
3006 | je >1
3007 }
3008 goto iseqne_end;
3009 case BC_ISEQN: case BC_ISNEN:
3010 vk = op == BC_ISEQN;
3011 | ins_AD // RA = src, RD = num const, JMP with RD = target
3012 | mov RB, [BASE+RA*8]
3013 | add PC, 4
3014 |.if DUALNUM
3015 | checkint RB, >7
3016 | mov RD, [KBASE+RD*8]
3017 | checkint RD, >8
3018 | cmp RBd, RDd
3019 if (vk) {
3020 | jne >9
3021 } else {
3022 | je >9
3023 }
3024 | movzx RDd, PC_RD
3025 | branchPC RD
3026 |9:
3027 | ins_next
3028 |
3029 |7: // RA is not an integer.
3030 | ja >3
3031 | // RA is a number.
3032 | mov RD, [KBASE+RD*8]
3033 | checkint RD, >1
3034 | // RA is a number, RD is an integer.
3035 | cvtsi2sd xmm0, RDd
3036 | jmp >2
3037 |
3038 |8: // RA is an integer, RD is a number.
3039 | cvtsi2sd xmm0, RBd
3040 | movd xmm1, RD
3041 | ucomisd xmm0, xmm1
3042 | jmp >4
3043 |1:
3044 | movd xmm0, RD
3045 |.else
3046 | checknum RB, >3
3047 |1:
3048 | movsd xmm0, qword [KBASE+RD*8]
3049 |.endif
3050 |2:
3051 | ucomisd xmm0, qword [BASE+RA*8]
3052 |4:
3053 goto iseqne_fp;
3054 case BC_ISEQP: case BC_ISNEP:
3055 vk = op == BC_ISEQP;
3056 | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
3057 | mov RB, [BASE+RA*8]
3058 | sar RB, 47
3059 | add PC, 4
3060 | cmp RBd, RDd
3061 if (!LJ_HASFFI) goto iseqne_test;
3062 if (vk) {
3063 | jne >3
3064 | movzx RDd, PC_RD
3065 | branchPC RD
3066 |2:
3067 | ins_next
3068 |3:
3069 | cmp RBd, LJ_TCDATA; jne <2
3070 | jmp ->vmeta_equal_cd
3071 } else {
3072 | je >2
3073 | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
3074 | movzx RDd, PC_RD
3075 | branchPC RD
3076 |2:
3077 | ins_next
3078 }
3079 break;
3080
3081 /* -- Unary test and copy ops ------------------------------------------- */
3082
3083 case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
3084 | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
3085 | mov ITYPE, [BASE+RD*8]
3086 | add PC, 4
3087 if (op == BC_ISTC || op == BC_ISFC) { /* Copy variants keep the full tagged value for the store below. */
3088 | mov RB, ITYPE
3089 }
3090 | sar ITYPE, 47 // Extract the type tag from the NaN-tagged value.
3091 | cmp ITYPEd, LJ_TISTRUECOND // Unsigned: tags >= LJ_TISTRUECOND are nil/false.
3092 if (op == BC_IST || op == BC_ISTC) {
3093 | jae >1 // IST/ISTC: falsy operand falls through.
3094 } else {
3095 | jb >1 // ISF/ISFC: truthy operand falls through.
3096 }
3097 if (op == BC_ISTC || op == BC_ISFC) {
3098 | mov [BASE+RA*8], RB // Copy the tested value to the destination slot (taken path only).
3099 }
3100 | movzx RDd, PC_RD
3101 | branchPC RD // Taken: branch to the jump target.
3102 |1: // Fallthrough to the next instruction.
3103 | ins_next
3104 break;
3105
3106 case BC_ISTYPE:
3107 | ins_AD // RA = src, RD = -type
3108 | mov RB, [BASE+RA*8]
3109 | sar RB, 47 // Extract the type tag.
3110 | add RBd, RDd // RD holds the negated expected type: sum is zero on a match.
3111 | jne ->vmeta_istype // Type mismatch: handle via the fallback.
3112 | ins_next
3113 break;
3114 case BC_ISNUM:
3115 | ins_AD // RA = src, RD = -(TISNUM-1)
3116 | checknumtp [BASE+RA*8], ->vmeta_istype // Not a number: handle via the fallback.
3117 | ins_next
3118 break;
3119
3120 /* -- Unary ops --------------------------------------------------------- */
3121
3122 case BC_MOV:
3123 | ins_AD // RA = dst, RD = src
3124 | mov RB, [BASE+RD*8] // Copy the whole 64 bit tagged value in one move.
3125 | mov [BASE+RA*8], RB
3126 | ins_next_
3127 break;
3128 case BC_NOT:
3129 | ins_AD // RA = dst, RD = src
3130 | mov RB, [BASE+RD*8]
3131 | sar RB, 47 // Extract the type tag.
3132 | mov RCd, 2
3133 | cmp RB, LJ_TISTRUECOND // Sets carry iff the operand is truthy.
3134 | sbb RCd, 0 // Branchless select: RC = 2 - CF, the tag of the negated result.
3135 | shl RC, 47
3136 | not RC // Materialize the tagged primitive as ~(tag<<47).
3137 | mov [BASE+RA*8], RC
3138 | ins_next
3139 break;
3140 case BC_UNM:
3141 | ins_AD // RA = dst, RD = src
3142 | mov RB, [BASE+RD*8]
3143 |.if DUALNUM
3144 | checkint RB, >5
3145 | neg RBd
3146 | jo >4
3147 | setint RB
3148 |9:
3149 | mov [BASE+RA*8], RB
3150 | ins_next
3151 |4:
3152 | mov64 RB, U64x(41e00000,00000000) // 2^31.
3153 | jmp <9
3154 |5:
3155 | ja ->vmeta_unm
3156 |.else
3157 | checknum RB, ->vmeta_unm
3158 |.endif
3159 | mov64 RD, U64x(80000000,00000000)
3160 | xor RB, RD
3161 |.if DUALNUM
3162 | jmp <9
3163 |.else
3164 | mov [BASE+RA*8], RB
3165 | ins_next
3166 |.endif
3167 break;
3168 case BC_LEN:
3169 | ins_AD // RA = dst, RD = src
3170 | mov RD, [BASE+RD*8]
3171 | checkstr RD, >2
3172 |.if DUALNUM
3173 | mov RDd, dword STR:RD->len
3174 |1:
3175 | setint RD
3176 | mov [BASE+RA*8], RD
3177 |.else
3178 | xorps xmm0, xmm0
3179 | cvtsi2sd xmm0, dword STR:RD->len
3180 |1:
3181 | movsd qword [BASE+RA*8], xmm0
3182 |.endif
3183 | ins_next
3184 |2:
3185 | cmp ITYPEd, LJ_TTAB; jne ->vmeta_len
3186 | mov TAB:CARG1, TAB:RD
3187#if LJ_52
3188 | mov TAB:RB, TAB:RD->metatable
3189 | cmp TAB:RB, 0
3190 | jnz >9
3191 |3:
3192#endif
3193 |->BC_LEN_Z:
3194 | mov RB, BASE // Save BASE.
3195 | call extern lj_tab_len // (GCtab *t)
3196 | // Length of table returned in eax (RD).
3197 |.if DUALNUM
3198 | // Nothing to do.
3199 |.else
3200 | cvtsi2sd xmm0, RDd
3201 |.endif
3202 | mov BASE, RB // Restore BASE.
3203 | movzx RAd, PC_RA
3204 | jmp <1
3205#if LJ_52
3206 |9: // Check for __len.
3207 | test byte TAB:RB->nomm, 1<<MM_len
3208 | jnz <3
3209 | jmp ->vmeta_len // 'no __len' flag NOT set: check.
3210#endif
3211 break;
3212
3213 /* -- Binary ops -------------------------------------------------------- */
3214
3215 |.macro ins_arithpre, sseins, ssereg
3216 | ins_ABC
3217 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
3218 ||switch (vk) {
3219 ||case 0:
3220 | checknumtp [BASE+RB*8], ->vmeta_arith_vn
3221 | .if DUALNUM
3222 | checknumtp [KBASE+RC*8], ->vmeta_arith_vn
3223 | .endif
3224 | movsd xmm0, qword [BASE+RB*8]
3225 | sseins ssereg, qword [KBASE+RC*8]
3226 || break;
3227 ||case 1:
3228 | checknumtp [BASE+RB*8], ->vmeta_arith_nv
3229 | .if DUALNUM
3230 | checknumtp [KBASE+RC*8], ->vmeta_arith_nv
3231 | .endif
3232 | movsd xmm0, qword [KBASE+RC*8]
3233 | sseins ssereg, qword [BASE+RB*8]
3234 || break;
3235 ||default:
3236 | checknumtp [BASE+RB*8], ->vmeta_arith_vv
3237 | checknumtp [BASE+RC*8], ->vmeta_arith_vv
3238 | movsd xmm0, qword [BASE+RB*8]
3239 | sseins ssereg, qword [BASE+RC*8]
3240 || break;
3241 ||}
3242 |.endmacro
3243 |
3244 |.macro ins_arithdn, intins
3245 | ins_ABC
3246 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
3247 ||switch (vk) {
3248 ||case 0:
3249 | mov RB, [BASE+RB*8]
3250 | mov RC, [KBASE+RC*8]
3251 | checkint RB, ->vmeta_arith_vno
3252 | checkint RC, ->vmeta_arith_vno
3253 | intins RBd, RCd; jo ->vmeta_arith_vno
3254 || break;
3255 ||case 1:
3256 | mov RB, [BASE+RB*8]
3257 | mov RC, [KBASE+RC*8]
3258 | checkint RB, ->vmeta_arith_nvo
3259 | checkint RC, ->vmeta_arith_nvo
3260 | intins RCd, RBd; jo ->vmeta_arith_nvo
3261 || break;
3262 ||default:
3263 | mov RB, [BASE+RB*8]
3264 | mov RC, [BASE+RC*8]
3265 | checkint RB, ->vmeta_arith_vvo
3266 | checkint RC, ->vmeta_arith_vvo
3267 | intins RBd, RCd; jo ->vmeta_arith_vvo
3268 || break;
3269 ||}
3270 ||if (vk == 1) {
3271 | setint RC
3272 | mov [BASE+RA*8], RC
3273 ||} else {
3274 | setint RB
3275 | mov [BASE+RA*8], RB
3276 ||}
3277 | ins_next
3278 |.endmacro
3279 |
3280 |.macro ins_arithpost
3281 | movsd qword [BASE+RA*8], xmm0
3282 |.endmacro
3283 |
3284 |.macro ins_arith, sseins
3285 | ins_arithpre sseins, xmm0
3286 | ins_arithpost
3287 | ins_next
3288 |.endmacro
3289 |
3290 |.macro ins_arith, intins, sseins
3291 |.if DUALNUM
3292 | ins_arithdn intins
3293 |.else
3294 | ins_arith, sseins
3295 |.endif
3296 |.endmacro
3297
3298 | // RA = dst, RB = src1 or num const, RC = src2 or num const
3299 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
3300 | ins_arith add, addsd
3301 break;
3302 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
3303 | ins_arith sub, subsd
3304 break;
3305 case BC_MULVN: case BC_MULNV: case BC_MULVV:
3306 | ins_arith imul, mulsd
3307 break;
3308 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
3309 | ins_arith divsd
3310 break;
3311 case BC_MODVN:
3312 | ins_arithpre movsd, xmm1
3313 |->BC_MODVN_Z:
3314 | call ->vm_mod
3315 | ins_arithpost
3316 | ins_next
3317 break;
3318 case BC_MODNV: case BC_MODVV:
3319 | ins_arithpre movsd, xmm1
3320 | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
3321 break;
3322 case BC_POW:
3323 | ins_arithpre movsd, xmm1
3324 | mov RB, BASE
3325 | call extern pow
3326 | movzx RAd, PC_RA
3327 | mov BASE, RB
3328 | ins_arithpost
3329 | ins_next
3330 break;
3331
3332 case BC_CAT:
3333 | ins_ABC // RA = dst, RB = src_start, RC = src_end
3334 | mov L:CARG1, SAVE_L
3335 | mov L:CARG1->base, BASE
3336 | lea CARG2, [BASE+RC*8]
3337 | mov CARG3d, RCd
3338 | sub CARG3d, RBd
3339 |->BC_CAT_Z:
3340 | mov L:RB, L:CARG1
3341 | mov SAVE_PC, PC
3342 | call extern lj_meta_cat // (lua_State *L, TValue *top, int left)
3343 | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
3344 | mov BASE, L:RB->base
3345 | test RC, RC
3346 | jnz ->vmeta_binop
3347 | movzx RBd, PC_RB // Copy result to Stk[RA] from Stk[RB].
3348 | movzx RAd, PC_RA
3349 | mov RC, [BASE+RB*8]
3350 | mov [BASE+RA*8], RC
3351 | ins_next
3352 break;
3353
3354 /* -- Constant ops ------------------------------------------------------ */
3355
3356 case BC_KSTR:
3357 | ins_AND // RA = dst, RD = str const (~)
3358 | mov RD, [KBASE+RD*8] // Load GCstr * from the constant table.
3359 | settp RD, LJ_TSTR // Add the string type tag.
3360 | mov [BASE+RA*8], RD
3361 | ins_next
3362 break;
3363 case BC_KCDATA:
3364 |.if FFI
3365 | ins_AND // RA = dst, RD = cdata const (~)
3366 | mov RD, [KBASE+RD*8]
3367 | settp RD, LJ_TCDATA
3368 | mov [BASE+RA*8], RD
3369 | ins_next
3370 |.endif
3371 break;
3372 case BC_KSHORT:
3373 | ins_AD // RA = dst, RD = signed int16 literal
3374 |.if DUALNUM
3375 | movsx RDd, RDW // Sign-extend literal.
3376 | setint RD // Store as a tagged integer.
3377 | mov [BASE+RA*8], RD
3378 |.else
3379 | movsx RDd, RDW // Sign-extend literal.
3380 | cvtsi2sd xmm0, RDd // Convert to double on non-DUALNUM builds.
3381 | movsd qword [BASE+RA*8], xmm0
3382 |.endif
3383 | ins_next
3384 break;
3385 case BC_KNUM:
3386 | ins_AD // RA = dst, RD = num const
3387 | movsd xmm0, qword [KBASE+RD*8] // Load the number constant ...
3388 | movsd qword [BASE+RA*8], xmm0 // ... and store it to the destination slot.
3389 | ins_next
3390 break;
3391 case BC_KPRI:
3392 | ins_AD // RA = dst, RD = primitive type (~)
3393 | shl RD, 47
3394 | not RD // Tagged primitive is encoded as ~(type<<47).
3395 | mov [BASE+RA*8], RD
3396 | ins_next
3397 break;
3398 case BC_KNIL:
3399 | ins_AD // RA = dst_start, RD = dst_end
3400 | lea RA, [BASE+RA*8+8]
3401 | lea RD, [BASE+RD*8]
3402 | mov RB, LJ_TNIL
3403 | mov [RA-8], RB // Sets minimum 2 slots.
3404 |1:
3405 | mov [RA], RB
3406 | add RA, 8
3407 | cmp RA, RD
3408 | jbe <1 // Fill every slot up to and including dst_end with nil.
3409 | ins_next
3410 break;
3411
3412 /* -- Upvalue and function ops ------------------------------------------ */
3413
3414 case BC_UGET:
3415 | ins_AD // RA = dst, RD = upvalue #
3416 | mov LFUNC:RB, [BASE-16] // Load the executing function from the frame.
3417 | cleartp LFUNC:RB // Strip the type tag to get the GCfuncL *.
3418 | mov UPVAL:RB, [LFUNC:RB+RD*8+offsetof(GCfuncL, uvptr)] // Index the upvalue array.
3419 | mov RB, UPVAL:RB->v // v points at the upvalue's current value slot.
3420 | mov RD, [RB]
3421 | mov [BASE+RA*8], RD
3422 | ins_next
3423 break;
3424 case BC_USETV:
3425#define TV2MARKOFS \
3426 ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
3427 | ins_AD // RA = upvalue #, RD = src
3428 | mov LFUNC:RB, [BASE-16]
3429 | cleartp LFUNC:RB
3430 | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
3431 | cmp byte UPVAL:RB->closed, 0
3432 | mov RB, UPVAL:RB->v
3433 | mov RA, [BASE+RD*8]
3434 | mov [RB], RA
3435 | jz >1
3436 | // Check barrier for closed upvalue.
3437 | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv)
3438 | jnz >2
3439 |1:
3440 | ins_next
3441 |
3442 |2: // Upvalue is black. Check if new value is collectable and white.
3443 | mov RD, RA
3444 | sar RD, 47
3445 | sub RDd, LJ_TISGCV
3446 | cmp RDd, LJ_TNUMX - LJ_TISGCV // tvisgcv(v)
3447 | jbe <1
3448 | cleartp GCOBJ:RA
3449 | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
3450 | jz <1
3451 | // Crossed a write barrier. Move the barrier forward.
3452 |.if not X64WIN
3453 | mov CARG2, RB
3454 | mov RB, BASE // Save BASE.
3455 |.else
3456 | xchg CARG2, RB // Save BASE (CARG2 == BASE).
3457 |.endif
3458 | lea GL:CARG1, [DISPATCH+GG_DISP2G]
3459 | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
3460 | mov BASE, RB // Restore BASE.
3461 | jmp <1
3462 break;
3463#undef TV2MARKOFS
3464 case BC_USETS:
3465 | ins_AND // RA = upvalue #, RD = str const (~)
3466 | mov LFUNC:RB, [BASE-16]
3467 | cleartp LFUNC:RB
3468 | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
3469 | mov STR:RA, [KBASE+RD*8]
3470 | mov RD, UPVAL:RB->v
3471 | settp STR:ITYPE, STR:RA, LJ_TSTR
3472 | mov [RD], STR:ITYPE
3473 | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv)
3474 | jnz >2
3475 |1:
3476 | ins_next
3477 |
3478 |2: // Check if string is white and ensure upvalue is closed.
3479 | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str)
3480 | jz <1
3481 | cmp byte UPVAL:RB->closed, 0
3482 | jz <1
3483 | // Crossed a write barrier. Move the barrier forward.
3484 | mov RB, BASE // Save BASE (CARG2 == BASE).
3485 | mov CARG2, RD
3486 | lea GL:CARG1, [DISPATCH+GG_DISP2G]
3487 | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
3488 | mov BASE, RB // Restore BASE.
3489 | jmp <1
3490 break;
3491 case BC_USETN:
3492 | ins_AD // RA = upvalue #, RD = num const
3493 | mov LFUNC:RB, [BASE-16] // Load the executing function from the frame.
3494 | cleartp LFUNC:RB
3495 | movsd xmm0, qword [KBASE+RD*8]
3496 | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
3497 | mov RA, UPVAL:RB->v
3498 | movsd qword [RA], xmm0 // No barrier check, unlike BC_USETV/BC_USETS: numbers are not collectable.
3499 | ins_next
3500 break;
3501 case BC_USETP:
3502 | ins_AD // RA = upvalue #, RD = primitive type (~)
3503 | mov LFUNC:RB, [BASE-16] // Load the executing function from the frame.
3504 | cleartp LFUNC:RB
3505 | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
3506 | shl RD, 47
3507 | not RD // Tagged primitive is encoded as ~(type<<47).
3508 | mov RA, UPVAL:RB->v
3509 | mov [RA], RD
3510 | ins_next
3511 break;
3512 case BC_UCLO:
3513 | ins_AD // RA = level, RD = target
3514 | branchPC RD // Do this first to free RD.
3515 | mov L:RB, SAVE_L
3516 | cmp aword L:RB->openupval, 0
3517 | je >1 // No open upvalues: nothing to close.
3518 | mov L:RB->base, BASE
3519 | lea CARG2, [BASE+RA*8] // Caveat: CARG2 == BASE
3520 | mov L:CARG1, L:RB // Caveat: CARG1 == RA
3521 | call extern lj_func_closeuv // (lua_State *L, TValue *level)
3522 | mov BASE, L:RB->base // Reload BASE after the call.
3523 |1:
3524 | ins_next
3525 break;
3526
3527 case BC_FNEW:
3528 | ins_AND // RA = dst, RD = proto const (~) (holding function prototype)
3529 | mov L:RB, SAVE_L
3530 | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
3531 | mov CARG3, [BASE-16]
3532 | cleartp CARG3
3533 | mov CARG2, [KBASE+RD*8] // Fetch GCproto *.
3534 | mov CARG1, L:RB
3535 | mov SAVE_PC, PC
3536 | // (lua_State *L, GCproto *pt, GCfuncL *parent)
3537 | call extern lj_func_newL_gc
3538 | // GCfuncL * returned in eax (RC).
3539 | mov BASE, L:RB->base
3540 | movzx RAd, PC_RA
3541 | settp LFUNC:RC, LJ_TFUNC
3542 | mov [BASE+RA*8], LFUNC:RC
3543 | ins_next
3544 break;
3545
3546 /* -- Table ops --------------------------------------------------------- */
3547
3548 case BC_TNEW:
3549 | ins_AD // RA = dst, RD = hbits|asize
3550 | mov L:RB, SAVE_L
3551 | mov L:RB->base, BASE
3552 | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
3553 | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
3554 | mov SAVE_PC, PC
3555 | jae >5
3556 |1:
3557 | mov CARG3d, RDd
3558 | and RDd, 0x7ff
3559 | shr CARG3d, 11
3560 | cmp RDd, 0x7ff
3561 | je >3
3562 |2:
3563 | mov L:CARG1, L:RB
3564 | mov CARG2d, RDd
3565 | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
3566 | // Table * returned in eax (RC).
3567 | mov BASE, L:RB->base
3568 | movzx RAd, PC_RA
3569 | settp TAB:RC, LJ_TTAB
3570 | mov [BASE+RA*8], TAB:RC
3571 | ins_next
3572 |3: // Turn 0x7ff into 0x801.
3573 | mov RDd, 0x801
3574 | jmp <2
3575 |5:
3576 | mov L:CARG1, L:RB
3577 | call extern lj_gc_step_fixtop // (lua_State *L)
3578 | movzx RDd, PC_RD
3579 | jmp <1
3580 break;
3581 case BC_TDUP:
3582 | ins_AND // RA = dst, RD = table const (~) (holding template table)
3583 | mov L:RB, SAVE_L
3584 | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
3585 | mov SAVE_PC, PC
3586 | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
3587 | mov L:RB->base, BASE
3588 | jae >3
3589 |2:
3590 | mov TAB:CARG2, [KBASE+RD*8] // Caveat: CARG2 == BASE
3591 | mov L:CARG1, L:RB // Caveat: CARG1 == RA
3592 | call extern lj_tab_dup // (lua_State *L, Table *kt)
3593 | // Table * returned in eax (RC).
3594 | mov BASE, L:RB->base
3595 | movzx RAd, PC_RA
3596 | settp TAB:RC, LJ_TTAB
3597 | mov [BASE+RA*8], TAB:RC
3598 | ins_next
3599 |3:
3600 | mov L:CARG1, L:RB
3601 | call extern lj_gc_step_fixtop // (lua_State *L)
3602 | movzx RDd, PC_RD // Need to reload RD.
3603 | not RD
3604 | jmp <2
3605 break;
3606
3607 case BC_GGET:
3608 | ins_AND // RA = dst, RD = str const (~)
3609 | mov LFUNC:RB, [BASE-16] // Load the executing function from the frame.
3610 | cleartp LFUNC:RB
3611 | mov TAB:RB, LFUNC:RB->env // Globals table = function environment.
3612 | mov STR:RC, [KBASE+RD*8]
3613 | jmp ->BC_TGETS_Z // Continue like a table load with a string key.
3614 break;
3615 case BC_GSET:
3616 | ins_AND // RA = src, RD = str const (~)
3617 | mov LFUNC:RB, [BASE-16] // Load the executing function from the frame.
3618 | cleartp LFUNC:RB
3619 | mov TAB:RB, LFUNC:RB->env // Globals table = function environment.
3620 | mov STR:RC, [KBASE+RD*8]
3621 | jmp ->BC_TSETS_Z // Continue like a table store with a string key.
3622 break;
3623
3624 case BC_TGETV:
3625 | ins_ABC // RA = dst, RB = table, RC = key
3626 | mov TAB:RB, [BASE+RB*8]
3627 | mov RC, [BASE+RC*8]
3628 | checktab TAB:RB, ->vmeta_tgetv
3629 |
3630 | // Integer key?
3631 |.if DUALNUM
3632 | checkint RC, >5
3633 |.else
3634 | // Convert number to int and back and compare.
3635 | checknum RC, >5
3636 | movd xmm0, RC
3637 | cvttsd2si RCd, xmm0
3638 | cvtsi2sd xmm1, RCd
3639 | ucomisd xmm0, xmm1
3640 | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
3641 |.endif
3642 | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
3643 | jae ->vmeta_tgetv // Not in array part? Use fallback.
3644 | shl RCd, 3
3645 | add RC, TAB:RB->array
3646 | // Get array slot.
3647 | mov ITYPE, [RC]
3648 | cmp ITYPE, LJ_TNIL // Avoid overwriting RB in fastpath.
3649 | je >2
3650 |1:
3651 | mov [BASE+RA*8], ITYPE
3652 | ins_next
3653 |
3654 |2: // Check for __index if table value is nil.
3655 | mov TAB:TMPR, TAB:RB->metatable
3656 | test TAB:TMPR, TAB:TMPR
3657 | jz <1
3658 | test byte TAB:TMPR->nomm, 1<<MM_index
3659 | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
3660 | jmp <1
3661 |
3662 |5: // String key?
3663 | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tgetv
3664 | cleartp STR:RC
3665 | jmp ->BC_TGETS_Z
3666 break;
3667 case BC_TGETS:
3668 | ins_ABC // RA = dst, RB = table, RC = str const (~)
3669 | mov TAB:RB, [BASE+RB*8]
3670 | not RC
3671 | mov STR:RC, [KBASE+RC*8]
3672 | checktab TAB:RB, ->vmeta_tgets
3673 |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *
3674 | mov TMPRd, TAB:RB->hmask
3675 | and TMPRd, STR:RC->sid
3676 | imul TMPRd, #NODE
3677 | add NODE:TMPR, TAB:RB->node
3678 | settp ITYPE, STR:RC, LJ_TSTR
3679 |1:
3680 | cmp NODE:TMPR->key, ITYPE
3681 | jne >4
3682 | // Get node value.
3683 | mov ITYPE, NODE:TMPR->val
3684 | cmp ITYPE, LJ_TNIL
3685 | je >5 // Key found, but nil value?
3686 |2:
3687 | mov [BASE+RA*8], ITYPE
3688 | ins_next
3689 |
3690 |4: // Follow hash chain.
3691 | mov NODE:TMPR, NODE:TMPR->next
3692 | test NODE:TMPR, NODE:TMPR
3693 | jnz <1
3694 | // End of hash chain: key not found, nil result.
3695 | mov ITYPE, LJ_TNIL
3696 |
3697 |5: // Check for __index if table value is nil.
3698 | mov TAB:TMPR, TAB:RB->metatable
3699 | test TAB:TMPR, TAB:TMPR
3700 | jz <2 // No metatable: done.
3701 | test byte TAB:TMPR->nomm, 1<<MM_index
3702 | jnz <2 // 'no __index' flag set: done.
3703 | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
3704 break;
3705 case BC_TGETB:
3706 | ins_ABC // RA = dst, RB = table, RC = byte literal
3707 | mov TAB:RB, [BASE+RB*8]
3708 | checktab TAB:RB, ->vmeta_tgetb
3709 | cmp RCd, TAB:RB->asize
3710 | jae ->vmeta_tgetb
3711 | shl RCd, 3
3712 | add RC, TAB:RB->array
3713 | // Get array slot.
3714 | mov ITYPE, [RC]
3715 | cmp ITYPE, LJ_TNIL
3716 | je >2
3717 |1:
3718 | mov [BASE+RA*8], ITYPE
3719 | ins_next
3720 |
3721 |2: // Check for __index if table value is nil.
3722 | mov TAB:TMPR, TAB:RB->metatable
3723 | test TAB:TMPR, TAB:TMPR
3724 | jz <1
3725 | test byte TAB:TMPR->nomm, 1<<MM_index
3726 | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
3727 | jmp <1
3728 break;
3729 case BC_TGETR:
3730 | ins_ABC // RA = dst, RB = table, RC = key
3731 | mov TAB:RB, [BASE+RB*8]
3732 | cleartp TAB:RB
3733 |.if DUALNUM
3734 | mov RCd, dword [BASE+RC*8]
3735 |.else
3736 | cvttsd2si RCd, qword [BASE+RC*8]
3737 |.endif
3738 | cmp RCd, TAB:RB->asize
3739 | jae ->vmeta_tgetr // Not in array part? Use fallback.
3740 | shl RCd, 3
3741 | add RC, TAB:RB->array
3742 | // Get array slot.
3743 |->BC_TGETR_Z:
3744 | mov ITYPE, [RC]
3745 |->BC_TGETR2_Z:
3746 | mov [BASE+RA*8], ITYPE
3747 | ins_next
3748 break;
3749
3750 case BC_TSETV:
3751 | ins_ABC // RA = src, RB = table, RC = key
3752 | mov TAB:RB, [BASE+RB*8]
3753 | mov RC, [BASE+RC*8]
3754 | checktab TAB:RB, ->vmeta_tsetv
3755 |
3756 | // Integer key?
3757 |.if DUALNUM
3758 | checkint RC, >5
3759 |.else
3760 | // Convert number to int and back and compare.
3761 | checknum RC, >5
3762 | movd xmm0, RC
3763 | cvttsd2si RCd, xmm0
3764 | cvtsi2sd xmm1, RCd
3765 | ucomisd xmm0, xmm1
3766 | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
3767 |.endif
3768 | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
3769 | jae ->vmeta_tsetv
3770 | shl RCd, 3
3771 | add RC, TAB:RB->array
3772 | cmp aword [RC], LJ_TNIL
3773 | je >3 // Previous value is nil?
3774 |1:
3775 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
3776 | jnz >7
3777 |2: // Set array slot.
3778 | mov RB, [BASE+RA*8]
3779 | mov [RC], RB
3780 | ins_next
3781 |
3782 |3: // Check for __newindex if previous value is nil.
3783 | mov TAB:TMPR, TAB:RB->metatable
3784 | test TAB:TMPR, TAB:TMPR
3785 | jz <1
3786 | test byte TAB:TMPR->nomm, 1<<MM_newindex
3787 | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
3788 | jmp <1
3789 |
3790 |5: // String key?
3791 | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tsetv
3792 | cleartp STR:RC
3793 | jmp ->BC_TSETS_Z
3794 |
3795 |7: // Possible table write barrier for the value. Skip valiswhite check.
3796 | barrierback TAB:RB, TMPR
3797 | jmp <2
3798 break;
3799 case BC_TSETS:
3800 | ins_ABC // RA = src, RB = table, RC = str const (~)
3801 | mov TAB:RB, [BASE+RB*8]
3802 | not RC
3803 | mov STR:RC, [KBASE+RC*8]
3804 | checktab TAB:RB, ->vmeta_tsets
3805 |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *
3806 | mov TMPRd, TAB:RB->hmask
3807 | and TMPRd, STR:RC->sid
3808 | imul TMPRd, #NODE
3809 | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
3810 | add NODE:TMPR, TAB:RB->node
3811 | settp ITYPE, STR:RC, LJ_TSTR
3812 |1:
3813 | cmp NODE:TMPR->key, ITYPE
3814 | jne >5
3815 | // Ok, key found. Assumes: offsetof(Node, val) == 0
3816 | cmp aword [TMPR], LJ_TNIL
3817 | je >4 // Previous value is nil?
3818 |2:
3819 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
3820 | jnz >7
3821 |3: // Set node value.
3822 | mov ITYPE, [BASE+RA*8]
3823 | mov [TMPR], ITYPE
3824 | ins_next
3825 |
3826 |4: // Check for __newindex if previous value is nil.
3827 | mov TAB:ITYPE, TAB:RB->metatable
3828 | test TAB:ITYPE, TAB:ITYPE
3829 | jz <2
3830 | test byte TAB:ITYPE->nomm, 1<<MM_newindex
3831 | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
3832 | jmp <2
3833 |
3834 |5: // Follow hash chain.
3835 | mov NODE:TMPR, NODE:TMPR->next
3836 | test NODE:TMPR, NODE:TMPR
3837 | jnz <1
3838 | // End of hash chain: key not found, add a new one.
3839 |
3840 | // But check for __newindex first.
3841 | mov TAB:TMPR, TAB:RB->metatable
3842 | test TAB:TMPR, TAB:TMPR
3843 | jz >6 // No metatable: continue.
3844 | test byte TAB:TMPR->nomm, 1<<MM_newindex
3845 | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
3846 |6:
3847 | mov TMP1, ITYPE
3848 | mov L:CARG1, SAVE_L
3849 | mov L:CARG1->base, BASE
3850 | lea CARG3, TMP1
3851 | mov CARG2, TAB:RB
3852 | mov SAVE_PC, PC
3853 | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
3854 | // Handles write barrier for the new key. TValue * returned in eax (RC).
3855 | mov L:CARG1, SAVE_L
3856 | mov BASE, L:CARG1->base
3857 | mov TMPR, rax
3858 | movzx RAd, PC_RA
3859 | jmp <2 // Must check write barrier for value.
3860 |
3861 |7: // Possible table write barrier for the value. Skip valiswhite check.
3862 | barrierback TAB:RB, ITYPE
3863 | jmp <3
3864 break;
3865 case BC_TSETB:
3866 | ins_ABC // RA = src, RB = table, RC = byte literal
3867 | mov TAB:RB, [BASE+RB*8]
3868 | checktab TAB:RB, ->vmeta_tsetb
3869 | cmp RCd, TAB:RB->asize
3870 | jae ->vmeta_tsetb
3871 | shl RCd, 3
3872 | add RC, TAB:RB->array
3873 | cmp aword [RC], LJ_TNIL
3874 | je >3 // Previous value is nil?
3875 |1:
3876 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
3877 | jnz >7
3878 |2: // Set array slot.
3879 | mov ITYPE, [BASE+RA*8]
3880 | mov [RC], ITYPE
3881 | ins_next
3882 |
3883 |3: // Check for __newindex if previous value is nil.
3884 | mov TAB:TMPR, TAB:RB->metatable
3885 | test TAB:TMPR, TAB:TMPR
3886 | jz <1
3887 | test byte TAB:TMPR->nomm, 1<<MM_newindex
3888 | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
3889 | jmp <1
3890 |
3891 |7: // Possible table write barrier for the value. Skip valiswhite check.
3892 | barrierback TAB:RB, TMPR
3893 | jmp <2
3894 break;
3895 case BC_TSETR:
3896 | ins_ABC // RA = src, RB = table, RC = key
3897 | mov TAB:RB, [BASE+RB*8]
3898 | cleartp TAB:RB
3899 |.if DUALNUM
3900 | mov RC, [BASE+RC*8]
3901 |.else
3902 | cvttsd2si RCd, qword [BASE+RC*8]
3903 |.endif
3904 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
3905 | jnz >7
3906 |2:
3907 | cmp RCd, TAB:RB->asize
3908 | jae ->vmeta_tsetr
3909 | shl RCd, 3
3910 | add RC, TAB:RB->array
3911 | // Set array slot.
3912 |->BC_TSETR_Z:
3913 | mov ITYPE, [BASE+RA*8]
3914 | mov [RC], ITYPE
3915 | ins_next
3916 |
3917 |7: // Possible table write barrier for the value. Skip valiswhite check.
3918 | barrierback TAB:RB, TMPR
3919 | jmp <2
3920 break;
3921
3922 case BC_TSETM:
3923 | ins_AD // RA = base (table at base-1), RD = num const (start index)
3924 |1:
3925 | mov TMPRd, dword [KBASE+RD*8] // Integer constant is in lo-word.
3926 | lea RA, [BASE+RA*8]
3927 | mov TAB:RB, [RA-8] // Guaranteed to be a table.
3928 | cleartp TAB:RB
3929 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
3930 | jnz >7
3931 |2:
3932 | mov RDd, MULTRES
3933 | sub RDd, 1
3934 | jz >4 // Nothing to copy?
3935 | add RDd, TMPRd // Compute needed size.
3936 | cmp RDd, TAB:RB->asize
3937 | ja >5 // Doesn't fit into array part?
3938 | sub RDd, TMPRd
3939 | shl TMPRd, 3
3940 | add TMPR, TAB:RB->array
3941 |3: // Copy result slots to table.
3942 | mov RB, [RA]
3943 | add RA, 8
3944 | mov [TMPR], RB
3945 | add TMPR, 8
3946 | sub RDd, 1
3947 | jnz <3
3948 |4:
3949 | ins_next
3950 |
3951 |5: // Need to resize array part.
3952 | mov L:CARG1, SAVE_L
3953 | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
3954 | mov CARG2, TAB:RB
3955 | mov CARG3d, RDd
3956 | mov L:RB, L:CARG1
3957 | mov SAVE_PC, PC
3958 | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
3959 | mov BASE, L:RB->base
3960 | movzx RAd, PC_RA // Restore RA.
3961 | movzx RDd, PC_RD // Restore RD.
3962 | jmp <1 // Retry.
3963 |
3964 |7: // Possible table write barrier for any value. Skip valiswhite check.
3965 | barrierback TAB:RB, RD
3966 | jmp <2
3967 break;
3968
3969 /* -- Calls and vararg handling ----------------------------------------- */
3970
3971 case BC_CALL: case BC_CALLM:
3972 | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
3973 if (op == BC_CALLM) {
3974 | add NARGS:RDd, MULTRES
3975 }
3976 | mov LFUNC:RB, [BASE+RA*8]
3977 | checkfunc LFUNC:RB, ->vmeta_call_ra
3978 | lea BASE, [BASE+RA*8+16]
3979 | ins_call
3980 break;
3981
3982 case BC_CALLMT:
3983 | ins_AD // RA = base, RD = extra_nargs
3984 | add NARGS:RDd, MULTRES
3985 | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
3986 break;
3987 case BC_CALLT:
3988 | ins_AD // RA = base, RD = nargs+1
3989 | lea RA, [BASE+RA*8+16]
3990 | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
3991 | mov LFUNC:RB, [RA-16]
3992 | checktp_nc LFUNC:RB, LJ_TFUNC, ->vmeta_call
3993 |->BC_CALLT_Z:
3994 | mov PC, [BASE-8]
3995 | test PCd, FRAME_TYPE
3996 | jnz >7
3997 |1:
3998 | mov [BASE-16], LFUNC:RB // Copy func+tag down, reloaded below.
3999 | mov MULTRES, NARGS:RDd
4000 | sub NARGS:RDd, 1
4001 | jz >3
4002 |2: // Move args down.
4003 | mov RB, [RA]
4004 | add RA, 8
4005 | mov [KBASE], RB
4006 | add KBASE, 8
4007 | sub NARGS:RDd, 1
4008 | jnz <2
4009 |
4010 | mov LFUNC:RB, [BASE-16]
4011 |3:
4012 | cleartp LFUNC:RB
4013 | mov NARGS:RDd, MULTRES
4014 | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function?
4015 | ja >5
4016 |4:
4017 | ins_callt
4018 |
4019 |5: // Tailcall to a fast function.
4020 | test PCd, FRAME_TYPE // Lua frame below?
4021 | jnz <4
4022 | movzx RAd, PC_RA
4023 | neg RA
4024 | mov LFUNC:KBASE, [BASE+RA*8-32] // Need to prepare KBASE.
4025 | cleartp LFUNC:KBASE
4026 | mov KBASE, LFUNC:KBASE->pc
4027 | mov KBASE, [KBASE+PC2PROTO(k)]
4028 | jmp <4
4029 |
4030 |7: // Tailcall from a vararg function.
4031 | sub PC, FRAME_VARG
4032 | test PCd, FRAME_TYPEP
4033 | jnz >8 // Vararg frame below?
4034 | sub BASE, PC // Need to relocate BASE/KBASE down.
4035 | mov KBASE, BASE
4036 | mov PC, [BASE-8]
4037 | jmp <1
4038 |8:
4039 | add PCd, FRAME_VARG
4040 | jmp <1
4041 break;
4042
4043 case BC_ITERC:
4044 | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1)
4045 | lea RA, [BASE+RA*8+16] // fb = base+2
4046 | mov RB, [RA-32] // Copy state. fb[0] = fb[-4].
4047 | mov RC, [RA-24] // Copy control var. fb[1] = fb[-3].
4048 | mov [RA], RB
4049 | mov [RA+8], RC
4050 | mov LFUNC:RB, [RA-40] // Copy callable. fb[-2] = fb[-5]
4051 | mov [RA-16], LFUNC:RB
4052 | mov NARGS:RDd, 2+1 // Handle like a regular 2-arg call.
4053 | checkfunc LFUNC:RB, ->vmeta_call
4054 | mov BASE, RA
4055 | ins_call
4056 break;
4057
4058 case BC_ITERN:
4059 | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
4060 |.if JIT
4061 | // NYI: add hotloop, record BC_ITERN.
4062 |.endif
4063 | mov TAB:RB, [BASE+RA*8-16]
4064 | cleartp TAB:RB
4065 | mov RCd, [BASE+RA*8-8] // Get index from control var.
4066 | mov TMPRd, TAB:RB->asize
4067 | add PC, 4
4068 | mov ITYPE, TAB:RB->array
4069 |1: // Traverse array part.
4070 | cmp RCd, TMPRd; jae >5 // Index points after array part?
4071 | cmp aword [ITYPE+RC*8], LJ_TNIL; je >4
4072 |.if not DUALNUM
4073 | cvtsi2sd xmm0, RCd
4074 |.endif
4075 | // Copy array slot to returned value.
4076 | mov RB, [ITYPE+RC*8]
4077 | mov [BASE+RA*8+8], RB
4078 | // Return array index as a numeric key.
4079 |.if DUALNUM
4080 | setint ITYPE, RC
4081 | mov [BASE+RA*8], ITYPE
4082 |.else
4083 | movsd qword [BASE+RA*8], xmm0
4084 |.endif
4085 | add RCd, 1
4086 | mov [BASE+RA*8-8], RCd // Update control var.
4087 |2:
4088 | movzx RDd, PC_RD // Get target from ITERL.
4089 | branchPC RD
4090 |3:
4091 | ins_next
4092 |
4093 |4: // Skip holes in array part.
4094 | add RCd, 1
4095 | jmp <1
4096 |
4097 |5: // Traverse hash part.
4098 | sub RCd, TMPRd
4099 |6:
4100 | cmp RCd, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1.
4101 | imul ITYPEd, RCd, #NODE
4102 | add NODE:ITYPE, TAB:RB->node
4103 | cmp aword NODE:ITYPE->val, LJ_TNIL; je >7
4104 | lea TMPRd, [RCd+TMPRd+1]
4105 | // Copy key and value from hash slot.
4106 | mov RB, NODE:ITYPE->key
4107 | mov RC, NODE:ITYPE->val
4108 | mov [BASE+RA*8], RB
4109 | mov [BASE+RA*8+8], RC
4110 | mov [BASE+RA*8-8], TMPRd
4111 | jmp <2
4112 |
4113 |7: // Skip holes in hash part.
4114 | add RCd, 1
4115 | jmp <6
4116 break;
4117
4118 case BC_ISNEXT:
4119 | ins_AD // RA = base, RD = target (points to ITERN)
4120 | mov CFUNC:RB, [BASE+RA*8-24]
4121 | checkfunc CFUNC:RB, >5
4122 | checktptp [BASE+RA*8-16], LJ_TTAB, >5
4123 | cmp aword [BASE+RA*8-8], LJ_TNIL; jne >5
4124 | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5
4125 | branchPC RD
4126 | mov64 TMPR, U64x(fffe7fff, 00000000)
4127 | mov [BASE+RA*8-8], TMPR // Initialize control var.
4128 |1:
4129 | ins_next
4130 |5: // Despecialize bytecode if any of the checks fail.
4131 | mov PC_OP, BC_JMP
4132 | branchPC RD
4133 | mov byte [PC], BC_ITERC
4134 | jmp <1
4135 break;
4136
4137 case BC_VARG:
4138 | ins_ABC // RA = base, RB = nresults+1, RC = numparams
4139 | lea TMPR, [BASE+RC*8+(16+FRAME_VARG)]
4140 | lea RA, [BASE+RA*8]
4141 | sub TMPR, [BASE-8]
4142 | // Note: TMPR may now be even _above_ BASE if nargs was < numparams.
4143 | test RB, RB
4144 | jz >5 // Copy all varargs?
4145 | lea RB, [RA+RB*8-8]
4146 | cmp TMPR, BASE // No vararg slots?
4147 | jnb >2
4148 |1: // Copy vararg slots to destination slots.
4149 | mov RC, [TMPR-16]
4150 | add TMPR, 8
4151 | mov [RA], RC
4152 | add RA, 8
4153 | cmp RA, RB // All destination slots filled?
4154 | jnb >3
4155 | cmp TMPR, BASE // No more vararg slots?
4156 | jb <1
4157 |2: // Fill up remainder with nil.
4158 | mov aword [RA], LJ_TNIL
4159 | add RA, 8
4160 | cmp RA, RB
4161 | jb <2
4162 |3:
4163 | ins_next
4164 |
4165 |5: // Copy all varargs.
4166 | mov MULTRES, 1 // MULTRES = 0+1
4167 | mov RC, BASE
4168 | sub RC, TMPR
4169 | jbe <3 // No vararg slots?
4170 | mov RBd, RCd
4171 | shr RBd, 3
4172 | add RBd, 1
4173 | mov MULTRES, RBd // MULTRES = #varargs+1
4174 | mov L:RB, SAVE_L
4175 | add RC, RA
4176 | cmp RC, L:RB->maxstack
4177 | ja >7 // Need to grow stack?
4178 |6: // Copy all vararg slots.
4179 | mov RC, [TMPR-16]
4180 | add TMPR, 8
4181 | mov [RA], RC
4182 | add RA, 8
4183 | cmp TMPR, BASE // No more vararg slots?
4184 | jb <6
4185 | jmp <3
4186 |
4187 |7: // Grow stack for varargs.
4188 | mov L:RB->base, BASE
4189 | mov L:RB->top, RA
4190 | mov SAVE_PC, PC
4191 | sub TMPR, BASE // Need delta, because BASE may change.
4192 | mov TMP1hi, TMPRd
4193 | mov CARG2d, MULTRES
4194 | sub CARG2d, 1
4195 | mov CARG1, L:RB
4196 | call extern lj_state_growstack // (lua_State *L, int n)
4197 | mov BASE, L:RB->base
4198 | movsxd TMPR, TMP1hi
4199 | mov RA, L:RB->top
4200 | add TMPR, BASE
4201 | jmp <6
4202 break;
4203
4204 /* -- Returns ----------------------------------------------------------- */
4205
4206 case BC_RETM:
4207 | ins_AD // RA = results, RD = extra_nresults
4208 | add RDd, MULTRES // MULTRES >=1, so RD >=1.
4209 | // Fall through. Assumes BC_RET follows and ins_AD is a no-op.
4210 break;
4211
4212 case BC_RET: case BC_RET0: case BC_RET1:
4213 | ins_AD // RA = results, RD = nresults+1
4214 if (op != BC_RET0) {
4215 | shl RAd, 3
4216 }
4217 |1:
4218 | mov PC, [BASE-8]
4219 | mov MULTRES, RDd // Save nresults+1.
4220 | test PCd, FRAME_TYPE // Check frame type marker.
4221 | jnz >7 // Not returning to a fixarg Lua func?
4222 switch (op) {
4223 case BC_RET:
4224 |->BC_RET_Z:
4225 | mov KBASE, BASE // Use KBASE for result move.
4226 | sub RDd, 1
4227 | jz >3
4228 |2: // Move results down.
4229 | mov RB, [KBASE+RA]
4230 | mov [KBASE-16], RB
4231 | add KBASE, 8
4232 | sub RDd, 1
4233 | jnz <2
4234 |3:
4235 | mov RDd, MULTRES // Note: MULTRES may be >255.
4236 | movzx RBd, PC_RB // So cannot compare with RDL!
4237 |5:
4238 | cmp RBd, RDd // More results expected?
4239 | ja >6
4240 break;
4241 case BC_RET1:
4242 | mov RB, [BASE+RA]
4243 | mov [BASE-16], RB
4244 /* fallthrough */
4245 case BC_RET0:
4246 |5:
4247 | cmp PC_RB, RDL // More results expected?
4248 | ja >6
4249 default:
4250 break;
4251 }
4252 | movzx RAd, PC_RA
4253 | neg RA
4254 | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
4255 | mov LFUNC:KBASE, [BASE-16]
4256 | cleartp LFUNC:KBASE
4257 | mov KBASE, LFUNC:KBASE->pc
4258 | mov KBASE, [KBASE+PC2PROTO(k)]
4259 | ins_next
4260 |
4261 |6: // Fill up results with nil.
4262 if (op == BC_RET) {
4263 | mov aword [KBASE-16], LJ_TNIL // Note: relies on shifted base.
4264 | add KBASE, 8
4265 } else {
4266 | mov aword [BASE+RD*8-24], LJ_TNIL
4267 }
4268 | add RD, 1
4269 | jmp <5
4270 |
4271 |7: // Non-standard return case.
4272 | lea RB, [PC-FRAME_VARG]
4273 | test RBd, FRAME_TYPEP
4274 | jnz ->vm_return
4275 | // Return from vararg function: relocate BASE down and RA up.
4276 | sub BASE, RB
4277 if (op != BC_RET0) {
4278 | add RA, RB
4279 }
4280 | jmp <1
4281 break;
4282
4283 /* -- Loops and branches ------------------------------------------------ */
4284
4285 |.define FOR_IDX, [RA]
4286 |.define FOR_STOP, [RA+8]
4287 |.define FOR_STEP, [RA+16]
4288 |.define FOR_EXT, [RA+24]
4289
4290 case BC_FORL:
4291 |.if JIT
4292 | hotloop RBd
4293 |.endif
4294 | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op.
4295 break;
4296
4297 case BC_JFORI:
4298 case BC_JFORL:
4299#if !LJ_HASJIT
4300 break;
4301#endif
4302 case BC_FORI:
4303 case BC_IFORL:
4304 vk = (op == BC_IFORL || op == BC_JFORL);
4305 | ins_AJ // RA = base, RD = target (after end of loop or start of loop)
4306 | lea RA, [BASE+RA*8]
4307 if (LJ_DUALNUM) {
4308 | mov RB, FOR_IDX
4309 | checkint RB, >9
4310 | mov TMPR, FOR_STOP
4311 if (!vk) {
4312 | checkint TMPR, ->vmeta_for
4313 | mov ITYPE, FOR_STEP
4314 | test ITYPEd, ITYPEd; js >5
4315 | sar ITYPE, 47;
4316 | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
4317 } else {
4318#ifdef LUA_USE_ASSERT
4319 | checkinttp FOR_STOP, ->assert_bad_for_arg_type
4320 | checkinttp FOR_STEP, ->assert_bad_for_arg_type
4321#endif
4322 | mov ITYPE, FOR_STEP
4323 | test ITYPEd, ITYPEd; js >5
4324 | add RBd, ITYPEd; jo >1
4325 | setint RB
4326 | mov FOR_IDX, RB
4327 }
4328 | cmp RBd, TMPRd
4329 | mov FOR_EXT, RB
4330 if (op == BC_FORI) {
4331 | jle >7
4332 |1:
4333 |6:
4334 | branchPC RD
4335 } else if (op == BC_JFORI) {
4336 | branchPC RD
4337 | movzx RDd, PC_RD
4338 | jle =>BC_JLOOP
4339 |1:
4340 |6:
4341 } else if (op == BC_IFORL) {
4342 | jg >7
4343 |6:
4344 | branchPC RD
4345 |1:
4346 } else {
4347 | jle =>BC_JLOOP
4348 |1:
4349 |6:
4350 }
4351 |7:
4352 | ins_next
4353 |
4354 |5: // Invert check for negative step.
4355 if (!vk) {
4356 | sar ITYPE, 47;
4357 | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
4358 } else {
4359 | add RBd, ITYPEd; jo <1
4360 | setint RB
4361 | mov FOR_IDX, RB
4362 }
4363 | cmp RBd, TMPRd
4364 | mov FOR_EXT, RB
4365 if (op == BC_FORI) {
4366 | jge <7
4367 } else if (op == BC_JFORI) {
4368 | branchPC RD
4369 | movzx RDd, PC_RD
4370 | jge =>BC_JLOOP
4371 } else if (op == BC_IFORL) {
4372 | jl <7
4373 } else {
4374 | jge =>BC_JLOOP
4375 }
4376 | jmp <6
4377 |9: // Fallback to FP variant.
4378 if (!vk) {
4379 | jae ->vmeta_for
4380 }
4381 } else if (!vk) {
4382 | checknumtp FOR_IDX, ->vmeta_for
4383 }
4384 if (!vk) {
4385 | checknumtp FOR_STOP, ->vmeta_for
4386 } else {
4387#ifdef LUA_USE_ASSERT
4388 | checknumtp FOR_STOP, ->assert_bad_for_arg_type
4389 | checknumtp FOR_STEP, ->assert_bad_for_arg_type
4390#endif
4391 }
4392 | mov RB, FOR_STEP
4393 if (!vk) {
4394 | checknum RB, ->vmeta_for
4395 }
4396 | movsd xmm0, qword FOR_IDX
4397 | movsd xmm1, qword FOR_STOP
4398 if (vk) {
4399 | addsd xmm0, qword FOR_STEP
4400 | movsd qword FOR_IDX, xmm0
4401 | test RB, RB; js >3
4402 } else {
4403 | jl >3
4404 }
4405 | ucomisd xmm1, xmm0
4406 |1:
4407 | movsd qword FOR_EXT, xmm0
4408 if (op == BC_FORI) {
4409 |.if DUALNUM
4410 | jnb <7
4411 |.else
4412 | jnb >2
4413 | branchPC RD
4414 |.endif
4415 } else if (op == BC_JFORI) {
4416 | branchPC RD
4417 | movzx RDd, PC_RD
4418 | jnb =>BC_JLOOP
4419 } else if (op == BC_IFORL) {
4420 |.if DUALNUM
4421 | jb <7
4422 |.else
4423 | jb >2
4424 | branchPC RD
4425 |.endif
4426 } else {
4427 | jnb =>BC_JLOOP
4428 }
4429 |.if DUALNUM
4430 | jmp <6
4431 |.else
4432 |2:
4433 | ins_next
4434 |.endif
4435 |
4436 |3: // Invert comparison if step is negative.
4437 | ucomisd xmm0, xmm1
4438 | jmp <1
4439 break;
4440
4441 case BC_ITERL:
4442 |.if JIT
4443 | hotloop RBd
4444 |.endif
4445 | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op.
4446 break;
4447
4448 case BC_JITERL:
4449#if !LJ_HASJIT
4450 break;
4451#endif
4452 case BC_IITERL:
4453 | ins_AJ // RA = base, RD = target
4454 | lea RA, [BASE+RA*8]
4455 | mov RB, [RA]
4456 | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil.
4457 if (op == BC_JITERL) {
4458 | mov [RA-8], RB
4459 | jmp =>BC_JLOOP
4460 } else {
4461 | branchPC RD // Otherwise save control var + branch.
4462 | mov [RA-8], RB
4463 }
4464 |1:
4465 | ins_next
4466 break;
4467
4468 case BC_LOOP:
4469 | ins_A // RA = base, RD = target (loop extent)
4470 | // Note: RA/RD is only used by trace recorder to determine scope/extent
4471 | // This opcode does NOT jump, it's only purpose is to detect a hot loop.
4472 |.if JIT
4473 | hotloop RBd
4474 |.endif
4475 | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
4476 break;
4477
4478 case BC_ILOOP:
4479 | ins_A // RA = base, RD = target (loop extent)
4480 | ins_next
4481 break;
4482
4483 case BC_JLOOP:
4484 |.if JIT
4485 | ins_AD // RA = base (ignored), RD = traceno
4486 | mov RA, [DISPATCH+DISPATCH_J(trace)]
4487 | mov TRACE:RD, [RA+RD*8]
4488 | mov RD, TRACE:RD->mcode
4489 | mov L:RB, SAVE_L
4490 | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
4491 | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB
4492 | // Save additional callee-save registers only used in compiled code.
4493 |.if X64WIN
4494 | mov CSAVE_4, r12
4495 | mov CSAVE_3, r13
4496 | mov CSAVE_2, r14
4497 | mov CSAVE_1, r15
4498 | mov RA, rsp
4499 | sub rsp, 10*16+4*8
4500 | movdqa [RA-1*16], xmm6
4501 | movdqa [RA-2*16], xmm7
4502 | movdqa [RA-3*16], xmm8
4503 | movdqa [RA-4*16], xmm9
4504 | movdqa [RA-5*16], xmm10
4505 | movdqa [RA-6*16], xmm11
4506 | movdqa [RA-7*16], xmm12
4507 | movdqa [RA-8*16], xmm13
4508 | movdqa [RA-9*16], xmm14
4509 | movdqa [RA-10*16], xmm15
4510 |.else
4511 | sub rsp, 16
4512 | mov [rsp+16], r12
4513 | mov [rsp+8], r13
4514 |.endif
4515 | jmp RD
4516 |.endif
4517 break;
4518
4519 case BC_JMP:
4520 | ins_AJ // RA = unused, RD = target
4521 | branchPC RD
4522 | ins_next
4523 break;
4524
4525 /* -- Function headers -------------------------------------------------- */
4526
4527 /*
4528 ** Reminder: A function may be called with func/args above L->maxstack,
4529 ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
4530 ** too. This means all FUNC* ops (including fast functions) must check
4531 ** for stack overflow _before_ adding more slots!
4532 */
4533
4534 case BC_FUNCF:
4535 |.if JIT
4536 | hotcall RBd
4537 |.endif
4538 case BC_FUNCV: /* NYI: compiled vararg functions. */
4539 | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op.
4540 break;
4541
4542 case BC_JFUNCF:
4543#if !LJ_HASJIT
4544 break;
4545#endif
4546 case BC_IFUNCF:
4547 | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
4548 | mov KBASE, [PC-4+PC2PROTO(k)]
4549 | mov L:RB, SAVE_L
4550 | lea RA, [BASE+RA*8] // Top of frame.
4551 | cmp RA, L:RB->maxstack
4552 | ja ->vm_growstack_f
4553 | movzx RAd, byte [PC-4+PC2PROTO(numparams)]
4554 | cmp NARGS:RDd, RAd // Check for missing parameters.
4555 | jbe >3
4556 |2:
4557 if (op == BC_JFUNCF) {
4558 | movzx RDd, PC_RD
4559 | jmp =>BC_JLOOP
4560 } else {
4561 | ins_next
4562 }
4563 |
4564 |3: // Clear missing parameters.
4565 | mov aword [BASE+NARGS:RD*8-8], LJ_TNIL
4566 | add NARGS:RDd, 1
4567 | cmp NARGS:RDd, RAd
4568 | jbe <3
4569 | jmp <2
4570 break;
4571
4572 case BC_JFUNCV:
4573#if !LJ_HASJIT
4574 break;
4575#endif
4576 | int3 // NYI: compiled vararg functions
4577 break; /* NYI: compiled vararg functions. */
4578
4579 case BC_IFUNCV:
4580 | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
4581 | lea RBd, [NARGS:RD*8+FRAME_VARG+8]
4582 | lea RD, [BASE+NARGS:RD*8+8]
4583 | mov LFUNC:KBASE, [BASE-16]
4584 | mov [RD-8], RB // Store delta + FRAME_VARG.
4585 | mov [RD-16], LFUNC:KBASE // Store copy of LFUNC.
4586 | mov L:RB, SAVE_L
4587 | lea RA, [RD+RA*8]
4588 | cmp RA, L:RB->maxstack
4589 | ja ->vm_growstack_v // Need to grow stack.
4590 | mov RA, BASE
4591 | mov BASE, RD
4592 | movzx RBd, byte [PC-4+PC2PROTO(numparams)]
4593 | test RBd, RBd
4594 | jz >2
4595 | add RA, 8
4596 |1: // Copy fixarg slots up to new frame.
4597 | add RA, 8
4598 | cmp RA, BASE
4599 | jnb >3 // Less args than parameters?
4600 | mov KBASE, [RA-16]
4601 | mov [RD], KBASE
4602 | add RD, 8
4603 | mov aword [RA-16], LJ_TNIL // Clear old fixarg slot (help the GC).
4604 | sub RBd, 1
4605 | jnz <1
4606 |2:
4607 if (op == BC_JFUNCV) {
4608 | movzx RDd, PC_RD
4609 | jmp =>BC_JLOOP
4610 } else {
4611 | mov KBASE, [PC-4+PC2PROTO(k)]
4612 | ins_next
4613 }
4614 |
4615 |3: // Clear missing parameters.
4616 | mov aword [RD], LJ_TNIL
4617 | add RD, 8
4618 | sub RBd, 1
4619 | jnz <3
4620 | jmp <2
4621 break;
4622
4623 case BC_FUNCC:
4624 case BC_FUNCCW:
4625 | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1
4626 | mov CFUNC:RB, [BASE-16]
4627 | cleartp CFUNC:RB
4628 | mov KBASE, CFUNC:RB->f
4629 | mov L:RB, SAVE_L
4630 | lea RD, [BASE+NARGS:RD*8-8]
4631 | mov L:RB->base, BASE
4632 | lea RA, [RD+8*LUA_MINSTACK]
4633 | cmp RA, L:RB->maxstack
4634 | mov L:RB->top, RD
4635 if (op == BC_FUNCC) {
4636 | mov CARG1, L:RB // Caveat: CARG1 may be RA.
4637 } else {
4638 | mov CARG2, KBASE
4639 | mov CARG1, L:RB // Caveat: CARG1 may be RA.
4640 }
4641 | ja ->vm_growstack_c // Need to grow stack.
4642 | set_vmstate C
4643 if (op == BC_FUNCC) {
4644 | call KBASE // (lua_State *L)
4645 } else {
4646 | // (lua_State *L, lua_CFunction f)
4647 | call aword [DISPATCH+DISPATCH_GL(wrapf)]
4648 }
4649 | // nresults returned in eax (RD).
4650 | mov BASE, L:RB->base
4651 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
4652 | set_vmstate INTERP
4653 | lea RA, [BASE+RD*8]
4654 | neg RA
4655 | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
4656 | mov PC, [BASE-8] // Fetch PC of caller.
4657 | jmp ->vm_returnc
4658 break;
4659
4660 /* ---------------------------------------------------------------------- */
4661
4662 default:
4663 fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
4664 exit(2);
4665 break;
4666 }
4667}
4668
4669static int build_backend(BuildCtx *ctx)
4670{
4671 int op;
4672 dasm_growpc(Dst, BC__MAX);
4673 build_subroutines(ctx);
4674 |.code_op
4675 for (op = 0; op < BC__MAX; op++)
4676 build_ins(ctx, (BCOp)op, op);
4677 return BC__MAX;
4678}
4679
/* Emit pseudo frame-info for all assembler functions. */
static void emit_asm_debug(BuildCtx *ctx)
{
  /* Code offset of lj_vm_ffi_call. It splits the emitted code into the
  ** interpreter part [0, fcofs) and the FFI call wrapper [fcofs, codesz),
  ** which each get their own FDE below (the wrapper uses an rbp-based CFA).
  */
  int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
  switch (ctx->mode) {
  case BUILD_elfasm:
    /* ELF output: first a .debug_frame section for debuggers. */
    fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
    /* CIE 0. The 0xffffffff CIE-id marks .debug_frame format (vs. 0 for
    ** .eh_frame). CFA rule: rsp (reg 0x7) + 8, return address in reg 0x10.
    */
    fprintf(ctx->fp,
	".Lframe0:\n"
	"\t.long .LECIE0-.LSCIE0\n"
	".LSCIE0:\n"
	"\t.long 0xffffffff\n"
	"\t.byte 0x1\n"
	"\t.string \"\"\n"
	"\t.uleb128 0x1\n"
	"\t.sleb128 -8\n"
	"\t.byte 0x10\n"
	"\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
	"\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
	"\t.align 8\n"
	".LECIE0:\n\n");
    /* FDE 0: covers the interpreter from .Lbegin up to lj_vm_ffi_call. */
    fprintf(ctx->fp,
	".LSFDE0:\n"
	"\t.long .LEFDE0-.LASFDE0\n"
	".LASFDE0:\n"
	"\t.long .Lframe0\n"
	"\t.quad .Lbegin\n"
	"\t.quad %d\n"
	"\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
	"\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
	"\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
	"\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
	"\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
#if LJ_NO_UNWIND
	"\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */
	"\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */
#endif
	"\t.align 8\n"
	".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
#if LJ_HASFFI
    /* FDE 1: lj_vm_ffi_call itself; note def_cfa_register switches to rbp. */
    fprintf(ctx->fp,
	".LSFDE1:\n"
	"\t.long .LEFDE1-.LASFDE1\n"
	".LASFDE1:\n"
	"\t.long .Lframe0\n"
	"\t.quad lj_vm_ffi_call\n"
	"\t.quad %d\n"
	"\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
	"\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
	"\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
	"\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
	"\t.align 8\n"
	".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
#endif
#if !LJ_NO_UNWIND
    /* Runtime unwind info: .eh_frame duplicates the above in pc-relative
    ** form and names lj_err_unwind_dwarf as the personality routine ("zPR").
    */
#if LJ_TARGET_SOLARIS
    fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n");
#else
    fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
#endif
    fprintf(ctx->fp,
	".Lframe1:\n"
	"\t.long .LECIE1-.LSCIE1\n"
	".LSCIE1:\n"
	"\t.long 0\n"
	"\t.byte 0x1\n"
	"\t.string \"zPR\"\n"
	"\t.uleb128 0x1\n"
	"\t.sleb128 -8\n"
	"\t.byte 0x10\n"
	"\t.uleb128 6\n" /* augmentation length */
	"\t.byte 0x1b\n" /* pcrel|sdata4 */
	"\t.long lj_err_unwind_dwarf-.\n"
	"\t.byte 0x1b\n" /* pcrel|sdata4 */
	"\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
	"\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
	"\t.align 8\n"
	".LECIE1:\n\n");
    /* FDE for the interpreter part (same register saves as .LSFDE0). */
    fprintf(ctx->fp,
	".LSFDE2:\n"
	"\t.long .LEFDE2-.LASFDE2\n"
	".LASFDE2:\n"
	"\t.long .LASFDE2-.Lframe1\n"
	"\t.long .Lbegin-.\n"
	"\t.long %d\n"
	"\t.uleb128 0\n" /* augmentation length */
	"\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
	"\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
	"\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
	"\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
	"\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
	"\t.align 8\n"
	".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
#if LJ_HASFFI
    /* Second CIE/FDE pair for lj_vm_ffi_call: no personality needed ("zR"). */
    fprintf(ctx->fp,
	".Lframe2:\n"
	"\t.long .LECIE2-.LSCIE2\n"
	".LSCIE2:\n"
	"\t.long 0\n"
	"\t.byte 0x1\n"
	"\t.string \"zR\"\n"
	"\t.uleb128 0x1\n"
	"\t.sleb128 -8\n"
	"\t.byte 0x10\n"
	"\t.uleb128 1\n" /* augmentation length */
	"\t.byte 0x1b\n" /* pcrel|sdata4 */
	"\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
	"\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
	"\t.align 8\n"
	".LECIE2:\n\n");
    fprintf(ctx->fp,
	".LSFDE3:\n"
	"\t.long .LEFDE3-.LASFDE3\n"
	".LASFDE3:\n"
	"\t.long .LASFDE3-.Lframe2\n"
	"\t.long lj_vm_ffi_call-.\n"
	"\t.long %d\n"
	"\t.uleb128 0\n" /* augmentation length */
	"\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
	"\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
	"\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
	"\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
	"\t.align 8\n"
	".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
#endif
#endif
    break;
#if !LJ_NO_UNWIND
  /* Mental note: never let Apple design an assembler.
  ** Or a linker. Or a plastic case. But I digress.
  */
  case BUILD_machasm: {
    /* Mach-O output: Apple's assembler wants one FDE per symbol, using
    ** .set/L$set$ arithmetic, in a coalesced __eh_frame section.
    */
#if LJ_HASFFI
    int fcsize = 0;  /* Size of _lj_vm_ffi_call, filled in by the loop below. */
#endif
    int i;
    fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
    fprintf(ctx->fp,
	"EH_frame1:\n"
	"\t.set L$set$x,LECIEX-LSCIEX\n"
	"\t.long L$set$x\n"
	"LSCIEX:\n"
	"\t.long 0\n"
	"\t.byte 0x1\n"
	"\t.ascii \"zPR\\0\"\n"
	"\t.byte 0x1\n"
	"\t.byte 128-8\n"
	"\t.byte 0x10\n"
	"\t.byte 6\n" /* augmentation length */
	"\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
	"\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
	"\t.byte 0x1b\n" /* pcrel|sdata4 */
	"\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
	"\t.byte 0x80+0x10\n\t.byte 0x1\n"
	"\t.align 3\n"
	"LECIEX:\n\n");
    /* One FDE per assembler symbol. Zero-size entries are aliases for the
    ** following symbol and are skipped; _lj_vm_ffi_call is deferred so it
    ** can get its own CIE with the rbp-based CFA rule.
    */
    for (i = 0; i < ctx->nsym; i++) {
      const char *name = ctx->sym[i].name;
      int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
      if (size == 0) continue;
#if LJ_HASFFI
      if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
#endif
      fprintf(ctx->fp,
	  "%s.eh:\n"
	  "LSFDE%d:\n"
	  "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
	  "\t.long L$set$%d\n"
	  "LASFDE%d:\n"
	  "\t.long LASFDE%d-EH_frame1\n"
	  "\t.long %s-.\n"
	  "\t.long %d\n"
	  "\t.byte 0\n" /* augmentation length */
	  "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
	  "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
	  "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
	  "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
	  "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
	  "\t.align 3\n"
	  "LEFDE%d:\n\n",
	  name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
    }
#if LJ_HASFFI
    if (fcsize) {
      /* Dedicated CIE ("zR", no personality) + FDE for _lj_vm_ffi_call. */
      fprintf(ctx->fp,
	  "EH_frame2:\n"
	  "\t.set L$set$y,LECIEY-LSCIEY\n"
	  "\t.long L$set$y\n"
	  "LSCIEY:\n"
	  "\t.long 0\n"
	  "\t.byte 0x1\n"
	  "\t.ascii \"zR\\0\"\n"
	  "\t.byte 0x1\n"
	  "\t.byte 128-8\n"
	  "\t.byte 0x10\n"
	  "\t.byte 1\n" /* augmentation length */
	  "\t.byte 0x1b\n" /* pcrel|sdata4 */
	  "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
	  "\t.byte 0x80+0x10\n\t.byte 0x1\n"
	  "\t.align 3\n"
	  "LECIEY:\n\n");
      fprintf(ctx->fp,
	  "_lj_vm_ffi_call.eh:\n"
	  "LSFDEY:\n"
	  "\t.set L$set$yy,LEFDEY-LASFDEY\n"
	  "\t.long L$set$yy\n"
	  "LASFDEY:\n"
	  "\t.long LASFDEY-EH_frame2\n"
	  "\t.long _lj_vm_ffi_call-.\n"
	  "\t.long %d\n"
	  "\t.byte 0\n" /* augmentation length */
	  "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
	  "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
	  "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */
	  "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
	  "\t.align 3\n"
	  "LEFDEY:\n\n", fcsize);
    }
#endif
    fprintf(ctx->fp, ".subsections_via_symbols\n");
    }
    break;
#endif
  default:  /* Difficult for other modes. */
    break;
  }
}
4907
diff --git a/src/vm_x86.dasc b/src/vm_x86.dasc
index b23d046b..b26cde4f 100644
--- a/src/vm_x86.dasc
+++ b/src/vm_x86.dasc
@@ -18,7 +18,6 @@
18| 18|
19|.if P64 19|.if P64
20|.define X64, 1 20|.define X64, 1
21|.define SSE, 1
22|.if WIN 21|.if WIN
23|.define X64WIN, 1 22|.define X64WIN, 1
24|.endif 23|.endif
@@ -116,24 +115,74 @@
116|.type NODE, Node 115|.type NODE, Node
117|.type NARGS, int 116|.type NARGS, int
118|.type TRACE, GCtrace 117|.type TRACE, GCtrace
118|.type SBUF, SBuf
119| 119|
120|// Stack layout while in interpreter. Must match with lj_frame.h. 120|// Stack layout while in interpreter. Must match with lj_frame.h.
121|//----------------------------------------------------------------------- 121|//-----------------------------------------------------------------------
122|.if not X64 // x86 stack layout. 122|.if not X64 // x86 stack layout.
123| 123|
124|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--). 124|.if WIN
125|
126|.define CFRAME_SPACE, aword*9 // Delta for esp (see <--).
125|.macro saveregs_ 127|.macro saveregs_
126| push edi; push esi; push ebx 128| push edi; push esi; push ebx
129| push extern lj_err_unwind_win
130| fs; push dword [0]
131| fs; mov [0], esp
127| sub esp, CFRAME_SPACE 132| sub esp, CFRAME_SPACE
128|.endmacro 133|.endmacro
129|.macro saveregs 134|.macro restoreregs
130| push ebp; saveregs_ 135| add esp, CFRAME_SPACE
136| fs; pop dword [0]
137| pop edi // Short for esp += 4.
138| pop ebx; pop esi; pop edi; pop ebp
139|.endmacro
140|
141|.else
142|
143|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--).
144|.macro saveregs_
145| push edi; push esi; push ebx
146| sub esp, CFRAME_SPACE
131|.endmacro 147|.endmacro
132|.macro restoreregs 148|.macro restoreregs
133| add esp, CFRAME_SPACE 149| add esp, CFRAME_SPACE
134| pop ebx; pop esi; pop edi; pop ebp 150| pop ebx; pop esi; pop edi; pop ebp
135|.endmacro 151|.endmacro
136| 152|
153|.endif
154|
155|.macro saveregs
156| push ebp; saveregs_
157|.endmacro
158|
159|.if WIN
160|.define SAVE_ERRF, aword [esp+aword*19] // vm_pcall/vm_cpcall only.
161|.define SAVE_NRES, aword [esp+aword*18]
162|.define SAVE_CFRAME, aword [esp+aword*17]
163|.define SAVE_L, aword [esp+aword*16]
164|//----- 16 byte aligned, ^^^ arguments from C caller
165|.define SAVE_RET, aword [esp+aword*15] //<-- esp entering interpreter.
166|.define SAVE_R4, aword [esp+aword*14]
167|.define SAVE_R3, aword [esp+aword*13]
168|.define SAVE_R2, aword [esp+aword*12]
169|//----- 16 byte aligned
170|.define SAVE_R1, aword [esp+aword*11]
171|.define SEH_FUNC, aword [esp+aword*10]
172|.define SEH_NEXT, aword [esp+aword*9] //<-- esp after register saves.
173|.define UNUSED2, aword [esp+aword*8]
174|//----- 16 byte aligned
175|.define UNUSED1, aword [esp+aword*7]
176|.define SAVE_PC, aword [esp+aword*6]
177|.define TMP2, aword [esp+aword*5]
178|.define TMP1, aword [esp+aword*4]
179|//----- 16 byte aligned
180|.define ARG4, aword [esp+aword*3]
181|.define ARG3, aword [esp+aword*2]
182|.define ARG2, aword [esp+aword*1]
183|.define ARG1, aword [esp] //<-- esp while in interpreter.
184|//----- 16 byte aligned, ^^^ arguments for C callee
185|.else
137|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only. 186|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only.
138|.define SAVE_NRES, aword [esp+aword*14] 187|.define SAVE_NRES, aword [esp+aword*14]
139|.define SAVE_CFRAME, aword [esp+aword*13] 188|.define SAVE_CFRAME, aword [esp+aword*13]
@@ -154,6 +203,7 @@
154|.define ARG2, aword [esp+aword*1] 203|.define ARG2, aword [esp+aword*1]
155|.define ARG1, aword [esp] //<-- esp while in interpreter. 204|.define ARG1, aword [esp] //<-- esp while in interpreter.
156|//----- 16 byte aligned, ^^^ arguments for C callee 205|//----- 16 byte aligned, ^^^ arguments for C callee
206|.endif
157| 207|
158|// FPARGx overlaps ARGx and ARG(x+1) on x86. 208|// FPARGx overlaps ARGx and ARG(x+1) on x86.
159|.define FPARG3, qword [esp+qword*1] 209|.define FPARG3, qword [esp+qword*1]
@@ -389,7 +439,6 @@
389| fpop 439| fpop
390|.endmacro 440|.endmacro
391| 441|
392|.macro fdup; fld st0; .endmacro
393|.macro fpop1; fstp st1; .endmacro 442|.macro fpop1; fstp st1; .endmacro
394| 443|
395|// Synthesize SSE FP constants. 444|// Synthesize SSE FP constants.
@@ -555,6 +604,10 @@ static void build_subroutines(BuildCtx *ctx)
555 |.else 604 |.else
556 | mov eax, FCARG2 // Error return status for vm_pcall. 605 | mov eax, FCARG2 // Error return status for vm_pcall.
557 | mov esp, FCARG1 606 | mov esp, FCARG1
607 |.if WIN
608 | lea FCARG1, SEH_NEXT
609 | fs; mov [0], FCARG1
610 |.endif
558 |.endif 611 |.endif
559 |->vm_unwind_c_eh: // Landing pad for external unwinder. 612 |->vm_unwind_c_eh: // Landing pad for external unwinder.
560 | mov L:RB, SAVE_L 613 | mov L:RB, SAVE_L
@@ -578,6 +631,10 @@ static void build_subroutines(BuildCtx *ctx)
578 |.else 631 |.else
579 | and FCARG1, CFRAME_RAWMASK 632 | and FCARG1, CFRAME_RAWMASK
580 | mov esp, FCARG1 633 | mov esp, FCARG1
634 |.if WIN
635 | lea FCARG1, SEH_NEXT
636 | fs; mov [0], FCARG1
637 |.endif
581 |.endif 638 |.endif
582 |->vm_unwind_ff_eh: // Landing pad for external unwinder. 639 |->vm_unwind_ff_eh: // Landing pad for external unwinder.
583 | mov L:RB, SAVE_L 640 | mov L:RB, SAVE_L
@@ -591,6 +648,19 @@ static void build_subroutines(BuildCtx *ctx)
591 | set_vmstate INTERP 648 | set_vmstate INTERP
592 | jmp ->vm_returnc // Increments RD/MULTRES and returns. 649 | jmp ->vm_returnc // Increments RD/MULTRES and returns.
593 | 650 |
651 |.if WIN and not X64
652 |->vm_rtlunwind@16: // Thin layer around RtlUnwind.
653 | // (void *cframe, void *excptrec, void *unwinder, int errcode)
654 | mov [esp], FCARG1 // Return value for RtlUnwind.
655 | push FCARG2 // Exception record for RtlUnwind.
656 | push 0 // Ignored by RtlUnwind.
657 | push dword [FCARG1+CFRAME_OFS_SEH]
658 | call extern RtlUnwind@16 // Violates ABI (clobbers too much).
659 | mov FCARG1, eax
660 | mov FCARG2, [esp+4] // errcode (for vm_unwind_c).
661 | ret // Jump to unwinder.
662 |.endif
663 |
594 |//----------------------------------------------------------------------- 664 |//-----------------------------------------------------------------------
595 |//-- Grow stack for calls ----------------------------------------------- 665 |//-- Grow stack for calls -----------------------------------------------
596 |//----------------------------------------------------------------------- 666 |//-----------------------------------------------------------------------
@@ -646,17 +716,18 @@ static void build_subroutines(BuildCtx *ctx)
646 | lea KBASEa, [esp+CFRAME_RESUME] 716 | lea KBASEa, [esp+CFRAME_RESUME]
647 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. 717 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
648 | add DISPATCH, GG_G2DISP 718 | add DISPATCH, GG_G2DISP
649 | mov L:RB->cframe, KBASEa
650 | mov SAVE_PC, RD // Any value outside of bytecode is ok. 719 | mov SAVE_PC, RD // Any value outside of bytecode is ok.
651 | mov SAVE_CFRAME, RDa 720 | mov SAVE_CFRAME, RDa
652 |.if X64 721 |.if X64
653 | mov SAVE_NRES, RD 722 | mov SAVE_NRES, RD
654 | mov SAVE_ERRF, RD 723 | mov SAVE_ERRF, RD
655 |.endif 724 |.endif
725 | mov L:RB->cframe, KBASEa
656 | cmp byte L:RB->status, RDL 726 | cmp byte L:RB->status, RDL
657 | je >3 // Initial resume (like a call). 727 | je >2 // Initial resume (like a call).
658 | 728 |
659 | // Resume after yield (like a return). 729 | // Resume after yield (like a return).
730 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
660 | set_vmstate INTERP 731 | set_vmstate INTERP
661 | mov byte L:RB->status, RDL 732 | mov byte L:RB->status, RDL
662 | mov BASE, L:RB->base 733 | mov BASE, L:RB->base
@@ -696,20 +767,19 @@ static void build_subroutines(BuildCtx *ctx)
696 | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME! 767 | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME!
697 |.endif 768 |.endif
698 | 769 |
770 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
699 | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. 771 | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
700 | mov SAVE_CFRAME, KBASEa 772 | mov SAVE_CFRAME, KBASEa
701 | mov SAVE_PC, L:RB // Any value outside of bytecode is ok. 773 | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
774 | add DISPATCH, GG_G2DISP
702 |.if X64 775 |.if X64
703 | mov L:RB->cframe, rsp 776 | mov L:RB->cframe, rsp
704 |.else 777 |.else
705 | mov L:RB->cframe, esp 778 | mov L:RB->cframe, esp
706 |.endif 779 |.endif
707 | 780 |
708 |2: // Entry point for vm_cpcall below (RA = base, RB = L, PC = ftype). 781 |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype).
709 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table. 782 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
710 | add DISPATCH, GG_G2DISP
711 |
712 |3: // Entry point for vm_resume above (RA = base, RB = L, PC = ftype).
713 | set_vmstate INTERP 783 | set_vmstate INTERP
714 | mov BASE, L:RB->base // BASE = old base (used in vmeta_call). 784 | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
715 | add PC, RA 785 | add PC, RA
@@ -747,14 +817,17 @@ static void build_subroutines(BuildCtx *ctx)
747 | 817 |
748 | mov KBASE, L:RB->stack // Compute -savestack(L, L->top). 818 | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
749 | sub KBASE, L:RB->top 819 | sub KBASE, L:RB->top
820 | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
750 | mov SAVE_ERRF, 0 // No error function. 821 | mov SAVE_ERRF, 0 // No error function.
751 | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame. 822 | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame.
823 | add DISPATCH, GG_G2DISP
752 | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe). 824 | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
753 | 825 |
754 |.if X64 826 |.if X64
755 | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain. 827 | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
756 | mov SAVE_CFRAME, KBASEa 828 | mov SAVE_CFRAME, KBASEa
757 | mov L:RB->cframe, rsp 829 | mov L:RB->cframe, rsp
830 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
758 | 831 |
759 | call CARG4 // (lua_State *L, lua_CFunction func, void *ud) 832 | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
760 |.else 833 |.else
@@ -765,6 +838,7 @@ static void build_subroutines(BuildCtx *ctx)
765 | mov KBASE, L:RB->cframe // Add our C frame to cframe chain. 838 | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
766 | mov SAVE_CFRAME, KBASE 839 | mov SAVE_CFRAME, KBASE
767 | mov L:RB->cframe, esp 840 | mov L:RB->cframe, esp
841 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
768 | 842 |
769 | call BASE // (lua_State *L, lua_CFunction func, void *ud) 843 | call BASE // (lua_State *L, lua_CFunction func, void *ud)
770 |.endif 844 |.endif
@@ -872,13 +946,9 @@ static void build_subroutines(BuildCtx *ctx)
872 |.if DUALNUM 946 |.if DUALNUM
873 | mov TMP2, LJ_TISNUM 947 | mov TMP2, LJ_TISNUM
874 | mov TMP1, RC 948 | mov TMP1, RC
875 |.elif SSE 949 |.else
876 | cvtsi2sd xmm0, RC 950 | cvtsi2sd xmm0, RC
877 | movsd TMPQ, xmm0 951 | movsd TMPQ, xmm0
878 |.else
879 | mov ARG4, RC
880 | fild ARG4
881 | fstp TMPQ
882 |.endif 952 |.endif
883 | lea RCa, TMPQ // Store temp. TValue in TMPQ. 953 | lea RCa, TMPQ // Store temp. TValue in TMPQ.
884 | jmp >1 954 | jmp >1
@@ -932,6 +1002,19 @@ static void build_subroutines(BuildCtx *ctx)
932 | mov NARGS:RD, 2+1 // 2 args for func(t, k). 1002 | mov NARGS:RD, 2+1 // 2 args for func(t, k).
933 | jmp ->vm_call_dispatch_f 1003 | jmp ->vm_call_dispatch_f
934 | 1004 |
1005 |->vmeta_tgetr:
1006 | mov FCARG1, TAB:RB
1007 | mov RB, BASE // Save BASE.
1008 | mov FCARG2, RC // Caveat: FCARG2 == BASE
1009 | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key)
1010 | // cTValue * or NULL returned in eax (RC).
1011 | movzx RA, PC_RA
1012 | mov BASE, RB // Restore BASE.
1013 | test RC, RC
1014 | jnz ->BC_TGETR_Z
1015 | mov dword [BASE+RA*8+4], LJ_TNIL
1016 | jmp ->BC_TGETR2_Z
1017 |
935 |//----------------------------------------------------------------------- 1018 |//-----------------------------------------------------------------------
936 | 1019 |
937 |->vmeta_tsets: 1020 |->vmeta_tsets:
@@ -951,13 +1034,9 @@ static void build_subroutines(BuildCtx *ctx)
951 |.if DUALNUM 1034 |.if DUALNUM
952 | mov TMP2, LJ_TISNUM 1035 | mov TMP2, LJ_TISNUM
953 | mov TMP1, RC 1036 | mov TMP1, RC
954 |.elif SSE 1037 |.else
955 | cvtsi2sd xmm0, RC 1038 | cvtsi2sd xmm0, RC
956 | movsd TMPQ, xmm0 1039 | movsd TMPQ, xmm0
957 |.else
958 | mov ARG4, RC
959 | fild ARG4
960 | fstp TMPQ
961 |.endif 1040 |.endif
962 | lea RCa, TMPQ // Store temp. TValue in TMPQ. 1041 | lea RCa, TMPQ // Store temp. TValue in TMPQ.
963 | jmp >1 1042 | jmp >1
@@ -1023,6 +1102,33 @@ static void build_subroutines(BuildCtx *ctx)
1023 | mov NARGS:RD, 3+1 // 3 args for func(t, k, v). 1102 | mov NARGS:RD, 3+1 // 3 args for func(t, k, v).
1024 | jmp ->vm_call_dispatch_f 1103 | jmp ->vm_call_dispatch_f
1025 | 1104 |
1105 |->vmeta_tsetr:
1106 |.if X64WIN
1107 | mov L:CARG1d, SAVE_L
1108 | mov CARG3d, RC
1109 | mov L:CARG1d->base, BASE
1110 | xchg CARG2d, TAB:RB // Caveat: CARG2d == BASE.
1111 |.elif X64
1112 | mov L:CARG1d, SAVE_L
1113 | mov CARG2d, TAB:RB
1114 | mov L:CARG1d->base, BASE
1115 | mov RB, BASE // Save BASE.
1116 | mov CARG3d, RC // Caveat: CARG3d == BASE.
1117 |.else
1118 | mov L:RA, SAVE_L
1119 | mov ARG2, TAB:RB
1120 | mov RB, BASE // Save BASE.
1121 | mov ARG3, RC
1122 | mov ARG1, L:RA
1123 | mov L:RA->base, BASE
1124 |.endif
1125 | mov SAVE_PC, PC
1126 | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
1127 | // TValue * returned in eax (RC).
1128 | movzx RA, PC_RA
1129 | mov BASE, RB // Restore BASE.
1130 | jmp ->BC_TSETR_Z
1131 |
1026 |//-- Comparison metamethods --------------------------------------------- 1132 |//-- Comparison metamethods ---------------------------------------------
1027 | 1133 |
1028 |->vmeta_comp: 1134 |->vmeta_comp:
@@ -1117,6 +1223,26 @@ static void build_subroutines(BuildCtx *ctx)
1117 | jmp <3 1223 | jmp <3
1118 |.endif 1224 |.endif
1119 | 1225 |
1226 |->vmeta_istype:
1227 |.if X64
1228 | mov L:RB, SAVE_L
1229 | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
1230 | mov CARG2d, RA
1231 | movzx CARG3d, PC_RD
1232 | mov L:CARG1d, L:RB
1233 |.else
1234 | movzx RD, PC_RD
1235 | mov ARG2, RA
1236 | mov L:RB, SAVE_L
1237 | mov ARG3, RD
1238 | mov ARG1, L:RB
1239 | mov L:RB->base, BASE
1240 |.endif
1241 | mov SAVE_PC, PC
1242 | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
1243 | mov BASE, L:RB->base
1244 | jmp <6
1245 |
1120 |//-- Arithmetic metamethods --------------------------------------------- 1246 |//-- Arithmetic metamethods ---------------------------------------------
1121 | 1247 |
1122 |->vmeta_arith_vno: 1248 |->vmeta_arith_vno:
@@ -1289,19 +1415,6 @@ static void build_subroutines(BuildCtx *ctx)
1289 | cmp NARGS:RD, 2+1; jb ->fff_fallback 1415 | cmp NARGS:RD, 2+1; jb ->fff_fallback
1290 |.endmacro 1416 |.endmacro
1291 | 1417 |
1292 |.macro .ffunc_n, name
1293 | .ffunc_1 name
1294 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
1295 | fld qword [BASE]
1296 |.endmacro
1297 |
1298 |.macro .ffunc_n, name, op
1299 | .ffunc_1 name
1300 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
1301 | op
1302 | fld qword [BASE]
1303 |.endmacro
1304 |
1305 |.macro .ffunc_nsse, name, op 1418 |.macro .ffunc_nsse, name, op
1306 | .ffunc_1 name 1419 | .ffunc_1 name
1307 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback 1420 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
@@ -1312,14 +1425,6 @@ static void build_subroutines(BuildCtx *ctx)
1312 | .ffunc_nsse name, movsd 1425 | .ffunc_nsse name, movsd
1313 |.endmacro 1426 |.endmacro
1314 | 1427 |
1315 |.macro .ffunc_nn, name
1316 | .ffunc_2 name
1317 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
1318 | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
1319 | fld qword [BASE]
1320 | fld qword [BASE+8]
1321 |.endmacro
1322 |
1323 |.macro .ffunc_nnsse, name 1428 |.macro .ffunc_nnsse, name
1324 | .ffunc_2 name 1429 | .ffunc_2 name
1325 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback 1430 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
@@ -1417,7 +1522,7 @@ static void build_subroutines(BuildCtx *ctx)
1417 | mov dword [BASE-4], LJ_TTAB // Store metatable as default result. 1522 | mov dword [BASE-4], LJ_TTAB // Store metatable as default result.
1418 | mov [BASE-8], TAB:RB 1523 | mov [BASE-8], TAB:RB
1419 | mov RA, TAB:RB->hmask 1524 | mov RA, TAB:RB->hmask
1420 | and RA, STR:RC->hash 1525 | and RA, STR:RC->sid
1421 | imul RA, #NODE 1526 | imul RA, #NODE
1422 | add NODE:RA, TAB:RB->node 1527 | add NODE:RA, TAB:RB->node
1423 |3: // Rearranged logic, because we expect _not_ to find the key. 1528 |3: // Rearranged logic, because we expect _not_ to find the key.
@@ -1525,11 +1630,7 @@ static void build_subroutines(BuildCtx *ctx)
1525 |.else 1630 |.else
1526 | jae ->fff_fallback 1631 | jae ->fff_fallback
1527 |.endif 1632 |.endif
1528 |.if SSE
1529 | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0 1633 | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0
1530 |.else
1531 | fld qword [BASE]; jmp ->fff_resn
1532 |.endif
1533 | 1634 |
1534 |.ffunc_1 tostring 1635 |.ffunc_1 tostring
1535 | // Only handles the string or number case inline. 1636 | // Only handles the string or number case inline.
@@ -1554,9 +1655,9 @@ static void build_subroutines(BuildCtx *ctx)
1554 |.endif 1655 |.endif
1555 | mov L:FCARG1, L:RB 1656 | mov L:FCARG1, L:RB
1556 |.if DUALNUM 1657 |.if DUALNUM
1557 | call extern lj_str_fromnumber@8 // (lua_State *L, cTValue *o) 1658 | call extern lj_strfmt_number@8 // (lua_State *L, cTValue *o)
1558 |.else 1659 |.else
1559 | call extern lj_str_fromnum@8 // (lua_State *L, lua_Number *np) 1660 | call extern lj_strfmt_num@8 // (lua_State *L, lua_Number *np)
1560 |.endif 1661 |.endif
1561 | // GCstr returned in eax (RD). 1662 | // GCstr returned in eax (RD).
1562 | mov BASE, L:RB->base 1663 | mov BASE, L:RB->base
@@ -1647,19 +1748,12 @@ static void build_subroutines(BuildCtx *ctx)
1647 | add RD, 1 1748 | add RD, 1
1648 | mov dword [BASE-4], LJ_TISNUM 1749 | mov dword [BASE-4], LJ_TISNUM
1649 | mov dword [BASE-8], RD 1750 | mov dword [BASE-8], RD
1650 |.elif SSE 1751 |.else
1651 | movsd xmm0, qword [BASE+8] 1752 | movsd xmm0, qword [BASE+8]
1652 | sseconst_1 xmm1, RBa 1753 | sseconst_1 xmm1, RBa
1653 | addsd xmm0, xmm1 1754 | addsd xmm0, xmm1
1654 | cvtsd2si RD, xmm0 1755 | cvttsd2si RD, xmm0
1655 | movsd qword [BASE-8], xmm0 1756 | movsd qword [BASE-8], xmm0
1656 |.else
1657 | fld qword [BASE+8]
1658 | fld1
1659 | faddp st1
1660 | fist ARG1
1661 | fstp qword [BASE-8]
1662 | mov RD, ARG1
1663 |.endif 1757 |.endif
1664 | mov TAB:RB, [BASE] 1758 | mov TAB:RB, [BASE]
1665 | cmp RD, TAB:RB->asize; jae >2 // Not in array part? 1759 | cmp RD, TAB:RB->asize; jae >2 // Not in array part?
@@ -1706,12 +1800,9 @@ static void build_subroutines(BuildCtx *ctx)
1706 |.if DUALNUM 1800 |.if DUALNUM
1707 | mov dword [BASE+12], LJ_TISNUM 1801 | mov dword [BASE+12], LJ_TISNUM
1708 | mov dword [BASE+8], 0 1802 | mov dword [BASE+8], 0
1709 |.elif SSE 1803 |.else
1710 | xorps xmm0, xmm0 1804 | xorps xmm0, xmm0
1711 | movsd qword [BASE+8], xmm0 1805 | movsd qword [BASE+8], xmm0
1712 |.else
1713 | fldz
1714 | fstp qword [BASE+8]
1715 |.endif 1806 |.endif
1716 | mov RD, 1+3 1807 | mov RD, 1+3
1717 | jmp ->fff_res 1808 | jmp ->fff_res
@@ -1818,7 +1909,6 @@ static void build_subroutines(BuildCtx *ctx)
1818 | mov ARG3, RA 1909 | mov ARG3, RA
1819 |.endif 1910 |.endif
1820 | call ->vm_resume // (lua_State *L, TValue *base, 0, 0) 1911 | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
1821 | set_vmstate INTERP
1822 | 1912 |
1823 | mov L:RB, SAVE_L 1913 | mov L:RB, SAVE_L
1824 |.if X64 1914 |.if X64
@@ -1827,6 +1917,9 @@ static void build_subroutines(BuildCtx *ctx)
1827 | mov L:PC, ARG1 // The callee doesn't modify SAVE_L. 1917 | mov L:PC, ARG1 // The callee doesn't modify SAVE_L.
1828 |.endif 1918 |.endif
1829 | mov BASE, L:RB->base 1919 | mov BASE, L:RB->base
1920 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
1921 | set_vmstate INTERP
1922 |
1830 | cmp eax, LUA_YIELD 1923 | cmp eax, LUA_YIELD
1831 | ja >8 1924 | ja >8
1832 |4: 1925 |4:
@@ -1941,12 +2034,10 @@ static void build_subroutines(BuildCtx *ctx)
1941 |->fff_resi: // Dummy. 2034 |->fff_resi: // Dummy.
1942 |.endif 2035 |.endif
1943 | 2036 |
1944 |.if SSE
1945 |->fff_resn: 2037 |->fff_resn:
1946 | mov PC, [BASE-4] 2038 | mov PC, [BASE-4]
1947 | fstp qword [BASE-8] 2039 | fstp qword [BASE-8]
1948 | jmp ->fff_res1 2040 | jmp ->fff_res1
1949 |.endif
1950 | 2041 |
1951 | .ffunc_1 math_abs 2042 | .ffunc_1 math_abs
1952 |.if DUALNUM 2043 |.if DUALNUM
@@ -1970,8 +2061,6 @@ static void build_subroutines(BuildCtx *ctx)
1970 |.else 2061 |.else
1971 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback 2062 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
1972 |.endif 2063 |.endif
1973 |
1974 |.if SSE
1975 | movsd xmm0, qword [BASE] 2064 | movsd xmm0, qword [BASE]
1976 | sseconst_abs xmm1, RDa 2065 | sseconst_abs xmm1, RDa
1977 | andps xmm0, xmm1 2066 | andps xmm0, xmm1
@@ -1979,15 +2068,6 @@ static void build_subroutines(BuildCtx *ctx)
1979 | mov PC, [BASE-4] 2068 | mov PC, [BASE-4]
1980 | movsd qword [BASE-8], xmm0 2069 | movsd qword [BASE-8], xmm0
1981 | // fallthrough 2070 | // fallthrough
1982 |.else
1983 | fld qword [BASE]
1984 | fabs
1985 | // fallthrough
1986 |->fff_resxmm0: // Dummy.
1987 |->fff_resn:
1988 | mov PC, [BASE-4]
1989 | fstp qword [BASE-8]
1990 |.endif
1991 | 2071 |
1992 |->fff_res1: 2072 |->fff_res1:
1993 | mov RD, 1+1 2073 | mov RD, 1+1
@@ -2014,6 +2094,12 @@ static void build_subroutines(BuildCtx *ctx)
2014 | mov RAa, -8 // Results start at BASE+RA = BASE-8. 2094 | mov RAa, -8 // Results start at BASE+RA = BASE-8.
2015 | jmp ->vm_return 2095 | jmp ->vm_return
2016 | 2096 |
2097 |.if X64
2098 |.define fff_resfp, fff_resxmm0
2099 |.else
2100 |.define fff_resfp, fff_resn
2101 |.endif
2102 |
2017 |.macro math_round, func 2103 |.macro math_round, func
2018 | .ffunc math_ .. func 2104 | .ffunc math_ .. func
2019 |.if DUALNUM 2105 |.if DUALNUM
@@ -2024,107 +2110,75 @@ static void build_subroutines(BuildCtx *ctx)
2024 |.else 2110 |.else
2025 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback 2111 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
2026 |.endif 2112 |.endif
2027 |.if SSE
2028 | movsd xmm0, qword [BASE] 2113 | movsd xmm0, qword [BASE]
2029 | call ->vm_ .. func 2114 | call ->vm_ .. func .. _sse
2030 | .if DUALNUM 2115 |.if DUALNUM
2031 | cvtsd2si RB, xmm0 2116 | cvttsd2si RB, xmm0
2032 | cmp RB, 0x80000000 2117 | cmp RB, 0x80000000
2033 | jne ->fff_resi 2118 | jne ->fff_resi
2034 | cvtsi2sd xmm1, RB 2119 | cvtsi2sd xmm1, RB
2035 | ucomisd xmm0, xmm1 2120 | ucomisd xmm0, xmm1
2036 | jp ->fff_resxmm0 2121 | jp ->fff_resxmm0
2037 | je ->fff_resi 2122 | je ->fff_resi
2038 | .endif
2039 | jmp ->fff_resxmm0
2040 |.else
2041 | fld qword [BASE]
2042 | call ->vm_ .. func
2043 | .if DUALNUM
2044 | fist ARG1
2045 | mov RB, ARG1
2046 | cmp RB, 0x80000000; jne >2
2047 | fdup
2048 | fild ARG1
2049 | fcomparepp
2050 | jp ->fff_resn
2051 | jne ->fff_resn
2052 |2:
2053 | fpop
2054 | jmp ->fff_resi
2055 | .else
2056 | jmp ->fff_resn
2057 | .endif
2058 |.endif 2123 |.endif
2124 | jmp ->fff_resxmm0
2059 |.endmacro 2125 |.endmacro
2060 | 2126 |
2061 | math_round floor 2127 | math_round floor
2062 | math_round ceil 2128 | math_round ceil
2063 | 2129 |
2064 |.if SSE
2065 |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0 2130 |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0
2066 |.else
2067 |.ffunc_n math_sqrt; fsqrt; jmp ->fff_resn
2068 |.endif
2069 | 2131 |
2070 |.ffunc math_log 2132 |.ffunc math_log
2071 | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument. 2133 | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument.
2072 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback 2134 | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
2073 | fldln2; fld qword [BASE]; fyl2x; jmp ->fff_resn 2135 | movsd xmm0, qword [BASE]
2074 | 2136 |.if not X64
2075 |.ffunc_n math_log10, fldlg2; fyl2x; jmp ->fff_resn 2137 | movsd FPARG1, xmm0
2076 |.ffunc_n math_exp; call ->vm_exp_x87; jmp ->fff_resn 2138 |.endif
2077 | 2139 | mov RB, BASE
2078 |.ffunc_n math_sin; fsin; jmp ->fff_resn 2140 | call extern log
2079 |.ffunc_n math_cos; fcos; jmp ->fff_resn 2141 | mov BASE, RB
2080 |.ffunc_n math_tan; fptan; fpop; jmp ->fff_resn 2142 | jmp ->fff_resfp
2081 |
2082 |.ffunc_n math_asin
2083 | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fpatan
2084 | jmp ->fff_resn
2085 |.ffunc_n math_acos
2086 | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fxch; fpatan
2087 | jmp ->fff_resn
2088 |.ffunc_n math_atan; fld1; fpatan; jmp ->fff_resn
2089 | 2143 |
2090 |.macro math_extern, func 2144 |.macro math_extern, func
2091 |.if SSE
2092 | .ffunc_nsse math_ .. func 2145 | .ffunc_nsse math_ .. func
2093 | .if not X64 2146 |.if not X64
2094 | movsd FPARG1, xmm0 2147 | movsd FPARG1, xmm0
2095 | .endif
2096 |.else
2097 | .ffunc_n math_ .. func
2098 | fstp FPARG1
2099 |.endif 2148 |.endif
2100 | mov RB, BASE 2149 | mov RB, BASE
2101 | call extern lj_vm_ .. func 2150 | call extern func
2102 | mov BASE, RB 2151 | mov BASE, RB
2103 | .if X64 2152 | jmp ->fff_resfp
2104 | jmp ->fff_resxmm0 2153 |.endmacro
2105 | .else 2154 |
2106 | jmp ->fff_resn 2155 |.macro math_extern2, func
2107 | .endif 2156 | .ffunc_nnsse math_ .. func
2157 |.if not X64
2158 | movsd FPARG1, xmm0
2159 | movsd FPARG3, xmm1
2160 |.endif
2161 | mov RB, BASE
2162 | call extern func
2163 | mov BASE, RB
2164 | jmp ->fff_resfp
2108 |.endmacro 2165 |.endmacro
2109 | 2166 |
2167 | math_extern log10
2168 | math_extern exp
2169 | math_extern sin
2170 | math_extern cos
2171 | math_extern tan
2172 | math_extern asin
2173 | math_extern acos
2174 | math_extern atan
2110 | math_extern sinh 2175 | math_extern sinh
2111 | math_extern cosh 2176 | math_extern cosh
2112 | math_extern tanh 2177 | math_extern tanh
2178 | math_extern2 pow
2179 | math_extern2 atan2
2180 | math_extern2 fmod
2113 | 2181 |
2114 |->ff_math_deg:
2115 |.if SSE
2116 |.ffunc_nsse math_rad
2117 | mov CFUNC:RB, [BASE-8]
2118 | mulsd xmm0, qword CFUNC:RB->upvalue[0]
2119 | jmp ->fff_resxmm0
2120 |.else
2121 |.ffunc_n math_rad
2122 | mov CFUNC:RB, [BASE-8]
2123 | fmul qword CFUNC:RB->upvalue[0]
2124 | jmp ->fff_resn
2125 |.endif
2126 |
2127 |.ffunc_nn math_atan2; fpatan; jmp ->fff_resn
2128 |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn 2182 |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn
2129 | 2183 |
2130 |.ffunc_1 math_frexp 2184 |.ffunc_1 math_frexp
@@ -2139,65 +2193,34 @@ static void build_subroutines(BuildCtx *ctx)
2139 | cmp RB, 0x00200000; jb >4 2193 | cmp RB, 0x00200000; jb >4
2140 |1: 2194 |1:
2141 | shr RB, 21; sub RB, RC // Extract and unbias exponent. 2195 | shr RB, 21; sub RB, RC // Extract and unbias exponent.
2142 |.if SSE
2143 | cvtsi2sd xmm0, RB 2196 | cvtsi2sd xmm0, RB
2144 |.else
2145 | mov TMP1, RB; fild TMP1
2146 |.endif
2147 | mov RB, [BASE-4] 2197 | mov RB, [BASE-4]
2148 | and RB, 0x800fffff // Mask off exponent. 2198 | and RB, 0x800fffff // Mask off exponent.
2149 | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0. 2199 | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0.
2150 | mov [BASE-4], RB 2200 | mov [BASE-4], RB
2151 |2: 2201 |2:
2152 |.if SSE
2153 | movsd qword [BASE], xmm0 2202 | movsd qword [BASE], xmm0
2154 |.else
2155 | fstp qword [BASE]
2156 |.endif
2157 | mov RD, 1+2 2203 | mov RD, 1+2
2158 | jmp ->fff_res 2204 | jmp ->fff_res
2159 |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0. 2205 |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0.
2160 |.if SSE
2161 | xorps xmm0, xmm0; jmp <2 2206 | xorps xmm0, xmm0; jmp <2
2162 |.else
2163 | fldz; jmp <2
2164 |.endif
2165 |4: // Handle denormals by multiplying with 2^54 and adjusting the bias. 2207 |4: // Handle denormals by multiplying with 2^54 and adjusting the bias.
2166 |.if SSE
2167 | movsd xmm0, qword [BASE] 2208 | movsd xmm0, qword [BASE]
2168 | sseconst_hi xmm1, RBa, 43500000 // 2^54. 2209 | sseconst_hi xmm1, RBa, 43500000 // 2^54.
2169 | mulsd xmm0, xmm1 2210 | mulsd xmm0, xmm1
2170 | movsd qword [BASE-8], xmm0 2211 | movsd qword [BASE-8], xmm0
2171 |.else
2172 | fld qword [BASE]
2173 | mov TMP1, 0x5a800000; fmul TMP1 // x = x*2^54
2174 | fstp qword [BASE-8]
2175 |.endif
2176 | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1 2212 | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1
2177 | 2213 |
2178 |.if SSE
2179 |.ffunc_nsse math_modf 2214 |.ffunc_nsse math_modf
2180 |.else
2181 |.ffunc_n math_modf
2182 |.endif
2183 | mov RB, [BASE+4] 2215 | mov RB, [BASE+4]
2184 | mov PC, [BASE-4] 2216 | mov PC, [BASE-4]
2185 | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf? 2217 | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf?
2186 |.if SSE
2187 | movaps xmm4, xmm0 2218 | movaps xmm4, xmm0
2188 | call ->vm_trunc 2219 | call ->vm_trunc_sse
2189 | subsd xmm4, xmm0 2220 | subsd xmm4, xmm0
2190 |1: 2221 |1:
2191 | movsd qword [BASE-8], xmm0 2222 | movsd qword [BASE-8], xmm0
2192 | movsd qword [BASE], xmm4 2223 | movsd qword [BASE], xmm4
2193 |.else
2194 | fdup
2195 | call ->vm_trunc
2196 | fsub st1, st0
2197 |1:
2198 | fstp qword [BASE-8]
2199 | fstp qword [BASE]
2200 |.endif
2201 | mov RC, [BASE-4]; mov RB, [BASE+4] 2224 | mov RC, [BASE-4]; mov RB, [BASE+4]
2202 | xor RC, RB; js >3 // Need to adjust sign? 2225 | xor RC, RB; js >3 // Need to adjust sign?
2203 |2: 2226 |2:
@@ -2207,25 +2230,10 @@ static void build_subroutines(BuildCtx *ctx)
2207 | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction. 2230 | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction.
2208 | jmp <2 2231 | jmp <2
2209 |4: 2232 |4:
2210 |.if SSE
2211 | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0. 2233 | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0.
2212 |.else
2213 | fldz; fxch; jmp <1 // Return +-Inf and +-0.
2214 |.endif
2215 |
2216 |.ffunc_nnr math_fmod
2217 |1: ; fprem; fnstsw ax; and ax, 0x400; jnz <1
2218 | fpop1
2219 | jmp ->fff_resn
2220 | 2234 |
2221 |.if SSE 2235 |.macro math_minmax, name, cmovop, sseop
2222 |.ffunc_nnsse math_pow; call ->vm_pow; jmp ->fff_resxmm0 2236 | .ffunc_1 name
2223 |.else
2224 |.ffunc_nn math_pow; call ->vm_pow; jmp ->fff_resn
2225 |.endif
2226 |
2227 |.macro math_minmax, name, cmovop, fcmovop, sseop
2228 | .ffunc name
2229 | mov RA, 2 2237 | mov RA, 2
2230 | cmp dword [BASE+4], LJ_TISNUM 2238 | cmp dword [BASE+4], LJ_TISNUM
2231 |.if DUALNUM 2239 |.if DUALNUM
@@ -2241,12 +2249,7 @@ static void build_subroutines(BuildCtx *ctx)
2241 |3: 2249 |3:
2242 | ja ->fff_fallback 2250 | ja ->fff_fallback
2243 | // Convert intermediate result to number and continue below. 2251 | // Convert intermediate result to number and continue below.
2244 |.if SSE
2245 | cvtsi2sd xmm0, RB 2252 | cvtsi2sd xmm0, RB
2246 |.else
2247 | mov TMP1, RB
2248 | fild TMP1
2249 |.endif
2250 | jmp >6 2253 | jmp >6
2251 |4: 2254 |4:
2252 | ja ->fff_fallback 2255 | ja ->fff_fallback
@@ -2254,7 +2257,6 @@ static void build_subroutines(BuildCtx *ctx)
2254 | jae ->fff_fallback 2257 | jae ->fff_fallback
2255 |.endif 2258 |.endif
2256 | 2259 |
2257 |.if SSE
2258 | movsd xmm0, qword [BASE] 2260 | movsd xmm0, qword [BASE]
2259 |5: // Handle numbers or integers. 2261 |5: // Handle numbers or integers.
2260 | cmp RA, RD; jae ->fff_resxmm0 2262 | cmp RA, RD; jae ->fff_resxmm0
@@ -2273,48 +2275,13 @@ static void build_subroutines(BuildCtx *ctx)
2273 | sseop xmm0, xmm1 2275 | sseop xmm0, xmm1
2274 | add RA, 1 2276 | add RA, 1
2275 | jmp <5 2277 | jmp <5
2276 |.else
2277 | fld qword [BASE]
2278 |5: // Handle numbers or integers.
2279 | cmp RA, RD; jae ->fff_resn
2280 | cmp dword [BASE+RA*8-4], LJ_TISNUM
2281 |.if DUALNUM
2282 | jb >6
2283 | ja >9
2284 | fild dword [BASE+RA*8-8]
2285 | jmp >7
2286 |.else
2287 | jae >9
2288 |.endif
2289 |6:
2290 | fld qword [BASE+RA*8-8]
2291 |7:
2292 | fucomi st1; fcmovop st1; fpop1
2293 | add RA, 1
2294 | jmp <5
2295 |.endif
2296 |.endmacro 2278 |.endmacro
2297 | 2279 |
2298 | math_minmax math_min, cmovg, fcmovnbe, minsd 2280 | math_minmax math_min, cmovg, minsd
2299 | math_minmax math_max, cmovl, fcmovbe, maxsd 2281 | math_minmax math_max, cmovl, maxsd
2300 |.if not SSE
2301 |9:
2302 | fpop; jmp ->fff_fallback
2303 |.endif
2304 | 2282 |
2305 |//-- String library ----------------------------------------------------- 2283 |//-- String library -----------------------------------------------------
2306 | 2284 |
2307 |.ffunc_1 string_len
2308 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
2309 | mov STR:RB, [BASE]
2310 |.if DUALNUM
2311 | mov RB, dword STR:RB->len; jmp ->fff_resi
2312 |.elif SSE
2313 | cvtsi2sd xmm0, dword STR:RB->len; jmp ->fff_resxmm0
2314 |.else
2315 | fild dword STR:RB->len; jmp ->fff_resn
2316 |.endif
2317 |
2318 |.ffunc string_byte // Only handle the 1-arg case here. 2285 |.ffunc string_byte // Only handle the 1-arg case here.
2319 | cmp NARGS:RD, 1+1; jne ->fff_fallback 2286 | cmp NARGS:RD, 1+1; jne ->fff_fallback
2320 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback 2287 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
@@ -2325,10 +2292,8 @@ static void build_subroutines(BuildCtx *ctx)
2325 | movzx RB, byte STR:RB[1] 2292 | movzx RB, byte STR:RB[1]
2326 |.if DUALNUM 2293 |.if DUALNUM
2327 | jmp ->fff_resi 2294 | jmp ->fff_resi
2328 |.elif SSE
2329 | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0
2330 |.else 2295 |.else
2331 | mov TMP1, RB; fild TMP1; jmp ->fff_resn 2296 | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0
2332 |.endif 2297 |.endif
2333 | 2298 |
2334 |.ffunc string_char // Only handle the 1-arg case here. 2299 |.ffunc string_char // Only handle the 1-arg case here.
@@ -2340,16 +2305,11 @@ static void build_subroutines(BuildCtx *ctx)
2340 | mov RB, dword [BASE] 2305 | mov RB, dword [BASE]
2341 | cmp RB, 255; ja ->fff_fallback 2306 | cmp RB, 255; ja ->fff_fallback
2342 | mov TMP2, RB 2307 | mov TMP2, RB
2343 |.elif SSE 2308 |.else
2344 | jae ->fff_fallback 2309 | jae ->fff_fallback
2345 | cvttsd2si RB, qword [BASE] 2310 | cvttsd2si RB, qword [BASE]
2346 | cmp RB, 255; ja ->fff_fallback 2311 | cmp RB, 255; ja ->fff_fallback
2347 | mov TMP2, RB 2312 | mov TMP2, RB
2348 |.else
2349 | jae ->fff_fallback
2350 | fld qword [BASE]
2351 | fistp TMP2
2352 | cmp TMP2, 255; ja ->fff_fallback
2353 |.endif 2313 |.endif
2354 |.if X64 2314 |.if X64
2355 | mov TMP3, 1 2315 | mov TMP3, 1
@@ -2370,6 +2330,7 @@ static void build_subroutines(BuildCtx *ctx)
2370 |.endif 2330 |.endif
2371 | mov SAVE_PC, PC 2331 | mov SAVE_PC, PC
2372 | call extern lj_str_new // (lua_State *L, char *str, size_t l) 2332 | call extern lj_str_new // (lua_State *L, char *str, size_t l)
2333 |->fff_resstr:
2373 | // GCstr * returned in eax (RD). 2334 | // GCstr * returned in eax (RD).
2374 | mov BASE, L:RB->base 2335 | mov BASE, L:RB->base
2375 | mov PC, [BASE-4] 2336 | mov PC, [BASE-4]
@@ -2387,14 +2348,10 @@ static void build_subroutines(BuildCtx *ctx)
2387 | jne ->fff_fallback 2348 | jne ->fff_fallback
2388 | mov RB, dword [BASE+16] 2349 | mov RB, dword [BASE+16]
2389 | mov TMP2, RB 2350 | mov TMP2, RB
2390 |.elif SSE 2351 |.else
2391 | jae ->fff_fallback 2352 | jae ->fff_fallback
2392 | cvttsd2si RB, qword [BASE+16] 2353 | cvttsd2si RB, qword [BASE+16]
2393 | mov TMP2, RB 2354 | mov TMP2, RB
2394 |.else
2395 | jae ->fff_fallback
2396 | fld qword [BASE+16]
2397 | fistp TMP2
2398 |.endif 2355 |.endif
2399 |1: 2356 |1:
2400 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback 2357 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
@@ -2409,12 +2366,8 @@ static void build_subroutines(BuildCtx *ctx)
2409 | mov RB, STR:RB->len 2366 | mov RB, STR:RB->len
2410 |.if DUALNUM 2367 |.if DUALNUM
2411 | mov RA, dword [BASE+8] 2368 | mov RA, dword [BASE+8]
2412 |.elif SSE
2413 | cvttsd2si RA, qword [BASE+8]
2414 |.else 2369 |.else
2415 | fld qword [BASE+8] 2370 | cvttsd2si RA, qword [BASE+8]
2416 | fistp ARG3
2417 | mov RA, ARG3
2418 |.endif 2371 |.endif
2419 | mov RC, TMP2 2372 | mov RC, TMP2
2420 | cmp RB, RC // len < end? (unsigned compare) 2373 | cmp RB, RC // len < end? (unsigned compare)
@@ -2458,136 +2411,34 @@ static void build_subroutines(BuildCtx *ctx)
2458 | xor RC, RC // Zero length. Any ptr in RB is ok. 2411 | xor RC, RC // Zero length. Any ptr in RB is ok.
2459 | jmp <4 2412 | jmp <4
2460 | 2413 |
2461 |.ffunc string_rep // Only handle the 1-char case inline. 2414 |.macro ffstring_op, name
2462 | ffgccheck 2415 | .ffunc_1 string_ .. name
2463 | cmp NARGS:RD, 2+1; jne ->fff_fallback // Exactly 2 arguments.
2464 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
2465 | cmp dword [BASE+12], LJ_TISNUM
2466 | mov STR:RB, [BASE]
2467 |.if DUALNUM
2468 | jne ->fff_fallback
2469 | mov RC, dword [BASE+8]
2470 |.elif SSE
2471 | jae ->fff_fallback
2472 | cvttsd2si RC, qword [BASE+8]
2473 |.else
2474 | jae ->fff_fallback
2475 | fld qword [BASE+8]
2476 | fistp TMP2
2477 | mov RC, TMP2
2478 |.endif
2479 | test RC, RC
2480 | jle ->fff_emptystr // Count <= 0? (or non-int)
2481 | cmp dword STR:RB->len, 1
2482 | jb ->fff_emptystr // Zero length string?
2483 | jne ->fff_fallback_2 // Fallback for > 1-char strings.
2484 | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_2
2485 | movzx RA, byte STR:RB[1]
2486 | mov RB, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
2487 |.if X64
2488 | mov TMP3, RC
2489 |.else
2490 | mov ARG3, RC
2491 |.endif
2492 |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
2493 | mov [RB], RAL
2494 | add RB, 1
2495 | sub RC, 1
2496 | jnz <1
2497 | mov RD, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
2498 | jmp ->fff_newstr
2499 |
2500 |.ffunc_1 string_reverse
2501 | ffgccheck 2416 | ffgccheck
2502 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback 2417 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
2503 | mov STR:RB, [BASE] 2418 | mov L:RB, SAVE_L
2504 | mov RC, STR:RB->len 2419 | lea SBUF:FCARG1, [DISPATCH+DISPATCH_GL(tmpbuf)]
2505 | test RC, RC 2420 | mov L:RB->base, BASE
2506 | jz ->fff_emptystr // Zero length string? 2421 | mov STR:FCARG2, [BASE] // Caveat: FCARG2 == BASE
2507 | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1 2422 | mov RC, SBUF:FCARG1->b
2508 | add RB, #STR 2423 | mov SBUF:FCARG1->L, L:RB
2509 | mov TMP2, PC // Need another temp register. 2424 | mov SBUF:FCARG1->p, RC
2510 |.if X64 2425 | mov SAVE_PC, PC
2511 | mov TMP3, RC 2426 | call extern lj_buf_putstr_ .. name .. @8
2512 |.else 2427 | mov FCARG1, eax
2513 | mov ARG3, RC 2428 | call extern lj_buf_tostr@4
2514 |.endif 2429 | jmp ->fff_resstr
2515 | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
2516 |1:
2517 | movzx RA, byte [RB]
2518 | add RB, 1
2519 | sub RC, 1
2520 | mov [PC+RC], RAL
2521 | jnz <1
2522 | mov RD, PC
2523 | mov PC, TMP2
2524 | jmp ->fff_newstr
2525 |
2526 |.macro ffstring_case, name, lo, hi
2527 | .ffunc_1 name
2528 | ffgccheck
2529 | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
2530 | mov STR:RB, [BASE]
2531 | mov RC, STR:RB->len
2532 | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1
2533 | add RB, #STR
2534 | mov TMP2, PC // Need another temp register.
2535 |.if X64
2536 | mov TMP3, RC
2537 |.else
2538 | mov ARG3, RC
2539 |.endif
2540 | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
2541 | jmp >3
2542 |1: // ASCII case conversion. Yes, this is suboptimal code (do you care?).
2543 | movzx RA, byte [RB+RC]
2544 | cmp RA, lo
2545 | jb >2
2546 | cmp RA, hi
2547 | ja >2
2548 | xor RA, 0x20
2549 |2:
2550 | mov [PC+RC], RAL
2551 |3:
2552 | sub RC, 1
2553 | jns <1
2554 | mov RD, PC
2555 | mov PC, TMP2
2556 | jmp ->fff_newstr
2557 |.endmacro 2430 |.endmacro
2558 | 2431 |
2559 |ffstring_case string_lower, 0x41, 0x5a 2432 |ffstring_op reverse
2560 |ffstring_case string_upper, 0x61, 0x7a 2433 |ffstring_op lower
2561 | 2434 |ffstring_op upper
2562 |//-- Table library ------------------------------------------------------
2563 |
2564 |.ffunc_1 table_getn
2565 | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
2566 | mov RB, BASE // Save BASE.
2567 | mov TAB:FCARG1, [BASE]
2568 | call extern lj_tab_len@4 // LJ_FASTCALL (GCtab *t)
2569 | // Length of table returned in eax (RD).
2570 | mov BASE, RB // Restore BASE.
2571 |.if DUALNUM
2572 | mov RB, RD; jmp ->fff_resi
2573 |.elif SSE
2574 | cvtsi2sd xmm0, RD; jmp ->fff_resxmm0
2575 |.else
2576 | mov ARG1, RD; fild ARG1; jmp ->fff_resn
2577 |.endif
2578 | 2435 |
2579 |//-- Bit library -------------------------------------------------------- 2436 |//-- Bit library --------------------------------------------------------
2580 | 2437 |
2581 |.define TOBIT_BIAS, 0x59c00000 // 2^52 + 2^51 (float, not double!).
2582 |
2583 |.macro .ffunc_bit, name, kind, fdef 2438 |.macro .ffunc_bit, name, kind, fdef
2584 | fdef name 2439 | fdef name
2585 |.if kind == 2 2440 |.if kind == 2
2586 |.if SSE
2587 | sseconst_tobit xmm1, RBa 2441 | sseconst_tobit xmm1, RBa
2588 |.else
2589 | mov TMP1, TOBIT_BIAS
2590 |.endif
2591 |.endif 2442 |.endif
2592 | cmp dword [BASE+4], LJ_TISNUM 2443 | cmp dword [BASE+4], LJ_TISNUM
2593 |.if DUALNUM 2444 |.if DUALNUM
@@ -2603,24 +2454,12 @@ static void build_subroutines(BuildCtx *ctx)
2603 |.else 2454 |.else
2604 | jae ->fff_fallback 2455 | jae ->fff_fallback
2605 |.endif 2456 |.endif
2606 |.if SSE
2607 | movsd xmm0, qword [BASE] 2457 | movsd xmm0, qword [BASE]
2608 |.if kind < 2 2458 |.if kind < 2
2609 | sseconst_tobit xmm1, RBa 2459 | sseconst_tobit xmm1, RBa
2610 |.endif 2460 |.endif
2611 | addsd xmm0, xmm1 2461 | addsd xmm0, xmm1
2612 | movd RB, xmm0 2462 | movd RB, xmm0
2613 |.else
2614 | fld qword [BASE]
2615 |.if kind < 2
2616 | mov TMP1, TOBIT_BIAS
2617 |.endif
2618 | fadd TMP1
2619 | fstp FPARG1
2620 |.if kind > 0
2621 | mov RB, ARG1
2622 |.endif
2623 |.endif
2624 |2: 2463 |2:
2625 |.endmacro 2464 |.endmacro
2626 | 2465 |
@@ -2629,15 +2468,7 @@ static void build_subroutines(BuildCtx *ctx)
2629 |.endmacro 2468 |.endmacro
2630 | 2469 |
2631 |.ffunc_bit bit_tobit, 0 2470 |.ffunc_bit bit_tobit, 0
2632 |.if DUALNUM or SSE
2633 |.if not SSE
2634 | mov RB, ARG1
2635 |.endif
2636 | jmp ->fff_resbit 2471 | jmp ->fff_resbit
2637 |.else
2638 | fild ARG1
2639 | jmp ->fff_resn
2640 |.endif
2641 | 2472 |
2642 |.macro .ffunc_bit_op, name, ins 2473 |.macro .ffunc_bit_op, name, ins
2643 | .ffunc_bit name, 2 2474 | .ffunc_bit name, 2
@@ -2657,17 +2488,10 @@ static void build_subroutines(BuildCtx *ctx)
2657 |.else 2488 |.else
2658 | jae ->fff_fallback_bit_op 2489 | jae ->fff_fallback_bit_op
2659 |.endif 2490 |.endif
2660 |.if SSE
2661 | movsd xmm0, qword [RD] 2491 | movsd xmm0, qword [RD]
2662 | addsd xmm0, xmm1 2492 | addsd xmm0, xmm1
2663 | movd RA, xmm0 2493 | movd RA, xmm0
2664 | ins RB, RA 2494 | ins RB, RA
2665 |.else
2666 | fld qword [RD]
2667 | fadd TMP1
2668 | fstp FPARG1
2669 | ins RB, ARG1
2670 |.endif
2671 | sub RD, 8 2495 | sub RD, 8
2672 | jmp <1 2496 | jmp <1
2673 |.endmacro 2497 |.endmacro
@@ -2684,15 +2508,10 @@ static void build_subroutines(BuildCtx *ctx)
2684 | not RB 2508 | not RB
2685 |.if DUALNUM 2509 |.if DUALNUM
2686 | jmp ->fff_resbit 2510 | jmp ->fff_resbit
2687 |.elif SSE 2511 |.else
2688 |->fff_resbit: 2512 |->fff_resbit:
2689 | cvtsi2sd xmm0, RB 2513 | cvtsi2sd xmm0, RB
2690 | jmp ->fff_resxmm0 2514 | jmp ->fff_resxmm0
2691 |.else
2692 |->fff_resbit:
2693 | mov ARG1, RB
2694 | fild ARG1
2695 | jmp ->fff_resn
2696 |.endif 2515 |.endif
2697 | 2516 |
2698 |->fff_fallback_bit_op: 2517 |->fff_fallback_bit_op:
@@ -2705,22 +2524,13 @@ static void build_subroutines(BuildCtx *ctx)
2705 | // Note: no inline conversion from number for 2nd argument! 2524 | // Note: no inline conversion from number for 2nd argument!
2706 | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback 2525 | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback
2707 | mov RA, dword [BASE+8] 2526 | mov RA, dword [BASE+8]
2708 |.elif SSE 2527 |.else
2709 | .ffunc_nnsse name 2528 | .ffunc_nnsse name
2710 | sseconst_tobit xmm2, RBa 2529 | sseconst_tobit xmm2, RBa
2711 | addsd xmm0, xmm2 2530 | addsd xmm0, xmm2
2712 | addsd xmm1, xmm2 2531 | addsd xmm1, xmm2
2713 | movd RB, xmm0 2532 | movd RB, xmm0
2714 | movd RA, xmm1 2533 | movd RA, xmm1
2715 |.else
2716 | .ffunc_nn name
2717 | mov TMP1, TOBIT_BIAS
2718 | fadd TMP1
2719 | fstp FPARG3
2720 | fadd TMP1
2721 | fstp FPARG1
2722 | mov RA, ARG3
2723 | mov RB, ARG1
2724 |.endif 2534 |.endif
2725 | ins RB, cl // Assumes RA is ecx. 2535 | ins RB, cl // Assumes RA is ecx.
2726 | jmp ->fff_resbit 2536 | jmp ->fff_resbit
@@ -2854,7 +2664,7 @@ static void build_subroutines(BuildCtx *ctx)
2854 | mov FCARG2, PC // Caveat: FCARG2 == BASE 2664 | mov FCARG2, PC // Caveat: FCARG2 == BASE
2855 | mov FCARG1, L:RB 2665 | mov FCARG1, L:RB
2856 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC. 2666 | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
2857 | call extern lj_dispatch_ins@8 // (lua_State *L, BCIns *pc) 2667 | call extern lj_dispatch_ins@8 // (lua_State *L, const BCIns *pc)
2858 |3: 2668 |3:
2859 | mov BASE, L:RB->base 2669 | mov BASE, L:RB->base
2860 |4: 2670 |4:
@@ -2925,6 +2735,79 @@ static void build_subroutines(BuildCtx *ctx)
2925 | add NARGS:RD, 1 2735 | add NARGS:RD, 1
2926 | jmp RBa 2736 | jmp RBa
2927 | 2737 |
2738 |->cont_stitch: // Trace stitching.
2739 |.if JIT
2740 | // BASE = base, RC = result, RB = mbase
2741 | mov TRACE:RA, [RB-24] // Save previous trace.
2742 | mov TMP1, TRACE:RA
2743 | mov TMP3, DISPATCH // Need one more register.
2744 | mov DISPATCH, MULTRES
2745 | movzx RA, PC_RA
2746 | lea RA, [BASE+RA*8] // Call base.
2747 | sub DISPATCH, 1
2748 | jz >2
2749 |1: // Move results down.
2750 |.if X64
2751 | mov RBa, [RC]
2752 | mov [RA], RBa
2753 |.else
2754 | mov RB, [RC]
2755 | mov [RA], RB
2756 | mov RB, [RC+4]
2757 | mov [RA+4], RB
2758 |.endif
2759 | add RC, 8
2760 | add RA, 8
2761 | sub DISPATCH, 1
2762 | jnz <1
2763 |2:
2764 | movzx RC, PC_RA
2765 | movzx RB, PC_RB
2766 | add RC, RB
2767 | lea RC, [BASE+RC*8-8]
2768 |3:
2769 | cmp RC, RA
2770 | ja >9 // More results wanted?
2771 |
2772 | mov DISPATCH, TMP3
2773 | mov TRACE:RD, TMP1 // Get previous trace.
2774 | movzx RB, word TRACE:RD->traceno
2775 | movzx RD, word TRACE:RD->link
2776 | cmp RD, RB
2777 | je ->cont_nop // Blacklisted.
2778 | test RD, RD
2779 | jne =>BC_JLOOP // Jump to stitched trace.
2780 |
2781 | // Stitch a new trace to the previous trace.
2782 | mov [DISPATCH+DISPATCH_J(exitno)], RB
2783 | mov L:RB, SAVE_L
2784 | mov L:RB->base, BASE
2785 | mov FCARG2, PC
2786 | lea FCARG1, [DISPATCH+GG_DISP2J]
2787 | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
2788 | call extern lj_dispatch_stitch@8 // (jit_State *J, const BCIns *pc)
2789 | mov BASE, L:RB->base
2790 | jmp ->cont_nop
2791 |
2792 |9: // Fill up results with nil.
2793 | mov dword [RA+4], LJ_TNIL
2794 | add RA, 8
2795 | jmp <3
2796 |.endif
2797 |
2798 |->vm_profhook: // Dispatch target for profiler hook.
2799#if LJ_HASPROFILE
2800 | mov L:RB, SAVE_L
2801 | mov L:RB->base, BASE
2802 | mov FCARG2, PC // Caveat: FCARG2 == BASE
2803 | mov FCARG1, L:RB
2804 | call extern lj_dispatch_profile@8 // (lua_State *L, const BCIns *pc)
2805 | mov BASE, L:RB->base
2806 | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
2807 | sub PC, 4
2808 | jmp ->cont_nop
2809#endif
2810 |
2928 |//----------------------------------------------------------------------- 2811 |//-----------------------------------------------------------------------
2929 |//-- Trace exit handler ------------------------------------------------- 2812 |//-- Trace exit handler -------------------------------------------------
2930 |//----------------------------------------------------------------------- 2813 |//-----------------------------------------------------------------------
@@ -2977,10 +2860,9 @@ static void build_subroutines(BuildCtx *ctx)
2977 | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0 2860 | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0
2978 |.endif 2861 |.endif
2979 | // Caveat: RB is ebp. 2862 | // Caveat: RB is ebp.
2980 | mov L:RB, [DISPATCH+DISPATCH_GL(jit_L)] 2863 | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)]
2981 | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)] 2864 | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
2982 | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa 2865 | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
2983 | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0
2984 | mov L:RB->base, BASE 2866 | mov L:RB->base, BASE
2985 |.if X64WIN 2867 |.if X64WIN
2986 | lea CARG2, [rsp+4*8] 2868 | lea CARG2, [rsp+4*8]
@@ -2990,6 +2872,7 @@ static void build_subroutines(BuildCtx *ctx)
2990 | lea FCARG2, [esp+16] 2872 | lea FCARG2, [esp+16]
2991 |.endif 2873 |.endif
2992 | lea FCARG1, [DISPATCH+GG_DISP2J] 2874 | lea FCARG1, [DISPATCH+GG_DISP2J]
2875 | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0
2993 | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex) 2876 | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex)
2994 | // MULTRES or negated error code returned in eax (RD). 2877 | // MULTRES or negated error code returned in eax (RD).
2995 | mov RAa, L:RB->cframe 2878 | mov RAa, L:RB->cframe
@@ -3036,12 +2919,14 @@ static void build_subroutines(BuildCtx *ctx)
3036 | mov r13, TMPa 2919 | mov r13, TMPa
3037 | mov r12, TMPQ 2920 | mov r12, TMPQ
3038 |.endif 2921 |.endif
3039 | test RD, RD; js >3 // Check for error from exit. 2922 | test RD, RD; js >9 // Check for error from exit.
2923 | mov L:RB, SAVE_L
3040 | mov MULTRES, RD 2924 | mov MULTRES, RD
3041 | mov LFUNC:KBASE, [BASE-8] 2925 | mov LFUNC:KBASE, [BASE-8]
3042 | mov KBASE, LFUNC:KBASE->pc 2926 | mov KBASE, LFUNC:KBASE->pc
3043 | mov KBASE, [KBASE+PC2PROTO(k)] 2927 | mov KBASE, [KBASE+PC2PROTO(k)]
3044 | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0 2928 | mov L:RB->base, BASE
2929 | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0
3045 | set_vmstate INTERP 2930 | set_vmstate INTERP
3046 | // Modified copy of ins_next which handles function header dispatch, too. 2931 | // Modified copy of ins_next which handles function header dispatch, too.
3047 | mov RC, [PC] 2932 | mov RC, [PC]
@@ -3050,16 +2935,31 @@ static void build_subroutines(BuildCtx *ctx)
3050 | add PC, 4 2935 | add PC, 4
3051 | shr RC, 16 2936 | shr RC, 16
3052 | cmp OP, BC_FUNCF // Function header? 2937 | cmp OP, BC_FUNCF // Function header?
3053 | jb >2 2938 | jb >3
3054 | mov RC, MULTRES // RC/RD holds nres+1. 2939 | cmp OP, BC_FUNCC+2 // Fast function?
2940 | jae >4
3055 |2: 2941 |2:
2942 | mov RC, MULTRES // RC/RD holds nres+1.
2943 |3:
3056 |.if X64 2944 |.if X64
3057 | jmp aword [DISPATCH+OP*8] 2945 | jmp aword [DISPATCH+OP*8]
3058 |.else 2946 |.else
3059 | jmp aword [DISPATCH+OP*4] 2947 | jmp aword [DISPATCH+OP*4]
3060 |.endif 2948 |.endif
3061 | 2949 |
3062 |3: // Rethrow error from the right C frame. 2950 |4: // Check frame below fast function.
2951 | mov RC, [BASE-4]
2952 | test RC, FRAME_TYPE
2953 | jnz <2 // Trace stitching continuation?
2954 | // Otherwise set KBASE for Lua function below fast function.
2955 | movzx RC, byte [RC-3]
2956 | not RCa
2957 | mov LFUNC:KBASE, [BASE+RC*8-8]
2958 | mov KBASE, LFUNC:KBASE->pc
2959 | mov KBASE, [KBASE+PC2PROTO(k)]
2960 | jmp <2
2961 |
2962 |9: // Rethrow error from the right C frame.
3063 | mov FCARG1, L:RB 2963 | mov FCARG1, L:RB
3064 | call extern lj_err_run@4 // (lua_State *L) 2964 | call extern lj_err_run@4 // (lua_State *L)
3065 |.endif 2965 |.endif
@@ -3069,27 +2969,18 @@ static void build_subroutines(BuildCtx *ctx)
3069 |//----------------------------------------------------------------------- 2969 |//-----------------------------------------------------------------------
3070 | 2970 |
3071 |// FP value rounding. Called by math.floor/math.ceil fast functions 2971 |// FP value rounding. Called by math.floor/math.ceil fast functions
3072 |// and from JIT code. 2972 |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
3073 | 2973 |.macro vm_round, name, mode, cond
3074 |// x87 variant: Arg/ret on x87 stack. No int/xmm registers modified. 2974 |->name:
3075 |.macro vm_round_x87, mode1, mode2 2975 |.if not X64 and cond
3076 | fnstcw word [esp+4] // Caveat: overwrites ARG1 and ARG2. 2976 | movsd xmm0, qword [esp+4]
3077 | mov [esp+8], eax 2977 | call ->name .. _sse
3078 | mov ax, mode1 2978 | movsd qword [esp+4], xmm0 // Overwrite callee-owned arg.
3079 | or ax, [esp+4] 2979 | fld qword [esp+4]
3080 |.if mode2 ~= 0xffff
3081 | and ax, mode2
3082 |.endif
3083 | mov [esp+6], ax
3084 | fldcw word [esp+6]
3085 | frndint
3086 | fldcw word [esp+4]
3087 | mov eax, [esp+8]
3088 | ret 2980 | ret
3089 |.endmacro 2981 |.endif
3090 | 2982 |
3091 |// SSE variant: arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified. 2983 |->name .. _sse:
3092 |.macro vm_round_sse, mode
3093 | sseconst_abs xmm2, RDa 2984 | sseconst_abs xmm2, RDa
3094 | sseconst_2p52 xmm3, RDa 2985 | sseconst_2p52 xmm3, RDa
3095 | movaps xmm1, xmm0 2986 | movaps xmm1, xmm0
@@ -3125,22 +3016,12 @@ static void build_subroutines(BuildCtx *ctx)
3125 | ret 3016 | ret
3126 |.endmacro 3017 |.endmacro
3127 | 3018 |
3128 |.macro vm_round, name, ssemode, mode1, mode2 3019 | vm_round vm_floor, 0, 1
3129 |->name: 3020 | vm_round vm_ceil, 1, JIT
3130 |.if not SSE 3021 | vm_round vm_trunc, 2, JIT
3131 | vm_round_x87 mode1, mode2
3132 |.endif
3133 |->name .. _sse:
3134 | vm_round_sse ssemode
3135 |.endmacro
3136 |
3137 | vm_round vm_floor, 0, 0x0400, 0xf7ff
3138 | vm_round vm_ceil, 1, 0x0800, 0xfbff
3139 | vm_round vm_trunc, 2, 0x0c00, 0xffff
3140 | 3022 |
3141 |// FP modulo x%y. Called by BC_MOD* and vm_arith. 3023 |// FP modulo x%y. Called by BC_MOD* and vm_arith.
3142 |->vm_mod: 3024 |->vm_mod:
3143 |.if SSE
3144 |// Args in xmm0/xmm1, return value in xmm0. 3025 |// Args in xmm0/xmm1, return value in xmm0.
3145 |// Caveat: xmm0-xmm5 and RC (eax) modified! 3026 |// Caveat: xmm0-xmm5 and RC (eax) modified!
3146 | movaps xmm5, xmm0 3027 | movaps xmm5, xmm0
@@ -3168,172 +3049,6 @@ static void build_subroutines(BuildCtx *ctx)
3168 | movaps xmm0, xmm5 3049 | movaps xmm0, xmm5
3169 | subsd xmm0, xmm1 3050 | subsd xmm0, xmm1
3170 | ret 3051 | ret
3171 |.else
3172 |// Args/ret on x87 stack (y on top). No xmm registers modified.
3173 |// Caveat: needs 3 slots on x87 stack! RC (eax) modified!
3174 | fld st1
3175 | fdiv st1
3176 | fnstcw word [esp+4]
3177 | mov ax, 0x0400
3178 | or ax, [esp+4]
3179 | and ax, 0xf7ff
3180 | mov [esp+6], ax
3181 | fldcw word [esp+6]
3182 | frndint
3183 | fldcw word [esp+4]
3184 | fmulp st1
3185 | fsubp st1
3186 | ret
3187 |.endif
3188 |
3189 |// FP log2(x). Called by math.log(x, base).
3190 |->vm_log2:
3191 |.if X64WIN
3192 | movsd qword [rsp+8], xmm0 // Use scratch area.
3193 | fld1
3194 | fld qword [rsp+8]
3195 | fyl2x
3196 | fstp qword [rsp+8]
3197 | movsd xmm0, qword [rsp+8]
3198 |.elif X64
3199 | movsd qword [rsp-8], xmm0 // Use red zone.
3200 | fld1
3201 | fld qword [rsp-8]
3202 | fyl2x
3203 | fstp qword [rsp-8]
3204 | movsd xmm0, qword [rsp-8]
3205 |.else
3206 | fld1
3207 | fld qword [esp+4]
3208 | fyl2x
3209 |.endif
3210 | ret
3211 |
3212 |// FP exponentiation e^x and 2^x. Called by math.exp fast function and
3213 |// from JIT code. Arg/ret on x87 stack. No int/xmm regs modified.
3214 |// Caveat: needs 3 slots on x87 stack!
3215 |->vm_exp_x87:
3216 | fldl2e; fmulp st1 // e^x ==> 2^(x*log2(e))
3217 |->vm_exp2_x87:
3218 | .if X64WIN
3219 | .define expscratch, dword [rsp+8] // Use scratch area.
3220 | .elif X64
3221 | .define expscratch, dword [rsp-8] // Use red zone.
3222 | .else
3223 | .define expscratch, dword [esp+4] // Needs 4 byte scratch area.
3224 | .endif
3225 | fst expscratch // Caveat: overwrites ARG1.
3226 | cmp expscratch, 0x7f800000; je >1 // Special case: e^+Inf = +Inf
3227 | cmp expscratch, 0xff800000; je >2 // Special case: e^-Inf = 0
3228 |->vm_exp2raw: // Entry point for vm_pow. Without +-Inf check.
3229 | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part.
3230 | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int
3231 |1:
3232 | ret
3233 |2:
3234 | fpop; fldz; ret
3235 |
3236 |// Generic power function x^y. Called by BC_POW, math.pow fast function,
3237 |// and vm_arith.
3238 |// Args/ret on x87 stack (y on top). RC (eax) modified.
3239 |// Caveat: needs 3 slots on x87 stack!
3240 |->vm_pow:
3241 |.if not SSE
3242 | fist dword [esp+4] // Store/reload int before comparison.
3243 | fild dword [esp+4] // Integral exponent used in vm_powi.
3244 | fucomip st1
3245 | jnz >8 // Branch for FP exponents.
3246 | jp >9 // Branch for NaN exponent.
3247 | fpop // Pop y and fallthrough to vm_powi.
3248 |
3249 |// FP/int power function x^i. Arg1/ret on x87 stack.
3250 |// Arg2 (int) on C stack. RC (eax) modified.
3251 |// Caveat: needs 2 slots on x87 stack!
3252 | mov eax, [esp+4]
3253 | cmp eax, 1; jle >6 // i<=1?
3254 | // Now 1 < (unsigned)i <= 0x80000000.
3255 |1: // Handle leading zeros.
3256 | test eax, 1; jnz >2
3257 | fmul st0
3258 | shr eax, 1
3259 | jmp <1
3260 |2:
3261 | shr eax, 1; jz >5
3262 | fdup
3263 |3: // Handle trailing bits.
3264 | fmul st0
3265 | shr eax, 1; jz >4
3266 | jnc <3
3267 | fmul st1, st0
3268 | jmp <3
3269 |4:
3270 | fmulp st1
3271 |5:
3272 | ret
3273 |6:
3274 | je <5 // x^1 ==> x
3275 | jb >7
3276 | fld1; fdivrp st1
3277 | neg eax
3278 | cmp eax, 1; je <5 // x^-1 ==> 1/x
3279 | jmp <1 // x^-i ==> (1/x)^i
3280 |7:
3281 | fpop; fld1 // x^0 ==> 1
3282 | ret
3283 |
3284 |8: // FP/FP power function x^y.
3285 | fst dword [esp+4]
3286 | fxch
3287 | fst dword [esp+8]
3288 | mov eax, [esp+4]; shl eax, 1
3289 | cmp eax, 0xff000000; je >2 // x^+-Inf?
3290 | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y?
3291 | cmp eax, 0xff000000; je >4 // +-Inf^y?
3292 | fyl2x
3293 | jmp ->vm_exp2raw
3294 |
3295 |9: // Handle x^NaN.
3296 | fld1
3297 | fucomip st2
3298 | je >1 // 1^NaN ==> 1
3299 | fxch // x^NaN ==> NaN
3300 |1:
3301 | fpop
3302 | ret
3303 |
3304 |2: // Handle x^+-Inf.
3305 | fabs
3306 | fld1
3307 | fucomip st1
3308 | je >3 // +-1^+-Inf ==> 1
3309 | fpop; fabs; fldz; mov eax, 0; setc al
3310 | ror eax, 1; xor eax, [esp+4]; jns >3 // |x|<>1, x^+-Inf ==> +Inf/0
3311 | fxch
3312 |3:
3313 | fpop1; fabs
3314 | ret
3315 |
3316 |4: // Handle +-0^y or +-Inf^y.
3317 | cmp dword [esp+4], 0; jge <3 // y >= 0, x^y ==> |x|
3318 | fpop; fpop
3319 | test eax, eax; jz >5 // y < 0, +-0^y ==> +Inf
3320 | fldz // y < 0, +-Inf^y ==> 0
3321 | ret
3322 |5:
3323 | mov dword [esp+4], 0x7f800000 // Return +Inf.
3324 | fld dword [esp+4]
3325 | ret
3326 |.endif
3327 |
3328 |// Args in xmm0/xmm1. Ret in xmm0. xmm0-xmm2 and RC (eax) modified.
3329 |// Needs 16 byte scratch area for x86. Also called from JIT code.
3330 |->vm_pow_sse:
3331 | cvtsd2si eax, xmm1
3332 | cvtsi2sd xmm2, eax
3333 | ucomisd xmm1, xmm2
3334 | jnz >8 // Branch for FP exponents.
3335 | jp >9 // Branch for NaN exponent.
3336 | // Fallthrough to vm_powi_sse.
3337 | 3052 |
3338 |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified. 3053 |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified.
3339 |->vm_powi_sse: 3054 |->vm_powi_sse:
@@ -3370,287 +3085,6 @@ static void build_subroutines(BuildCtx *ctx)
3370 | sseconst_1 xmm0, RDa 3085 | sseconst_1 xmm0, RDa
3371 | ret 3086 | ret
3372 | 3087 |
3373 |8: // FP/FP power function x^y.
3374 |.if X64
3375 | movd rax, xmm1; shl rax, 1
3376 | rol rax, 12; cmp rax, 0xffe; je >2 // x^+-Inf?
3377 | movd rax, xmm0; shl rax, 1; je >4 // +-0^y?
3378 | rol rax, 12; cmp rax, 0xffe; je >5 // +-Inf^y?
3379 | .if X64WIN
3380 | movsd qword [rsp+16], xmm1 // Use scratch area.
3381 | movsd qword [rsp+8], xmm0
3382 | fld qword [rsp+16]
3383 | fld qword [rsp+8]
3384 | .else
3385 | movsd qword [rsp-16], xmm1 // Use red zone.
3386 | movsd qword [rsp-8], xmm0
3387 | fld qword [rsp-16]
3388 | fld qword [rsp-8]
3389 | .endif
3390 |.else
3391 | movsd qword [esp+12], xmm1 // Needs 16 byte scratch area.
3392 | movsd qword [esp+4], xmm0
3393 | cmp dword [esp+12], 0; jne >1
3394 | mov eax, [esp+16]; shl eax, 1
3395 | cmp eax, 0xffe00000; je >2 // x^+-Inf?
3396 |1:
3397 | cmp dword [esp+4], 0; jne >1
3398 | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y?
3399 | cmp eax, 0xffe00000; je >5 // +-Inf^y?
3400 |1:
3401 | fld qword [esp+12]
3402 | fld qword [esp+4]
3403 |.endif
3404 | fyl2x // y*log2(x)
3405 | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part.
3406 | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int
3407 |.if X64WIN
3408 | fstp qword [rsp+8] // Use scratch area.
3409 | movsd xmm0, qword [rsp+8]
3410 |.elif X64
3411 | fstp qword [rsp-8] // Use red zone.
3412 | movsd xmm0, qword [rsp-8]
3413 |.else
3414 | fstp qword [esp+4] // Needs 8 byte scratch area.
3415 | movsd xmm0, qword [esp+4]
3416 |.endif
3417 | ret
3418 |
3419 |9: // Handle x^NaN.
3420 | sseconst_1 xmm2, RDa
3421 | ucomisd xmm0, xmm2; je >1 // 1^NaN ==> 1
3422 | movaps xmm0, xmm1 // x^NaN ==> NaN
3423 |1:
3424 | ret
3425 |
3426 |2: // Handle x^+-Inf.
3427 | sseconst_abs xmm2, RDa
3428 | andpd xmm0, xmm2 // |x|
3429 | sseconst_1 xmm2, RDa
3430 | ucomisd xmm0, xmm2; je <1 // +-1^+-Inf ==> 1
3431 | movmskpd eax, xmm1
3432 | xorps xmm0, xmm0
3433 | mov ah, al; setc al; xor al, ah; jne <1 // |x|<>1, x^+-Inf ==> +Inf/0
3434 |3:
3435 | sseconst_hi xmm0, RDa, 7ff00000 // +Inf
3436 | ret
3437 |
3438 |4: // Handle +-0^y.
3439 | movmskpd eax, xmm1; test eax, eax; jnz <3 // y < 0, +-0^y ==> +Inf
3440 | xorps xmm0, xmm0 // y >= 0, +-0^y ==> 0
3441 | ret
3442 |
3443 |5: // Handle +-Inf^y.
3444 | movmskpd eax, xmm1; test eax, eax; jz <3 // y >= 0, +-Inf^y ==> +Inf
3445 | xorps xmm0, xmm0 // y < 0, +-Inf^y ==> 0
3446 | ret
3447 |
3448 |// Callable from C: double lj_vm_foldfpm(double x, int fpm)
3449 |// Computes fpm(x) for extended math functions. ORDER FPM.
3450 |->vm_foldfpm:
3451 |.if JIT
3452 |.if X64
3453 | .if X64WIN
3454 | .define fpmop, CARG2d
3455 | .else
3456 | .define fpmop, CARG1d
3457 | .endif
3458 | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil
3459 | cmp fpmop, 3; jb ->vm_trunc; ja >2
3460 | sqrtsd xmm0, xmm0; ret
3461 |2:
3462 | .if X64WIN
3463 | movsd qword [rsp+8], xmm0 // Use scratch area.
3464 | fld qword [rsp+8]
3465 | .else
3466 | movsd qword [rsp-8], xmm0 // Use red zone.
3467 | fld qword [rsp-8]
3468 | .endif
3469 | cmp fpmop, 5; ja >2
3470 | .if X64WIN; pop rax; .endif
3471 | je >1
3472 | call ->vm_exp_x87
3473 | .if X64WIN; push rax; .endif
3474 | jmp >7
3475 |1:
3476 | call ->vm_exp2_x87
3477 | .if X64WIN; push rax; .endif
3478 | jmp >7
3479 |2: ; cmp fpmop, 7; je >1; ja >2
3480 | fldln2; fxch; fyl2x; jmp >7
3481 |1: ; fld1; fxch; fyl2x; jmp >7
3482 |2: ; cmp fpmop, 9; je >1; ja >2
3483 | fldlg2; fxch; fyl2x; jmp >7
3484 |1: ; fsin; jmp >7
3485 |2: ; cmp fpmop, 11; je >1; ja >9
3486 | fcos; jmp >7
3487 |1: ; fptan; fpop
3488 |7:
3489 | .if X64WIN
3490 | fstp qword [rsp+8] // Use scratch area.
3491 | movsd xmm0, qword [rsp+8]
3492 | .else
3493 | fstp qword [rsp-8] // Use red zone.
3494 | movsd xmm0, qword [rsp-8]
3495 | .endif
3496 | ret
3497 |.else // x86 calling convention.
3498 | .define fpmop, eax
3499 |.if SSE
3500 | mov fpmop, [esp+12]
3501 | movsd xmm0, qword [esp+4]
3502 | cmp fpmop, 1; je >1; ja >2
3503 | call ->vm_floor; jmp >7
3504 |1: ; call ->vm_ceil; jmp >7
3505 |2: ; cmp fpmop, 3; je >1; ja >2
3506 | call ->vm_trunc; jmp >7
3507 |1:
3508 | sqrtsd xmm0, xmm0
3509 |7:
3510 | movsd qword [esp+4], xmm0 // Overwrite callee-owned args.
3511 | fld qword [esp+4]
3512 | ret
3513 |2: ; fld qword [esp+4]
3514 | cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87
3515 |2: ; cmp fpmop, 7; je >1; ja >2
3516 | fldln2; fxch; fyl2x; ret
3517 |1: ; fld1; fxch; fyl2x; ret
3518 |2: ; cmp fpmop, 9; je >1; ja >2
3519 | fldlg2; fxch; fyl2x; ret
3520 |1: ; fsin; ret
3521 |2: ; cmp fpmop, 11; je >1; ja >9
3522 | fcos; ret
3523 |1: ; fptan; fpop; ret
3524 |.else
3525 | mov fpmop, [esp+12]
3526 | fld qword [esp+4]
3527 | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil
3528 | cmp fpmop, 3; jb ->vm_trunc; ja >2
3529 | fsqrt; ret
3530 |2: ; cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87
3531 | cmp fpmop, 7; je >1; ja >2
3532 | fldln2; fxch; fyl2x; ret
3533 |1: ; fld1; fxch; fyl2x; ret
3534 |2: ; cmp fpmop, 9; je >1; ja >2
3535 | fldlg2; fxch; fyl2x; ret
3536 |1: ; fsin; ret
3537 |2: ; cmp fpmop, 11; je >1; ja >9
3538 | fcos; ret
3539 |1: ; fptan; fpop; ret
3540 |.endif
3541 |.endif
3542 |9: ; int3 // Bad fpm.
3543 |.endif
3544 |
3545 |// Callable from C: double lj_vm_foldarith(double x, double y, int op)
3546 |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -)
3547 |// and basic math functions. ORDER ARITH
3548 |->vm_foldarith:
3549 |.if X64
3550 |
3551 | .if X64WIN
3552 | .define foldop, CARG3d
3553 | .else
3554 | .define foldop, CARG1d
3555 | .endif
3556 | cmp foldop, 1; je >1; ja >2
3557 | addsd xmm0, xmm1; ret
3558 |1: ; subsd xmm0, xmm1; ret
3559 |2: ; cmp foldop, 3; je >1; ja >2
3560 | mulsd xmm0, xmm1; ret
3561 |1: ; divsd xmm0, xmm1; ret
3562 |2: ; cmp foldop, 5; jb ->vm_mod; je ->vm_pow
3563 | cmp foldop, 7; je >1; ja >2
3564 | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; ret
3565 |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; ret
3566 |2: ; cmp foldop, 9; ja >2
3567 |.if X64WIN
3568 | movsd qword [rsp+8], xmm0 // Use scratch area.
3569 | movsd qword [rsp+16], xmm1
3570 | fld qword [rsp+8]
3571 | fld qword [rsp+16]
3572 |.else
3573 | movsd qword [rsp-8], xmm0 // Use red zone.
3574 | movsd qword [rsp-16], xmm1
3575 | fld qword [rsp-8]
3576 | fld qword [rsp-16]
3577 |.endif
3578 | je >1
3579 | fpatan
3580 |7:
3581 |.if X64WIN
3582 | fstp qword [rsp+8] // Use scratch area.
3583 | movsd xmm0, qword [rsp+8]
3584 |.else
3585 | fstp qword [rsp-8] // Use red zone.
3586 | movsd xmm0, qword [rsp-8]
3587 |.endif
3588 | ret
3589 |1: ; fxch; fscale; fpop1; jmp <7
3590 |2: ; cmp foldop, 11; je >1; ja >9
3591 | minsd xmm0, xmm1; ret
3592 |1: ; maxsd xmm0, xmm1; ret
3593 |9: ; int3 // Bad op.
3594 |
3595 |.elif SSE // x86 calling convention with SSE ops.
3596 |
3597 | .define foldop, eax
3598 | mov foldop, [esp+20]
3599 | movsd xmm0, qword [esp+4]
3600 | movsd xmm1, qword [esp+12]
3601 | cmp foldop, 1; je >1; ja >2
3602 | addsd xmm0, xmm1
3603 |7:
3604 | movsd qword [esp+4], xmm0 // Overwrite callee-owned args.
3605 | fld qword [esp+4]
3606 | ret
3607 |1: ; subsd xmm0, xmm1; jmp <7
3608 |2: ; cmp foldop, 3; je >1; ja >2
3609 | mulsd xmm0, xmm1; jmp <7
3610 |1: ; divsd xmm0, xmm1; jmp <7
3611 |2: ; cmp foldop, 5
3612 | je >1; ja >2
3613 | call ->vm_mod; jmp <7
3614 |1: ; pop edx; call ->vm_pow; push edx; jmp <7 // Writes to scratch area.
3615 |2: ; cmp foldop, 7; je >1; ja >2
3616 | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; jmp <7
3617 |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; jmp <7
3618 |2: ; cmp foldop, 9; ja >2
3619 | fld qword [esp+4] // Reload from stack
3620 | fld qword [esp+12]
3621 | je >1
3622 | fpatan; ret
3623 |1: ; fxch; fscale; fpop1; ret
3624 |2: ; cmp foldop, 11; je >1; ja >9
3625 | minsd xmm0, xmm1; jmp <7
3626 |1: ; maxsd xmm0, xmm1; jmp <7
3627 |9: ; int3 // Bad op.
3628 |
3629 |.else // x86 calling convention with x87 ops.
3630 |
3631 | mov eax, [esp+20]
3632 | fld qword [esp+4]
3633 | fld qword [esp+12]
3634 | cmp eax, 1; je >1; ja >2
3635 | faddp st1; ret
3636 |1: ; fsubp st1; ret
3637 |2: ; cmp eax, 3; je >1; ja >2
3638 | fmulp st1; ret
3639 |1: ; fdivp st1; ret
3640 |2: ; cmp eax, 5; jb ->vm_mod; je ->vm_pow
3641 | cmp eax, 7; je >1; ja >2
3642 | fpop; fchs; ret
3643 |1: ; fpop; fabs; ret
3644 |2: ; cmp eax, 9; je >1; ja >2
3645 | fpatan; ret
3646 |1: ; fxch; fscale; fpop1; ret
3647 |2: ; cmp eax, 11; je >1; ja >9
3648 | fucomi st1; fcmovnbe st1; fpop1; ret
3649 |1: ; fucomi st1; fcmovbe st1; fpop1; ret
3650 |9: ; int3 // Bad op.
3651 |
3652 |.endif
3653 |
3654 |//----------------------------------------------------------------------- 3088 |//-----------------------------------------------------------------------
3655 |//-- Miscellaneous functions -------------------------------------------- 3089 |//-- Miscellaneous functions --------------------------------------------
3656 |//----------------------------------------------------------------------- 3090 |//-----------------------------------------------------------------------
@@ -3661,6 +3095,7 @@ static void build_subroutines(BuildCtx *ctx)
3661 | mov eax, CARG1d 3095 | mov eax, CARG1d
3662 | .if X64WIN; push rsi; mov rsi, CARG2; .endif 3096 | .if X64WIN; push rsi; mov rsi, CARG2; .endif
3663 | push rbx 3097 | push rbx
3098 | xor ecx, ecx
3664 | cpuid 3099 | cpuid
3665 | mov [rsi], eax 3100 | mov [rsi], eax
3666 | mov [rsi+4], ebx 3101 | mov [rsi+4], ebx
@@ -3684,6 +3119,7 @@ static void build_subroutines(BuildCtx *ctx)
3684 | mov eax, [esp+4] // Argument 1 is function number. 3119 | mov eax, [esp+4] // Argument 1 is function number.
3685 | push edi 3120 | push edi
3686 | push ebx 3121 | push ebx
3122 | xor ecx, ecx
3687 | cpuid 3123 | cpuid
3688 | mov edi, [esp+16] // Argument 2 is result area. 3124 | mov edi, [esp+16] // Argument 2 is result area.
3689 | mov [edi], eax 3125 | mov [edi], eax
@@ -3961,19 +3397,12 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3961 | // RA is a number. 3397 | // RA is a number.
3962 | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp 3398 | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp
3963 | // RA is a number, RD is an integer. 3399 | // RA is a number, RD is an integer.
3964 |.if SSE
3965 | cvtsi2sd xmm0, dword [BASE+RD*8] 3400 | cvtsi2sd xmm0, dword [BASE+RD*8]
3966 | jmp >2 3401 | jmp >2
3967 |.else
3968 | fld qword [BASE+RA*8]
3969 | fild dword [BASE+RD*8]
3970 | jmp >3
3971 |.endif
3972 | 3402 |
3973 |8: // RA is an integer, RD is not an integer. 3403 |8: // RA is an integer, RD is not an integer.
3974 | ja ->vmeta_comp 3404 | ja ->vmeta_comp
3975 | // RA is an integer, RD is a number. 3405 | // RA is an integer, RD is a number.
3976 |.if SSE
3977 | cvtsi2sd xmm1, dword [BASE+RA*8] 3406 | cvtsi2sd xmm1, dword [BASE+RA*8]
3978 | movsd xmm0, qword [BASE+RD*8] 3407 | movsd xmm0, qword [BASE+RD*8]
3979 | add PC, 4 3408 | add PC, 4
@@ -3981,29 +3410,15 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
3981 | jmp_comp jbe, ja, jb, jae, <9 3410 | jmp_comp jbe, ja, jb, jae, <9
3982 | jmp <6 3411 | jmp <6
3983 |.else 3412 |.else
3984 | fild dword [BASE+RA*8]
3985 | jmp >2
3986 |.endif
3987 |.else
3988 | checknum RA, ->vmeta_comp 3413 | checknum RA, ->vmeta_comp
3989 | checknum RD, ->vmeta_comp 3414 | checknum RD, ->vmeta_comp
3990 |.endif 3415 |.endif
3991 |.if SSE
3992 |1: 3416 |1:
3993 | movsd xmm0, qword [BASE+RD*8] 3417 | movsd xmm0, qword [BASE+RD*8]
3994 |2: 3418 |2:
3995 | add PC, 4 3419 | add PC, 4
3996 | ucomisd xmm0, qword [BASE+RA*8] 3420 | ucomisd xmm0, qword [BASE+RA*8]
3997 |3: 3421 |3:
3998 |.else
3999 |1:
4000 | fld qword [BASE+RA*8] // Reverse order, i.e like cmp D, A.
4001 |2:
4002 | fld qword [BASE+RD*8]
4003 |3:
4004 | add PC, 4
4005 | fcomparepp
4006 |.endif
4007 | // Unordered: all of ZF CF PF set, ordered: PF clear. 3422 | // Unordered: all of ZF CF PF set, ordered: PF clear.
4008 | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't. 3423 | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
4009 |.if DUALNUM 3424 |.if DUALNUM
@@ -4043,43 +3458,25 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4043 | // RD is a number. 3458 | // RD is a number.
4044 | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5 3459 | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5
4045 | // RD is a number, RA is an integer. 3460 | // RD is a number, RA is an integer.
4046 |.if SSE
4047 | cvtsi2sd xmm0, dword [BASE+RA*8] 3461 | cvtsi2sd xmm0, dword [BASE+RA*8]
4048 |.else
4049 | fild dword [BASE+RA*8]
4050 |.endif
4051 | jmp >2 3462 | jmp >2
4052 | 3463 |
4053 |8: // RD is an integer, RA is not an integer. 3464 |8: // RD is an integer, RA is not an integer.
4054 | ja >5 3465 | ja >5
4055 | // RD is an integer, RA is a number. 3466 | // RD is an integer, RA is a number.
4056 |.if SSE
4057 | cvtsi2sd xmm0, dword [BASE+RD*8] 3467 | cvtsi2sd xmm0, dword [BASE+RD*8]
4058 | ucomisd xmm0, qword [BASE+RA*8] 3468 | ucomisd xmm0, qword [BASE+RA*8]
4059 |.else
4060 | fild dword [BASE+RD*8]
4061 | fld qword [BASE+RA*8]
4062 |.endif
4063 | jmp >4 3469 | jmp >4
4064 | 3470 |
4065 |.else 3471 |.else
4066 | cmp RB, LJ_TISNUM; jae >5 3472 | cmp RB, LJ_TISNUM; jae >5
4067 | checknum RA, >5 3473 | checknum RA, >5
4068 |.endif 3474 |.endif
4069 |.if SSE
4070 |1: 3475 |1:
4071 | movsd xmm0, qword [BASE+RA*8] 3476 | movsd xmm0, qword [BASE+RA*8]
4072 |2: 3477 |2:
4073 | ucomisd xmm0, qword [BASE+RD*8] 3478 | ucomisd xmm0, qword [BASE+RD*8]
4074 |4: 3479 |4:
4075 |.else
4076 |1:
4077 | fld qword [BASE+RA*8]
4078 |2:
4079 | fld qword [BASE+RD*8]
4080 |4:
4081 | fcomparepp
4082 |.endif
4083 iseqne_fp: 3480 iseqne_fp:
4084 if (vk) { 3481 if (vk) {
4085 | jp >2 // Unordered means not equal. 3482 | jp >2 // Unordered means not equal.
@@ -4202,39 +3599,21 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4202 | // RA is a number. 3599 | // RA is a number.
4203 | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1 3600 | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1
4204 | // RA is a number, RD is an integer. 3601 | // RA is a number, RD is an integer.
4205 |.if SSE
4206 | cvtsi2sd xmm0, dword [KBASE+RD*8] 3602 | cvtsi2sd xmm0, dword [KBASE+RD*8]
4207 |.else
4208 | fild dword [KBASE+RD*8]
4209 |.endif
4210 | jmp >2 3603 | jmp >2
4211 | 3604 |
4212 |8: // RA is an integer, RD is a number. 3605 |8: // RA is an integer, RD is a number.
4213 |.if SSE
4214 | cvtsi2sd xmm0, dword [BASE+RA*8] 3606 | cvtsi2sd xmm0, dword [BASE+RA*8]
4215 | ucomisd xmm0, qword [KBASE+RD*8] 3607 | ucomisd xmm0, qword [KBASE+RD*8]
4216 |.else
4217 | fild dword [BASE+RA*8]
4218 | fld qword [KBASE+RD*8]
4219 |.endif
4220 | jmp >4 3608 | jmp >4
4221 |.else 3609 |.else
4222 | cmp RB, LJ_TISNUM; jae >3 3610 | cmp RB, LJ_TISNUM; jae >3
4223 |.endif 3611 |.endif
4224 |.if SSE
4225 |1: 3612 |1:
4226 | movsd xmm0, qword [KBASE+RD*8] 3613 | movsd xmm0, qword [KBASE+RD*8]
4227 |2: 3614 |2:
4228 | ucomisd xmm0, qword [BASE+RA*8] 3615 | ucomisd xmm0, qword [BASE+RA*8]
4229 |4: 3616 |4:
4230 |.else
4231 |1:
4232 | fld qword [KBASE+RD*8]
4233 |2:
4234 | fld qword [BASE+RA*8]
4235 |4:
4236 | fcomparepp
4237 |.endif
4238 goto iseqne_fp; 3617 goto iseqne_fp;
4239 case BC_ISEQP: case BC_ISNEP: 3618 case BC_ISEQP: case BC_ISNEP:
4240 vk = op == BC_ISEQP; 3619 vk = op == BC_ISEQP;
@@ -4285,6 +3664,18 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4285 | ins_next 3664 | ins_next
4286 break; 3665 break;
4287 3666
3667 case BC_ISTYPE:
3668 | ins_AD // RA = src, RD = -type
3669 | add RD, [BASE+RA*8+4]
3670 | jne ->vmeta_istype
3671 | ins_next
3672 break;
3673 case BC_ISNUM:
3674 | ins_AD // RA = src, RD = -(TISNUM-1)
3675 | checknum RA, ->vmeta_istype
3676 | ins_next
3677 break;
3678
4288 /* -- Unary ops --------------------------------------------------------- */ 3679 /* -- Unary ops --------------------------------------------------------- */
4289 3680
4290 case BC_MOV: 3681 case BC_MOV:
@@ -4328,16 +3719,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4328 |.else 3719 |.else
4329 | checknum RD, ->vmeta_unm 3720 | checknum RD, ->vmeta_unm
4330 |.endif 3721 |.endif
4331 |.if SSE
4332 | movsd xmm0, qword [BASE+RD*8] 3722 | movsd xmm0, qword [BASE+RD*8]
4333 | sseconst_sign xmm1, RDa 3723 | sseconst_sign xmm1, RDa
4334 | xorps xmm0, xmm1 3724 | xorps xmm0, xmm1
4335 | movsd qword [BASE+RA*8], xmm0 3725 | movsd qword [BASE+RA*8], xmm0
4336 |.else
4337 | fld qword [BASE+RD*8]
4338 | fchs
4339 | fstp qword [BASE+RA*8]
4340 |.endif
4341 |.if DUALNUM 3726 |.if DUALNUM
4342 | jmp <9 3727 | jmp <9
4343 |.else 3728 |.else
@@ -4353,15 +3738,11 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4353 |1: 3738 |1:
4354 | mov dword [BASE+RA*8+4], LJ_TISNUM 3739 | mov dword [BASE+RA*8+4], LJ_TISNUM
4355 | mov dword [BASE+RA*8], RD 3740 | mov dword [BASE+RA*8], RD
4356 |.elif SSE 3741 |.else
4357 | xorps xmm0, xmm0 3742 | xorps xmm0, xmm0
4358 | cvtsi2sd xmm0, dword STR:RD->len 3743 | cvtsi2sd xmm0, dword STR:RD->len
4359 |1: 3744 |1:
4360 | movsd qword [BASE+RA*8], xmm0 3745 | movsd qword [BASE+RA*8], xmm0
4361 |.else
4362 | fild dword STR:RD->len
4363 |1:
4364 | fstp qword [BASE+RA*8]
4365 |.endif 3746 |.endif
4366 | ins_next 3747 | ins_next
4367 |2: 3748 |2:
@@ -4379,11 +3760,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4379 | // Length of table returned in eax (RD). 3760 | // Length of table returned in eax (RD).
4380 |.if DUALNUM 3761 |.if DUALNUM
4381 | // Nothing to do. 3762 | // Nothing to do.
4382 |.elif SSE
4383 | cvtsi2sd xmm0, RD
4384 |.else 3763 |.else
4385 | mov ARG1, RD 3764 | cvtsi2sd xmm0, RD
4386 | fild ARG1
4387 |.endif 3765 |.endif
4388 | mov BASE, RB // Restore BASE. 3766 | mov BASE, RB // Restore BASE.
4389 | movzx RA, PC_RA 3767 | movzx RA, PC_RA
@@ -4398,7 +3776,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4398 3776
4399 /* -- Binary ops -------------------------------------------------------- */ 3777 /* -- Binary ops -------------------------------------------------------- */
4400 3778
4401 |.macro ins_arithpre, x87ins, sseins, ssereg 3779 |.macro ins_arithpre, sseins, ssereg
4402 | ins_ABC 3780 | ins_ABC
4403 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN); 3781 ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
4404 ||switch (vk) { 3782 ||switch (vk) {
@@ -4407,37 +3785,22 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4407 | .if DUALNUM 3785 | .if DUALNUM
4408 | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn 3786 | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn
4409 | .endif 3787 | .endif
4410 | .if SSE 3788 | movsd xmm0, qword [BASE+RB*8]
4411 | movsd xmm0, qword [BASE+RB*8] 3789 | sseins ssereg, qword [KBASE+RC*8]
4412 | sseins ssereg, qword [KBASE+RC*8]
4413 | .else
4414 | fld qword [BASE+RB*8]
4415 | x87ins qword [KBASE+RC*8]
4416 | .endif
4417 || break; 3790 || break;
4418 ||case 1: 3791 ||case 1:
4419 | checknum RB, ->vmeta_arith_nv 3792 | checknum RB, ->vmeta_arith_nv
4420 | .if DUALNUM 3793 | .if DUALNUM
4421 | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv 3794 | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv
4422 | .endif 3795 | .endif
4423 | .if SSE 3796 | movsd xmm0, qword [KBASE+RC*8]
4424 | movsd xmm0, qword [KBASE+RC*8] 3797 | sseins ssereg, qword [BASE+RB*8]
4425 | sseins ssereg, qword [BASE+RB*8]
4426 | .else
4427 | fld qword [KBASE+RC*8]
4428 | x87ins qword [BASE+RB*8]
4429 | .endif
4430 || break; 3798 || break;
4431 ||default: 3799 ||default:
4432 | checknum RB, ->vmeta_arith_vv 3800 | checknum RB, ->vmeta_arith_vv
4433 | checknum RC, ->vmeta_arith_vv 3801 | checknum RC, ->vmeta_arith_vv
4434 | .if SSE 3802 | movsd xmm0, qword [BASE+RB*8]
4435 | movsd xmm0, qword [BASE+RB*8] 3803 | sseins ssereg, qword [BASE+RC*8]
4436 | sseins ssereg, qword [BASE+RC*8]
4437 | .else
4438 | fld qword [BASE+RB*8]
4439 | x87ins qword [BASE+RC*8]
4440 | .endif
4441 || break; 3804 || break;
4442 ||} 3805 ||}
4443 |.endmacro 3806 |.endmacro
@@ -4475,55 +3838,62 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4475 |.endmacro 3838 |.endmacro
4476 | 3839 |
4477 |.macro ins_arithpost 3840 |.macro ins_arithpost
4478 |.if SSE
4479 | movsd qword [BASE+RA*8], xmm0 3841 | movsd qword [BASE+RA*8], xmm0
4480 |.else
4481 | fstp qword [BASE+RA*8]
4482 |.endif
4483 |.endmacro 3842 |.endmacro
4484 | 3843 |
4485 |.macro ins_arith, x87ins, sseins 3844 |.macro ins_arith, sseins
4486 | ins_arithpre x87ins, sseins, xmm0 3845 | ins_arithpre sseins, xmm0
4487 | ins_arithpost 3846 | ins_arithpost
4488 | ins_next 3847 | ins_next
4489 |.endmacro 3848 |.endmacro
4490 | 3849 |
4491 |.macro ins_arith, intins, x87ins, sseins 3850 |.macro ins_arith, intins, sseins
4492 |.if DUALNUM 3851 |.if DUALNUM
4493 | ins_arithdn intins 3852 | ins_arithdn intins
4494 |.else 3853 |.else
4495 | ins_arith, x87ins, sseins 3854 | ins_arith, sseins
4496 |.endif 3855 |.endif
4497 |.endmacro 3856 |.endmacro
4498 3857
4499 | // RA = dst, RB = src1 or num const, RC = src2 or num const 3858 | // RA = dst, RB = src1 or num const, RC = src2 or num const
4500 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV: 3859 case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
4501 | ins_arith add, fadd, addsd 3860 | ins_arith add, addsd
4502 break; 3861 break;
4503 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV: 3862 case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
4504 | ins_arith sub, fsub, subsd 3863 | ins_arith sub, subsd
4505 break; 3864 break;
4506 case BC_MULVN: case BC_MULNV: case BC_MULVV: 3865 case BC_MULVN: case BC_MULNV: case BC_MULVV:
4507 | ins_arith imul, fmul, mulsd 3866 | ins_arith imul, mulsd
4508 break; 3867 break;
4509 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV: 3868 case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
4510 | ins_arith fdiv, divsd 3869 | ins_arith divsd
4511 break; 3870 break;
4512 case BC_MODVN: 3871 case BC_MODVN:
4513 | ins_arithpre fld, movsd, xmm1 3872 | ins_arithpre movsd, xmm1
4514 |->BC_MODVN_Z: 3873 |->BC_MODVN_Z:
4515 | call ->vm_mod 3874 | call ->vm_mod
4516 | ins_arithpost 3875 | ins_arithpost
4517 | ins_next 3876 | ins_next
4518 break; 3877 break;
4519 case BC_MODNV: case BC_MODVV: 3878 case BC_MODNV: case BC_MODVV:
4520 | ins_arithpre fld, movsd, xmm1 3879 | ins_arithpre movsd, xmm1
4521 | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway. 3880 | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
4522 break; 3881 break;
4523 case BC_POW: 3882 case BC_POW:
4524 | ins_arithpre fld, movsd, xmm1 3883 | ins_arithpre movsd, xmm1
4525 | call ->vm_pow 3884 | mov RB, BASE
3885 |.if not X64
3886 | movsd FPARG1, xmm0
3887 | movsd FPARG3, xmm1
3888 |.endif
3889 | call extern pow
3890 | movzx RA, PC_RA
3891 | mov BASE, RB
3892 |.if X64
4526 | ins_arithpost 3893 | ins_arithpost
3894 |.else
3895 | fstp qword [BASE+RA*8]
3896 |.endif
4527 | ins_next 3897 | ins_next
4528 break; 3898 break;
4529 3899
@@ -4591,25 +3961,17 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4591 | movsx RD, RDW 3961 | movsx RD, RDW
4592 | mov dword [BASE+RA*8+4], LJ_TISNUM 3962 | mov dword [BASE+RA*8+4], LJ_TISNUM
4593 | mov dword [BASE+RA*8], RD 3963 | mov dword [BASE+RA*8], RD
4594 |.elif SSE 3964 |.else
4595 | movsx RD, RDW // Sign-extend literal. 3965 | movsx RD, RDW // Sign-extend literal.
4596 | cvtsi2sd xmm0, RD 3966 | cvtsi2sd xmm0, RD
4597 | movsd qword [BASE+RA*8], xmm0 3967 | movsd qword [BASE+RA*8], xmm0
4598 |.else
4599 | fild PC_RD // Refetch signed RD from instruction.
4600 | fstp qword [BASE+RA*8]
4601 |.endif 3968 |.endif
4602 | ins_next 3969 | ins_next
4603 break; 3970 break;
4604 case BC_KNUM: 3971 case BC_KNUM:
4605 | ins_AD // RA = dst, RD = num const 3972 | ins_AD // RA = dst, RD = num const
4606 |.if SSE
4607 | movsd xmm0, qword [KBASE+RD*8] 3973 | movsd xmm0, qword [KBASE+RD*8]
4608 | movsd qword [BASE+RA*8], xmm0 3974 | movsd qword [BASE+RA*8], xmm0
4609 |.else
4610 | fld qword [KBASE+RD*8]
4611 | fstp qword [BASE+RA*8]
4612 |.endif
4613 | ins_next 3975 | ins_next
4614 break; 3976 break;
4615 case BC_KPRI: 3977 case BC_KPRI:
@@ -4716,18 +4078,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4716 case BC_USETN: 4078 case BC_USETN:
4717 | ins_AD // RA = upvalue #, RD = num const 4079 | ins_AD // RA = upvalue #, RD = num const
4718 | mov LFUNC:RB, [BASE-8] 4080 | mov LFUNC:RB, [BASE-8]
4719 |.if SSE
4720 | movsd xmm0, qword [KBASE+RD*8] 4081 | movsd xmm0, qword [KBASE+RD*8]
4721 |.else
4722 | fld qword [KBASE+RD*8]
4723 |.endif
4724 | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)] 4082 | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
4725 | mov RA, UPVAL:RB->v 4083 | mov RA, UPVAL:RB->v
4726 |.if SSE
4727 | movsd qword [RA], xmm0 4084 | movsd qword [RA], xmm0
4728 |.else
4729 | fstp qword [RA]
4730 |.endif
4731 | ins_next 4085 | ins_next
4732 break; 4086 break;
4733 case BC_USETP: 4087 case BC_USETP:
@@ -4881,18 +4235,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4881 |.else 4235 |.else
4882 | // Convert number to int and back and compare. 4236 | // Convert number to int and back and compare.
4883 | checknum RC, >5 4237 | checknum RC, >5
4884 |.if SSE
4885 | movsd xmm0, qword [BASE+RC*8] 4238 | movsd xmm0, qword [BASE+RC*8]
4886 | cvtsd2si RC, xmm0 4239 | cvttsd2si RC, xmm0
4887 | cvtsi2sd xmm1, RC 4240 | cvtsi2sd xmm1, RC
4888 | ucomisd xmm0, xmm1 4241 | ucomisd xmm0, xmm1
4889 |.else
4890 | fld qword [BASE+RC*8]
4891 | fist ARG1
4892 | fild ARG1
4893 | fcomparepp
4894 | mov RC, ARG1
4895 |.endif
4896 | jne ->vmeta_tgetv // Generic numeric key? Use fallback. 4242 | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
4897 |.endif 4243 |.endif
4898 | cmp RC, TAB:RB->asize // Takes care of unordered, too. 4244 | cmp RC, TAB:RB->asize // Takes care of unordered, too.
@@ -4938,7 +4284,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
4938 | mov TAB:RB, [BASE+RB*8] 4284 | mov TAB:RB, [BASE+RB*8]
4939 |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. 4285 |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
4940 | mov RA, TAB:RB->hmask 4286 | mov RA, TAB:RB->hmask
4941 | and RA, STR:RC->hash 4287 | and RA, STR:RC->sid
4942 | imul RA, #NODE 4288 | imul RA, #NODE
4943 | add NODE:RA, TAB:RB->node 4289 | add NODE:RA, TAB:RB->node
4944 |1: 4290 |1:
@@ -5016,6 +4362,32 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5016 | mov dword [BASE+RA*8+4], LJ_TNIL 4362 | mov dword [BASE+RA*8+4], LJ_TNIL
5017 | jmp <1 4363 | jmp <1
5018 break; 4364 break;
4365 case BC_TGETR:
4366 | ins_ABC // RA = dst, RB = table, RC = key
4367 | mov TAB:RB, [BASE+RB*8]
4368 |.if DUALNUM
4369 | mov RC, dword [BASE+RC*8]
4370 |.else
4371 | cvttsd2si RC, qword [BASE+RC*8]
4372 |.endif
4373 | cmp RC, TAB:RB->asize
4374 | jae ->vmeta_tgetr // Not in array part? Use fallback.
4375 | shl RC, 3
4376 | add RC, TAB:RB->array
4377 | // Get array slot.
4378 |->BC_TGETR_Z:
4379 |.if X64
4380 | mov RBa, [RC]
4381 | mov [BASE+RA*8], RBa
4382 |.else
4383 | mov RB, [RC]
4384 | mov RC, [RC+4]
4385 | mov [BASE+RA*8], RB
4386 | mov [BASE+RA*8+4], RC
4387 |.endif
4388 |->BC_TGETR2_Z:
4389 | ins_next
4390 break;
5019 4391
5020 case BC_TSETV: 4392 case BC_TSETV:
5021 | ins_ABC // RA = src, RB = table, RC = key 4393 | ins_ABC // RA = src, RB = table, RC = key
@@ -5029,18 +4401,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5029 |.else 4401 |.else
5030 | // Convert number to int and back and compare. 4402 | // Convert number to int and back and compare.
5031 | checknum RC, >5 4403 | checknum RC, >5
5032 |.if SSE
5033 | movsd xmm0, qword [BASE+RC*8] 4404 | movsd xmm0, qword [BASE+RC*8]
5034 | cvtsd2si RC, xmm0 4405 | cvttsd2si RC, xmm0
5035 | cvtsi2sd xmm1, RC 4406 | cvtsi2sd xmm1, RC
5036 | ucomisd xmm0, xmm1 4407 | ucomisd xmm0, xmm1
5037 |.else
5038 | fld qword [BASE+RC*8]
5039 | fist ARG1
5040 | fild ARG1
5041 | fcomparepp
5042 | mov RC, ARG1
5043 |.endif
5044 | jne ->vmeta_tsetv // Generic numeric key? Use fallback. 4408 | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
5045 |.endif 4409 |.endif
5046 | cmp RC, TAB:RB->asize // Takes care of unordered, too. 4410 | cmp RC, TAB:RB->asize // Takes care of unordered, too.
@@ -5091,7 +4455,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5091 | mov TAB:RB, [BASE+RB*8] 4455 | mov TAB:RB, [BASE+RB*8]
5092 |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA. 4456 |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
5093 | mov RA, TAB:RB->hmask 4457 | mov RA, TAB:RB->hmask
5094 | and RA, STR:RC->hash 4458 | and RA, STR:RC->sid
5095 | imul RA, #NODE 4459 | imul RA, #NODE
5096 | mov byte TAB:RB->nomm, 0 // Clear metamethod cache. 4460 | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
5097 | add NODE:RA, TAB:RB->node 4461 | add NODE:RA, TAB:RB->node
@@ -5210,6 +4574,39 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5210 | movzx RA, PC_RA // Restore RA. 4574 | movzx RA, PC_RA // Restore RA.
5211 | jmp <2 4575 | jmp <2
5212 break; 4576 break;
4577 case BC_TSETR:
4578 | ins_ABC // RA = src, RB = table, RC = key
4579 | mov TAB:RB, [BASE+RB*8]
4580 |.if DUALNUM
4581 | mov RC, dword [BASE+RC*8]
4582 |.else
4583 | cvttsd2si RC, qword [BASE+RC*8]
4584 |.endif
4585 | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
4586 | jnz >7
4587 |2:
4588 | cmp RC, TAB:RB->asize
4589 | jae ->vmeta_tsetr
4590 | shl RC, 3
4591 | add RC, TAB:RB->array
4592 | // Set array slot.
4593 |->BC_TSETR_Z:
4594 |.if X64
4595 | mov RBa, [BASE+RA*8]
4596 | mov [RC], RBa
4597 |.else
4598 | mov RB, [BASE+RA*8+4]
4599 | mov RA, [BASE+RA*8]
4600 | mov [RC+4], RB
4601 | mov [RC], RA
4602 |.endif
4603 | ins_next
4604 |
4605 |7: // Possible table write barrier for the value. Skip valiswhite check.
4606 | barrierback TAB:RB, RA
4607 | movzx RA, PC_RA // Restore RA.
4608 | jmp <2
4609 break;
5213 4610
5214 case BC_TSETM: 4611 case BC_TSETM:
5215 | ins_AD // RA = base (table at base-1), RD = num const (start index) 4612 | ins_AD // RA = base (table at base-1), RD = num const (start index)
@@ -5403,10 +4800,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5403 |.if DUALNUM 4800 |.if DUALNUM
5404 | mov dword [BASE+RA*8+4], LJ_TISNUM 4801 | mov dword [BASE+RA*8+4], LJ_TISNUM
5405 | mov dword [BASE+RA*8], RC 4802 | mov dword [BASE+RA*8], RC
5406 |.elif SSE
5407 | cvtsi2sd xmm0, RC
5408 |.else 4803 |.else
5409 | fild dword [BASE+RA*8-8] 4804 | cvtsi2sd xmm0, RC
5410 |.endif 4805 |.endif
5411 | // Copy array slot to returned value. 4806 | // Copy array slot to returned value.
5412 |.if X64 4807 |.if X64
@@ -5422,10 +4817,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5422 | // Return array index as a numeric key. 4817 | // Return array index as a numeric key.
5423 |.if DUALNUM 4818 |.if DUALNUM
5424 | // See above. 4819 | // See above.
5425 |.elif SSE
5426 | movsd qword [BASE+RA*8], xmm0
5427 |.else 4820 |.else
5428 | fstp qword [BASE+RA*8] 4821 | movsd qword [BASE+RA*8], xmm0
5429 |.endif 4822 |.endif
5430 | mov [BASE+RA*8-8], RC // Update control var. 4823 | mov [BASE+RA*8-8], RC // Update control var.
5431 |2: 4824 |2:
@@ -5438,9 +4831,6 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5438 | 4831 |
5439 |4: // Skip holes in array part. 4832 |4: // Skip holes in array part.
5440 | add RC, 1 4833 | add RC, 1
5441 |.if not (DUALNUM or SSE)
5442 | mov [BASE+RA*8-8], RC
5443 |.endif
5444 | jmp <1 4834 | jmp <1
5445 | 4835 |
5446 |5: // Traverse hash part. 4836 |5: // Traverse hash part.
@@ -5774,7 +5164,6 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5774 if (!vk) { 5164 if (!vk) {
5775 | cmp RB, LJ_TISNUM; jae ->vmeta_for 5165 | cmp RB, LJ_TISNUM; jae ->vmeta_for
5776 } 5166 }
5777 |.if SSE
5778 | movsd xmm0, qword FOR_IDX 5167 | movsd xmm0, qword FOR_IDX
5779 | movsd xmm1, qword FOR_STOP 5168 | movsd xmm1, qword FOR_STOP
5780 if (vk) { 5169 if (vk) {
@@ -5787,22 +5176,6 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5787 | ucomisd xmm1, xmm0 5176 | ucomisd xmm1, xmm0
5788 |1: 5177 |1:
5789 | movsd qword FOR_EXT, xmm0 5178 | movsd qword FOR_EXT, xmm0
5790 |.else
5791 | fld qword FOR_STOP
5792 | fld qword FOR_IDX
5793 if (vk) {
5794 | fadd qword FOR_STEP // nidx = idx + step
5795 | fst qword FOR_IDX
5796 | fst qword FOR_EXT
5797 | test RB, RB; js >1
5798 } else {
5799 | fst qword FOR_EXT
5800 | jl >1
5801 }
5802 | fxch // Swap lim/(n)idx if step non-negative.
5803 |1:
5804 | fcomparepp
5805 |.endif
5806 if (op == BC_FORI) { 5179 if (op == BC_FORI) {
5807 |.if DUALNUM 5180 |.if DUALNUM
5808 | jnb <7 5181 | jnb <7
@@ -5830,11 +5203,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5830 |2: 5203 |2:
5831 | ins_next 5204 | ins_next
5832 |.endif 5205 |.endif
5833 |.if SSE 5206 |
5834 |3: // Invert comparison if step is negative. 5207 |3: // Invert comparison if step is negative.
5835 | ucomisd xmm0, xmm1 5208 | ucomisd xmm0, xmm1
5836 | jmp <1 5209 | jmp <1
5837 |.endif
5838 break; 5210 break;
5839 5211
5840 case BC_ITERL: 5212 case BC_ITERL:
@@ -5872,7 +5244,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5872 | ins_A // RA = base, RD = target (loop extent) 5244 | ins_A // RA = base, RD = target (loop extent)
5873 | // Note: RA/RD is only used by trace recorder to determine scope/extent 5245 | // Note: RA/RD is only used by trace recorder to determine scope/extent
5874 | // This opcode does NOT jump, it's only purpose is to detect a hot loop. 5246 | // This opcode does NOT jump, it's only purpose is to detect a hot loop.
5875 |.if JIT 5247 |.if JIT
5876 | hotloop RB 5248 | hotloop RB
5877 |.endif 5249 |.endif
5878 | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op. 5250 | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
@@ -5891,7 +5263,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
5891 | mov RDa, TRACE:RD->mcode 5263 | mov RDa, TRACE:RD->mcode
5892 | mov L:RB, SAVE_L 5264 | mov L:RB, SAVE_L
5893 | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE 5265 | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
5894 | mov [DISPATCH+DISPATCH_GL(jit_L)], L:RB 5266 | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB
5895 | // Save additional callee-save registers only used in compiled code. 5267 | // Save additional callee-save registers only used in compiled code.
5896 |.if X64WIN 5268 |.if X64WIN
5897 | mov TMPQ, r12 5269 | mov TMPQ, r12
@@ -6058,9 +5430,10 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
6058 | // (lua_State *L, lua_CFunction f) 5430 | // (lua_State *L, lua_CFunction f)
6059 | call aword [DISPATCH+DISPATCH_GL(wrapf)] 5431 | call aword [DISPATCH+DISPATCH_GL(wrapf)]
6060 } 5432 }
6061 | set_vmstate INTERP
6062 | // nresults returned in eax (RD). 5433 | // nresults returned in eax (RD).
6063 | mov BASE, L:RB->base 5434 | mov BASE, L:RB->base
5435 | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
5436 | set_vmstate INTERP
6064 | lea RA, [BASE+RD*8] 5437 | lea RA, [BASE+RD*8]
6065 | neg RA 5438 | neg RA
6066 | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8 5439 | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
@@ -6173,7 +5546,7 @@ static void emit_asm_debug(BuildCtx *ctx)
6173 ".LEFDE1:\n\n", (int)ctx->codesz - fcofs); 5546 ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
6174#endif 5547#endif
6175#if !LJ_NO_UNWIND 5548#if !LJ_NO_UNWIND
6176#if (defined(__sun__) && defined(__svr4__)) 5549#if LJ_TARGET_SOLARIS
6177#if LJ_64 5550#if LJ_64
6178 fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n"); 5551 fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n");
6179#else 5552#else
@@ -6380,15 +5753,21 @@ static void emit_asm_debug(BuildCtx *ctx)
6380 "LEFDEY:\n\n", fcsize); 5753 "LEFDEY:\n\n", fcsize);
6381 } 5754 }
6382#endif 5755#endif
6383#if LJ_64 5756#if !LJ_64
6384 fprintf(ctx->fp, "\t.subsections_via_symbols\n");
6385#else
6386 fprintf(ctx->fp, 5757 fprintf(ctx->fp,
6387 "\t.non_lazy_symbol_pointer\n" 5758 "\t.non_lazy_symbol_pointer\n"
6388 "L_lj_err_unwind_dwarf$non_lazy_ptr:\n" 5759 "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
6389 ".indirect_symbol _lj_err_unwind_dwarf\n" 5760 ".indirect_symbol _lj_err_unwind_dwarf\n"
6390 ".long 0\n"); 5761 ".long 0\n\n");
5762 fprintf(ctx->fp, "\t.section __IMPORT,__jump_table,symbol_stubs,pure_instructions+self_modifying_code,5\n");
5763 {
5764 const char *const *xn;
5765 for (xn = ctx->extnames; *xn; xn++)
5766 if (strncmp(*xn, LABEL_PREFIX, sizeof(LABEL_PREFIX)-1))
5767 fprintf(ctx->fp, "L_%s$stub:\n\t.indirect_symbol _%s\n\t.ascii \"\\364\\364\\364\\364\\364\"\n", *xn, *xn);
5768 }
6391#endif 5769#endif
5770 fprintf(ctx->fp, ".subsections_via_symbols\n");
6392 } 5771 }
6393 break; 5772 break;
6394#endif 5773#endif
diff --git a/src/xb1build.bat b/src/xb1build.bat
new file mode 100644
index 00000000..12c73dd6
--- /dev/null
+++ b/src/xb1build.bat
@@ -0,0 +1,101 @@
1@rem Script to build LuaJIT with the Xbox One SDK.
2@rem Donated to the public domain.
3@rem
4@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler)
5@rem Then cd to this directory and run this script.
6
7@if not defined INCLUDE goto :FAIL
8@if not defined DurangoXDK goto :FAIL
9
10@setlocal
11@echo ---- Host compiler ----
12@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
13@set LJLINK=link /nologo
14@set LJMT=mt /nologo
15@set DASMDIR=..\dynasm
16@set DASM=%DASMDIR%\dynasm.lua
17@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c
18
19%LJCOMPILE% host\minilua.c
20@if errorlevel 1 goto :BAD
21%LJLINK% /out:minilua.exe minilua.obj
22@if errorlevel 1 goto :BAD
23if exist minilua.exe.manifest^
24 %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
25
26@rem Error out for 64 bit host compiler
27@minilua
28@if not errorlevel 8 goto :FAIL
29
30@set DASMFLAGS=-D WIN -D FFI -D P64
31minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x64.dasc
32@if errorlevel 1 goto :BAD
33
34%LJCOMPILE% /I "." /I %DASMDIR% /D_DURANGO host\buildvm*.c
35@if errorlevel 1 goto :BAD
36%LJLINK% /out:buildvm.exe buildvm*.obj
37@if errorlevel 1 goto :BAD
38if exist buildvm.exe.manifest^
39 %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
40
41buildvm -m peobj -o lj_vm.obj
42@if errorlevel 1 goto :BAD
43buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
44@if errorlevel 1 goto :BAD
45buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
46@if errorlevel 1 goto :BAD
47buildvm -m libdef -o lj_libdef.h %ALL_LIB%
48@if errorlevel 1 goto :BAD
49buildvm -m recdef -o lj_recdef.h %ALL_LIB%
50@if errorlevel 1 goto :BAD
51buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
52@if errorlevel 1 goto :BAD
53buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
54@if errorlevel 1 goto :BAD
55
56@echo ---- Cross compiler ----
57
58@set CWD=%cd%
59@call "%DurangoXDK%\xdk\DurangoVars.cmd" XDK
60@cd /D "%CWD%"
61@shift
62
63@set LJCOMPILE="cl" /nologo /c /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /D_LIB /D_UNICODE /D_DURANGO
64@set LJLIB="lib" /nologo
65
66@if "%1"=="debug" (
67 @shift
68 @set LJCOMPILE=%LJCOMPILE% /Zi /MDd /Od
69 @set LJLINK=%LJLINK% /debug
70) else (
71 @set LJCOMPILE=%LJCOMPILE% /MD /O2 /DNDEBUG
72)
73
74@if "%1"=="amalg" goto :AMALG
75%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
76@if errorlevel 1 goto :BAD
77%LJLIB% /OUT:luajit.lib lj_*.obj lib_*.obj
78@if errorlevel 1 goto :BAD
79@goto :NOAMALG
80:AMALG
81%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
82@if errorlevel 1 goto :BAD
83%LJLIB% /OUT:luajit.lib ljamalg.obj lj_vm.obj
84@if errorlevel 1 goto :BAD
85:NOAMALG
86
87@del *.obj *.manifest minilua.exe buildvm.exe
88@echo.
89@echo === Successfully built LuaJIT for Xbox One ===
90
91@goto :END
92:BAD
93@echo.
94@echo *******************************************************
95@echo *** Build FAILED -- Please check the error messages ***
96@echo *******************************************************
97@goto :END
98:FAIL
99@echo To run this script you must open a "Visual Studio .NET Command Prompt"
100@echo (64 bit host compiler). The Xbox One SDK must be installed, too.
101:END