diff options
| author | Mike Pall <mike> | 2020-06-13 00:52:54 +0200 |
|---|---|---|
| committer | Mike Pall <mike> | 2020-06-15 02:52:00 +0200 |
| commit | 8ae5170cdc9c307bd81019b3e014391c9fd00581 (patch) | |
| tree | ccf9f17035d0754c1758faee209e9a26b4e03418 | |
| parent | 8b55054473452963f24b01efb7c4cc72497c74ec (diff) | |
| download | luajit-8ae5170cdc9c307bd81019b3e014391c9fd00581.tar.gz luajit-8ae5170cdc9c307bd81019b3e014391c9fd00581.tar.bz2 luajit-8ae5170cdc9c307bd81019b3e014391c9fd00581.zip | |
Improve assertions.
71 files changed, 1363 insertions, 927 deletions
diff --git a/src/Makefile b/src/Makefile index 82a57032..a96c1997 100644 --- a/src/Makefile +++ b/src/Makefile | |||
| @@ -484,7 +484,7 @@ LJLIB_O= lib_base.o lib_math.o lib_bit.o lib_string.o lib_table.o \ | |||
| 484 | lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o | 484 | lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o |
| 485 | LJLIB_C= $(LJLIB_O:.o=.c) | 485 | LJLIB_C= $(LJLIB_O:.o=.c) |
| 486 | 486 | ||
| 487 | LJCORE_O= lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \ | 487 | LJCORE_O= lj_assert.o lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \ |
| 488 | lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \ | 488 | lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \ |
| 489 | lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o lj_strscan.o \ | 489 | lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o lj_strscan.o \ |
| 490 | lj_strfmt.o lj_strfmt_num.o lj_api.o lj_profile.o \ | 490 | lj_strfmt.o lj_strfmt_num.o lj_api.o lj_profile.o \ |
diff --git a/src/Makefile.dep b/src/Makefile.dep index 2b1cb5ef..03dba96b 100644 --- a/src/Makefile.dep +++ b/src/Makefile.dep | |||
| @@ -51,6 +51,7 @@ lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \ | |||
| 51 | lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \ | 51 | lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \ |
| 52 | lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \ | 52 | lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \ |
| 53 | lj_asm_*.h | 53 | lj_asm_*.h |
| 54 | lj_assert.o: lj_assert.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h | ||
| 54 | lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \ | 55 | lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \ |
| 55 | lj_bcdef.h | 56 | lj_bcdef.h |
| 56 | lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ | 57 | lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ |
| @@ -155,7 +156,7 @@ lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ | |||
| 155 | lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \ | 156 | lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \ |
| 156 | lj_vm.h | 157 | lj_vm.h |
| 157 | lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ | 158 | lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ |
| 158 | lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h | 159 | lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h lj_dispatch.h lj_bc.h |
| 159 | lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \ | 160 | lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \ |
| 160 | lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ | 161 | lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \ |
| 161 | lj_traceerr.h lj_vm.h lj_strscan.h | 162 | lj_traceerr.h lj_vm.h lj_strscan.h |
| @@ -206,13 +207,13 @@ lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ | |||
| 206 | lj_vm.h lj_vmevent.h | 207 | lj_vm.h lj_vmevent.h |
| 207 | lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ | 208 | lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \ |
| 208 | lj_ir.h lj_vm.h | 209 | lj_ir.h lj_vm.h |
| 209 | ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \ | 210 | ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_assert.c lj_obj.h \ |
| 210 | lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h \ | 211 | lj_def.h lj_arch.h lj_gc.c lj_gc.h lj_err.h lj_errmsg.h lj_buf.h \ |
| 211 | lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h \ | 212 | lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h \ |
| 212 | lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h \ | 213 | lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \ |
| 213 | lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h lj_char.c \ | 214 | lj_traceerr.h lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h \ |
| 214 | lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c lj_tab.c \ | 215 | lj_char.c lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c \ |
| 215 | lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h lj_debug.c \ | 216 | lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h lj_debug.c \ |
| 216 | lj_state.c lj_lex.h lj_alloc.h luajit.h lj_dispatch.c lj_ccallback.h \ | 217 | lj_state.c lj_lex.h lj_alloc.h luajit.h lj_dispatch.c lj_ccallback.h \ |
| 217 | lj_profile.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c \ | 218 | lj_profile.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c \ |
| 218 | lj_strfmt.c lj_strfmt_num.c lj_api.c lj_profile.c lj_lex.c lualib.h \ | 219 | lj_strfmt.c lj_strfmt_num.c lj_api.c lj_profile.c lj_lex.c lualib.h \ |
diff --git a/src/lib_io.c b/src/lib_io.c index 5e9d0d66..c7d1bb31 100644 --- a/src/lib_io.c +++ b/src/lib_io.c | |||
| @@ -101,9 +101,6 @@ static int io_file_close(lua_State *L, IOFileUD *iof) | |||
| 101 | stat = pclose(iof->fp); | 101 | stat = pclose(iof->fp); |
| 102 | #elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP | 102 | #elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP |
| 103 | stat = _pclose(iof->fp); | 103 | stat = _pclose(iof->fp); |
| 104 | #else | ||
| 105 | lua_assert(0); | ||
| 106 | return 0; | ||
| 107 | #endif | 104 | #endif |
| 108 | #if LJ_52 | 105 | #if LJ_52 |
| 109 | iof->fp = NULL; | 106 | iof->fp = NULL; |
| @@ -112,7 +109,8 @@ static int io_file_close(lua_State *L, IOFileUD *iof) | |||
| 112 | ok = (stat != -1); | 109 | ok = (stat != -1); |
| 113 | #endif | 110 | #endif |
| 114 | } else { | 111 | } else { |
| 115 | lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF); | 112 | lj_assertL((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF, |
| 113 | "close of unknown FILE* type"); | ||
| 116 | setnilV(L->top++); | 114 | setnilV(L->top++); |
| 117 | lua_pushliteral(L, "cannot close standard file"); | 115 | lua_pushliteral(L, "cannot close standard file"); |
| 118 | return 2; | 116 | return 2; |
diff --git a/src/lib_jit.c b/src/lib_jit.c index acd6c293..7348ef21 100644 --- a/src/lib_jit.c +++ b/src/lib_jit.c | |||
| @@ -227,7 +227,7 @@ LJLIB_CF(jit_util_funcbc) | |||
| 227 | if (pc < pt->sizebc) { | 227 | if (pc < pt->sizebc) { |
| 228 | BCIns ins = proto_bc(pt)[pc]; | 228 | BCIns ins = proto_bc(pt)[pc]; |
| 229 | BCOp op = bc_op(ins); | 229 | BCOp op = bc_op(ins); |
| 230 | lua_assert(op < BC__MAX); | 230 | lj_assertL(op < BC__MAX, "bad bytecode op %d", op); |
| 231 | setintV(L->top, ins); | 231 | setintV(L->top, ins); |
| 232 | setintV(L->top+1, lj_bc_mode[op]); | 232 | setintV(L->top+1, lj_bc_mode[op]); |
| 233 | L->top += 2; | 233 | L->top += 2; |
| @@ -491,7 +491,7 @@ static int jitopt_param(jit_State *J, const char *str) | |||
| 491 | int i; | 491 | int i; |
| 492 | for (i = 0; i < JIT_P__MAX; i++) { | 492 | for (i = 0; i < JIT_P__MAX; i++) { |
| 493 | size_t len = *(const uint8_t *)lst; | 493 | size_t len = *(const uint8_t *)lst; |
| 494 | lua_assert(len != 0); | 494 | lj_assertJ(len != 0, "bad JIT_P_STRING"); |
| 495 | if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { | 495 | if (strncmp(str, lst+1, len) == 0 && str[len] == '=') { |
| 496 | int32_t n = 0; | 496 | int32_t n = 0; |
| 497 | const char *p = &str[len+1]; | 497 | const char *p = &str[len+1]; |
diff --git a/src/lib_string.c b/src/lib_string.c index 6b88ee9b..0d9290bc 100644 --- a/src/lib_string.c +++ b/src/lib_string.c | |||
| @@ -136,7 +136,7 @@ LJLIB_CF(string_dump) | |||
| 136 | /* ------------------------------------------------------------------------ */ | 136 | /* ------------------------------------------------------------------------ */ |
| 137 | 137 | ||
| 138 | /* macro to `unsign' a character */ | 138 | /* macro to `unsign' a character */ |
| 139 | #define uchar(c) ((unsigned char)(c)) | 139 | #define uchar(c) ((unsigned char)(c)) |
| 140 | 140 | ||
| 141 | #define CAP_UNFINISHED (-1) | 141 | #define CAP_UNFINISHED (-1) |
| 142 | #define CAP_POSITION (-2) | 142 | #define CAP_POSITION (-2) |
| @@ -645,7 +645,7 @@ static GCstr *string_fmt_tostring(lua_State *L, int arg, int retry) | |||
| 645 | { | 645 | { |
| 646 | TValue *o = L->base+arg-1; | 646 | TValue *o = L->base+arg-1; |
| 647 | cTValue *mo; | 647 | cTValue *mo; |
| 648 | lua_assert(o < L->top); /* Caller already checks for existence. */ | 648 | lj_assertL(o < L->top, "bad usage"); /* Caller already checks for existence. */ |
| 649 | if (LJ_LIKELY(tvisstr(o))) | 649 | if (LJ_LIKELY(tvisstr(o))) |
| 650 | return strV(o); | 650 | return strV(o); |
| 651 | if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { | 651 | if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { |
| @@ -717,7 +717,7 @@ again: | |||
| 717 | lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1)); | 717 | lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1)); |
| 718 | break; | 718 | break; |
| 719 | default: | 719 | default: |
| 720 | lua_assert(0); | 720 | lj_assertL(0, "bad string format type"); |
| 721 | break; | 721 | break; |
| 722 | } | 722 | } |
| 723 | } | 723 | } |
diff --git a/src/lj_api.c b/src/lj_api.c index 24ae6611..f1cfebbc 100644 --- a/src/lj_api.c +++ b/src/lj_api.c | |||
| @@ -28,8 +28,8 @@ | |||
| 28 | 28 | ||
| 29 | /* -- Common helper functions --------------------------------------------- */ | 29 | /* -- Common helper functions --------------------------------------------- */ |
| 30 | 30 | ||
| 31 | #define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) | 31 | #define lj_checkapi_slot(idx) \ |
| 32 | #define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L)) | 32 | lj_checkapi((idx) <= (L->top - L->base), "stack slot %d out of range", (idx)) |
| 33 | 33 | ||
| 34 | static TValue *index2adr(lua_State *L, int idx) | 34 | static TValue *index2adr(lua_State *L, int idx) |
| 35 | { | 35 | { |
| @@ -37,7 +37,8 @@ static TValue *index2adr(lua_State *L, int idx) | |||
| 37 | TValue *o = L->base + (idx - 1); | 37 | TValue *o = L->base + (idx - 1); |
| 38 | return o < L->top ? o : niltv(L); | 38 | return o < L->top ? o : niltv(L); |
| 39 | } else if (idx > LUA_REGISTRYINDEX) { | 39 | } else if (idx > LUA_REGISTRYINDEX) { |
| 40 | api_check(L, idx != 0 && -idx <= L->top - L->base); | 40 | lj_checkapi(idx != 0 && -idx <= L->top - L->base, |
| 41 | "bad stack slot %d", idx); | ||
| 41 | return L->top + idx; | 42 | return L->top + idx; |
| 42 | } else if (idx == LUA_GLOBALSINDEX) { | 43 | } else if (idx == LUA_GLOBALSINDEX) { |
| 43 | TValue *o = &G(L)->tmptv; | 44 | TValue *o = &G(L)->tmptv; |
| @@ -47,7 +48,8 @@ static TValue *index2adr(lua_State *L, int idx) | |||
| 47 | return registry(L); | 48 | return registry(L); |
| 48 | } else { | 49 | } else { |
| 49 | GCfunc *fn = curr_func(L); | 50 | GCfunc *fn = curr_func(L); |
| 50 | api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn)); | 51 | lj_checkapi(fn->c.gct == ~LJ_TFUNC && !isluafunc(fn), |
| 52 | "calling frame is not a C function"); | ||
| 51 | if (idx == LUA_ENVIRONINDEX) { | 53 | if (idx == LUA_ENVIRONINDEX) { |
| 52 | TValue *o = &G(L)->tmptv; | 54 | TValue *o = &G(L)->tmptv; |
| 53 | settabV(L, o, tabref(fn->c.env)); | 55 | settabV(L, o, tabref(fn->c.env)); |
| @@ -59,13 +61,27 @@ static TValue *index2adr(lua_State *L, int idx) | |||
| 59 | } | 61 | } |
| 60 | } | 62 | } |
| 61 | 63 | ||
| 62 | static TValue *stkindex2adr(lua_State *L, int idx) | 64 | static LJ_AINLINE TValue *index2adr_check(lua_State *L, int idx) |
| 65 | { | ||
| 66 | TValue *o = index2adr(L, idx); | ||
| 67 | lj_checkapi(o != niltv(L), "invalid stack slot %d", idx); | ||
| 68 | return o; | ||
| 69 | } | ||
| 70 | |||
| 71 | static TValue *index2adr_stack(lua_State *L, int idx) | ||
| 63 | { | 72 | { |
| 64 | if (idx > 0) { | 73 | if (idx > 0) { |
| 65 | TValue *o = L->base + (idx - 1); | 74 | TValue *o = L->base + (idx - 1); |
| 75 | if (o < L->top) { | ||
| 76 | return o; | ||
| 77 | } else { | ||
| 78 | lj_checkapi(0, "invalid stack slot %d", idx); | ||
| 79 | return niltv(L); | ||
| 80 | } | ||
| 66 | return o < L->top ? o : niltv(L); | 81 | return o < L->top ? o : niltv(L); |
| 67 | } else { | 82 | } else { |
| 68 | api_check(L, idx != 0 && -idx <= L->top - L->base); | 83 | lj_checkapi(idx != 0 && -idx <= L->top - L->base, |
| 84 | "invalid stack slot %d", idx); | ||
| 69 | return L->top + idx; | 85 | return L->top + idx; |
| 70 | } | 86 | } |
| 71 | } | 87 | } |
| @@ -99,17 +115,17 @@ LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg) | |||
| 99 | lj_err_callerv(L, LJ_ERR_STKOVM, msg); | 115 | lj_err_callerv(L, LJ_ERR_STKOVM, msg); |
| 100 | } | 116 | } |
| 101 | 117 | ||
| 102 | LUA_API void lua_xmove(lua_State *from, lua_State *to, int n) | 118 | LUA_API void lua_xmove(lua_State *L, lua_State *to, int n) |
| 103 | { | 119 | { |
| 104 | TValue *f, *t; | 120 | TValue *f, *t; |
| 105 | if (from == to) return; | 121 | if (L == to) return; |
| 106 | api_checknelems(from, n); | 122 | lj_checkapi_slot(n); |
| 107 | api_check(from, G(from) == G(to)); | 123 | lj_checkapi(G(L) == G(to), "move across global states"); |
| 108 | lj_state_checkstack(to, (MSize)n); | 124 | lj_state_checkstack(to, (MSize)n); |
| 109 | f = from->top; | 125 | f = L->top; |
| 110 | t = to->top = to->top + n; | 126 | t = to->top = to->top + n; |
| 111 | while (--n >= 0) copyTV(to, --t, --f); | 127 | while (--n >= 0) copyTV(to, --t, --f); |
| 112 | from->top = f; | 128 | L->top = f; |
| 113 | } | 129 | } |
| 114 | 130 | ||
| 115 | LUA_API const lua_Number *lua_version(lua_State *L) | 131 | LUA_API const lua_Number *lua_version(lua_State *L) |
| @@ -129,7 +145,7 @@ LUA_API int lua_gettop(lua_State *L) | |||
| 129 | LUA_API void lua_settop(lua_State *L, int idx) | 145 | LUA_API void lua_settop(lua_State *L, int idx) |
| 130 | { | 146 | { |
| 131 | if (idx >= 0) { | 147 | if (idx >= 0) { |
| 132 | api_check(L, idx <= tvref(L->maxstack) - L->base); | 148 | lj_checkapi(idx <= tvref(L->maxstack) - L->base, "bad stack slot %d", idx); |
| 133 | if (L->base + idx > L->top) { | 149 | if (L->base + idx > L->top) { |
| 134 | if (L->base + idx >= tvref(L->maxstack)) | 150 | if (L->base + idx >= tvref(L->maxstack)) |
| 135 | lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); | 151 | lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base)); |
| @@ -138,23 +154,21 @@ LUA_API void lua_settop(lua_State *L, int idx) | |||
| 138 | L->top = L->base + idx; | 154 | L->top = L->base + idx; |
| 139 | } | 155 | } |
| 140 | } else { | 156 | } else { |
| 141 | api_check(L, -(idx+1) <= (L->top - L->base)); | 157 | lj_checkapi(-(idx+1) <= (L->top - L->base), "bad stack slot %d", idx); |
| 142 | L->top += idx+1; /* Shrinks top (idx < 0). */ | 158 | L->top += idx+1; /* Shrinks top (idx < 0). */ |
| 143 | } | 159 | } |
| 144 | } | 160 | } |
| 145 | 161 | ||
| 146 | LUA_API void lua_remove(lua_State *L, int idx) | 162 | LUA_API void lua_remove(lua_State *L, int idx) |
| 147 | { | 163 | { |
| 148 | TValue *p = stkindex2adr(L, idx); | 164 | TValue *p = index2adr_stack(L, idx); |
| 149 | api_checkvalidindex(L, p); | ||
| 150 | while (++p < L->top) copyTV(L, p-1, p); | 165 | while (++p < L->top) copyTV(L, p-1, p); |
| 151 | L->top--; | 166 | L->top--; |
| 152 | } | 167 | } |
| 153 | 168 | ||
| 154 | LUA_API void lua_insert(lua_State *L, int idx) | 169 | LUA_API void lua_insert(lua_State *L, int idx) |
| 155 | { | 170 | { |
| 156 | TValue *q, *p = stkindex2adr(L, idx); | 171 | TValue *q, *p = index2adr_stack(L, idx); |
| 157 | api_checkvalidindex(L, p); | ||
| 158 | for (q = L->top; q > p; q--) copyTV(L, q, q-1); | 172 | for (q = L->top; q > p; q--) copyTV(L, q, q-1); |
| 159 | copyTV(L, p, L->top); | 173 | copyTV(L, p, L->top); |
| 160 | } | 174 | } |
| @@ -162,19 +176,18 @@ LUA_API void lua_insert(lua_State *L, int idx) | |||
| 162 | static void copy_slot(lua_State *L, TValue *f, int idx) | 176 | static void copy_slot(lua_State *L, TValue *f, int idx) |
| 163 | { | 177 | { |
| 164 | if (idx == LUA_GLOBALSINDEX) { | 178 | if (idx == LUA_GLOBALSINDEX) { |
| 165 | api_check(L, tvistab(f)); | 179 | lj_checkapi(tvistab(f), "stack slot %d is not a table", idx); |
| 166 | /* NOBARRIER: A thread (i.e. L) is never black. */ | 180 | /* NOBARRIER: A thread (i.e. L) is never black. */ |
| 167 | setgcref(L->env, obj2gco(tabV(f))); | 181 | setgcref(L->env, obj2gco(tabV(f))); |
| 168 | } else if (idx == LUA_ENVIRONINDEX) { | 182 | } else if (idx == LUA_ENVIRONINDEX) { |
| 169 | GCfunc *fn = curr_func(L); | 183 | GCfunc *fn = curr_func(L); |
| 170 | if (fn->c.gct != ~LJ_TFUNC) | 184 | if (fn->c.gct != ~LJ_TFUNC) |
| 171 | lj_err_msg(L, LJ_ERR_NOENV); | 185 | lj_err_msg(L, LJ_ERR_NOENV); |
| 172 | api_check(L, tvistab(f)); | 186 | lj_checkapi(tvistab(f), "stack slot %d is not a table", idx); |
| 173 | setgcref(fn->c.env, obj2gco(tabV(f))); | 187 | setgcref(fn->c.env, obj2gco(tabV(f))); |
| 174 | lj_gc_barrier(L, fn, f); | 188 | lj_gc_barrier(L, fn, f); |
| 175 | } else { | 189 | } else { |
| 176 | TValue *o = index2adr(L, idx); | 190 | TValue *o = index2adr_check(L, idx); |
| 177 | api_checkvalidindex(L, o); | ||
| 178 | copyTV(L, o, f); | 191 | copyTV(L, o, f); |
| 179 | if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ | 192 | if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */ |
| 180 | lj_gc_barrier(L, curr_func(L), f); | 193 | lj_gc_barrier(L, curr_func(L), f); |
| @@ -183,7 +196,7 @@ static void copy_slot(lua_State *L, TValue *f, int idx) | |||
| 183 | 196 | ||
| 184 | LUA_API void lua_replace(lua_State *L, int idx) | 197 | LUA_API void lua_replace(lua_State *L, int idx) |
| 185 | { | 198 | { |
| 186 | api_checknelems(L, 1); | 199 | lj_checkapi_slot(1); |
| 187 | copy_slot(L, L->top - 1, idx); | 200 | copy_slot(L, L->top - 1, idx); |
| 188 | L->top--; | 201 | L->top--; |
| 189 | } | 202 | } |
| @@ -219,7 +232,7 @@ LUA_API int lua_type(lua_State *L, int idx) | |||
| 219 | #else | 232 | #else |
| 220 | int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); | 233 | int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u); |
| 221 | #endif | 234 | #endif |
| 222 | lua_assert(tt != LUA_TNIL || tvisnil(o)); | 235 | lj_assertL(tt != LUA_TNIL || tvisnil(o), "bad tag conversion"); |
| 223 | return tt; | 236 | return tt; |
| 224 | } | 237 | } |
| 225 | } | 238 | } |
| @@ -677,14 +690,14 @@ LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n) | |||
| 677 | { | 690 | { |
| 678 | GCfunc *fn; | 691 | GCfunc *fn; |
| 679 | lj_gc_check(L); | 692 | lj_gc_check(L); |
| 680 | api_checknelems(L, n); | 693 | lj_checkapi_slot(n); |
| 681 | fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); | 694 | fn = lj_func_newC(L, (MSize)n, getcurrenv(L)); |
| 682 | fn->c.f = f; | 695 | fn->c.f = f; |
| 683 | L->top -= n; | 696 | L->top -= n; |
| 684 | while (n--) | 697 | while (n--) |
| 685 | copyTV(L, &fn->c.upvalue[n], L->top+n); | 698 | copyTV(L, &fn->c.upvalue[n], L->top+n); |
| 686 | setfuncV(L, L->top, fn); | 699 | setfuncV(L, L->top, fn); |
| 687 | lua_assert(iswhite(obj2gco(fn))); | 700 | lj_assertL(iswhite(obj2gco(fn)), "new GC object is not white"); |
| 688 | incr_top(L); | 701 | incr_top(L); |
| 689 | } | 702 | } |
| 690 | 703 | ||
| @@ -754,7 +767,7 @@ LUA_API void *lua_newuserdata(lua_State *L, size_t size) | |||
| 754 | 767 | ||
| 755 | LUA_API void lua_concat(lua_State *L, int n) | 768 | LUA_API void lua_concat(lua_State *L, int n) |
| 756 | { | 769 | { |
| 757 | api_checknelems(L, n); | 770 | lj_checkapi_slot(n); |
| 758 | if (n >= 2) { | 771 | if (n >= 2) { |
| 759 | n--; | 772 | n--; |
| 760 | do { | 773 | do { |
| @@ -780,9 +793,8 @@ LUA_API void lua_concat(lua_State *L, int n) | |||
| 780 | 793 | ||
| 781 | LUA_API void lua_gettable(lua_State *L, int idx) | 794 | LUA_API void lua_gettable(lua_State *L, int idx) |
| 782 | { | 795 | { |
| 783 | cTValue *v, *t = index2adr(L, idx); | 796 | cTValue *t = index2adr_check(L, idx); |
| 784 | api_checkvalidindex(L, t); | 797 | cTValue *v = lj_meta_tget(L, t, L->top-1); |
| 785 | v = lj_meta_tget(L, t, L->top-1); | ||
| 786 | if (v == NULL) { | 798 | if (v == NULL) { |
| 787 | L->top += 2; | 799 | L->top += 2; |
| 788 | lj_vm_call(L, L->top-2, 1+1); | 800 | lj_vm_call(L, L->top-2, 1+1); |
| @@ -794,9 +806,8 @@ LUA_API void lua_gettable(lua_State *L, int idx) | |||
| 794 | 806 | ||
| 795 | LUA_API void lua_getfield(lua_State *L, int idx, const char *k) | 807 | LUA_API void lua_getfield(lua_State *L, int idx, const char *k) |
| 796 | { | 808 | { |
| 797 | cTValue *v, *t = index2adr(L, idx); | 809 | cTValue *v, *t = index2adr_check(L, idx); |
| 798 | TValue key; | 810 | TValue key; |
| 799 | api_checkvalidindex(L, t); | ||
| 800 | setstrV(L, &key, lj_str_newz(L, k)); | 811 | setstrV(L, &key, lj_str_newz(L, k)); |
| 801 | v = lj_meta_tget(L, t, &key); | 812 | v = lj_meta_tget(L, t, &key); |
| 802 | if (v == NULL) { | 813 | if (v == NULL) { |
| @@ -812,14 +823,14 @@ LUA_API void lua_getfield(lua_State *L, int idx, const char *k) | |||
| 812 | LUA_API void lua_rawget(lua_State *L, int idx) | 823 | LUA_API void lua_rawget(lua_State *L, int idx) |
| 813 | { | 824 | { |
| 814 | cTValue *t = index2adr(L, idx); | 825 | cTValue *t = index2adr(L, idx); |
| 815 | api_check(L, tvistab(t)); | 826 | lj_checkapi(tvistab(t), "stack slot %d is not a table", idx); |
| 816 | copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); | 827 | copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1)); |
| 817 | } | 828 | } |
| 818 | 829 | ||
| 819 | LUA_API void lua_rawgeti(lua_State *L, int idx, int n) | 830 | LUA_API void lua_rawgeti(lua_State *L, int idx, int n) |
| 820 | { | 831 | { |
| 821 | cTValue *v, *t = index2adr(L, idx); | 832 | cTValue *v, *t = index2adr(L, idx); |
| 822 | api_check(L, tvistab(t)); | 833 | lj_checkapi(tvistab(t), "stack slot %d is not a table", idx); |
| 823 | v = lj_tab_getint(tabV(t), n); | 834 | v = lj_tab_getint(tabV(t), n); |
| 824 | if (v) { | 835 | if (v) { |
| 825 | copyTV(L, L->top, v); | 836 | copyTV(L, L->top, v); |
| @@ -861,8 +872,7 @@ LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field) | |||
| 861 | 872 | ||
| 862 | LUA_API void lua_getfenv(lua_State *L, int idx) | 873 | LUA_API void lua_getfenv(lua_State *L, int idx) |
| 863 | { | 874 | { |
| 864 | cTValue *o = index2adr(L, idx); | 875 | cTValue *o = index2adr_check(L, idx); |
| 865 | api_checkvalidindex(L, o); | ||
| 866 | if (tvisfunc(o)) { | 876 | if (tvisfunc(o)) { |
| 867 | settabV(L, L->top, tabref(funcV(o)->c.env)); | 877 | settabV(L, L->top, tabref(funcV(o)->c.env)); |
| 868 | } else if (tvisudata(o)) { | 878 | } else if (tvisudata(o)) { |
| @@ -879,7 +889,7 @@ LUA_API int lua_next(lua_State *L, int idx) | |||
| 879 | { | 889 | { |
| 880 | cTValue *t = index2adr(L, idx); | 890 | cTValue *t = index2adr(L, idx); |
| 881 | int more; | 891 | int more; |
| 882 | api_check(L, tvistab(t)); | 892 | lj_checkapi(tvistab(t), "stack slot %d is not a table", idx); |
| 883 | more = lj_tab_next(L, tabV(t), L->top-1); | 893 | more = lj_tab_next(L, tabV(t), L->top-1); |
| 884 | if (more) { | 894 | if (more) { |
| 885 | incr_top(L); /* Return new key and value slot. */ | 895 | incr_top(L); /* Return new key and value slot. */ |
| @@ -905,7 +915,7 @@ LUA_API void *lua_upvalueid(lua_State *L, int idx, int n) | |||
| 905 | { | 915 | { |
| 906 | GCfunc *fn = funcV(index2adr(L, idx)); | 916 | GCfunc *fn = funcV(index2adr(L, idx)); |
| 907 | n--; | 917 | n--; |
| 908 | api_check(L, (uint32_t)n < fn->l.nupvalues); | 918 | lj_checkapi((uint32_t)n < fn->l.nupvalues, "bad upvalue %d", n); |
| 909 | return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) : | 919 | return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) : |
| 910 | (void *)&fn->c.upvalue[n]; | 920 | (void *)&fn->c.upvalue[n]; |
| 911 | } | 921 | } |
| @@ -915,8 +925,10 @@ LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2) | |||
| 915 | GCfunc *fn1 = funcV(index2adr(L, idx1)); | 925 | GCfunc *fn1 = funcV(index2adr(L, idx1)); |
| 916 | GCfunc *fn2 = funcV(index2adr(L, idx2)); | 926 | GCfunc *fn2 = funcV(index2adr(L, idx2)); |
| 917 | n1--; n2--; | 927 | n1--; n2--; |
| 918 | api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues); | 928 | lj_checkapi(isluafunc(fn1), "stack slot %d is not a Lua function", idx1); |
| 919 | api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues); | 929 | lj_checkapi(isluafunc(fn2), "stack slot %d is not a Lua function", idx2); |
| 930 | lj_checkapi((uint32_t)n1 < fn1->l.nupvalues, "bad upvalue %d", n1+1); | ||
| 931 | lj_checkapi((uint32_t)n2 < fn2->l.nupvalues, "bad upvalue %d", n2+1); | ||
| 920 | setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]); | 932 | setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]); |
| 921 | lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1])); | 933 | lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1])); |
| 922 | } | 934 | } |
| @@ -945,9 +957,8 @@ LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname) | |||
| 945 | LUA_API void lua_settable(lua_State *L, int idx) | 957 | LUA_API void lua_settable(lua_State *L, int idx) |
| 946 | { | 958 | { |
| 947 | TValue *o; | 959 | TValue *o; |
| 948 | cTValue *t = index2adr(L, idx); | 960 | cTValue *t = index2adr_check(L, idx); |
| 949 | api_checknelems(L, 2); | 961 | lj_checkapi_slot(2); |
| 950 | api_checkvalidindex(L, t); | ||
| 951 | o = lj_meta_tset(L, t, L->top-2); | 962 | o = lj_meta_tset(L, t, L->top-2); |
| 952 | if (o) { | 963 | if (o) { |
| 953 | /* NOBARRIER: lj_meta_tset ensures the table is not black. */ | 964 | /* NOBARRIER: lj_meta_tset ensures the table is not black. */ |
| @@ -966,9 +977,8 @@ LUA_API void lua_setfield(lua_State *L, int idx, const char *k) | |||
| 966 | { | 977 | { |
| 967 | TValue *o; | 978 | TValue *o; |
| 968 | TValue key; | 979 | TValue key; |
| 969 | cTValue *t = index2adr(L, idx); | 980 | cTValue *t = index2adr_check(L, idx); |
| 970 | api_checknelems(L, 1); | 981 | lj_checkapi_slot(1); |
| 971 | api_checkvalidindex(L, t); | ||
| 972 | setstrV(L, &key, lj_str_newz(L, k)); | 982 | setstrV(L, &key, lj_str_newz(L, k)); |
| 973 | o = lj_meta_tset(L, t, &key); | 983 | o = lj_meta_tset(L, t, &key); |
| 974 | if (o) { | 984 | if (o) { |
| @@ -987,7 +997,7 @@ LUA_API void lua_rawset(lua_State *L, int idx) | |||
| 987 | { | 997 | { |
| 988 | GCtab *t = tabV(index2adr(L, idx)); | 998 | GCtab *t = tabV(index2adr(L, idx)); |
| 989 | TValue *dst, *key; | 999 | TValue *dst, *key; |
| 990 | api_checknelems(L, 2); | 1000 | lj_checkapi_slot(2); |
| 991 | key = L->top-2; | 1001 | key = L->top-2; |
| 992 | dst = lj_tab_set(L, t, key); | 1002 | dst = lj_tab_set(L, t, key); |
| 993 | copyTV(L, dst, key+1); | 1003 | copyTV(L, dst, key+1); |
| @@ -999,7 +1009,7 @@ LUA_API void lua_rawseti(lua_State *L, int idx, int n) | |||
| 999 | { | 1009 | { |
| 1000 | GCtab *t = tabV(index2adr(L, idx)); | 1010 | GCtab *t = tabV(index2adr(L, idx)); |
| 1001 | TValue *dst, *src; | 1011 | TValue *dst, *src; |
| 1002 | api_checknelems(L, 1); | 1012 | lj_checkapi_slot(1); |
| 1003 | dst = lj_tab_setint(L, t, n); | 1013 | dst = lj_tab_setint(L, t, n); |
| 1004 | src = L->top-1; | 1014 | src = L->top-1; |
| 1005 | copyTV(L, dst, src); | 1015 | copyTV(L, dst, src); |
| @@ -1011,13 +1021,12 @@ LUA_API int lua_setmetatable(lua_State *L, int idx) | |||
| 1011 | { | 1021 | { |
| 1012 | global_State *g; | 1022 | global_State *g; |
| 1013 | GCtab *mt; | 1023 | GCtab *mt; |
| 1014 | cTValue *o = index2adr(L, idx); | 1024 | cTValue *o = index2adr_check(L, idx); |
| 1015 | api_checknelems(L, 1); | 1025 | lj_checkapi_slot(1); |
| 1016 | api_checkvalidindex(L, o); | ||
| 1017 | if (tvisnil(L->top-1)) { | 1026 | if (tvisnil(L->top-1)) { |
| 1018 | mt = NULL; | 1027 | mt = NULL; |
| 1019 | } else { | 1028 | } else { |
| 1020 | api_check(L, tvistab(L->top-1)); | 1029 | lj_checkapi(tvistab(L->top-1), "top stack slot is not a table"); |
| 1021 | mt = tabV(L->top-1); | 1030 | mt = tabV(L->top-1); |
| 1022 | } | 1031 | } |
| 1023 | g = G(L); | 1032 | g = G(L); |
| @@ -1054,11 +1063,10 @@ LUALIB_API void luaL_setmetatable(lua_State *L, const char *tname) | |||
| 1054 | 1063 | ||
| 1055 | LUA_API int lua_setfenv(lua_State *L, int idx) | 1064 | LUA_API int lua_setfenv(lua_State *L, int idx) |
| 1056 | { | 1065 | { |
| 1057 | cTValue *o = index2adr(L, idx); | 1066 | cTValue *o = index2adr_check(L, idx); |
| 1058 | GCtab *t; | 1067 | GCtab *t; |
| 1059 | api_checknelems(L, 1); | 1068 | lj_checkapi_slot(1); |
| 1060 | api_checkvalidindex(L, o); | 1069 | lj_checkapi(tvistab(L->top-1), "top stack slot is not a table"); |
| 1061 | api_check(L, tvistab(L->top-1)); | ||
| 1062 | t = tabV(L->top-1); | 1070 | t = tabV(L->top-1); |
| 1063 | if (tvisfunc(o)) { | 1071 | if (tvisfunc(o)) { |
| 1064 | setgcref(funcV(o)->c.env, obj2gco(t)); | 1072 | setgcref(funcV(o)->c.env, obj2gco(t)); |
| @@ -1081,7 +1089,7 @@ LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n) | |||
| 1081 | TValue *val; | 1089 | TValue *val; |
| 1082 | GCobj *o; | 1090 | GCobj *o; |
| 1083 | const char *name; | 1091 | const char *name; |
| 1084 | api_checknelems(L, 1); | 1092 | lj_checkapi_slot(1); |
| 1085 | name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val, &o); | 1093 | name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val, &o); |
| 1086 | if (name) { | 1094 | if (name) { |
| 1087 | L->top--; | 1095 | L->top--; |
| @@ -1108,8 +1116,9 @@ static TValue *api_call_base(lua_State *L, int nargs) | |||
| 1108 | 1116 | ||
| 1109 | LUA_API void lua_call(lua_State *L, int nargs, int nresults) | 1117 | LUA_API void lua_call(lua_State *L, int nargs, int nresults) |
| 1110 | { | 1118 | { |
| 1111 | api_check(L, L->status == LUA_OK || L->status == LUA_ERRERR); | 1119 | lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR, |
| 1112 | api_checknelems(L, nargs+1); | 1120 | "thread called in wrong state %d", L->status); |
| 1121 | lj_checkapi_slot(nargs+1); | ||
| 1113 | lj_vm_call(L, api_call_base(L, nargs), nresults+1); | 1122 | lj_vm_call(L, api_call_base(L, nargs), nresults+1); |
| 1114 | } | 1123 | } |
| 1115 | 1124 | ||
| @@ -1119,13 +1128,13 @@ LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc) | |||
| 1119 | uint8_t oldh = hook_save(g); | 1128 | uint8_t oldh = hook_save(g); |
| 1120 | ptrdiff_t ef; | 1129 | ptrdiff_t ef; |
| 1121 | int status; | 1130 | int status; |
| 1122 | api_check(L, L->status == LUA_OK || L->status == LUA_ERRERR); | 1131 | lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR, |
| 1123 | api_checknelems(L, nargs+1); | 1132 | "thread called in wrong state %d", L->status); |
| 1133 | lj_checkapi_slot(nargs+1); | ||
| 1124 | if (errfunc == 0) { | 1134 | if (errfunc == 0) { |
| 1125 | ef = 0; | 1135 | ef = 0; |
| 1126 | } else { | 1136 | } else { |
| 1127 | cTValue *o = stkindex2adr(L, errfunc); | 1137 | cTValue *o = index2adr_stack(L, errfunc); |
| 1128 | api_checkvalidindex(L, o); | ||
| 1129 | ef = savestack(L, o); | 1138 | ef = savestack(L, o); |
| 1130 | } | 1139 | } |
| 1131 | status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef); | 1140 | status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef); |
| @@ -1151,7 +1160,8 @@ LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud) | |||
| 1151 | global_State *g = G(L); | 1160 | global_State *g = G(L); |
| 1152 | uint8_t oldh = hook_save(g); | 1161 | uint8_t oldh = hook_save(g); |
| 1153 | int status; | 1162 | int status; |
| 1154 | api_check(L, L->status == LUA_OK || L->status == LUA_ERRERR); | 1163 | lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR, |
| 1164 | "thread called in wrong state %d", L->status); | ||
| 1155 | status = lj_vm_cpcall(L, func, ud, cpcall); | 1165 | status = lj_vm_cpcall(L, func, ud, cpcall); |
| 1156 | if (status) hook_restore(g, oldh); | 1166 | if (status) hook_restore(g, oldh); |
| 1157 | return status; | 1167 | return status; |
diff --git a/src/lj_asm.c b/src/lj_asm.c index 90373f27..2659c8a2 100644 --- a/src/lj_asm.c +++ b/src/lj_asm.c | |||
| @@ -96,6 +96,12 @@ typedef struct ASMState { | |||
| 96 | uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */ | 96 | uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */ |
| 97 | } ASMState; | 97 | } ASMState; |
| 98 | 98 | ||
| 99 | #ifdef LUA_USE_ASSERT | ||
| 100 | #define lj_assertA(c, ...) lj_assertG_(J2G(as->J), (c), __VA_ARGS__) | ||
| 101 | #else | ||
| 102 | #define lj_assertA(c, ...) ((void)as) | ||
| 103 | #endif | ||
| 104 | |||
| 99 | #define IR(ref) (&as->ir[(ref)]) | 105 | #define IR(ref) (&as->ir[(ref)]) |
| 100 | 106 | ||
| 101 | #define ASMREF_TMP1 REF_TRUE /* Temp. register. */ | 107 | #define ASMREF_TMP1 REF_TRUE /* Temp. register. */ |
| @@ -127,9 +133,8 @@ static LJ_AINLINE void checkmclim(ASMState *as) | |||
| 127 | #ifdef LUA_USE_ASSERT | 133 | #ifdef LUA_USE_ASSERT |
| 128 | if (as->mcp + MCLIM_REDZONE < as->mcp_prev) { | 134 | if (as->mcp + MCLIM_REDZONE < as->mcp_prev) { |
| 129 | IRIns *ir = IR(as->curins+1); | 135 | IRIns *ir = IR(as->curins+1); |
| 130 | fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp, | 136 | lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp, |
| 131 | as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS); | 137 | as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS); |
| 132 | lua_assert(0); | ||
| 133 | } | 138 | } |
| 134 | #endif | 139 | #endif |
| 135 | if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as); | 140 | if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as); |
| @@ -243,7 +248,7 @@ static void ra_dprintf(ASMState *as, const char *fmt, ...) | |||
| 243 | *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; | 248 | *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q; |
| 244 | } else { | 249 | } else { |
| 245 | *p++ = '?'; | 250 | *p++ = '?'; |
| 246 | lua_assert(0); | 251 | lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt); |
| 247 | } | 252 | } |
| 248 | } else if (e[1] == 'f' || e[1] == 'i') { | 253 | } else if (e[1] == 'f' || e[1] == 'i') { |
| 249 | IRRef ref; | 254 | IRRef ref; |
| @@ -261,7 +266,7 @@ static void ra_dprintf(ASMState *as, const char *fmt, ...) | |||
| 261 | } else if (e[1] == 'x') { | 266 | } else if (e[1] == 'x') { |
| 262 | p += sprintf(p, "%08x", va_arg(argp, int32_t)); | 267 | p += sprintf(p, "%08x", va_arg(argp, int32_t)); |
| 263 | } else { | 268 | } else { |
| 264 | lua_assert(0); | 269 | lj_assertA(0, "bad debug format code"); |
| 265 | } | 270 | } |
| 266 | fmt = e+2; | 271 | fmt = e+2; |
| 267 | } | 272 | } |
| @@ -320,7 +325,7 @@ static Reg ra_rematk(ASMState *as, IRRef ref) | |||
| 320 | Reg r; | 325 | Reg r; |
| 321 | if (ra_iskref(ref)) { | 326 | if (ra_iskref(ref)) { |
| 322 | r = ra_krefreg(ref); | 327 | r = ra_krefreg(ref); |
| 323 | lua_assert(!rset_test(as->freeset, r)); | 328 | lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r); |
| 324 | ra_free(as, r); | 329 | ra_free(as, r); |
| 325 | ra_modified(as, r); | 330 | ra_modified(as, r); |
| 326 | #if LJ_64 | 331 | #if LJ_64 |
| @@ -332,7 +337,9 @@ static Reg ra_rematk(ASMState *as, IRRef ref) | |||
| 332 | } | 337 | } |
| 333 | ir = IR(ref); | 338 | ir = IR(ref); |
| 334 | r = ir->r; | 339 | r = ir->r; |
| 335 | lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); | 340 | lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref); |
| 341 | lj_assertA(!ra_hasspill(ir->s), | ||
| 342 | "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s); | ||
| 336 | ra_free(as, r); | 343 | ra_free(as, r); |
| 337 | ra_modified(as, r); | 344 | ra_modified(as, r); |
| 338 | ir->r = RID_INIT; /* Do not keep any hint. */ | 345 | ir->r = RID_INIT; /* Do not keep any hint. */ |
| @@ -346,7 +353,8 @@ static Reg ra_rematk(ASMState *as, IRRef ref) | |||
| 346 | ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ | 353 | ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */ |
| 347 | emit_getgl(as, r, jit_base); | 354 | emit_getgl(as, r, jit_base); |
| 348 | } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { | 355 | } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) { |
| 349 | lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */ | 356 | /* REF_NIL stores ASMREF_L register. */ |
| 357 | lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L"); | ||
| 350 | emit_getgl(as, r, cur_L); | 358 | emit_getgl(as, r, cur_L); |
| 351 | #if LJ_64 | 359 | #if LJ_64 |
| 352 | } else if (ir->o == IR_KINT64) { | 360 | } else if (ir->o == IR_KINT64) { |
| @@ -359,8 +367,9 @@ static Reg ra_rematk(ASMState *as, IRRef ref) | |||
| 359 | #endif | 367 | #endif |
| 360 | #endif | 368 | #endif |
| 361 | } else { | 369 | } else { |
| 362 | lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || | 370 | lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC || |
| 363 | ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); | 371 | ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL, |
| 372 | "rematk of bad IR op %d", ir->o); | ||
| 364 | emit_loadi(as, r, ir->i); | 373 | emit_loadi(as, r, ir->i); |
| 365 | } | 374 | } |
| 366 | return r; | 375 | return r; |
| @@ -370,7 +379,8 @@ static Reg ra_rematk(ASMState *as, IRRef ref) | |||
| 370 | static int32_t ra_spill(ASMState *as, IRIns *ir) | 379 | static int32_t ra_spill(ASMState *as, IRIns *ir) |
| 371 | { | 380 | { |
| 372 | int32_t slot = ir->s; | 381 | int32_t slot = ir->s; |
| 373 | lua_assert(ir >= as->ir + REF_TRUE); | 382 | lj_assertA(ir >= as->ir + REF_TRUE, |
| 383 | "spill of K%03d", REF_BIAS - (int)(ir - as->ir)); | ||
| 374 | if (!ra_hasspill(slot)) { | 384 | if (!ra_hasspill(slot)) { |
| 375 | if (irt_is64(ir->t)) { | 385 | if (irt_is64(ir->t)) { |
| 376 | slot = as->evenspill; | 386 | slot = as->evenspill; |
| @@ -395,7 +405,9 @@ static Reg ra_releasetmp(ASMState *as, IRRef ref) | |||
| 395 | { | 405 | { |
| 396 | IRIns *ir = IR(ref); | 406 | IRIns *ir = IR(ref); |
| 397 | Reg r = ir->r; | 407 | Reg r = ir->r; |
| 398 | lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s)); | 408 | lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1); |
| 409 | lj_assertA(!ra_hasspill(ir->s), | ||
| 410 | "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s); | ||
| 399 | ra_free(as, r); | 411 | ra_free(as, r); |
| 400 | ra_modified(as, r); | 412 | ra_modified(as, r); |
| 401 | ir->r = RID_INIT; | 413 | ir->r = RID_INIT; |
| @@ -411,7 +423,7 @@ static Reg ra_restore(ASMState *as, IRRef ref) | |||
| 411 | IRIns *ir = IR(ref); | 423 | IRIns *ir = IR(ref); |
| 412 | int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ | 424 | int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */ |
| 413 | Reg r = ir->r; | 425 | Reg r = ir->r; |
| 414 | lua_assert(ra_hasreg(r)); | 426 | lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS); |
| 415 | ra_sethint(ir->r, r); /* Keep hint. */ | 427 | ra_sethint(ir->r, r); /* Keep hint. */ |
| 416 | ra_free(as, r); | 428 | ra_free(as, r); |
| 417 | if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ | 429 | if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */ |
| @@ -440,14 +452,15 @@ static Reg ra_evict(ASMState *as, RegSet allow) | |||
| 440 | { | 452 | { |
| 441 | IRRef ref; | 453 | IRRef ref; |
| 442 | RegCost cost = ~(RegCost)0; | 454 | RegCost cost = ~(RegCost)0; |
| 443 | lua_assert(allow != RSET_EMPTY); | 455 | lj_assertA(allow != RSET_EMPTY, "evict from empty set"); |
| 444 | if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { | 456 | if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) { |
| 445 | GPRDEF(MINCOST) | 457 | GPRDEF(MINCOST) |
| 446 | } else { | 458 | } else { |
| 447 | FPRDEF(MINCOST) | 459 | FPRDEF(MINCOST) |
| 448 | } | 460 | } |
| 449 | ref = regcost_ref(cost); | 461 | ref = regcost_ref(cost); |
| 450 | lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins)); | 462 | lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins), |
| 463 | "evict of out-of-range IR %04d", ref - REF_BIAS); | ||
| 451 | /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ | 464 | /* Preferably pick any weak ref instead of a non-weak, non-const ref. */ |
| 452 | if (!irref_isk(ref) && (as->weakset & allow)) { | 465 | if (!irref_isk(ref) && (as->weakset & allow)) { |
| 453 | IRIns *ir = IR(ref); | 466 | IRIns *ir = IR(ref); |
| @@ -605,7 +618,8 @@ static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow) | |||
| 605 | IRIns *ir = IR(ref); | 618 | IRIns *ir = IR(ref); |
| 606 | RegSet pick = as->freeset & allow; | 619 | RegSet pick = as->freeset & allow; |
| 607 | Reg r; | 620 | Reg r; |
| 608 | lua_assert(ra_noreg(ir->r)); | 621 | lj_assertA(ra_noreg(ir->r), |
| 622 | "IR %04d already has reg %d", ref - REF_BIAS, ir->r); | ||
| 609 | if (pick) { | 623 | if (pick) { |
| 610 | /* First check register hint from propagation or PHI. */ | 624 | /* First check register hint from propagation or PHI. */ |
| 611 | if (ra_hashint(ir->r)) { | 625 | if (ra_hashint(ir->r)) { |
| @@ -669,8 +683,10 @@ static void ra_rename(ASMState *as, Reg down, Reg up) | |||
| 669 | IRIns *ir = IR(ref); | 683 | IRIns *ir = IR(ref); |
| 670 | ir->r = (uint8_t)up; | 684 | ir->r = (uint8_t)up; |
| 671 | as->cost[down] = 0; | 685 | as->cost[down] = 0; |
| 672 | lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR)); | 686 | lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR), |
| 673 | lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up)); | 687 | "rename between GPR/FPR %d and %d", down, up); |
| 688 | lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down); | ||
| 689 | lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up); | ||
| 674 | ra_free(as, down); /* 'down' is free ... */ | 690 | ra_free(as, down); /* 'down' is free ... */ |
| 675 | ra_modified(as, down); | 691 | ra_modified(as, down); |
| 676 | rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ | 692 | rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */ |
| @@ -711,7 +727,7 @@ static void ra_destreg(ASMState *as, IRIns *ir, Reg r) | |||
| 711 | { | 727 | { |
| 712 | Reg dest = ra_dest(as, ir, RID2RSET(r)); | 728 | Reg dest = ra_dest(as, ir, RID2RSET(r)); |
| 713 | if (dest != r) { | 729 | if (dest != r) { |
| 714 | lua_assert(rset_test(as->freeset, r)); | 730 | lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r); |
| 715 | ra_modified(as, r); | 731 | ra_modified(as, r); |
| 716 | emit_movrr(as, ir, dest, r); | 732 | emit_movrr(as, ir, dest, r); |
| 717 | } | 733 | } |
| @@ -744,8 +760,9 @@ static void ra_left(ASMState *as, Reg dest, IRRef lref) | |||
| 744 | #endif | 760 | #endif |
| 745 | #endif | 761 | #endif |
| 746 | } else if (ir->o != IR_KPRI) { | 762 | } else if (ir->o != IR_KPRI) { |
| 747 | lua_assert(ir->o == IR_KINT || ir->o == IR_KGC || | 763 | lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC || |
| 748 | ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL); | 764 | ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL, |
| 765 | "K%03d has bad IR op %d", REF_BIAS - lref, ir->o); | ||
| 749 | emit_loadi(as, dest, ir->i); | 766 | emit_loadi(as, dest, ir->i); |
| 750 | return; | 767 | return; |
| 751 | } | 768 | } |
| @@ -887,11 +904,14 @@ static void asm_snap_alloc1(ASMState *as, IRRef ref) | |||
| 887 | #endif | 904 | #endif |
| 888 | { /* Allocate stored values for TNEW, TDUP and CNEW. */ | 905 | { /* Allocate stored values for TNEW, TDUP and CNEW. */ |
| 889 | IRIns *irs; | 906 | IRIns *irs; |
| 890 | lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW); | 907 | lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW, |
| 908 | "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o); | ||
| 891 | for (irs = IR(as->snapref-1); irs > ir; irs--) | 909 | for (irs = IR(as->snapref-1); irs > ir; irs--) |
| 892 | if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) { | 910 | if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) { |
| 893 | lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || | 911 | lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE || |
| 894 | irs->o == IR_FSTORE || irs->o == IR_XSTORE); | 912 | irs->o == IR_FSTORE || irs->o == IR_XSTORE, |
| 913 | "sunk store IR %04d has bad op %d", | ||
| 914 | (int)(irs - as->ir) - REF_BIAS, irs->o); | ||
| 895 | asm_snap_alloc1(as, irs->op2); | 915 | asm_snap_alloc1(as, irs->op2); |
| 896 | if (LJ_32 && (irs+1)->o == IR_HIOP) | 916 | if (LJ_32 && (irs+1)->o == IR_HIOP) |
| 897 | asm_snap_alloc1(as, (irs+1)->op2); | 917 | asm_snap_alloc1(as, (irs+1)->op2); |
| @@ -938,7 +958,9 @@ static void asm_snap_alloc(ASMState *as) | |||
| 938 | if (!irref_isk(ref)) { | 958 | if (!irref_isk(ref)) { |
| 939 | asm_snap_alloc1(as, ref); | 959 | asm_snap_alloc1(as, ref); |
| 940 | if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { | 960 | if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) { |
| 941 | lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP); | 961 | lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP, |
| 962 | "snap %d[%d] points to bad SOFTFP IR %04d", | ||
| 963 | as->snapno, n, ref - REF_BIAS); | ||
| 942 | asm_snap_alloc1(as, ref+1); | 964 | asm_snap_alloc1(as, ref+1); |
| 943 | } | 965 | } |
| 944 | } | 966 | } |
| @@ -1002,19 +1024,20 @@ static int32_t asm_stack_adjust(ASMState *as) | |||
| 1002 | } | 1024 | } |
| 1003 | 1025 | ||
| 1004 | /* Must match with hash*() in lj_tab.c. */ | 1026 | /* Must match with hash*() in lj_tab.c. */ |
| 1005 | static uint32_t ir_khash(IRIns *ir) | 1027 | static uint32_t ir_khash(ASMState *as, IRIns *ir) |
| 1006 | { | 1028 | { |
| 1007 | uint32_t lo, hi; | 1029 | uint32_t lo, hi; |
| 1030 | UNUSED(as); | ||
| 1008 | if (irt_isstr(ir->t)) { | 1031 | if (irt_isstr(ir->t)) { |
| 1009 | return ir_kstr(ir)->hash; | 1032 | return ir_kstr(ir)->hash; |
| 1010 | } else if (irt_isnum(ir->t)) { | 1033 | } else if (irt_isnum(ir->t)) { |
| 1011 | lo = ir_knum(ir)->u32.lo; | 1034 | lo = ir_knum(ir)->u32.lo; |
| 1012 | hi = ir_knum(ir)->u32.hi << 1; | 1035 | hi = ir_knum(ir)->u32.hi << 1; |
| 1013 | } else if (irt_ispri(ir->t)) { | 1036 | } else if (irt_ispri(ir->t)) { |
| 1014 | lua_assert(!irt_isnil(ir->t)); | 1037 | lj_assertA(!irt_isnil(ir->t), "hash of nil key"); |
| 1015 | return irt_type(ir->t)-IRT_FALSE; | 1038 | return irt_type(ir->t)-IRT_FALSE; |
| 1016 | } else { | 1039 | } else { |
| 1017 | lua_assert(irt_isgcv(ir->t)); | 1040 | lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t)); |
| 1018 | lo = u32ptr(ir_kgc(ir)); | 1041 | lo = u32ptr(ir_kgc(ir)); |
| 1019 | #if LJ_GC64 | 1042 | #if LJ_GC64 |
| 1020 | hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15); | 1043 | hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15); |
| @@ -1122,7 +1145,8 @@ static void asm_bufput(ASMState *as, IRIns *ir) | |||
| 1122 | args[0] = ir->op1; /* SBuf * */ | 1145 | args[0] = ir->op1; /* SBuf * */ |
| 1123 | args[1] = ir->op2; /* GCstr * */ | 1146 | args[1] = ir->op2; /* GCstr * */ |
| 1124 | irs = IR(ir->op2); | 1147 | irs = IR(ir->op2); |
| 1125 | lua_assert(irt_isstr(irs->t)); | 1148 | lj_assertA(irt_isstr(irs->t), |
| 1149 | "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS); | ||
| 1126 | if (irs->o == IR_KGC) { | 1150 | if (irs->o == IR_KGC) { |
| 1127 | GCstr *s = ir_kstr(irs); | 1151 | GCstr *s = ir_kstr(irs); |
| 1128 | if (s->len == 1) { /* Optimize put of single-char string constant. */ | 1152 | if (s->len == 1) { /* Optimize put of single-char string constant. */ |
| @@ -1136,7 +1160,8 @@ static void asm_bufput(ASMState *as, IRIns *ir) | |||
| 1136 | args[1] = ASMREF_TMP1; /* TValue * */ | 1160 | args[1] = ASMREF_TMP1; /* TValue * */ |
| 1137 | ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum]; | 1161 | ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum]; |
| 1138 | } else { | 1162 | } else { |
| 1139 | lua_assert(irt_isinteger(IR(irs->op1)->t)); | 1163 | lj_assertA(irt_isinteger(IR(irs->op1)->t), |
| 1164 | "TOSTR of non-numeric IR %04d", irs->op1); | ||
| 1140 | args[1] = irs->op1; /* int */ | 1165 | args[1] = irs->op1; /* int */ |
| 1141 | if (irs->op2 == IRTOSTR_INT) | 1166 | if (irs->op2 == IRTOSTR_INT) |
| 1142 | ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint]; | 1167 | ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint]; |
| @@ -1201,7 +1226,8 @@ static void asm_conv64(ASMState *as, IRIns *ir) | |||
| 1201 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); | 1226 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); |
| 1202 | IRCallID id; | 1227 | IRCallID id; |
| 1203 | IRRef args[2]; | 1228 | IRRef args[2]; |
| 1204 | lua_assert((ir-1)->o == IR_CONV && ir->o == IR_HIOP); | 1229 | lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP, |
| 1230 | "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS); | ||
| 1205 | args[LJ_BE] = (ir-1)->op1; | 1231 | args[LJ_BE] = (ir-1)->op1; |
| 1206 | args[LJ_LE] = ir->op1; | 1232 | args[LJ_LE] = ir->op1; |
| 1207 | if (st == IRT_NUM || st == IRT_FLOAT) { | 1233 | if (st == IRT_NUM || st == IRT_FLOAT) { |
| @@ -1256,15 +1282,16 @@ static void asm_collectargs(ASMState *as, IRIns *ir, | |||
| 1256 | const CCallInfo *ci, IRRef *args) | 1282 | const CCallInfo *ci, IRRef *args) |
| 1257 | { | 1283 | { |
| 1258 | uint32_t n = CCI_XNARGS(ci); | 1284 | uint32_t n = CCI_XNARGS(ci); |
| 1259 | lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */ | 1285 | /* Account for split args. */ |
| 1286 | lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n); | ||
| 1260 | if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; } | 1287 | if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; } |
| 1261 | while (n-- > 1) { | 1288 | while (n-- > 1) { |
| 1262 | ir = IR(ir->op1); | 1289 | ir = IR(ir->op1); |
| 1263 | lua_assert(ir->o == IR_CARG); | 1290 | lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree"); |
| 1264 | args[n] = ir->op2 == REF_NIL ? 0 : ir->op2; | 1291 | args[n] = ir->op2 == REF_NIL ? 0 : ir->op2; |
| 1265 | } | 1292 | } |
| 1266 | args[0] = ir->op1 == REF_NIL ? 0 : ir->op1; | 1293 | args[0] = ir->op1 == REF_NIL ? 0 : ir->op1; |
| 1267 | lua_assert(IR(ir->op1)->o != IR_CARG); | 1294 | lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree"); |
| 1268 | } | 1295 | } |
| 1269 | 1296 | ||
| 1270 | /* Reconstruct CCallInfo flags for CALLX*. */ | 1297 | /* Reconstruct CCallInfo flags for CALLX*. */ |
| @@ -1648,7 +1675,10 @@ static void asm_ir(ASMState *as, IRIns *ir) | |||
| 1648 | switch ((IROp)ir->o) { | 1675 | switch ((IROp)ir->o) { |
| 1649 | /* Miscellaneous ops. */ | 1676 | /* Miscellaneous ops. */ |
| 1650 | case IR_LOOP: asm_loop(as); break; | 1677 | case IR_LOOP: asm_loop(as); break; |
| 1651 | case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; | 1678 | case IR_NOP: case IR_XBAR: |
| 1679 | lj_assertA(!ra_used(ir), | ||
| 1680 | "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS); | ||
| 1681 | break; | ||
| 1652 | case IR_USE: | 1682 | case IR_USE: |
| 1653 | ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; | 1683 | ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; |
| 1654 | case IR_PHI: asm_phi(as, ir); break; | 1684 | case IR_PHI: asm_phi(as, ir); break; |
| @@ -1687,7 +1717,9 @@ static void asm_ir(ASMState *as, IRIns *ir) | |||
| 1687 | #if LJ_SOFTFP32 | 1717 | #if LJ_SOFTFP32 |
| 1688 | case IR_DIV: case IR_POW: case IR_ABS: | 1718 | case IR_DIV: case IR_POW: case IR_ABS: |
| 1689 | case IR_LDEXP: case IR_FPMATH: case IR_TOBIT: | 1719 | case IR_LDEXP: case IR_FPMATH: case IR_TOBIT: |
| 1690 | lua_assert(0); /* Unused for LJ_SOFTFP32. */ | 1720 | /* Unused for LJ_SOFTFP32. */ |
| 1721 | lj_assertA(0, "IR %04d with unused op %d", | ||
| 1722 | (int)(ir - as->ir) - REF_BIAS, ir->o); | ||
| 1691 | break; | 1723 | break; |
| 1692 | #else | 1724 | #else |
| 1693 | case IR_DIV: asm_div(as, ir); break; | 1725 | case IR_DIV: asm_div(as, ir); break; |
| @@ -1736,7 +1768,8 @@ static void asm_ir(ASMState *as, IRIns *ir) | |||
| 1736 | #if LJ_HASFFI | 1768 | #if LJ_HASFFI |
| 1737 | asm_cnew(as, ir); | 1769 | asm_cnew(as, ir); |
| 1738 | #else | 1770 | #else |
| 1739 | lua_assert(0); | 1771 | lj_assertA(0, "IR %04d with unused op %d", |
| 1772 | (int)(ir - as->ir) - REF_BIAS, ir->o); | ||
| 1740 | #endif | 1773 | #endif |
| 1741 | break; | 1774 | break; |
| 1742 | 1775 | ||
| @@ -1814,8 +1847,10 @@ static void asm_head_side(ASMState *as) | |||
| 1814 | for (i = as->stopins; i > REF_BASE; i--) { | 1847 | for (i = as->stopins; i > REF_BASE; i--) { |
| 1815 | IRIns *ir = IR(i); | 1848 | IRIns *ir = IR(i); |
| 1816 | RegSP rs; | 1849 | RegSP rs; |
| 1817 | lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || | 1850 | lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) || |
| 1818 | (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL); | 1851 | (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL, |
| 1852 | "IR %04d has bad parent op %d", | ||
| 1853 | (int)(ir - as->ir) - REF_BIAS, ir->o); | ||
| 1819 | rs = as->parentmap[i - REF_FIRST]; | 1854 | rs = as->parentmap[i - REF_FIRST]; |
| 1820 | if (ra_hasreg(ir->r)) { | 1855 | if (ra_hasreg(ir->r)) { |
| 1821 | rset_clear(allow, ir->r); | 1856 | rset_clear(allow, ir->r); |
| @@ -2074,7 +2109,7 @@ static void asm_setup_regsp(ASMState *as) | |||
| 2074 | ir = IR(REF_FIRST); | 2109 | ir = IR(REF_FIRST); |
| 2075 | if (as->parent) { | 2110 | if (as->parent) { |
| 2076 | uint16_t *p; | 2111 | uint16_t *p; |
| 2077 | lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir); | 2112 | lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir); |
| 2078 | if (lastir - ir > LJ_MAX_JSLOTS) | 2113 | if (lastir - ir > LJ_MAX_JSLOTS) |
| 2079 | lj_trace_err(as->J, LJ_TRERR_NYICOAL); | 2114 | lj_trace_err(as->J, LJ_TRERR_NYICOAL); |
| 2080 | as->stopins = (IRRef)((lastir-1) - as->ir); | 2115 | as->stopins = (IRRef)((lastir-1) - as->ir); |
| @@ -2378,7 +2413,10 @@ void lj_asm_trace(jit_State *J, GCtrace *T) | |||
| 2378 | /* Assemble a trace in linear backwards order. */ | 2413 | /* Assemble a trace in linear backwards order. */ |
| 2379 | for (as->curins--; as->curins > as->stopins; as->curins--) { | 2414 | for (as->curins--; as->curins > as->stopins; as->curins--) { |
| 2380 | IRIns *ir = IR(as->curins); | 2415 | IRIns *ir = IR(as->curins); |
| 2381 | lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */ | 2416 | /* 64 bit types handled by SPLIT for 32 bit archs. */ |
| 2417 | lj_assertA(!(LJ_32 && irt_isint64(ir->t)), | ||
| 2418 | "IR %04d has unsplit 64 bit type", | ||
| 2419 | (int)(ir - as->ir) - REF_BIAS); | ||
| 2382 | if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) | 2420 | if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE)) |
| 2383 | continue; /* Dead-code elimination can be soooo easy. */ | 2421 | continue; /* Dead-code elimination can be soooo easy. */ |
| 2384 | if (irt_isguard(ir->t)) | 2422 | if (irt_isguard(ir->t)) |
| @@ -2408,7 +2446,7 @@ void lj_asm_trace(jit_State *J, GCtrace *T) | |||
| 2408 | asm_phi_fixup(as); | 2446 | asm_phi_fixup(as); |
| 2409 | 2447 | ||
| 2410 | if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */ | 2448 | if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */ |
| 2411 | lua_assert(J->curfinal->nk == T->nk); | 2449 | lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth"); |
| 2412 | memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins, | 2450 | memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins, |
| 2413 | (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */ | 2451 | (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */ |
| 2414 | T->nins = J->curfinal->nins; | 2452 | T->nins = J->curfinal->nins; |
diff --git a/src/lj_asm_arm.h b/src/lj_asm_arm.h index f922ed0f..56ce4a07 100644 --- a/src/lj_asm_arm.h +++ b/src/lj_asm_arm.h | |||
| @@ -41,7 +41,7 @@ static Reg ra_scratchpair(ASMState *as, RegSet allow) | |||
| 41 | } | 41 | } |
| 42 | } | 42 | } |
| 43 | } | 43 | } |
| 44 | lua_assert(rset_test(RSET_GPREVEN, r)); | 44 | lj_assertA(rset_test(RSET_GPREVEN, r), "odd reg %d", r); |
| 45 | ra_modified(as, r); | 45 | ra_modified(as, r); |
| 46 | ra_modified(as, r+1); | 46 | ra_modified(as, r+1); |
| 47 | RA_DBGX((as, "scratchpair $r $r", r, r+1)); | 47 | RA_DBGX((as, "scratchpair $r $r", r, r+1)); |
| @@ -269,7 +269,7 @@ static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref, | |||
| 269 | return; | 269 | return; |
| 270 | } | 270 | } |
| 271 | } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) { | 271 | } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) { |
| 272 | lua_assert(ofs == 0); | 272 | lj_assertA(ofs == 0, "bad usage"); |
| 273 | ofs = (int32_t)sizeof(GCstr); | 273 | ofs = (int32_t)sizeof(GCstr); |
| 274 | if (irref_isk(ir->op2)) { | 274 | if (irref_isk(ir->op2)) { |
| 275 | ofs += IR(ir->op2)->i; | 275 | ofs += IR(ir->op2)->i; |
| @@ -389,9 +389,11 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 389 | as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); | 389 | as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); |
| 390 | if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u; | 390 | if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u; |
| 391 | if (gpr <= REGARG_LASTGPR) { | 391 | if (gpr <= REGARG_LASTGPR) { |
| 392 | lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ | 392 | lj_assertA(rset_test(as->freeset, gpr), |
| 393 | "reg %d not free", gpr); /* Must have been evicted. */ | ||
| 393 | if (irt_isnum(ir->t)) { | 394 | if (irt_isnum(ir->t)) { |
| 394 | lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */ | 395 | lj_assertA(rset_test(as->freeset, gpr+1), |
| 396 | "reg %d not free", gpr+1); /* Ditto. */ | ||
| 395 | emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15)); | 397 | emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15)); |
| 396 | gpr += 2; | 398 | gpr += 2; |
| 397 | } else { | 399 | } else { |
| @@ -408,7 +410,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 408 | #endif | 410 | #endif |
| 409 | { | 411 | { |
| 410 | if (gpr <= REGARG_LASTGPR) { | 412 | if (gpr <= REGARG_LASTGPR) { |
| 411 | lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ | 413 | lj_assertA(rset_test(as->freeset, gpr), |
| 414 | "reg %d not free", gpr); /* Must have been evicted. */ | ||
| 412 | if (ref) ra_leftov(as, gpr, ref); | 415 | if (ref) ra_leftov(as, gpr, ref); |
| 413 | gpr++; | 416 | gpr++; |
| 414 | } else { | 417 | } else { |
| @@ -433,7 +436,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | |||
| 433 | rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ | 436 | rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ |
| 434 | ra_evictset(as, drop); /* Evictions must be performed first. */ | 437 | ra_evictset(as, drop); /* Evictions must be performed first. */ |
| 435 | if (ra_used(ir)) { | 438 | if (ra_used(ir)) { |
| 436 | lua_assert(!irt_ispri(ir->t)); | 439 | lj_assertA(!irt_ispri(ir->t), "PRI dest"); |
| 437 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { | 440 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { |
| 438 | if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) { | 441 | if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) { |
| 439 | Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); | 442 | Reg dest = (ra_dest(as, ir, RSET_FPR) & 15); |
| @@ -530,13 +533,17 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 530 | #endif | 533 | #endif |
| 531 | IRRef lref = ir->op1; | 534 | IRRef lref = ir->op1; |
| 532 | /* 64 bit integer conversions are handled by SPLIT. */ | 535 | /* 64 bit integer conversions are handled by SPLIT. */ |
| 533 | lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64)); | 536 | lj_assertA(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64), |
| 537 | "IR %04d has unsplit 64 bit type", | ||
| 538 | (int)(ir - as->ir) - REF_BIAS); | ||
| 534 | #if LJ_SOFTFP | 539 | #if LJ_SOFTFP |
| 535 | /* FP conversions are handled by SPLIT. */ | 540 | /* FP conversions are handled by SPLIT. */ |
| 536 | lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); | 541 | lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT), |
| 542 | "IR %04d has FP type", | ||
| 543 | (int)(ir - as->ir) - REF_BIAS); | ||
| 537 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ | 544 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ |
| 538 | #else | 545 | #else |
| 539 | lua_assert(irt_type(ir->t) != st); | 546 | lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV"); |
| 540 | if (irt_isfp(ir->t)) { | 547 | if (irt_isfp(ir->t)) { |
| 541 | Reg dest = ra_dest(as, ir, RSET_FPR); | 548 | Reg dest = ra_dest(as, ir, RSET_FPR); |
| 542 | if (stfp) { /* FP to FP conversion. */ | 549 | if (stfp) { /* FP to FP conversion. */ |
| @@ -553,7 +560,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 553 | } else if (stfp) { /* FP to integer conversion. */ | 560 | } else if (stfp) { /* FP to integer conversion. */ |
| 554 | if (irt_isguard(ir->t)) { | 561 | if (irt_isguard(ir->t)) { |
| 555 | /* Checked conversions are only supported from number to int. */ | 562 | /* Checked conversions are only supported from number to int. */ |
| 556 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 563 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 564 | "bad type for checked CONV"); | ||
| 557 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | 565 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); |
| 558 | } else { | 566 | } else { |
| 559 | Reg left = ra_alloc1(as, lref, RSET_FPR); | 567 | Reg left = ra_alloc1(as, lref, RSET_FPR); |
| @@ -572,7 +580,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 572 | Reg dest = ra_dest(as, ir, RSET_GPR); | 580 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 573 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ | 581 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ |
| 574 | Reg left = ra_alloc1(as, lref, RSET_GPR); | 582 | Reg left = ra_alloc1(as, lref, RSET_GPR); |
| 575 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | 583 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT"); |
| 576 | if ((as->flags & JIT_F_ARMV6)) { | 584 | if ((as->flags & JIT_F_ARMV6)) { |
| 577 | ARMIns ai = st == IRT_I8 ? ARMI_SXTB : | 585 | ARMIns ai = st == IRT_I8 ? ARMI_SXTB : |
| 578 | st == IRT_U8 ? ARMI_UXTB : | 586 | st == IRT_U8 ? ARMI_UXTB : |
| @@ -667,7 +675,7 @@ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) | |||
| 667 | ra_allockreg(as, i32ptr(ir_knum(ir)), dest); | 675 | ra_allockreg(as, i32ptr(ir_knum(ir)), dest); |
| 668 | } else { | 676 | } else { |
| 669 | #if LJ_SOFTFP | 677 | #if LJ_SOFTFP |
| 670 | lua_assert(0); | 678 | lj_assertA(0, "unsplit FP op"); |
| 671 | #else | 679 | #else |
| 672 | /* Otherwise force a spill and use the spill slot. */ | 680 | /* Otherwise force a spill and use the spill slot. */ |
| 673 | emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR); | 681 | emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR); |
| @@ -811,7 +819,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 811 | *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); | 819 | *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu); |
| 812 | 820 | ||
| 813 | /* Load main position relative to tab->node into dest. */ | 821 | /* Load main position relative to tab->node into dest. */ |
| 814 | khash = irref_isk(refkey) ? ir_khash(irkey) : 1; | 822 | khash = irref_isk(refkey) ? ir_khash(as, irkey) : 1; |
| 815 | if (khash == 0) { | 823 | if (khash == 0) { |
| 816 | emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); | 824 | emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node)); |
| 817 | } else { | 825 | } else { |
| @@ -867,7 +875,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 867 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); | 875 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); |
| 868 | Reg key = RID_NONE, type = RID_TMP, idx = node; | 876 | Reg key = RID_NONE, type = RID_TMP, idx = node; |
| 869 | RegSet allow = rset_exclude(RSET_GPR, node); | 877 | RegSet allow = rset_exclude(RSET_GPR, node); |
| 870 | lua_assert(ofs % sizeof(Node) == 0); | 878 | lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot"); |
| 871 | if (ofs > 4095) { | 879 | if (ofs > 4095) { |
| 872 | idx = dest; | 880 | idx = dest; |
| 873 | rset_clear(allow, dest); | 881 | rset_clear(allow, dest); |
| @@ -934,7 +942,7 @@ static void asm_uref(ASMState *as, IRIns *ir) | |||
| 934 | static void asm_fref(ASMState *as, IRIns *ir) | 942 | static void asm_fref(ASMState *as, IRIns *ir) |
| 935 | { | 943 | { |
| 936 | UNUSED(as); UNUSED(ir); | 944 | UNUSED(as); UNUSED(ir); |
| 937 | lua_assert(!ra_used(ir)); | 945 | lj_assertA(!ra_used(ir), "unfused FREF"); |
| 938 | } | 946 | } |
| 939 | 947 | ||
| 940 | static void asm_strref(ASMState *as, IRIns *ir) | 948 | static void asm_strref(ASMState *as, IRIns *ir) |
| @@ -971,25 +979,27 @@ static void asm_strref(ASMState *as, IRIns *ir) | |||
| 971 | 979 | ||
| 972 | /* -- Loads and stores ---------------------------------------------------- */ | 980 | /* -- Loads and stores ---------------------------------------------------- */ |
| 973 | 981 | ||
| 974 | static ARMIns asm_fxloadins(IRIns *ir) | 982 | static ARMIns asm_fxloadins(ASMState *as, IRIns *ir) |
| 975 | { | 983 | { |
| 984 | UNUSED(as); | ||
| 976 | switch (irt_type(ir->t)) { | 985 | switch (irt_type(ir->t)) { |
| 977 | case IRT_I8: return ARMI_LDRSB; | 986 | case IRT_I8: return ARMI_LDRSB; |
| 978 | case IRT_U8: return ARMI_LDRB; | 987 | case IRT_U8: return ARMI_LDRB; |
| 979 | case IRT_I16: return ARMI_LDRSH; | 988 | case IRT_I16: return ARMI_LDRSH; |
| 980 | case IRT_U16: return ARMI_LDRH; | 989 | case IRT_U16: return ARMI_LDRH; |
| 981 | case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D; | 990 | case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VLDR_D; |
| 982 | case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; /* fallthrough */ | 991 | case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; /* fallthrough */ |
| 983 | default: return ARMI_LDR; | 992 | default: return ARMI_LDR; |
| 984 | } | 993 | } |
| 985 | } | 994 | } |
| 986 | 995 | ||
| 987 | static ARMIns asm_fxstoreins(IRIns *ir) | 996 | static ARMIns asm_fxstoreins(ASMState *as, IRIns *ir) |
| 988 | { | 997 | { |
| 998 | UNUSED(as); | ||
| 989 | switch (irt_type(ir->t)) { | 999 | switch (irt_type(ir->t)) { |
| 990 | case IRT_I8: case IRT_U8: return ARMI_STRB; | 1000 | case IRT_I8: case IRT_U8: return ARMI_STRB; |
| 991 | case IRT_I16: case IRT_U16: return ARMI_STRH; | 1001 | case IRT_I16: case IRT_U16: return ARMI_STRH; |
| 992 | case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D; | 1002 | case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VSTR_D; |
| 993 | case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; /* fallthrough */ | 1003 | case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; /* fallthrough */ |
| 994 | default: return ARMI_STR; | 1004 | default: return ARMI_STR; |
| 995 | } | 1005 | } |
| @@ -997,12 +1007,13 @@ static ARMIns asm_fxstoreins(IRIns *ir) | |||
| 997 | 1007 | ||
| 998 | static void asm_fload(ASMState *as, IRIns *ir) | 1008 | static void asm_fload(ASMState *as, IRIns *ir) |
| 999 | { | 1009 | { |
| 1000 | if (ir->op1 == REF_NIL) { | 1010 | if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */ |
| 1001 | lua_assert(!ra_used(ir)); /* We can end up here if DCE is turned off. */ | 1011 | /* We can end up here if DCE is turned off. */ |
| 1012 | lj_assertA(!ra_used(ir), "NYI FLOAD GG_State"); | ||
| 1002 | } else { | 1013 | } else { |
| 1003 | Reg dest = ra_dest(as, ir, RSET_GPR); | 1014 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 1004 | Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); | 1015 | Reg idx = ra_alloc1(as, ir->op1, RSET_GPR); |
| 1005 | ARMIns ai = asm_fxloadins(ir); | 1016 | ARMIns ai = asm_fxloadins(as, ir); |
| 1006 | int32_t ofs; | 1017 | int32_t ofs; |
| 1007 | if (ir->op2 == IRFL_TAB_ARRAY) { | 1018 | if (ir->op2 == IRFL_TAB_ARRAY) { |
| 1008 | ofs = asm_fuseabase(as, ir->op1); | 1019 | ofs = asm_fuseabase(as, ir->op1); |
| @@ -1026,7 +1037,7 @@ static void asm_fstore(ASMState *as, IRIns *ir) | |||
| 1026 | IRIns *irf = IR(ir->op1); | 1037 | IRIns *irf = IR(ir->op1); |
| 1027 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); | 1038 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); |
| 1028 | int32_t ofs = field_ofs[irf->op2]; | 1039 | int32_t ofs = field_ofs[irf->op2]; |
| 1029 | ARMIns ai = asm_fxstoreins(ir); | 1040 | ARMIns ai = asm_fxstoreins(as, ir); |
| 1030 | if ((ai & 0x04000000)) | 1041 | if ((ai & 0x04000000)) |
| 1031 | emit_lso(as, ai, src, idx, ofs); | 1042 | emit_lso(as, ai, src, idx, ofs); |
| 1032 | else | 1043 | else |
| @@ -1038,8 +1049,8 @@ static void asm_xload(ASMState *as, IRIns *ir) | |||
| 1038 | { | 1049 | { |
| 1039 | Reg dest = ra_dest(as, ir, | 1050 | Reg dest = ra_dest(as, ir, |
| 1040 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 1051 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 1041 | lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); | 1052 | lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD"); |
| 1042 | asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); | 1053 | asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0); |
| 1043 | } | 1054 | } |
| 1044 | 1055 | ||
| 1045 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | 1056 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) |
| @@ -1047,7 +1058,7 @@ static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | |||
| 1047 | if (ir->r != RID_SINK) { | 1058 | if (ir->r != RID_SINK) { |
| 1048 | Reg src = ra_alloc1(as, ir->op2, | 1059 | Reg src = ra_alloc1(as, ir->op2, |
| 1049 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 1060 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 1050 | asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, | 1061 | asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1, |
| 1051 | rset_exclude(RSET_GPR, src), ofs); | 1062 | rset_exclude(RSET_GPR, src), ofs); |
| 1052 | } | 1063 | } |
| 1053 | } | 1064 | } |
| @@ -1066,8 +1077,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1066 | rset_clear(allow, type); | 1077 | rset_clear(allow, type); |
| 1067 | } | 1078 | } |
| 1068 | if (ra_used(ir)) { | 1079 | if (ra_used(ir)) { |
| 1069 | lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || | 1080 | lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || |
| 1070 | irt_isint(ir->t) || irt_isaddr(ir->t)); | 1081 | irt_isint(ir->t) || irt_isaddr(ir->t), |
| 1082 | "bad load type %d", irt_type(ir->t)); | ||
| 1071 | dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); | 1083 | dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); |
| 1072 | rset_clear(allow, dest); | 1084 | rset_clear(allow, dest); |
| 1073 | } | 1085 | } |
| @@ -1133,10 +1145,13 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1133 | IRType t = hiop ? IRT_NUM : irt_type(ir->t); | 1145 | IRType t = hiop ? IRT_NUM : irt_type(ir->t); |
| 1134 | Reg dest = RID_NONE, type = RID_NONE, base; | 1146 | Reg dest = RID_NONE, type = RID_NONE, base; |
| 1135 | RegSet allow = RSET_GPR; | 1147 | RegSet allow = RSET_GPR; |
| 1136 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | 1148 | lj_assertA(!(ir->op2 & IRSLOAD_PARENT), |
| 1137 | lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | 1149 | "bad parent SLOAD"); /* Handled by asm_head_side(). */ |
| 1150 | lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK), | ||
| 1151 | "inconsistent SLOAD variant"); | ||
| 1138 | #if LJ_SOFTFP | 1152 | #if LJ_SOFTFP |
| 1139 | lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ | 1153 | lj_assertA(!(ir->op2 & IRSLOAD_CONVERT), |
| 1154 | "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */ | ||
| 1140 | if (hiop && ra_used(ir+1)) { | 1155 | if (hiop && ra_used(ir+1)) { |
| 1141 | type = ra_dest(as, ir+1, allow); | 1156 | type = ra_dest(as, ir+1, allow); |
| 1142 | rset_clear(allow, type); | 1157 | rset_clear(allow, type); |
| @@ -1152,8 +1167,9 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1152 | Reg tmp = RID_NONE; | 1167 | Reg tmp = RID_NONE; |
| 1153 | if ((ir->op2 & IRSLOAD_CONVERT)) | 1168 | if ((ir->op2 & IRSLOAD_CONVERT)) |
| 1154 | tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR); | 1169 | tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR); |
| 1155 | lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || | 1170 | lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || |
| 1156 | irt_isint(ir->t) || irt_isaddr(ir->t)); | 1171 | irt_isint(ir->t) || irt_isaddr(ir->t), |
| 1172 | "bad SLOAD type %d", irt_type(ir->t)); | ||
| 1157 | dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); | 1173 | dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow); |
| 1158 | rset_clear(allow, dest); | 1174 | rset_clear(allow, dest); |
| 1159 | base = ra_alloc1(as, REF_BASE, allow); | 1175 | base = ra_alloc1(as, REF_BASE, allow); |
| @@ -1218,7 +1234,8 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1218 | IRRef args[4]; | 1234 | IRRef args[4]; |
| 1219 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | 1235 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); |
| 1220 | RegSet drop = RSET_SCRATCH; | 1236 | RegSet drop = RSET_SCRATCH; |
| 1221 | lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); | 1237 | lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), |
| 1238 | "bad CNEW/CNEWI operands"); | ||
| 1222 | 1239 | ||
| 1223 | as->gcsteps++; | 1240 | as->gcsteps++; |
| 1224 | if (ra_hasreg(ir->r)) | 1241 | if (ra_hasreg(ir->r)) |
| @@ -1230,10 +1247,10 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1230 | /* Initialize immutable cdata object. */ | 1247 | /* Initialize immutable cdata object. */ |
| 1231 | if (ir->o == IR_CNEWI) { | 1248 | if (ir->o == IR_CNEWI) { |
| 1232 | int32_t ofs = sizeof(GCcdata); | 1249 | int32_t ofs = sizeof(GCcdata); |
| 1233 | lua_assert(sz == 4 || sz == 8); | 1250 | lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); |
| 1234 | if (sz == 8) { | 1251 | if (sz == 8) { |
| 1235 | ofs += 4; ir++; | 1252 | ofs += 4; ir++; |
| 1236 | lua_assert(ir->o == IR_HIOP); | 1253 | lj_assertA(ir->o == IR_HIOP, "expected HIOP for CNEWI"); |
| 1237 | } | 1254 | } |
| 1238 | for (;;) { | 1255 | for (;;) { |
| 1239 | Reg r = ra_alloc1(as, ir->op2, allow); | 1256 | Reg r = ra_alloc1(as, ir->op2, allow); |
| @@ -1299,7 +1316,7 @@ static void asm_obar(ASMState *as, IRIns *ir) | |||
| 1299 | MCLabel l_end; | 1316 | MCLabel l_end; |
| 1300 | Reg obj, val, tmp; | 1317 | Reg obj, val, tmp; |
| 1301 | /* No need for other object barriers (yet). */ | 1318 | /* No need for other object barriers (yet). */ |
| 1302 | lua_assert(IR(ir->op1)->o == IR_UREFC); | 1319 | lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); |
| 1303 | ra_evictset(as, RSET_SCRATCH); | 1320 | ra_evictset(as, RSET_SCRATCH); |
| 1304 | l_end = emit_label(as); | 1321 | l_end = emit_label(as); |
| 1305 | args[0] = ASMREF_TMP1; /* global_State *g */ | 1322 | args[0] = ASMREF_TMP1; /* global_State *g */ |
| @@ -1575,7 +1592,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh) | |||
| 1575 | #define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR) | 1592 | #define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR) |
| 1576 | #define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR) | 1593 | #define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR) |
| 1577 | #define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR) | 1594 | #define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR) |
| 1578 | #define asm_brol(as, ir) lua_assert(0) | 1595 | #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL") |
| 1579 | 1596 | ||
| 1580 | static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) | 1597 | static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) |
| 1581 | { | 1598 | { |
| @@ -1726,7 +1743,8 @@ static void asm_intcomp(ASMState *as, IRIns *ir) | |||
| 1726 | Reg left; | 1743 | Reg left; |
| 1727 | uint32_t m; | 1744 | uint32_t m; |
| 1728 | int cmpprev0 = 0; | 1745 | int cmpprev0 = 0; |
| 1729 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | 1746 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t), |
| 1747 | "bad comparison data type %d", irt_type(ir->t)); | ||
| 1730 | if (asm_swapops(as, lref, rref)) { | 1748 | if (asm_swapops(as, lref, rref)) { |
| 1731 | Reg tmp = lref; lref = rref; rref = tmp; | 1749 | Reg tmp = lref; lref = rref; rref = tmp; |
| 1732 | if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ | 1750 | if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ |
| @@ -1895,10 +1913,11 @@ static void asm_hiop(ASMState *as, IRIns *ir) | |||
| 1895 | case IR_CNEWI: | 1913 | case IR_CNEWI: |
| 1896 | /* Nothing to do here. Handled by lo op itself. */ | 1914 | /* Nothing to do here. Handled by lo op itself. */ |
| 1897 | break; | 1915 | break; |
| 1898 | default: lua_assert(0); break; | 1916 | default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break; |
| 1899 | } | 1917 | } |
| 1900 | #else | 1918 | #else |
| 1901 | UNUSED(as); UNUSED(ir); lua_assert(0); | 1919 | /* Unused without SOFTFP or FFI. */ |
| 1920 | UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP"); | ||
| 1902 | #endif | 1921 | #endif |
| 1903 | } | 1922 | } |
| 1904 | 1923 | ||
| @@ -1923,7 +1942,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot, | |||
| 1923 | if (irp) { | 1942 | if (irp) { |
| 1924 | if (!ra_hasspill(irp->s)) { | 1943 | if (!ra_hasspill(irp->s)) { |
| 1925 | pbase = irp->r; | 1944 | pbase = irp->r; |
| 1926 | lua_assert(ra_hasreg(pbase)); | 1945 | lj_assertA(ra_hasreg(pbase), "base reg lost"); |
| 1927 | } else if (allow) { | 1946 | } else if (allow) { |
| 1928 | pbase = rset_pickbot(allow); | 1947 | pbase = rset_pickbot(allow); |
| 1929 | } else { | 1948 | } else { |
| @@ -1935,7 +1954,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot, | |||
| 1935 | } | 1954 | } |
| 1936 | emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); | 1955 | emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno)); |
| 1937 | k = emit_isk12(0, (int32_t)(8*topslot)); | 1956 | k = emit_isk12(0, (int32_t)(8*topslot)); |
| 1938 | lua_assert(k); | 1957 | lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot); |
| 1939 | emit_n(as, ARMI_CMP^k, RID_TMP); | 1958 | emit_n(as, ARMI_CMP^k, RID_TMP); |
| 1940 | emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); | 1959 | emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase); |
| 1941 | emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, | 1960 | emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, |
| @@ -1972,7 +1991,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 1972 | #if LJ_SOFTFP | 1991 | #if LJ_SOFTFP |
| 1973 | RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); | 1992 | RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); |
| 1974 | Reg tmp; | 1993 | Reg tmp; |
| 1975 | lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ | 1994 | /* LJ_SOFTFP: must be a number constant. */ |
| 1995 | lj_assertA(irref_isk(ref), "unsplit FP op"); | ||
| 1976 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, | 1996 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, |
| 1977 | rset_exclude(RSET_GPREVEN, RID_BASE)); | 1997 | rset_exclude(RSET_GPREVEN, RID_BASE)); |
| 1978 | emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); | 1998 | emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs); |
| @@ -1986,7 +2006,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 1986 | } else { | 2006 | } else { |
| 1987 | RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); | 2007 | RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE); |
| 1988 | Reg type; | 2008 | Reg type; |
| 1989 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); | 2009 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t), |
| 2010 | "restore of IR type %d", irt_type(ir->t)); | ||
| 1990 | if (!irt_ispri(ir->t)) { | 2011 | if (!irt_ispri(ir->t)) { |
| 1991 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); | 2012 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE)); |
| 1992 | emit_lso(as, ARMI_STR, src, RID_BASE, ofs); | 2013 | emit_lso(as, ARMI_STR, src, RID_BASE, ofs); |
| @@ -2006,7 +2027,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2006 | } | 2027 | } |
| 2007 | checkmclim(as); | 2028 | checkmclim(as); |
| 2008 | } | 2029 | } |
| 2009 | lua_assert(map + nent == flinks); | 2030 | lj_assertA(map + nent == flinks, "inconsistent frames in snapshot"); |
| 2010 | } | 2031 | } |
| 2011 | 2032 | ||
| 2012 | /* -- GC handling --------------------------------------------------------- */ | 2033 | /* -- GC handling --------------------------------------------------------- */ |
| @@ -2092,7 +2113,7 @@ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) | |||
| 2092 | rset_clear(allow, ra_dest(as, ir, allow)); | 2113 | rset_clear(allow, ra_dest(as, ir, allow)); |
| 2093 | } else { | 2114 | } else { |
| 2094 | Reg r = irp->r; | 2115 | Reg r = irp->r; |
| 2095 | lua_assert(ra_hasreg(r)); | 2116 | lj_assertA(ra_hasreg(r), "base reg lost"); |
| 2096 | rset_clear(allow, r); | 2117 | rset_clear(allow, r); |
| 2097 | if (r != ir->r && !rset_test(as->freeset, r)) | 2118 | if (r != ir->r && !rset_test(as->freeset, r)) |
| 2098 | ra_restore(as, regcost_ref(as->cost[r])); | 2119 | ra_restore(as, regcost_ref(as->cost[r])); |
| @@ -2114,7 +2135,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk) | |||
| 2114 | } else { | 2135 | } else { |
| 2115 | /* Patch stack adjustment. */ | 2136 | /* Patch stack adjustment. */ |
| 2116 | uint32_t k = emit_isk12(ARMI_ADD, spadj); | 2137 | uint32_t k = emit_isk12(ARMI_ADD, spadj); |
| 2117 | lua_assert(k); | 2138 | lj_assertA(k, "stack adjustment %d does not fit in K12", spadj); |
| 2118 | p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); | 2139 | p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP); |
| 2119 | } | 2140 | } |
| 2120 | /* Patch exit branch. */ | 2141 | /* Patch exit branch. */ |
| @@ -2196,7 +2217,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 2196 | if (!cstart) cstart = p; | 2217 | if (!cstart) cstart = p; |
| 2197 | } | 2218 | } |
| 2198 | } | 2219 | } |
| 2199 | lua_assert(cstart != NULL); | 2220 | lj_assertJ(cstart != NULL, "exit stub %d not found", exitno); |
| 2200 | lj_mcode_sync(cstart, cend); | 2221 | lj_mcode_sync(cstart, cend); |
| 2201 | lj_mcode_patch(J, mcarea, 1); | 2222 | lj_mcode_patch(J, mcarea, 1); |
| 2202 | } | 2223 | } |
diff --git a/src/lj_asm_arm64.h b/src/lj_asm_arm64.h index a3502223..0729a3a5 100644 --- a/src/lj_asm_arm64.h +++ b/src/lj_asm_arm64.h | |||
| @@ -213,7 +213,7 @@ static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow) | |||
| 213 | return A64F_M(ir->r); | 213 | return A64F_M(ir->r); |
| 214 | } else if (irref_isk(ref)) { | 214 | } else if (irref_isk(ref)) { |
| 215 | uint32_t m; | 215 | uint32_t m; |
| 216 | int64_t k = get_k64val(ir); | 216 | int64_t k = get_k64val(as, ref); |
| 217 | if ((ai & 0x1f000000) == 0x0a000000) | 217 | if ((ai & 0x1f000000) == 0x0a000000) |
| 218 | m = emit_isk13(k, irt_is64(ir->t)); | 218 | m = emit_isk13(k, irt_is64(ir->t)); |
| 219 | else | 219 | else |
| @@ -354,9 +354,9 @@ static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air) | |||
| 354 | static int asm_fuseandshift(ASMState *as, IRIns *ir) | 354 | static int asm_fuseandshift(ASMState *as, IRIns *ir) |
| 355 | { | 355 | { |
| 356 | IRIns *irl = IR(ir->op1); | 356 | IRIns *irl = IR(ir->op1); |
| 357 | lua_assert(ir->o == IR_BAND); | 357 | lj_assertA(ir->o == IR_BAND, "bad usage"); |
| 358 | if (canfuse(as, irl) && irref_isk(ir->op2)) { | 358 | if (canfuse(as, irl) && irref_isk(ir->op2)) { |
| 359 | uint64_t mask = get_k64val(IR(ir->op2)); | 359 | uint64_t mask = get_k64val(as, ir->op2); |
| 360 | if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) { | 360 | if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) { |
| 361 | int32_t shmask = irt_is64(irl->t) ? 63 : 31; | 361 | int32_t shmask = irt_is64(irl->t) ? 63 : 31; |
| 362 | int32_t shift = (IR(irl->op2)->i & shmask); | 362 | int32_t shift = (IR(irl->op2)->i & shmask); |
| @@ -384,7 +384,7 @@ static int asm_fuseandshift(ASMState *as, IRIns *ir) | |||
| 384 | static int asm_fuseorshift(ASMState *as, IRIns *ir) | 384 | static int asm_fuseorshift(ASMState *as, IRIns *ir) |
| 385 | { | 385 | { |
| 386 | IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); | 386 | IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); |
| 387 | lua_assert(ir->o == IR_BOR); | 387 | lj_assertA(ir->o == IR_BOR, "bad usage"); |
| 388 | if (canfuse(as, irl) && canfuse(as, irr) && | 388 | if (canfuse(as, irl) && canfuse(as, irr) && |
| 389 | ((irl->o == IR_BSHR && irr->o == IR_BSHL) || | 389 | ((irl->o == IR_BSHR && irr->o == IR_BSHL) || |
| 390 | (irl->o == IR_BSHL && irr->o == IR_BSHR))) { | 390 | (irl->o == IR_BSHL && irr->o == IR_BSHR))) { |
| @@ -428,7 +428,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 428 | if (ref) { | 428 | if (ref) { |
| 429 | if (irt_isfp(ir->t)) { | 429 | if (irt_isfp(ir->t)) { |
| 430 | if (fpr <= REGARG_LASTFPR) { | 430 | if (fpr <= REGARG_LASTFPR) { |
| 431 | lua_assert(rset_test(as->freeset, fpr)); /* Must have been evicted. */ | 431 | lj_assertA(rset_test(as->freeset, fpr), |
| 432 | "reg %d not free", fpr); /* Must have been evicted. */ | ||
| 432 | ra_leftov(as, fpr, ref); | 433 | ra_leftov(as, fpr, ref); |
| 433 | fpr++; | 434 | fpr++; |
| 434 | } else { | 435 | } else { |
| @@ -438,7 +439,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 438 | } | 439 | } |
| 439 | } else { | 440 | } else { |
| 440 | if (gpr <= REGARG_LASTGPR) { | 441 | if (gpr <= REGARG_LASTGPR) { |
| 441 | lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */ | 442 | lj_assertA(rset_test(as->freeset, gpr), |
| 443 | "reg %d not free", gpr); /* Must have been evicted. */ | ||
| 442 | ra_leftov(as, gpr, ref); | 444 | ra_leftov(as, gpr, ref); |
| 443 | gpr++; | 445 | gpr++; |
| 444 | } else { | 446 | } else { |
| @@ -459,7 +461,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | |||
| 459 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | 461 | rset_clear(drop, ir->r); /* Dest reg handled below. */ |
| 460 | ra_evictset(as, drop); /* Evictions must be performed first. */ | 462 | ra_evictset(as, drop); /* Evictions must be performed first. */ |
| 461 | if (ra_used(ir)) { | 463 | if (ra_used(ir)) { |
| 462 | lua_assert(!irt_ispri(ir->t)); | 464 | lj_assertA(!irt_ispri(ir->t), "PRI dest"); |
| 463 | if (irt_isfp(ir->t)) { | 465 | if (irt_isfp(ir->t)) { |
| 464 | if (ci->flags & CCI_CASTU64) { | 466 | if (ci->flags & CCI_CASTU64) { |
| 465 | Reg dest = ra_dest(as, ir, RSET_FPR) & 31; | 467 | Reg dest = ra_dest(as, ir, RSET_FPR) & 31; |
| @@ -546,7 +548,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 546 | int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64); | 548 | int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64); |
| 547 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); | 549 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); |
| 548 | IRRef lref = ir->op1; | 550 | IRRef lref = ir->op1; |
| 549 | lua_assert(irt_type(ir->t) != st); | 551 | lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV"); |
| 550 | if (irt_isfp(ir->t)) { | 552 | if (irt_isfp(ir->t)) { |
| 551 | Reg dest = ra_dest(as, ir, RSET_FPR); | 553 | Reg dest = ra_dest(as, ir, RSET_FPR); |
| 552 | if (stfp) { /* FP to FP conversion. */ | 554 | if (stfp) { /* FP to FP conversion. */ |
| @@ -566,7 +568,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 566 | } else if (stfp) { /* FP to integer conversion. */ | 568 | } else if (stfp) { /* FP to integer conversion. */ |
| 567 | if (irt_isguard(ir->t)) { | 569 | if (irt_isguard(ir->t)) { |
| 568 | /* Checked conversions are only supported from number to int. */ | 570 | /* Checked conversions are only supported from number to int. */ |
| 569 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 571 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 572 | "bad type for checked CONV"); | ||
| 570 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | 573 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); |
| 571 | } else { | 574 | } else { |
| 572 | Reg left = ra_alloc1(as, lref, RSET_FPR); | 575 | Reg left = ra_alloc1(as, lref, RSET_FPR); |
| @@ -586,7 +589,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 586 | A64Ins ai = st == IRT_I8 ? A64I_SXTBw : | 589 | A64Ins ai = st == IRT_I8 ? A64I_SXTBw : |
| 587 | st == IRT_U8 ? A64I_UXTBw : | 590 | st == IRT_U8 ? A64I_UXTBw : |
| 588 | st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw; | 591 | st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw; |
| 589 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | 592 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT"); |
| 590 | emit_dn(as, ai, dest, left); | 593 | emit_dn(as, ai, dest, left); |
| 591 | } else { | 594 | } else { |
| 592 | Reg dest = ra_dest(as, ir, RSET_GPR); | 595 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| @@ -650,7 +653,8 @@ static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref) | |||
| 650 | { | 653 | { |
| 651 | RegSet allow = rset_exclude(RSET_GPR, base); | 654 | RegSet allow = rset_exclude(RSET_GPR, base); |
| 652 | IRIns *ir = IR(ref); | 655 | IRIns *ir = IR(ref); |
| 653 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); | 656 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t), |
| 657 | "store of IR type %d", irt_type(ir->t)); | ||
| 654 | if (irref_isk(ref)) { | 658 | if (irref_isk(ref)) { |
| 655 | TValue k; | 659 | TValue k; |
| 656 | lj_ir_kvalue(as->J->L, &k, ir); | 660 | lj_ir_kvalue(as->J->L, &k, ir); |
| @@ -770,7 +774,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 770 | } | 774 | } |
| 771 | rset_clear(allow, scr); | 775 | rset_clear(allow, scr); |
| 772 | } else { | 776 | } else { |
| 773 | lua_assert(irt_ispri(kt) && !irt_isnil(kt)); | 777 | lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type"); |
| 774 | type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow); | 778 | type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow); |
| 775 | scr = ra_scratch(as, rset_clear(allow, type)); | 779 | scr = ra_scratch(as, rset_clear(allow, type)); |
| 776 | rset_clear(allow, scr); | 780 | rset_clear(allow, scr); |
| @@ -831,7 +835,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 831 | rset_clear(allow, type); | 835 | rset_clear(allow, type); |
| 832 | } | 836 | } |
| 833 | /* Load main position relative to tab->node into dest. */ | 837 | /* Load main position relative to tab->node into dest. */ |
| 834 | khash = isk ? ir_khash(irkey) : 1; | 838 | khash = isk ? ir_khash(as, irkey) : 1; |
| 835 | if (khash == 0) { | 839 | if (khash == 0) { |
| 836 | emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node)); | 840 | emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node)); |
| 837 | } else { | 841 | } else { |
| @@ -886,7 +890,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 886 | Reg key, idx = node; | 890 | Reg key, idx = node; |
| 887 | RegSet allow = rset_exclude(RSET_GPR, node); | 891 | RegSet allow = rset_exclude(RSET_GPR, node); |
| 888 | uint64_t k; | 892 | uint64_t k; |
| 889 | lua_assert(ofs % sizeof(Node) == 0); | 893 | lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot"); |
| 890 | if (bigofs) { | 894 | if (bigofs) { |
| 891 | idx = dest; | 895 | idx = dest; |
| 892 | rset_clear(allow, dest); | 896 | rset_clear(allow, dest); |
| @@ -936,7 +940,7 @@ static void asm_uref(ASMState *as, IRIns *ir) | |||
| 936 | static void asm_fref(ASMState *as, IRIns *ir) | 940 | static void asm_fref(ASMState *as, IRIns *ir) |
| 937 | { | 941 | { |
| 938 | UNUSED(as); UNUSED(ir); | 942 | UNUSED(as); UNUSED(ir); |
| 939 | lua_assert(!ra_used(ir)); | 943 | lj_assertA(!ra_used(ir), "unfused FREF"); |
| 940 | } | 944 | } |
| 941 | 945 | ||
| 942 | static void asm_strref(ASMState *as, IRIns *ir) | 946 | static void asm_strref(ASMState *as, IRIns *ir) |
| @@ -988,7 +992,7 @@ static void asm_fload(ASMState *as, IRIns *ir) | |||
| 988 | Reg idx; | 992 | Reg idx; |
| 989 | A64Ins ai = asm_fxloadins(ir); | 993 | A64Ins ai = asm_fxloadins(ir); |
| 990 | int32_t ofs; | 994 | int32_t ofs; |
| 991 | if (ir->op1 == REF_NIL) { | 995 | if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */ |
| 992 | idx = RID_GL; | 996 | idx = RID_GL; |
| 993 | ofs = (ir->op2 << 2) - GG_OFS(g); | 997 | ofs = (ir->op2 << 2) - GG_OFS(g); |
| 994 | } else { | 998 | } else { |
| @@ -1019,7 +1023,7 @@ static void asm_fstore(ASMState *as, IRIns *ir) | |||
| 1019 | static void asm_xload(ASMState *as, IRIns *ir) | 1023 | static void asm_xload(ASMState *as, IRIns *ir) |
| 1020 | { | 1024 | { |
| 1021 | Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); | 1025 | Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); |
| 1022 | lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); | 1026 | lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD"); |
| 1023 | asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR); | 1027 | asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR); |
| 1024 | } | 1028 | } |
| 1025 | 1029 | ||
| @@ -1037,8 +1041,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1037 | Reg idx, tmp, type; | 1041 | Reg idx, tmp, type; |
| 1038 | int32_t ofs = 0; | 1042 | int32_t ofs = 0; |
| 1039 | RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; | 1043 | RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; |
| 1040 | lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | 1044 | lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || |
| 1041 | irt_isint(ir->t)); | 1045 | irt_isint(ir->t), |
| 1046 | "bad load type %d", irt_type(ir->t)); | ||
| 1042 | if (ra_used(ir)) { | 1047 | if (ra_used(ir)) { |
| 1043 | Reg dest = ra_dest(as, ir, allow); | 1048 | Reg dest = ra_dest(as, ir, allow); |
| 1044 | tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest; | 1049 | tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest; |
| @@ -1057,7 +1062,8 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1057 | /* Always do the type check, even if the load result is unused. */ | 1062 | /* Always do the type check, even if the load result is unused. */ |
| 1058 | asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE); | 1063 | asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE); |
| 1059 | if (irt_type(ir->t) >= IRT_NUM) { | 1064 | if (irt_type(ir->t) >= IRT_NUM) { |
| 1060 | lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); | 1065 | lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t), |
| 1066 | "bad load type %d", irt_type(ir->t)); | ||
| 1061 | emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), | 1067 | emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), |
| 1062 | ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp); | 1068 | ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp); |
| 1063 | } else if (irt_isaddr(ir->t)) { | 1069 | } else if (irt_isaddr(ir->t)) { |
| @@ -1122,8 +1128,10 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1122 | IRType1 t = ir->t; | 1128 | IRType1 t = ir->t; |
| 1123 | Reg dest = RID_NONE, base; | 1129 | Reg dest = RID_NONE, base; |
| 1124 | RegSet allow = RSET_GPR; | 1130 | RegSet allow = RSET_GPR; |
| 1125 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | 1131 | lj_assertA(!(ir->op2 & IRSLOAD_PARENT), |
| 1126 | lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | 1132 | "bad parent SLOAD"); /* Handled by asm_head_side(). */ |
| 1133 | lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK), | ||
| 1134 | "inconsistent SLOAD variant"); | ||
| 1127 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | 1135 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { |
| 1128 | dest = ra_scratch(as, RSET_FPR); | 1136 | dest = ra_scratch(as, RSET_FPR); |
| 1129 | asm_tointg(as, ir, dest); | 1137 | asm_tointg(as, ir, dest); |
| @@ -1132,7 +1140,8 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1132 | Reg tmp = RID_NONE; | 1140 | Reg tmp = RID_NONE; |
| 1133 | if ((ir->op2 & IRSLOAD_CONVERT)) | 1141 | if ((ir->op2 & IRSLOAD_CONVERT)) |
| 1134 | tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR); | 1142 | tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR); |
| 1135 | lua_assert((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t)); | 1143 | lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t), |
| 1144 | "bad SLOAD type %d", irt_type(t)); | ||
| 1136 | dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow); | 1145 | dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow); |
| 1137 | base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest)); | 1146 | base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest)); |
| 1138 | if (irt_isaddr(t)) { | 1147 | if (irt_isaddr(t)) { |
| @@ -1172,7 +1181,8 @@ dotypecheck: | |||
| 1172 | /* Need type check, even if the load result is unused. */ | 1181 | /* Need type check, even if the load result is unused. */ |
| 1173 | asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE); | 1182 | asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE); |
| 1174 | if (irt_type(t) >= IRT_NUM) { | 1183 | if (irt_type(t) >= IRT_NUM) { |
| 1175 | lua_assert(irt_isinteger(t) || irt_isnum(t)); | 1184 | lj_assertA(irt_isinteger(t) || irt_isnum(t), |
| 1185 | "bad SLOAD type %d", irt_type(t)); | ||
| 1176 | emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), | 1186 | emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), |
| 1177 | ra_allock(as, LJ_TISNUM << 15, allow), tmp); | 1187 | ra_allock(as, LJ_TISNUM << 15, allow), tmp); |
| 1178 | } else if (irt_isnil(t)) { | 1188 | } else if (irt_isnil(t)) { |
| @@ -1207,7 +1217,8 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1207 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | 1217 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; |
| 1208 | IRRef args[4]; | 1218 | IRRef args[4]; |
| 1209 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | 1219 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); |
| 1210 | lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); | 1220 | lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), |
| 1221 | "bad CNEW/CNEWI operands"); | ||
| 1211 | 1222 | ||
| 1212 | as->gcsteps++; | 1223 | as->gcsteps++; |
| 1213 | asm_setupresult(as, ir, ci); /* GCcdata * */ | 1224 | asm_setupresult(as, ir, ci); /* GCcdata * */ |
| @@ -1215,7 +1226,7 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1215 | if (ir->o == IR_CNEWI) { | 1226 | if (ir->o == IR_CNEWI) { |
| 1216 | int32_t ofs = sizeof(GCcdata); | 1227 | int32_t ofs = sizeof(GCcdata); |
| 1217 | Reg r = ra_alloc1(as, ir->op2, allow); | 1228 | Reg r = ra_alloc1(as, ir->op2, allow); |
| 1218 | lua_assert(sz == 4 || sz == 8); | 1229 | lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); |
| 1219 | emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs); | 1230 | emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs); |
| 1220 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ | 1231 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ |
| 1221 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; | 1232 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; |
| @@ -1274,7 +1285,7 @@ static void asm_obar(ASMState *as, IRIns *ir) | |||
| 1274 | RegSet allow = RSET_GPR; | 1285 | RegSet allow = RSET_GPR; |
| 1275 | Reg obj, val, tmp; | 1286 | Reg obj, val, tmp; |
| 1276 | /* No need for other object barriers (yet). */ | 1287 | /* No need for other object barriers (yet). */ |
| 1277 | lua_assert(IR(ir->op1)->o == IR_UREFC); | 1288 | lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); |
| 1278 | ra_evictset(as, RSET_SCRATCH); | 1289 | ra_evictset(as, RSET_SCRATCH); |
| 1279 | l_end = emit_label(as); | 1290 | l_end = emit_label(as); |
| 1280 | args[0] = ASMREF_TMP1; /* global_State *g */ | 1291 | args[0] = ASMREF_TMP1; /* global_State *g */ |
| @@ -1544,7 +1555,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh) | |||
| 1544 | #define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR) | 1555 | #define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR) |
| 1545 | #define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR) | 1556 | #define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR) |
| 1546 | #define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR) | 1557 | #define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR) |
| 1547 | #define asm_brol(as, ir) lua_assert(0) | 1558 | #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL") |
| 1548 | 1559 | ||
| 1549 | static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc) | 1560 | static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc) |
| 1550 | { | 1561 | { |
| @@ -1625,15 +1636,16 @@ static void asm_intcomp(ASMState *as, IRIns *ir) | |||
| 1625 | Reg left; | 1636 | Reg left; |
| 1626 | uint32_t m; | 1637 | uint32_t m; |
| 1627 | int cmpprev0 = 0; | 1638 | int cmpprev0 = 0; |
| 1628 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || | 1639 | lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || |
| 1629 | irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t)); | 1640 | irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t), |
| 1641 | "bad comparison data type %d", irt_type(ir->t)); | ||
| 1630 | if (asm_swapops(as, lref, rref)) { | 1642 | if (asm_swapops(as, lref, rref)) { |
| 1631 | IRRef tmp = lref; lref = rref; rref = tmp; | 1643 | IRRef tmp = lref; lref = rref; rref = tmp; |
| 1632 | if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ | 1644 | if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */ |
| 1633 | else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */ | 1645 | else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */ |
| 1634 | } | 1646 | } |
| 1635 | oldcc = cc; | 1647 | oldcc = cc; |
| 1636 | if (irref_isk(rref) && get_k64val(IR(rref)) == 0) { | 1648 | if (irref_isk(rref) && get_k64val(as, rref) == 0) { |
| 1637 | IRIns *irl = IR(lref); | 1649 | IRIns *irl = IR(lref); |
| 1638 | if (cc == CC_GE) cc = CC_PL; | 1650 | if (cc == CC_GE) cc = CC_PL; |
| 1639 | else if (cc == CC_LT) cc = CC_MI; | 1651 | else if (cc == CC_LT) cc = CC_MI; |
| @@ -1648,7 +1660,7 @@ static void asm_intcomp(ASMState *as, IRIns *ir) | |||
| 1648 | Reg tmp = blref; blref = brref; brref = tmp; | 1660 | Reg tmp = blref; blref = brref; brref = tmp; |
| 1649 | } | 1661 | } |
| 1650 | if (irref_isk(brref)) { | 1662 | if (irref_isk(brref)) { |
| 1651 | uint64_t k = get_k64val(IR(brref)); | 1663 | uint64_t k = get_k64val(as, brref); |
| 1652 | if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) { | 1664 | if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) { |
| 1653 | asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ, | 1665 | asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ, |
| 1654 | ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k)); | 1666 | ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k)); |
| @@ -1697,7 +1709,8 @@ static void asm_comp(ASMState *as, IRIns *ir) | |||
| 1697 | /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ | 1709 | /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ |
| 1698 | static void asm_hiop(ASMState *as, IRIns *ir) | 1710 | static void asm_hiop(ASMState *as, IRIns *ir) |
| 1699 | { | 1711 | { |
| 1700 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on 64 bit. */ | 1712 | UNUSED(as); UNUSED(ir); |
| 1713 | lj_assertA(0, "unexpected HIOP"); /* Unused on 64 bit. */ | ||
| 1701 | } | 1714 | } |
| 1702 | 1715 | ||
| 1703 | /* -- Profiling ----------------------------------------------------------- */ | 1716 | /* -- Profiling ----------------------------------------------------------- */ |
| @@ -1705,7 +1718,7 @@ static void asm_hiop(ASMState *as, IRIns *ir) | |||
| 1705 | static void asm_prof(ASMState *as, IRIns *ir) | 1718 | static void asm_prof(ASMState *as, IRIns *ir) |
| 1706 | { | 1719 | { |
| 1707 | uint32_t k = emit_isk13(HOOK_PROFILE, 0); | 1720 | uint32_t k = emit_isk13(HOOK_PROFILE, 0); |
| 1708 | lua_assert(k != 0); | 1721 | lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13"); |
| 1709 | UNUSED(ir); | 1722 | UNUSED(ir); |
| 1710 | asm_guardcc(as, CC_NE); | 1723 | asm_guardcc(as, CC_NE); |
| 1711 | emit_n(as, A64I_TSTw^k, RID_TMP); | 1724 | emit_n(as, A64I_TSTw^k, RID_TMP); |
| @@ -1723,7 +1736,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot, | |||
| 1723 | if (irp) { | 1736 | if (irp) { |
| 1724 | if (!ra_hasspill(irp->s)) { | 1737 | if (!ra_hasspill(irp->s)) { |
| 1725 | pbase = irp->r; | 1738 | pbase = irp->r; |
| 1726 | lua_assert(ra_hasreg(pbase)); | 1739 | lj_assertA(ra_hasreg(pbase), "base reg lost"); |
| 1727 | } else if (allow) { | 1740 | } else if (allow) { |
| 1728 | pbase = rset_pickbot(allow); | 1741 | pbase = rset_pickbot(allow); |
| 1729 | } else { | 1742 | } else { |
| @@ -1735,7 +1748,7 @@ static void asm_stack_check(ASMState *as, BCReg topslot, | |||
| 1735 | } | 1748 | } |
| 1736 | emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno)); | 1749 | emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno)); |
| 1737 | k = emit_isk12((8*topslot)); | 1750 | k = emit_isk12((8*topslot)); |
| 1738 | lua_assert(k); | 1751 | lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot); |
| 1739 | emit_n(as, A64I_CMPx^k, RID_TMP); | 1752 | emit_n(as, A64I_CMPx^k, RID_TMP); |
| 1740 | emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase); | 1753 | emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase); |
| 1741 | emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP, | 1754 | emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP, |
| @@ -1776,7 +1789,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 1776 | } | 1789 | } |
| 1777 | checkmclim(as); | 1790 | checkmclim(as); |
| 1778 | } | 1791 | } |
| 1779 | lua_assert(map + nent == flinks); | 1792 | lj_assertA(map + nent == flinks, "inconsistent frames in snapshot"); |
| 1780 | } | 1793 | } |
| 1781 | 1794 | ||
| 1782 | /* -- GC handling --------------------------------------------------------- */ | 1795 | /* -- GC handling --------------------------------------------------------- */ |
| @@ -1864,7 +1877,7 @@ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) | |||
| 1864 | rset_clear(allow, ra_dest(as, ir, allow)); | 1877 | rset_clear(allow, ra_dest(as, ir, allow)); |
| 1865 | } else { | 1878 | } else { |
| 1866 | Reg r = irp->r; | 1879 | Reg r = irp->r; |
| 1867 | lua_assert(ra_hasreg(r)); | 1880 | lj_assertA(ra_hasreg(r), "base reg lost"); |
| 1868 | rset_clear(allow, r); | 1881 | rset_clear(allow, r); |
| 1869 | if (r != ir->r && !rset_test(as->freeset, r)) | 1882 | if (r != ir->r && !rset_test(as->freeset, r)) |
| 1870 | ra_restore(as, regcost_ref(as->cost[r])); | 1883 | ra_restore(as, regcost_ref(as->cost[r])); |
| @@ -1888,7 +1901,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk) | |||
| 1888 | } else { | 1901 | } else { |
| 1889 | /* Patch stack adjustment. */ | 1902 | /* Patch stack adjustment. */ |
| 1890 | uint32_t k = emit_isk12(spadj); | 1903 | uint32_t k = emit_isk12(spadj); |
| 1891 | lua_assert(k); | 1904 | lj_assertA(k, "stack adjustment %d does not fit in K12", spadj); |
| 1892 | p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP); | 1905 | p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP); |
| 1893 | } | 1906 | } |
| 1894 | /* Patch exit branch. */ | 1907 | /* Patch exit branch. */ |
| @@ -1974,7 +1987,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 1974 | } else if ((ins & 0xfc000000u) == 0x14000000u && | 1987 | } else if ((ins & 0xfc000000u) == 0x14000000u && |
| 1975 | ((ins ^ (px-p)) & 0x03ffffffu) == 0) { | 1988 | ((ins ^ (px-p)) & 0x03ffffffu) == 0) { |
| 1976 | /* Patch b. */ | 1989 | /* Patch b. */ |
| 1977 | lua_assert(A64F_S_OK(delta, 26)); | 1990 | lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range"); |
| 1978 | *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta)); | 1991 | *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta)); |
| 1979 | if (!cstart) cstart = p; | 1992 | if (!cstart) cstart = p; |
| 1980 | } else if ((ins & 0x7e000000u) == 0x34000000u && | 1993 | } else if ((ins & 0x7e000000u) == 0x34000000u && |
| @@ -1995,7 +2008,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 1995 | } | 2008 | } |
| 1996 | { /* Always patch long-range branch in exit stub itself. */ | 2009 | { /* Always patch long-range branch in exit stub itself. */ |
| 1997 | ptrdiff_t delta = target - px; | 2010 | ptrdiff_t delta = target - px; |
| 1998 | lua_assert(A64F_S_OK(delta, 26)); | 2011 | lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range"); |
| 1999 | *px = A64I_B | A64F_S26(delta); | 2012 | *px = A64I_B | A64F_S26(delta); |
| 2000 | if (!cstart) cstart = px; | 2013 | if (!cstart) cstart = px; |
| 2001 | } | 2014 | } |
diff --git a/src/lj_asm_mips.h b/src/lj_asm_mips.h index 6d898c5f..a2b8d8e0 100644 --- a/src/lj_asm_mips.h +++ b/src/lj_asm_mips.h | |||
| @@ -23,7 +23,7 @@ static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow) | |||
| 23 | { | 23 | { |
| 24 | Reg r = IR(ref)->r; | 24 | Reg r = IR(ref)->r; |
| 25 | if (ra_noreg(r)) { | 25 | if (ra_noreg(r)) { |
| 26 | if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(IR(ref)) == 0) | 26 | if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0) |
| 27 | return RID_ZERO; | 27 | return RID_ZERO; |
| 28 | r = ra_allocref(as, ref, allow); | 28 | r = ra_allocref(as, ref, allow); |
| 29 | } else { | 29 | } else { |
| @@ -66,10 +66,10 @@ static void asm_sparejump_setup(ASMState *as) | |||
| 66 | { | 66 | { |
| 67 | MCode *mxp = as->mcbot; | 67 | MCode *mxp = as->mcbot; |
| 68 | if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) { | 68 | if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) { |
| 69 | lua_assert(MIPSI_NOP == 0); | 69 | lj_assertA(MIPSI_NOP == 0, "bad NOP"); |
| 70 | memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode)); | 70 | memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode)); |
| 71 | mxp += MIPS_SPAREJUMP*2; | 71 | mxp += MIPS_SPAREJUMP*2; |
| 72 | lua_assert(mxp < as->mctop); | 72 | lj_assertA(mxp < as->mctop, "MIPS_SPAREJUMP too big"); |
| 73 | lj_mcode_sync(as->mcbot, mxp); | 73 | lj_mcode_sync(as->mcbot, mxp); |
| 74 | lj_mcode_commitbot(as->J, mxp); | 74 | lj_mcode_commitbot(as->J, mxp); |
| 75 | as->mcbot = mxp; | 75 | as->mcbot = mxp; |
| @@ -84,7 +84,8 @@ static void asm_exitstub_setup(ASMState *as) | |||
| 84 | /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ | 84 | /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ |
| 85 | *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; | 85 | *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; |
| 86 | *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); | 86 | *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); |
| 87 | lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0); | 87 | lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0, |
| 88 | "branch target out of range"); | ||
| 88 | *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; | 89 | *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; |
| 89 | as->mctop = mxp; | 90 | as->mctop = mxp; |
| 90 | } | 91 | } |
| @@ -195,20 +196,20 @@ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref, | |||
| 195 | if (ra_noreg(ir->r) && canfuse(as, ir)) { | 196 | if (ra_noreg(ir->r) && canfuse(as, ir)) { |
| 196 | if (ir->o == IR_ADD) { | 197 | if (ir->o == IR_ADD) { |
| 197 | intptr_t ofs2; | 198 | intptr_t ofs2; |
| 198 | if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(IR(ir->op2)), | 199 | if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2), |
| 199 | checki16(ofs2))) { | 200 | checki16(ofs2))) { |
| 200 | ref = ir->op1; | 201 | ref = ir->op1; |
| 201 | ofs = (int32_t)ofs2; | 202 | ofs = (int32_t)ofs2; |
| 202 | } | 203 | } |
| 203 | } else if (ir->o == IR_STRREF) { | 204 | } else if (ir->o == IR_STRREF) { |
| 204 | intptr_t ofs2 = 65536; | 205 | intptr_t ofs2 = 65536; |
| 205 | lua_assert(ofs == 0); | 206 | lj_assertA(ofs == 0, "bad usage"); |
| 206 | ofs = (int32_t)sizeof(GCstr); | 207 | ofs = (int32_t)sizeof(GCstr); |
| 207 | if (irref_isk(ir->op2)) { | 208 | if (irref_isk(ir->op2)) { |
| 208 | ofs2 = ofs + get_kval(IR(ir->op2)); | 209 | ofs2 = ofs + get_kval(as, ir->op2); |
| 209 | ref = ir->op1; | 210 | ref = ir->op1; |
| 210 | } else if (irref_isk(ir->op1)) { | 211 | } else if (irref_isk(ir->op1)) { |
| 211 | ofs2 = ofs + get_kval(IR(ir->op1)); | 212 | ofs2 = ofs + get_kval(as, ir->op1); |
| 212 | ref = ir->op2; | 213 | ref = ir->op2; |
| 213 | } | 214 | } |
| 214 | if (!checki16(ofs2)) { | 215 | if (!checki16(ofs2)) { |
| @@ -252,7 +253,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 252 | #if !LJ_SOFTFP | 253 | #if !LJ_SOFTFP |
| 253 | if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && | 254 | if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && |
| 254 | !(ci->flags & CCI_VARARG)) { | 255 | !(ci->flags & CCI_VARARG)) { |
| 255 | lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ | 256 | lj_assertA(rset_test(as->freeset, fpr), |
| 257 | "reg %d not free", fpr); /* Already evicted. */ | ||
| 256 | ra_leftov(as, fpr, ref); | 258 | ra_leftov(as, fpr, ref); |
| 257 | fpr += LJ_32 ? 2 : 1; | 259 | fpr += LJ_32 ? 2 : 1; |
| 258 | gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1; | 260 | gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1; |
| @@ -264,7 +266,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 264 | #endif | 266 | #endif |
| 265 | if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1; | 267 | if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1; |
| 266 | if (gpr <= REGARG_LASTGPR) { | 268 | if (gpr <= REGARG_LASTGPR) { |
| 267 | lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ | 269 | lj_assertA(rset_test(as->freeset, gpr), |
| 270 | "reg %d not free", gpr); /* Already evicted. */ | ||
| 268 | #if !LJ_SOFTFP | 271 | #if !LJ_SOFTFP |
| 269 | if (irt_isfp(ir->t)) { | 272 | if (irt_isfp(ir->t)) { |
| 270 | RegSet of = as->freeset; | 273 | RegSet of = as->freeset; |
| @@ -277,7 +280,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 277 | #if LJ_32 | 280 | #if LJ_32 |
| 278 | emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); | 281 | emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); |
| 279 | emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); | 282 | emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); |
| 280 | lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */ | 283 | lj_assertA(rset_test(as->freeset, gpr+1), |
| 284 | "reg %d not free", gpr+1); /* Already evicted. */ | ||
| 281 | gpr += 2; | 285 | gpr += 2; |
| 282 | #else | 286 | #else |
| 283 | emit_tg(as, MIPSI_DMFC1, gpr, r); | 287 | emit_tg(as, MIPSI_DMFC1, gpr, r); |
| @@ -347,7 +351,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | |||
| 347 | #endif | 351 | #endif |
| 348 | ra_evictset(as, drop); /* Evictions must be performed first. */ | 352 | ra_evictset(as, drop); /* Evictions must be performed first. */ |
| 349 | if (ra_used(ir)) { | 353 | if (ra_used(ir)) { |
| 350 | lua_assert(!irt_ispri(ir->t)); | 354 | lj_assertA(!irt_ispri(ir->t), "PRI dest"); |
| 351 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { | 355 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { |
| 352 | if ((ci->flags & CCI_CASTU64)) { | 356 | if ((ci->flags & CCI_CASTU64)) { |
| 353 | int32_t ofs = sps_scale(ir->s); | 357 | int32_t ofs = sps_scale(ir->s); |
| @@ -395,7 +399,7 @@ static void asm_callx(ASMState *as, IRIns *ir) | |||
| 395 | func = ir->op2; irf = IR(func); | 399 | func = ir->op2; irf = IR(func); |
| 396 | if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } | 400 | if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } |
| 397 | if (irref_isk(func)) { /* Call to constant address. */ | 401 | if (irref_isk(func)) { /* Call to constant address. */ |
| 398 | ci.func = (ASMFunction)(void *)get_kval(irf); | 402 | ci.func = (ASMFunction)(void *)get_kval(as, func); |
| 399 | } else { /* Need specific register for indirect calls. */ | 403 | } else { /* Need specific register for indirect calls. */ |
| 400 | Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); | 404 | Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); |
| 401 | MCode *p = as->mcp; | 405 | MCode *p = as->mcp; |
| @@ -512,15 +516,19 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 512 | #endif | 516 | #endif |
| 513 | IRRef lref = ir->op1; | 517 | IRRef lref = ir->op1; |
| 514 | #if LJ_32 | 518 | #if LJ_32 |
| 515 | lua_assert(!(irt_isint64(ir->t) || | 519 | /* 64 bit integer conversions are handled by SPLIT. */ |
| 516 | (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ | 520 | lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)), |
| 521 | "IR %04d has unsplit 64 bit type", | ||
| 522 | (int)(ir - as->ir) - REF_BIAS); | ||
| 517 | #endif | 523 | #endif |
| 518 | #if LJ_SOFTFP32 | 524 | #if LJ_SOFTFP32 |
| 519 | /* FP conversions are handled by SPLIT. */ | 525 | /* FP conversions are handled by SPLIT. */ |
| 520 | lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); | 526 | lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT), |
| 527 | "IR %04d has FP type", | ||
| 528 | (int)(ir - as->ir) - REF_BIAS); | ||
| 521 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ | 529 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ |
| 522 | #else | 530 | #else |
| 523 | lua_assert(irt_type(ir->t) != st); | 531 | lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV"); |
| 524 | #if !LJ_SOFTFP | 532 | #if !LJ_SOFTFP |
| 525 | if (irt_isfp(ir->t)) { | 533 | if (irt_isfp(ir->t)) { |
| 526 | Reg dest = ra_dest(as, ir, RSET_FPR); | 534 | Reg dest = ra_dest(as, ir, RSET_FPR); |
| @@ -579,7 +587,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 579 | } else if (stfp) { /* FP to integer conversion. */ | 587 | } else if (stfp) { /* FP to integer conversion. */ |
| 580 | if (irt_isguard(ir->t)) { | 588 | if (irt_isguard(ir->t)) { |
| 581 | /* Checked conversions are only supported from number to int. */ | 589 | /* Checked conversions are only supported from number to int. */ |
| 582 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 590 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 591 | "bad type for checked CONV"); | ||
| 583 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | 592 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); |
| 584 | } else { | 593 | } else { |
| 585 | Reg dest = ra_dest(as, ir, RSET_GPR); | 594 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| @@ -679,7 +688,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 679 | } else if (stfp) { /* FP to integer conversion. */ | 688 | } else if (stfp) { /* FP to integer conversion. */ |
| 680 | if (irt_isguard(ir->t)) { | 689 | if (irt_isguard(ir->t)) { |
| 681 | /* Checked conversions are only supported from number to int. */ | 690 | /* Checked conversions are only supported from number to int. */ |
| 682 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 691 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 692 | "bad type for checked CONV"); | ||
| 683 | asm_tointg(as, ir, RID_NONE); | 693 | asm_tointg(as, ir, RID_NONE); |
| 684 | } else { | 694 | } else { |
| 685 | IRCallID cid = irt_is64(ir->t) ? | 695 | IRCallID cid = irt_is64(ir->t) ? |
| @@ -698,7 +708,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 698 | Reg dest = ra_dest(as, ir, RSET_GPR); | 708 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 699 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ | 709 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ |
| 700 | Reg left = ra_alloc1(as, ir->op1, RSET_GPR); | 710 | Reg left = ra_alloc1(as, ir->op1, RSET_GPR); |
| 701 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | 711 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT"); |
| 702 | if ((ir->op2 & IRCONV_SEXT)) { | 712 | if ((ir->op2 & IRCONV_SEXT)) { |
| 703 | if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) { | 713 | if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) { |
| 704 | emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); | 714 | emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); |
| @@ -795,7 +805,8 @@ static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref) | |||
| 795 | { | 805 | { |
| 796 | RegSet allow = rset_exclude(RSET_GPR, base); | 806 | RegSet allow = rset_exclude(RSET_GPR, base); |
| 797 | IRIns *ir = IR(ref); | 807 | IRIns *ir = IR(ref); |
| 798 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); | 808 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t), |
| 809 | "store of IR type %d", irt_type(ir->t)); | ||
| 799 | if (irref_isk(ref)) { | 810 | if (irref_isk(ref)) { |
| 800 | TValue k; | 811 | TValue k; |
| 801 | lj_ir_kvalue(as->J->L, &k, ir); | 812 | lj_ir_kvalue(as->J->L, &k, ir); |
| @@ -944,7 +955,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 944 | if (isk && irt_isaddr(kt)) { | 955 | if (isk && irt_isaddr(kt)) { |
| 945 | k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64; | 956 | k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64; |
| 946 | } else { | 957 | } else { |
| 947 | lua_assert(irt_ispri(kt) && !irt_isnil(kt)); | 958 | lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type"); |
| 948 | k = ~((int64_t)~irt_toitype(ir->t) << 47); | 959 | k = ~((int64_t)~irt_toitype(ir->t) << 47); |
| 949 | } | 960 | } |
| 950 | cmp64 = ra_allock(as, k, allow); | 961 | cmp64 = ra_allock(as, k, allow); |
| @@ -1012,7 +1023,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 1012 | #endif | 1023 | #endif |
| 1013 | 1024 | ||
| 1014 | /* Load main position relative to tab->node into dest. */ | 1025 | /* Load main position relative to tab->node into dest. */ |
| 1015 | khash = isk ? ir_khash(irkey) : 1; | 1026 | khash = isk ? ir_khash(as, irkey) : 1; |
| 1016 | if (khash == 0) { | 1027 | if (khash == 0) { |
| 1017 | emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node)); | 1028 | emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node)); |
| 1018 | } else { | 1029 | } else { |
| @@ -1020,7 +1031,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 1020 | if (isk) | 1031 | if (isk) |
| 1021 | tmphash = ra_allock(as, khash, allow); | 1032 | tmphash = ra_allock(as, khash, allow); |
| 1022 | emit_dst(as, MIPSI_AADDU, dest, dest, tmp1); | 1033 | emit_dst(as, MIPSI_AADDU, dest, dest, tmp1); |
| 1023 | lua_assert(sizeof(Node) == 24); | 1034 | lj_assertA(sizeof(Node) == 24, "bad Node size"); |
| 1024 | emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); | 1035 | emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); |
| 1025 | emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); | 1036 | emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); |
| 1026 | emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); | 1037 | emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); |
| @@ -1098,7 +1109,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 1098 | Reg key = ra_scratch(as, allow); | 1109 | Reg key = ra_scratch(as, allow); |
| 1099 | int64_t k; | 1110 | int64_t k; |
| 1100 | #endif | 1111 | #endif |
| 1101 | lua_assert(ofs % sizeof(Node) == 0); | 1112 | lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot"); |
| 1102 | if (ofs > 32736) { | 1113 | if (ofs > 32736) { |
| 1103 | idx = dest; | 1114 | idx = dest; |
| 1104 | rset_clear(allow, dest); | 1115 | rset_clear(allow, dest); |
| @@ -1127,7 +1138,7 @@ nolo: | |||
| 1127 | emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); | 1138 | emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); |
| 1128 | #else | 1139 | #else |
| 1129 | if (irt_ispri(irkey->t)) { | 1140 | if (irt_ispri(irkey->t)) { |
| 1130 | lua_assert(!irt_isnil(irkey->t)); | 1141 | lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type"); |
| 1131 | k = ~((int64_t)~irt_toitype(irkey->t) << 47); | 1142 | k = ~((int64_t)~irt_toitype(irkey->t) << 47); |
| 1132 | } else if (irt_isnum(irkey->t)) { | 1143 | } else if (irt_isnum(irkey->t)) { |
| 1133 | k = (int64_t)ir_knum(irkey)->u64; | 1144 | k = (int64_t)ir_knum(irkey)->u64; |
| @@ -1166,7 +1177,7 @@ static void asm_uref(ASMState *as, IRIns *ir) | |||
| 1166 | static void asm_fref(ASMState *as, IRIns *ir) | 1177 | static void asm_fref(ASMState *as, IRIns *ir) |
| 1167 | { | 1178 | { |
| 1168 | UNUSED(as); UNUSED(ir); | 1179 | UNUSED(as); UNUSED(ir); |
| 1169 | lua_assert(!ra_used(ir)); | 1180 | lj_assertA(!ra_used(ir), "unfused FREF"); |
| 1170 | } | 1181 | } |
| 1171 | 1182 | ||
| 1172 | static void asm_strref(ASMState *as, IRIns *ir) | 1183 | static void asm_strref(ASMState *as, IRIns *ir) |
| @@ -1221,14 +1232,17 @@ static void asm_strref(ASMState *as, IRIns *ir) | |||
| 1221 | 1232 | ||
| 1222 | /* -- Loads and stores ---------------------------------------------------- */ | 1233 | /* -- Loads and stores ---------------------------------------------------- */ |
| 1223 | 1234 | ||
| 1224 | static MIPSIns asm_fxloadins(IRIns *ir) | 1235 | static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir) |
| 1225 | { | 1236 | { |
| 1237 | UNUSED(as); | ||
| 1226 | switch (irt_type(ir->t)) { | 1238 | switch (irt_type(ir->t)) { |
| 1227 | case IRT_I8: return MIPSI_LB; | 1239 | case IRT_I8: return MIPSI_LB; |
| 1228 | case IRT_U8: return MIPSI_LBU; | 1240 | case IRT_U8: return MIPSI_LBU; |
| 1229 | case IRT_I16: return MIPSI_LH; | 1241 | case IRT_I16: return MIPSI_LH; |
| 1230 | case IRT_U16: return MIPSI_LHU; | 1242 | case IRT_U16: return MIPSI_LHU; |
| 1231 | case IRT_NUM: lua_assert(!LJ_SOFTFP32); if (!LJ_SOFTFP) return MIPSI_LDC1; | 1243 | case IRT_NUM: |
| 1244 | lj_assertA(!LJ_SOFTFP32, "unsplit FP op"); | ||
| 1245 | if (!LJ_SOFTFP) return MIPSI_LDC1; | ||
| 1232 | /* fallthrough */ | 1246 | /* fallthrough */ |
| 1233 | case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1; | 1247 | case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1; |
| 1234 | /* fallthrough */ | 1248 | /* fallthrough */ |
| @@ -1236,12 +1250,15 @@ static MIPSIns asm_fxloadins(IRIns *ir) | |||
| 1236 | } | 1250 | } |
| 1237 | } | 1251 | } |
| 1238 | 1252 | ||
| 1239 | static MIPSIns asm_fxstoreins(IRIns *ir) | 1253 | static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir) |
| 1240 | { | 1254 | { |
| 1255 | UNUSED(as); | ||
| 1241 | switch (irt_type(ir->t)) { | 1256 | switch (irt_type(ir->t)) { |
| 1242 | case IRT_I8: case IRT_U8: return MIPSI_SB; | 1257 | case IRT_I8: case IRT_U8: return MIPSI_SB; |
| 1243 | case IRT_I16: case IRT_U16: return MIPSI_SH; | 1258 | case IRT_I16: case IRT_U16: return MIPSI_SH; |
| 1244 | case IRT_NUM: lua_assert(!LJ_SOFTFP32); if (!LJ_SOFTFP) return MIPSI_SDC1; | 1259 | case IRT_NUM: |
| 1260 | lj_assertA(!LJ_SOFTFP32, "unsplit FP op"); | ||
| 1261 | if (!LJ_SOFTFP) return MIPSI_SDC1; | ||
| 1245 | /* fallthrough */ | 1262 | /* fallthrough */ |
| 1246 | case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1; | 1263 | case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1; |
| 1247 | /* fallthrough */ | 1264 | /* fallthrough */ |
| @@ -1252,10 +1269,10 @@ static MIPSIns asm_fxstoreins(IRIns *ir) | |||
| 1252 | static void asm_fload(ASMState *as, IRIns *ir) | 1269 | static void asm_fload(ASMState *as, IRIns *ir) |
| 1253 | { | 1270 | { |
| 1254 | Reg dest = ra_dest(as, ir, RSET_GPR); | 1271 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 1255 | MIPSIns mi = asm_fxloadins(ir); | 1272 | MIPSIns mi = asm_fxloadins(as, ir); |
| 1256 | Reg idx; | 1273 | Reg idx; |
| 1257 | int32_t ofs; | 1274 | int32_t ofs; |
| 1258 | if (ir->op1 == REF_NIL) { | 1275 | if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */ |
| 1259 | idx = RID_JGL; | 1276 | idx = RID_JGL; |
| 1260 | ofs = (ir->op2 << 2) - 32768 - GG_OFS(g); | 1277 | ofs = (ir->op2 << 2) - 32768 - GG_OFS(g); |
| 1261 | } else { | 1278 | } else { |
| @@ -1269,7 +1286,7 @@ static void asm_fload(ASMState *as, IRIns *ir) | |||
| 1269 | } | 1286 | } |
| 1270 | ofs = field_ofs[ir->op2]; | 1287 | ofs = field_ofs[ir->op2]; |
| 1271 | } | 1288 | } |
| 1272 | lua_assert(!irt_isfp(ir->t)); | 1289 | lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD"); |
| 1273 | emit_tsi(as, mi, dest, idx, ofs); | 1290 | emit_tsi(as, mi, dest, idx, ofs); |
| 1274 | } | 1291 | } |
| 1275 | 1292 | ||
| @@ -1280,8 +1297,8 @@ static void asm_fstore(ASMState *as, IRIns *ir) | |||
| 1280 | IRIns *irf = IR(ir->op1); | 1297 | IRIns *irf = IR(ir->op1); |
| 1281 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); | 1298 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); |
| 1282 | int32_t ofs = field_ofs[irf->op2]; | 1299 | int32_t ofs = field_ofs[irf->op2]; |
| 1283 | MIPSIns mi = asm_fxstoreins(ir); | 1300 | MIPSIns mi = asm_fxstoreins(as, ir); |
| 1284 | lua_assert(!irt_isfp(ir->t)); | 1301 | lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE"); |
| 1285 | emit_tsi(as, mi, src, idx, ofs); | 1302 | emit_tsi(as, mi, src, idx, ofs); |
| 1286 | } | 1303 | } |
| 1287 | } | 1304 | } |
| @@ -1290,8 +1307,9 @@ static void asm_xload(ASMState *as, IRIns *ir) | |||
| 1290 | { | 1307 | { |
| 1291 | Reg dest = ra_dest(as, ir, | 1308 | Reg dest = ra_dest(as, ir, |
| 1292 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 1309 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 1293 | lua_assert(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED)); | 1310 | lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED), |
| 1294 | asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); | 1311 | "unaligned XLOAD"); |
| 1312 | asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0); | ||
| 1295 | } | 1313 | } |
| 1296 | 1314 | ||
| 1297 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | 1315 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) |
| @@ -1299,7 +1317,7 @@ static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | |||
| 1299 | if (ir->r != RID_SINK) { | 1317 | if (ir->r != RID_SINK) { |
| 1300 | Reg src = ra_alloc1z(as, ir->op2, | 1318 | Reg src = ra_alloc1z(as, ir->op2, |
| 1301 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 1319 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 1302 | asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, | 1320 | asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1, |
| 1303 | rset_exclude(RSET_GPR, src), ofs); | 1321 | rset_exclude(RSET_GPR, src), ofs); |
| 1304 | } | 1322 | } |
| 1305 | } | 1323 | } |
| @@ -1321,8 +1339,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1321 | } | 1339 | } |
| 1322 | } | 1340 | } |
| 1323 | if (ra_used(ir)) { | 1341 | if (ra_used(ir)) { |
| 1324 | lua_assert((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) || | 1342 | lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) || |
| 1325 | irt_isint(ir->t) || irt_isaddr(ir->t)); | 1343 | irt_isint(ir->t) || irt_isaddr(ir->t), |
| 1344 | "bad load type %d", irt_type(ir->t)); | ||
| 1326 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); | 1345 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); |
| 1327 | rset_clear(allow, dest); | 1346 | rset_clear(allow, dest); |
| 1328 | #if LJ_64 | 1347 | #if LJ_64 |
| @@ -1427,10 +1446,13 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1427 | #else | 1446 | #else |
| 1428 | int32_t ofs = 8*((int32_t)ir->op1-2); | 1447 | int32_t ofs = 8*((int32_t)ir->op1-2); |
| 1429 | #endif | 1448 | #endif |
| 1430 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | 1449 | lj_assertA(!(ir->op2 & IRSLOAD_PARENT), |
| 1431 | lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | 1450 | "bad parent SLOAD"); /* Handled by asm_head_side(). */ |
| 1451 | lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK), | ||
| 1452 | "inconsistent SLOAD variant"); | ||
| 1432 | #if LJ_SOFTFP32 | 1453 | #if LJ_SOFTFP32 |
| 1433 | lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ | 1454 | lj_assertA(!(ir->op2 & IRSLOAD_CONVERT), |
| 1455 | "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */ | ||
| 1434 | if (hiop && ra_used(ir+1)) { | 1456 | if (hiop && ra_used(ir+1)) { |
| 1435 | type = ra_dest(as, ir+1, allow); | 1457 | type = ra_dest(as, ir+1, allow); |
| 1436 | rset_clear(allow, type); | 1458 | rset_clear(allow, type); |
| @@ -1443,8 +1465,9 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1443 | } else | 1465 | } else |
| 1444 | #endif | 1466 | #endif |
| 1445 | if (ra_used(ir)) { | 1467 | if (ra_used(ir)) { |
| 1446 | lua_assert((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) || | 1468 | lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) || |
| 1447 | irt_isint(ir->t) || irt_isaddr(ir->t)); | 1469 | irt_isint(ir->t) || irt_isaddr(ir->t), |
| 1470 | "bad SLOAD type %d", irt_type(ir->t)); | ||
| 1448 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); | 1471 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); |
| 1449 | rset_clear(allow, dest); | 1472 | rset_clear(allow, dest); |
| 1450 | base = ra_alloc1(as, REF_BASE, allow); | 1473 | base = ra_alloc1(as, REF_BASE, allow); |
| @@ -1554,7 +1577,8 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1554 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | 1577 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; |
| 1555 | IRRef args[4]; | 1578 | IRRef args[4]; |
| 1556 | RegSet drop = RSET_SCRATCH; | 1579 | RegSet drop = RSET_SCRATCH; |
| 1557 | lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); | 1580 | lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), |
| 1581 | "bad CNEW/CNEWI operands"); | ||
| 1558 | 1582 | ||
| 1559 | as->gcsteps++; | 1583 | as->gcsteps++; |
| 1560 | if (ra_hasreg(ir->r)) | 1584 | if (ra_hasreg(ir->r)) |
| @@ -1570,7 +1594,7 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1570 | int32_t ofs = sizeof(GCcdata); | 1594 | int32_t ofs = sizeof(GCcdata); |
| 1571 | if (sz == 8) { | 1595 | if (sz == 8) { |
| 1572 | ofs += 4; | 1596 | ofs += 4; |
| 1573 | lua_assert((ir+1)->o == IR_HIOP); | 1597 | lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI"); |
| 1574 | if (LJ_LE) ir++; | 1598 | if (LJ_LE) ir++; |
| 1575 | } | 1599 | } |
| 1576 | for (;;) { | 1600 | for (;;) { |
| @@ -1584,7 +1608,7 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1584 | emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow), | 1608 | emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow), |
| 1585 | RID_RET, sizeof(GCcdata)); | 1609 | RID_RET, sizeof(GCcdata)); |
| 1586 | #endif | 1610 | #endif |
| 1587 | lua_assert(sz == 4 || sz == 8); | 1611 | lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); |
| 1588 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ | 1612 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ |
| 1589 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; | 1613 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; |
| 1590 | args[0] = ASMREF_L; /* lua_State *L */ | 1614 | args[0] = ASMREF_L; /* lua_State *L */ |
| @@ -1634,7 +1658,7 @@ static void asm_obar(ASMState *as, IRIns *ir) | |||
| 1634 | MCLabel l_end; | 1658 | MCLabel l_end; |
| 1635 | Reg obj, val, tmp; | 1659 | Reg obj, val, tmp; |
| 1636 | /* No need for other object barriers (yet). */ | 1660 | /* No need for other object barriers (yet). */ |
| 1637 | lua_assert(IR(ir->op1)->o == IR_UREFC); | 1661 | lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); |
| 1638 | ra_evictset(as, RSET_SCRATCH); | 1662 | ra_evictset(as, RSET_SCRATCH); |
| 1639 | l_end = emit_label(as); | 1663 | l_end = emit_label(as); |
| 1640 | args[0] = ASMREF_TMP1; /* global_State *g */ | 1664 | args[0] = ASMREF_TMP1; /* global_State *g */ |
| @@ -1709,7 +1733,7 @@ static void asm_add(ASMState *as, IRIns *ir) | |||
| 1709 | Reg dest = ra_dest(as, ir, RSET_GPR); | 1733 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 1710 | Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); | 1734 | Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); |
| 1711 | if (irref_isk(ir->op2)) { | 1735 | if (irref_isk(ir->op2)) { |
| 1712 | intptr_t k = get_kval(IR(ir->op2)); | 1736 | intptr_t k = get_kval(as, ir->op2); |
| 1713 | if (checki16(k)) { | 1737 | if (checki16(k)) { |
| 1714 | emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest, | 1738 | emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest, |
| 1715 | left, k); | 1739 | left, k); |
| @@ -1810,7 +1834,7 @@ static void asm_arithov(ASMState *as, IRIns *ir) | |||
| 1810 | { | 1834 | { |
| 1811 | /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */ | 1835 | /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */ |
| 1812 | Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); | 1836 | Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); |
| 1813 | lua_assert(!irt_is64(ir->t)); | 1837 | lj_assertA(!irt_is64(ir->t), "bad usage"); |
| 1814 | if (irref_isk(ir->op2)) { | 1838 | if (irref_isk(ir->op2)) { |
| 1815 | int k = IR(ir->op2)->i; | 1839 | int k = IR(ir->op2)->i; |
| 1816 | if (ir->o == IR_SUBOV) k = -k; | 1840 | if (ir->o == IR_SUBOV) k = -k; |
| @@ -1997,7 +2021,7 @@ static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) | |||
| 1997 | Reg dest = ra_dest(as, ir, RSET_GPR); | 2021 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 1998 | Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); | 2022 | Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); |
| 1999 | if (irref_isk(ir->op2)) { | 2023 | if (irref_isk(ir->op2)) { |
| 2000 | intptr_t k = get_kval(IR(ir->op2)); | 2024 | intptr_t k = get_kval(as, ir->op2); |
| 2001 | if (checku16(k)) { | 2025 | if (checku16(k)) { |
| 2002 | emit_tsi(as, mik, dest, left, k); | 2026 | emit_tsi(as, mik, dest, left, k); |
| 2003 | return; | 2027 | return; |
| @@ -2030,7 +2054,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) | |||
| 2030 | #define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) | 2054 | #define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) |
| 2031 | #define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) | 2055 | #define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) |
| 2032 | #define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) | 2056 | #define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) |
| 2033 | #define asm_brol(as, ir) lua_assert(0) | 2057 | #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL") |
| 2034 | 2058 | ||
| 2035 | static void asm_bror(ASMState *as, IRIns *ir) | 2059 | static void asm_bror(ASMState *as, IRIns *ir) |
| 2036 | { | 2060 | { |
| @@ -2222,13 +2246,13 @@ static void asm_comp(ASMState *as, IRIns *ir) | |||
| 2222 | } else { | 2246 | } else { |
| 2223 | Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); | 2247 | Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); |
| 2224 | if (op == IR_ABC) op = IR_UGT; | 2248 | if (op == IR_ABC) op = IR_UGT; |
| 2225 | if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(IR(ir->op2)) == 0) { | 2249 | if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) { |
| 2226 | MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : | 2250 | MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : |
| 2227 | ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); | 2251 | ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); |
| 2228 | asm_guard(as, mi, left, 0); | 2252 | asm_guard(as, mi, left, 0); |
| 2229 | } else { | 2253 | } else { |
| 2230 | if (irref_isk(ir->op2)) { | 2254 | if (irref_isk(ir->op2)) { |
| 2231 | intptr_t k = get_kval(IR(ir->op2)); | 2255 | intptr_t k = get_kval(as, ir->op2); |
| 2232 | if ((op&2)) k++; | 2256 | if ((op&2)) k++; |
| 2233 | if (checki16(k)) { | 2257 | if (checki16(k)) { |
| 2234 | asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); | 2258 | asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); |
| @@ -2384,10 +2408,11 @@ static void asm_hiop(ASMState *as, IRIns *ir) | |||
| 2384 | case IR_CNEWI: | 2408 | case IR_CNEWI: |
| 2385 | /* Nothing to do here. Handled by lo op itself. */ | 2409 | /* Nothing to do here. Handled by lo op itself. */ |
| 2386 | break; | 2410 | break; |
| 2387 | default: lua_assert(0); break; | 2411 | default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break; |
| 2388 | } | 2412 | } |
| 2389 | #else | 2413 | #else |
| 2390 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ | 2414 | /* Unused on MIPS64 or without SOFTFP or FFI. */ |
| 2415 | UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP"); | ||
| 2391 | #endif | 2416 | #endif |
| 2392 | } | 2417 | } |
| 2393 | 2418 | ||
| @@ -2456,7 +2481,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2456 | #if LJ_SOFTFP32 | 2481 | #if LJ_SOFTFP32 |
| 2457 | Reg tmp; | 2482 | Reg tmp; |
| 2458 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); | 2483 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); |
| 2459 | lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ | 2484 | /* LJ_SOFTFP: must be a number constant. */ |
| 2485 | lj_assertA(irref_isk(ref), "unsplit FP op"); | ||
| 2460 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow); | 2486 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow); |
| 2461 | emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0)); | 2487 | emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0)); |
| 2462 | if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1); | 2488 | if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1); |
| @@ -2473,7 +2499,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2473 | #if LJ_32 | 2499 | #if LJ_32 |
| 2474 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); | 2500 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); |
| 2475 | Reg type; | 2501 | Reg type; |
| 2476 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); | 2502 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t), |
| 2503 | "restore of IR type %d", irt_type(ir->t)); | ||
| 2477 | if (!irt_ispri(ir->t)) { | 2504 | if (!irt_ispri(ir->t)) { |
| 2478 | Reg src = ra_alloc1(as, ref, allow); | 2505 | Reg src = ra_alloc1(as, ref, allow); |
| 2479 | rset_clear(allow, src); | 2506 | rset_clear(allow, src); |
| @@ -2496,7 +2523,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2496 | } | 2523 | } |
| 2497 | checkmclim(as); | 2524 | checkmclim(as); |
| 2498 | } | 2525 | } |
| 2499 | lua_assert(map + nent == flinks); | 2526 | lj_assertA(map + nent == flinks, "inconsistent frames in snapshot"); |
| 2500 | } | 2527 | } |
| 2501 | 2528 | ||
| 2502 | /* -- GC handling --------------------------------------------------------- */ | 2529 | /* -- GC handling --------------------------------------------------------- */ |
| @@ -2694,7 +2721,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 2694 | } | 2721 | } |
| 2695 | } else if (p+1 == pe) { | 2722 | } else if (p+1 == pe) { |
| 2696 | /* Patch NOP after code for inverted loop branch. Use of J is ok. */ | 2723 | /* Patch NOP after code for inverted loop branch. Use of J is ok. */ |
| 2697 | lua_assert(p[1] == MIPSI_NOP); | 2724 | lj_assertJ(p[1] == MIPSI_NOP, "expected NOP"); |
| 2698 | p[1] = tjump; | 2725 | p[1] = tjump; |
| 2699 | *p = MIPSI_NOP; /* Replace the load of the exit number. */ | 2726 | *p = MIPSI_NOP; /* Replace the load of the exit number. */ |
| 2700 | cstop = p+2; | 2727 | cstop = p+2; |
diff --git a/src/lj_asm_ppc.h b/src/lj_asm_ppc.h index c15b89fe..498fdac3 100644 --- a/src/lj_asm_ppc.h +++ b/src/lj_asm_ppc.h | |||
| @@ -181,7 +181,7 @@ static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref, | |||
| 181 | return; | 181 | return; |
| 182 | } | 182 | } |
| 183 | } else if (ir->o == IR_STRREF) { | 183 | } else if (ir->o == IR_STRREF) { |
| 184 | lua_assert(ofs == 0); | 184 | lj_assertA(ofs == 0, "bad usage"); |
| 185 | ofs = (int32_t)sizeof(GCstr); | 185 | ofs = (int32_t)sizeof(GCstr); |
| 186 | if (irref_isk(ir->op2)) { | 186 | if (irref_isk(ir->op2)) { |
| 187 | ofs += IR(ir->op2)->i; | 187 | ofs += IR(ir->op2)->i; |
| @@ -268,7 +268,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 268 | #if !LJ_SOFTFP | 268 | #if !LJ_SOFTFP |
| 269 | if (irt_isfp(ir->t)) { | 269 | if (irt_isfp(ir->t)) { |
| 270 | if (fpr <= REGARG_LASTFPR) { | 270 | if (fpr <= REGARG_LASTFPR) { |
| 271 | lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ | 271 | lj_assertA(rset_test(as->freeset, fpr), |
| 272 | "reg %d not free", fpr); /* Already evicted. */ | ||
| 272 | ra_leftov(as, fpr, ref); | 273 | ra_leftov(as, fpr, ref); |
| 273 | fpr++; | 274 | fpr++; |
| 274 | } else { | 275 | } else { |
| @@ -281,7 +282,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 281 | #endif | 282 | #endif |
| 282 | { | 283 | { |
| 283 | if (gpr <= REGARG_LASTGPR) { | 284 | if (gpr <= REGARG_LASTGPR) { |
| 284 | lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ | 285 | lj_assertA(rset_test(as->freeset, gpr), |
| 286 | "reg %d not free", gpr); /* Already evicted. */ | ||
| 285 | ra_leftov(as, gpr, ref); | 287 | ra_leftov(as, gpr, ref); |
| 286 | gpr++; | 288 | gpr++; |
| 287 | } else { | 289 | } else { |
| @@ -319,7 +321,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | |||
| 319 | rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ | 321 | rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */ |
| 320 | ra_evictset(as, drop); /* Evictions must be performed first. */ | 322 | ra_evictset(as, drop); /* Evictions must be performed first. */ |
| 321 | if (ra_used(ir)) { | 323 | if (ra_used(ir)) { |
| 322 | lua_assert(!irt_ispri(ir->t)); | 324 | lj_assertA(!irt_ispri(ir->t), "PRI dest"); |
| 323 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { | 325 | if (!LJ_SOFTFP && irt_isfp(ir->t)) { |
| 324 | if ((ci->flags & CCI_CASTU64)) { | 326 | if ((ci->flags & CCI_CASTU64)) { |
| 325 | /* Use spill slot or temp slots. */ | 327 | /* Use spill slot or temp slots. */ |
| @@ -431,14 +433,18 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 431 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); | 433 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); |
| 432 | #endif | 434 | #endif |
| 433 | IRRef lref = ir->op1; | 435 | IRRef lref = ir->op1; |
| 434 | lua_assert(!(irt_isint64(ir->t) || | 436 | /* 64 bit integer conversions are handled by SPLIT. */ |
| 435 | (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ | 437 | lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)), |
| 438 | "IR %04d has unsplit 64 bit type", | ||
| 439 | (int)(ir - as->ir) - REF_BIAS); | ||
| 436 | #if LJ_SOFTFP | 440 | #if LJ_SOFTFP |
| 437 | /* FP conversions are handled by SPLIT. */ | 441 | /* FP conversions are handled by SPLIT. */ |
| 438 | lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); | 442 | lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT), |
| 443 | "IR %04d has FP type", | ||
| 444 | (int)(ir - as->ir) - REF_BIAS); | ||
| 439 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ | 445 | /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ |
| 440 | #else | 446 | #else |
| 441 | lua_assert(irt_type(ir->t) != st); | 447 | lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV"); |
| 442 | if (irt_isfp(ir->t)) { | 448 | if (irt_isfp(ir->t)) { |
| 443 | Reg dest = ra_dest(as, ir, RSET_FPR); | 449 | Reg dest = ra_dest(as, ir, RSET_FPR); |
| 444 | if (stfp) { /* FP to FP conversion. */ | 450 | if (stfp) { /* FP to FP conversion. */ |
| @@ -467,7 +473,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 467 | } else if (stfp) { /* FP to integer conversion. */ | 473 | } else if (stfp) { /* FP to integer conversion. */ |
| 468 | if (irt_isguard(ir->t)) { | 474 | if (irt_isguard(ir->t)) { |
| 469 | /* Checked conversions are only supported from number to int. */ | 475 | /* Checked conversions are only supported from number to int. */ |
| 470 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 476 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 477 | "bad type for checked CONV"); | ||
| 471 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | 478 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); |
| 472 | } else { | 479 | } else { |
| 473 | Reg dest = ra_dest(as, ir, RSET_GPR); | 480 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| @@ -503,7 +510,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 503 | Reg dest = ra_dest(as, ir, RSET_GPR); | 510 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 504 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ | 511 | if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */ |
| 505 | Reg left = ra_alloc1(as, ir->op1, RSET_GPR); | 512 | Reg left = ra_alloc1(as, ir->op1, RSET_GPR); |
| 506 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | 513 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT"); |
| 507 | if ((ir->op2 & IRCONV_SEXT)) | 514 | if ((ir->op2 & IRCONV_SEXT)) |
| 508 | emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left); | 515 | emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left); |
| 509 | else | 516 | else |
| @@ -699,7 +706,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 699 | (((char *)as->mcp-(char *)l_loop) & 0xffffu); | 706 | (((char *)as->mcp-(char *)l_loop) & 0xffffu); |
| 700 | 707 | ||
| 701 | /* Load main position relative to tab->node into dest. */ | 708 | /* Load main position relative to tab->node into dest. */ |
| 702 | khash = isk ? ir_khash(irkey) : 1; | 709 | khash = isk ? ir_khash(as, irkey) : 1; |
| 703 | if (khash == 0) { | 710 | if (khash == 0) { |
| 704 | emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); | 711 | emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node)); |
| 705 | } else { | 712 | } else { |
| @@ -754,7 +761,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 754 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); | 761 | Reg node = ra_alloc1(as, ir->op1, RSET_GPR); |
| 755 | Reg key = RID_NONE, type = RID_TMP, idx = node; | 762 | Reg key = RID_NONE, type = RID_TMP, idx = node; |
| 756 | RegSet allow = rset_exclude(RSET_GPR, node); | 763 | RegSet allow = rset_exclude(RSET_GPR, node); |
| 757 | lua_assert(ofs % sizeof(Node) == 0); | 764 | lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot"); |
| 758 | if (ofs > 32736) { | 765 | if (ofs > 32736) { |
| 759 | idx = dest; | 766 | idx = dest; |
| 760 | rset_clear(allow, dest); | 767 | rset_clear(allow, dest); |
| @@ -813,7 +820,7 @@ static void asm_uref(ASMState *as, IRIns *ir) | |||
| 813 | static void asm_fref(ASMState *as, IRIns *ir) | 820 | static void asm_fref(ASMState *as, IRIns *ir) |
| 814 | { | 821 | { |
| 815 | UNUSED(as); UNUSED(ir); | 822 | UNUSED(as); UNUSED(ir); |
| 816 | lua_assert(!ra_used(ir)); | 823 | lj_assertA(!ra_used(ir), "unfused FREF"); |
| 817 | } | 824 | } |
| 818 | 825 | ||
| 819 | static void asm_strref(ASMState *as, IRIns *ir) | 826 | static void asm_strref(ASMState *as, IRIns *ir) |
| @@ -853,25 +860,27 @@ static void asm_strref(ASMState *as, IRIns *ir) | |||
| 853 | 860 | ||
| 854 | /* -- Loads and stores ---------------------------------------------------- */ | 861 | /* -- Loads and stores ---------------------------------------------------- */ |
| 855 | 862 | ||
| 856 | static PPCIns asm_fxloadins(IRIns *ir) | 863 | static PPCIns asm_fxloadins(ASMState *as, IRIns *ir) |
| 857 | { | 864 | { |
| 865 | UNUSED(as); | ||
| 858 | switch (irt_type(ir->t)) { | 866 | switch (irt_type(ir->t)) { |
| 859 | case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */ | 867 | case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */ |
| 860 | case IRT_U8: return PPCI_LBZ; | 868 | case IRT_U8: return PPCI_LBZ; |
| 861 | case IRT_I16: return PPCI_LHA; | 869 | case IRT_I16: return PPCI_LHA; |
| 862 | case IRT_U16: return PPCI_LHZ; | 870 | case IRT_U16: return PPCI_LHZ; |
| 863 | case IRT_NUM: lua_assert(!LJ_SOFTFP); return PPCI_LFD; | 871 | case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_LFD; |
| 864 | case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS; | 872 | case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS; |
| 865 | default: return PPCI_LWZ; | 873 | default: return PPCI_LWZ; |
| 866 | } | 874 | } |
| 867 | } | 875 | } |
| 868 | 876 | ||
| 869 | static PPCIns asm_fxstoreins(IRIns *ir) | 877 | static PPCIns asm_fxstoreins(ASMState *as, IRIns *ir) |
| 870 | { | 878 | { |
| 879 | UNUSED(as); | ||
| 871 | switch (irt_type(ir->t)) { | 880 | switch (irt_type(ir->t)) { |
| 872 | case IRT_I8: case IRT_U8: return PPCI_STB; | 881 | case IRT_I8: case IRT_U8: return PPCI_STB; |
| 873 | case IRT_I16: case IRT_U16: return PPCI_STH; | 882 | case IRT_I16: case IRT_U16: return PPCI_STH; |
| 874 | case IRT_NUM: lua_assert(!LJ_SOFTFP); return PPCI_STFD; | 883 | case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_STFD; |
| 875 | case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS; | 884 | case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS; |
| 876 | default: return PPCI_STW; | 885 | default: return PPCI_STW; |
| 877 | } | 886 | } |
| @@ -880,10 +889,10 @@ static PPCIns asm_fxstoreins(IRIns *ir) | |||
| 880 | static void asm_fload(ASMState *as, IRIns *ir) | 889 | static void asm_fload(ASMState *as, IRIns *ir) |
| 881 | { | 890 | { |
| 882 | Reg dest = ra_dest(as, ir, RSET_GPR); | 891 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| 883 | PPCIns pi = asm_fxloadins(ir); | 892 | PPCIns pi = asm_fxloadins(as, ir); |
| 884 | Reg idx; | 893 | Reg idx; |
| 885 | int32_t ofs; | 894 | int32_t ofs; |
| 886 | if (ir->op1 == REF_NIL) { | 895 | if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */ |
| 887 | idx = RID_JGL; | 896 | idx = RID_JGL; |
| 888 | ofs = (ir->op2 << 2) - 32768; | 897 | ofs = (ir->op2 << 2) - 32768; |
| 889 | } else { | 898 | } else { |
| @@ -897,7 +906,7 @@ static void asm_fload(ASMState *as, IRIns *ir) | |||
| 897 | } | 906 | } |
| 898 | ofs = field_ofs[ir->op2]; | 907 | ofs = field_ofs[ir->op2]; |
| 899 | } | 908 | } |
| 900 | lua_assert(!irt_isi8(ir->t)); | 909 | lj_assertA(!irt_isi8(ir->t), "unsupported FLOAD I8"); |
| 901 | emit_tai(as, pi, dest, idx, ofs); | 910 | emit_tai(as, pi, dest, idx, ofs); |
| 902 | } | 911 | } |
| 903 | 912 | ||
| @@ -908,7 +917,7 @@ static void asm_fstore(ASMState *as, IRIns *ir) | |||
| 908 | IRIns *irf = IR(ir->op1); | 917 | IRIns *irf = IR(ir->op1); |
| 909 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); | 918 | Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); |
| 910 | int32_t ofs = field_ofs[irf->op2]; | 919 | int32_t ofs = field_ofs[irf->op2]; |
| 911 | PPCIns pi = asm_fxstoreins(ir); | 920 | PPCIns pi = asm_fxstoreins(as, ir); |
| 912 | emit_tai(as, pi, src, idx, ofs); | 921 | emit_tai(as, pi, src, idx, ofs); |
| 913 | } | 922 | } |
| 914 | } | 923 | } |
| @@ -917,10 +926,10 @@ static void asm_xload(ASMState *as, IRIns *ir) | |||
| 917 | { | 926 | { |
| 918 | Reg dest = ra_dest(as, ir, | 927 | Reg dest = ra_dest(as, ir, |
| 919 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 928 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 920 | lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); | 929 | lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD"); |
| 921 | if (irt_isi8(ir->t)) | 930 | if (irt_isi8(ir->t)) |
| 922 | emit_as(as, PPCI_EXTSB, dest, dest); | 931 | emit_as(as, PPCI_EXTSB, dest, dest); |
| 923 | asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); | 932 | asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0); |
| 924 | } | 933 | } |
| 925 | 934 | ||
| 926 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | 935 | static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) |
| @@ -936,7 +945,7 @@ static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) | |||
| 936 | } else { | 945 | } else { |
| 937 | Reg src = ra_alloc1(as, ir->op2, | 946 | Reg src = ra_alloc1(as, ir->op2, |
| 938 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); | 947 | (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); |
| 939 | asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, | 948 | asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1, |
| 940 | rset_exclude(RSET_GPR, src), ofs); | 949 | rset_exclude(RSET_GPR, src), ofs); |
| 941 | } | 950 | } |
| 942 | } | 951 | } |
| @@ -958,8 +967,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 958 | ofs = 0; | 967 | ofs = 0; |
| 959 | } | 968 | } |
| 960 | if (ra_used(ir)) { | 969 | if (ra_used(ir)) { |
| 961 | lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || | 970 | lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || |
| 962 | irt_isint(ir->t) || irt_isaddr(ir->t)); | 971 | irt_isint(ir->t) || irt_isaddr(ir->t), |
| 972 | "bad load type %d", irt_type(ir->t)); | ||
| 963 | if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0; | 973 | if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0; |
| 964 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); | 974 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); |
| 965 | rset_clear(allow, dest); | 975 | rset_clear(allow, dest); |
| @@ -1042,12 +1052,16 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1042 | int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); | 1052 | int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP); |
| 1043 | if (hiop) | 1053 | if (hiop) |
| 1044 | t.irt = IRT_NUM; | 1054 | t.irt = IRT_NUM; |
| 1045 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | 1055 | lj_assertA(!(ir->op2 & IRSLOAD_PARENT), |
| 1046 | lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | 1056 | "bad parent SLOAD"); /* Handled by asm_head_side(). */ |
| 1047 | lua_assert(LJ_DUALNUM || | 1057 | lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK), |
| 1048 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); | 1058 | "inconsistent SLOAD variant"); |
| 1059 | lj_assertA(LJ_DUALNUM || | ||
| 1060 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)), | ||
| 1061 | "bad SLOAD type"); | ||
| 1049 | #if LJ_SOFTFP | 1062 | #if LJ_SOFTFP |
| 1050 | lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ | 1063 | lj_assertA(!(ir->op2 & IRSLOAD_CONVERT), |
| 1064 | "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */ | ||
| 1051 | if (hiop && ra_used(ir+1)) { | 1065 | if (hiop && ra_used(ir+1)) { |
| 1052 | type = ra_dest(as, ir+1, allow); | 1066 | type = ra_dest(as, ir+1, allow); |
| 1053 | rset_clear(allow, type); | 1067 | rset_clear(allow, type); |
| @@ -1060,7 +1074,8 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1060 | } else | 1074 | } else |
| 1061 | #endif | 1075 | #endif |
| 1062 | if (ra_used(ir)) { | 1076 | if (ra_used(ir)) { |
| 1063 | lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); | 1077 | lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t), |
| 1078 | "bad SLOAD type %d", irt_type(ir->t)); | ||
| 1064 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); | 1079 | dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); |
| 1065 | rset_clear(allow, dest); | 1080 | rset_clear(allow, dest); |
| 1066 | base = ra_alloc1(as, REF_BASE, allow); | 1081 | base = ra_alloc1(as, REF_BASE, allow); |
| @@ -1127,7 +1142,8 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1127 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | 1142 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; |
| 1128 | IRRef args[4]; | 1143 | IRRef args[4]; |
| 1129 | RegSet drop = RSET_SCRATCH; | 1144 | RegSet drop = RSET_SCRATCH; |
| 1130 | lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); | 1145 | lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), |
| 1146 | "bad CNEW/CNEWI operands"); | ||
| 1131 | 1147 | ||
| 1132 | as->gcsteps++; | 1148 | as->gcsteps++; |
| 1133 | if (ra_hasreg(ir->r)) | 1149 | if (ra_hasreg(ir->r)) |
| @@ -1140,10 +1156,10 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1140 | if (ir->o == IR_CNEWI) { | 1156 | if (ir->o == IR_CNEWI) { |
| 1141 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | 1157 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); |
| 1142 | int32_t ofs = sizeof(GCcdata); | 1158 | int32_t ofs = sizeof(GCcdata); |
| 1143 | lua_assert(sz == 4 || sz == 8); | 1159 | lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); |
| 1144 | if (sz == 8) { | 1160 | if (sz == 8) { |
| 1145 | ofs += 4; | 1161 | ofs += 4; |
| 1146 | lua_assert((ir+1)->o == IR_HIOP); | 1162 | lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI"); |
| 1147 | } | 1163 | } |
| 1148 | for (;;) { | 1164 | for (;;) { |
| 1149 | Reg r = ra_alloc1(as, ir->op2, allow); | 1165 | Reg r = ra_alloc1(as, ir->op2, allow); |
| @@ -1187,7 +1203,7 @@ static void asm_tbar(ASMState *as, IRIns *ir) | |||
| 1187 | emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist)); | 1203 | emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist)); |
| 1188 | emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked)); | 1204 | emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked)); |
| 1189 | emit_setgl(as, tab, gc.grayagain); | 1205 | emit_setgl(as, tab, gc.grayagain); |
| 1190 | lua_assert(LJ_GC_BLACK == 0x04); | 1206 | lj_assertA(LJ_GC_BLACK == 0x04, "bad LJ_GC_BLACK"); |
| 1191 | emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */ | 1207 | emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */ |
| 1192 | emit_getgl(as, link, gc.grayagain); | 1208 | emit_getgl(as, link, gc.grayagain); |
| 1193 | emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); | 1209 | emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end); |
| @@ -1202,7 +1218,7 @@ static void asm_obar(ASMState *as, IRIns *ir) | |||
| 1202 | MCLabel l_end; | 1218 | MCLabel l_end; |
| 1203 | Reg obj, val, tmp; | 1219 | Reg obj, val, tmp; |
| 1204 | /* No need for other object barriers (yet). */ | 1220 | /* No need for other object barriers (yet). */ |
| 1205 | lua_assert(IR(ir->op1)->o == IR_UREFC); | 1221 | lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); |
| 1206 | ra_evictset(as, RSET_SCRATCH); | 1222 | ra_evictset(as, RSET_SCRATCH); |
| 1207 | l_end = emit_label(as); | 1223 | l_end = emit_label(as); |
| 1208 | args[0] = ASMREF_TMP1; /* global_State *g */ | 1224 | args[0] = ASMREF_TMP1; /* global_State *g */ |
| @@ -1673,7 +1689,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik) | |||
| 1673 | #define asm_brol(as, ir) \ | 1689 | #define asm_brol(as, ir) \ |
| 1674 | asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \ | 1690 | asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \ |
| 1675 | PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)) | 1691 | PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)) |
| 1676 | #define asm_bror(as, ir) lua_assert(0) | 1692 | #define asm_bror(as, ir) lj_assertA(0, "unexpected BROR") |
| 1677 | 1693 | ||
| 1678 | #if LJ_SOFTFP | 1694 | #if LJ_SOFTFP |
| 1679 | static void asm_sfpmin_max(ASMState *as, IRIns *ir) | 1695 | static void asm_sfpmin_max(ASMState *as, IRIns *ir) |
| @@ -1947,10 +1963,11 @@ static void asm_hiop(ASMState *as, IRIns *ir) | |||
| 1947 | case IR_CNEWI: | 1963 | case IR_CNEWI: |
| 1948 | /* Nothing to do here. Handled by lo op itself. */ | 1964 | /* Nothing to do here. Handled by lo op itself. */ |
| 1949 | break; | 1965 | break; |
| 1950 | default: lua_assert(0); break; | 1966 | default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break; |
| 1951 | } | 1967 | } |
| 1952 | #else | 1968 | #else |
| 1953 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */ | 1969 | /* Unused without SOFTFP or FFI. */ |
| 1970 | UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP"); | ||
| 1954 | #endif | 1971 | #endif |
| 1955 | } | 1972 | } |
| 1956 | 1973 | ||
| @@ -2010,7 +2027,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2010 | #if LJ_SOFTFP | 2027 | #if LJ_SOFTFP |
| 2011 | Reg tmp; | 2028 | Reg tmp; |
| 2012 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); | 2029 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); |
| 2013 | lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ | 2030 | /* LJ_SOFTFP: must be a number constant. */ |
| 2031 | lj_assertA(irref_isk(ref), "unsplit FP op"); | ||
| 2014 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow); | 2032 | tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow); |
| 2015 | emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0)); | 2033 | emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0)); |
| 2016 | if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1); | 2034 | if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1); |
| @@ -2023,7 +2041,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2023 | } else { | 2041 | } else { |
| 2024 | Reg type; | 2042 | Reg type; |
| 2025 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); | 2043 | RegSet allow = rset_exclude(RSET_GPR, RID_BASE); |
| 2026 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); | 2044 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t), |
| 2045 | "restore of IR type %d", irt_type(ir->t)); | ||
| 2027 | if (!irt_ispri(ir->t)) { | 2046 | if (!irt_ispri(ir->t)) { |
| 2028 | Reg src = ra_alloc1(as, ref, allow); | 2047 | Reg src = ra_alloc1(as, ref, allow); |
| 2029 | rset_clear(allow, src); | 2048 | rset_clear(allow, src); |
| @@ -2043,7 +2062,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2043 | } | 2062 | } |
| 2044 | checkmclim(as); | 2063 | checkmclim(as); |
| 2045 | } | 2064 | } |
| 2046 | lua_assert(map + nent == flinks); | 2065 | lj_assertA(map + nent == flinks, "inconsistent frames in snapshot"); |
| 2047 | } | 2066 | } |
| 2048 | 2067 | ||
| 2049 | /* -- GC handling --------------------------------------------------------- */ | 2068 | /* -- GC handling --------------------------------------------------------- */ |
| @@ -2141,7 +2160,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk) | |||
| 2141 | as->mctop = p; | 2160 | as->mctop = p; |
| 2142 | } else { | 2161 | } else { |
| 2143 | /* Patch stack adjustment. */ | 2162 | /* Patch stack adjustment. */ |
| 2144 | lua_assert(checki16(CFRAME_SIZE+spadj)); | 2163 | lj_assertA(checki16(CFRAME_SIZE+spadj), "stack adjustment out of range"); |
| 2145 | p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj); | 2164 | p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj); |
| 2146 | p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj; | 2165 | p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj; |
| 2147 | } | 2166 | } |
| @@ -2218,14 +2237,16 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 2218 | } else if ((ins & 0xfc000000u) == PPCI_B && | 2237 | } else if ((ins & 0xfc000000u) == PPCI_B && |
| 2219 | ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) { | 2238 | ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) { |
| 2220 | ptrdiff_t delta = (char *)target - (char *)p; | 2239 | ptrdiff_t delta = (char *)target - (char *)p; |
| 2221 | lua_assert(((delta + 0x02000000) >> 26) == 0); | 2240 | lj_assertJ(((delta + 0x02000000) >> 26) == 0, |
| 2241 | "branch target out of range"); | ||
| 2222 | *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu); | 2242 | *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu); |
| 2223 | if (!cstart) cstart = p; | 2243 | if (!cstart) cstart = p; |
| 2224 | } | 2244 | } |
| 2225 | } | 2245 | } |
| 2226 | { /* Always patch long-range branch in exit stub itself. */ | 2246 | { /* Always patch long-range branch in exit stub itself. */ |
| 2227 | ptrdiff_t delta = (char *)target - (char *)px - clearso; | 2247 | ptrdiff_t delta = (char *)target - (char *)px - clearso; |
| 2228 | lua_assert(((delta + 0x02000000) >> 26) == 0); | 2248 | lj_assertJ(((delta + 0x02000000) >> 26) == 0, |
| 2249 | "branch target out of range"); | ||
| 2229 | *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu); | 2250 | *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu); |
| 2230 | } | 2251 | } |
| 2231 | if (!cstart) cstart = px; | 2252 | if (!cstart) cstart = px; |
diff --git a/src/lj_asm_x86.h b/src/lj_asm_x86.h index 7356a5f0..a3adee14 100644 --- a/src/lj_asm_x86.h +++ b/src/lj_asm_x86.h | |||
| @@ -31,7 +31,7 @@ static MCode *asm_exitstub_gen(ASMState *as, ExitNo group) | |||
| 31 | #endif | 31 | #endif |
| 32 | /* Jump to exit handler which fills in the ExitState. */ | 32 | /* Jump to exit handler which fills in the ExitState. */ |
| 33 | *mxp++ = XI_JMP; mxp += 4; | 33 | *mxp++ = XI_JMP; mxp += 4; |
| 34 | *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler); | 34 | *((int32_t *)(mxp-4)) = jmprel(as->J, mxp, (MCode *)(void *)lj_vm_exit_handler); |
| 35 | /* Commit the code for this group (even if assembly fails later on). */ | 35 | /* Commit the code for this group (even if assembly fails later on). */ |
| 36 | lj_mcode_commitbot(as->J, mxp); | 36 | lj_mcode_commitbot(as->J, mxp); |
| 37 | as->mcbot = mxp; | 37 | as->mcbot = mxp; |
| @@ -60,7 +60,7 @@ static void asm_guardcc(ASMState *as, int cc) | |||
| 60 | MCode *p = as->mcp; | 60 | MCode *p = as->mcp; |
| 61 | if (LJ_UNLIKELY(p == as->invmcp)) { | 61 | if (LJ_UNLIKELY(p == as->invmcp)) { |
| 62 | as->loopinv = 1; | 62 | as->loopinv = 1; |
| 63 | *(int32_t *)(p+1) = jmprel(p+5, target); | 63 | *(int32_t *)(p+1) = jmprel(as->J, p+5, target); |
| 64 | target = p; | 64 | target = p; |
| 65 | cc ^= 1; | 65 | cc ^= 1; |
| 66 | if (as->realign) { | 66 | if (as->realign) { |
| @@ -131,7 +131,7 @@ static IRRef asm_fuseabase(ASMState *as, IRRef ref) | |||
| 131 | as->mrm.ofs = 0; | 131 | as->mrm.ofs = 0; |
| 132 | if (irb->o == IR_FLOAD) { | 132 | if (irb->o == IR_FLOAD) { |
| 133 | IRIns *ira = IR(irb->op1); | 133 | IRIns *ira = IR(irb->op1); |
| 134 | lua_assert(irb->op2 == IRFL_TAB_ARRAY); | 134 | lj_assertA(irb->op2 == IRFL_TAB_ARRAY, "expected FLOAD TAB_ARRAY"); |
| 135 | /* We can avoid the FLOAD of t->array for colocated arrays. */ | 135 | /* We can avoid the FLOAD of t->array for colocated arrays. */ |
| 136 | if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && | 136 | if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE && |
| 137 | !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) { | 137 | !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) { |
| @@ -150,7 +150,7 @@ static IRRef asm_fuseabase(ASMState *as, IRRef ref) | |||
| 150 | static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) | 150 | static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow) |
| 151 | { | 151 | { |
| 152 | IRIns *irx; | 152 | IRIns *irx; |
| 153 | lua_assert(ir->o == IR_AREF); | 153 | lj_assertA(ir->o == IR_AREF, "expected AREF"); |
| 154 | as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); | 154 | as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow); |
| 155 | irx = IR(ir->op2); | 155 | irx = IR(ir->op2); |
| 156 | if (irref_isk(ir->op2)) { | 156 | if (irref_isk(ir->op2)) { |
| @@ -217,8 +217,9 @@ static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) | |||
| 217 | } | 217 | } |
| 218 | break; | 218 | break; |
| 219 | default: | 219 | default: |
| 220 | lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || | 220 | lj_assertA(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO || |
| 221 | ir->o == IR_KKPTR); | 221 | ir->o == IR_KKPTR, |
| 222 | "bad IR op %d", ir->o); | ||
| 222 | break; | 223 | break; |
| 223 | } | 224 | } |
| 224 | } | 225 | } |
| @@ -230,9 +231,10 @@ static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow) | |||
| 230 | /* Fuse FLOAD/FREF reference into memory operand. */ | 231 | /* Fuse FLOAD/FREF reference into memory operand. */ |
| 231 | static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) | 232 | static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) |
| 232 | { | 233 | { |
| 233 | lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF); | 234 | lj_assertA(ir->o == IR_FLOAD || ir->o == IR_FREF, |
| 235 | "bad IR op %d", ir->o); | ||
| 234 | as->mrm.idx = RID_NONE; | 236 | as->mrm.idx = RID_NONE; |
| 235 | if (ir->op1 == REF_NIL) { | 237 | if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */ |
| 236 | #if LJ_GC64 | 238 | #if LJ_GC64 |
| 237 | as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch); | 239 | as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch); |
| 238 | as->mrm.base = RID_DISPATCH; | 240 | as->mrm.base = RID_DISPATCH; |
| @@ -271,7 +273,7 @@ static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow) | |||
| 271 | static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) | 273 | static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow) |
| 272 | { | 274 | { |
| 273 | IRIns *irr; | 275 | IRIns *irr; |
| 274 | lua_assert(ir->o == IR_STRREF); | 276 | lj_assertA(ir->o == IR_STRREF, "bad IR op %d", ir->o); |
| 275 | as->mrm.base = as->mrm.idx = RID_NONE; | 277 | as->mrm.base = as->mrm.idx = RID_NONE; |
| 276 | as->mrm.scale = XM_SCALE1; | 278 | as->mrm.scale = XM_SCALE1; |
| 277 | as->mrm.ofs = sizeof(GCstr); | 279 | as->mrm.ofs = sizeof(GCstr); |
| @@ -378,9 +380,10 @@ static Reg asm_fuseloadk64(ASMState *as, IRIns *ir) | |||
| 378 | checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) { | 380 | checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) { |
| 379 | as->mrm.ofs = (int32_t)mcpofs(as, k); | 381 | as->mrm.ofs = (int32_t)mcpofs(as, k); |
| 380 | as->mrm.base = RID_RIP; | 382 | as->mrm.base = RID_RIP; |
| 381 | } else { | 383 | } else { /* Intern 64 bit constant at bottom of mcode. */ |
| 382 | if (ir->i) { | 384 | if (ir->i) { |
| 383 | lua_assert(*k == *(uint64_t*)(as->mctop - ir->i)); | 385 | lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i), |
| 386 | "bad interned 64 bit constant"); | ||
| 384 | } else { | 387 | } else { |
| 385 | while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3; | 388 | while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3; |
| 386 | *(uint64_t*)as->mcbot = *k; | 389 | *(uint64_t*)as->mcbot = *k; |
| @@ -420,12 +423,12 @@ static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow) | |||
| 420 | } | 423 | } |
| 421 | if (ir->o == IR_KNUM) { | 424 | if (ir->o == IR_KNUM) { |
| 422 | RegSet avail = as->freeset & ~as->modset & RSET_FPR; | 425 | RegSet avail = as->freeset & ~as->modset & RSET_FPR; |
| 423 | lua_assert(allow != RSET_EMPTY); | 426 | lj_assertA(allow != RSET_EMPTY, "no register allowed"); |
| 424 | if (!(avail & (avail-1))) /* Fuse if less than two regs available. */ | 427 | if (!(avail & (avail-1))) /* Fuse if less than two regs available. */ |
| 425 | return asm_fuseloadk64(as, ir); | 428 | return asm_fuseloadk64(as, ir); |
| 426 | } else if (ref == REF_BASE || ir->o == IR_KINT64) { | 429 | } else if (ref == REF_BASE || ir->o == IR_KINT64) { |
| 427 | RegSet avail = as->freeset & ~as->modset & RSET_GPR; | 430 | RegSet avail = as->freeset & ~as->modset & RSET_GPR; |
| 428 | lua_assert(allow != RSET_EMPTY); | 431 | lj_assertA(allow != RSET_EMPTY, "no register allowed"); |
| 429 | if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ | 432 | if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */ |
| 430 | if (ref == REF_BASE) { | 433 | if (ref == REF_BASE) { |
| 431 | #if LJ_GC64 | 434 | #if LJ_GC64 |
| @@ -606,7 +609,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 606 | #endif | 609 | #endif |
| 607 | emit_loadi(as, r, ir->i); | 610 | emit_loadi(as, r, ir->i); |
| 608 | } else { | 611 | } else { |
| 609 | lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */ | 612 | /* Must have been evicted. */ |
| 613 | lj_assertA(rset_test(as->freeset, r), "reg %d not free", r); | ||
| 610 | if (ra_hasreg(ir->r)) { | 614 | if (ra_hasreg(ir->r)) { |
| 611 | ra_noweak(as, ir->r); | 615 | ra_noweak(as, ir->r); |
| 612 | emit_movrr(as, ir, r, ir->r); | 616 | emit_movrr(as, ir, r, ir->r); |
| @@ -615,7 +619,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) | |||
| 615 | } | 619 | } |
| 616 | } | 620 | } |
| 617 | } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ | 621 | } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */ |
| 618 | lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */ | 622 | lj_assertA(!(irt_isfloat(ir->t) && irref_isk(ref)), |
| 623 | "unexpected float constant"); | ||
| 619 | if (LJ_32 && (ofs & 4) && irref_isk(ref)) { | 624 | if (LJ_32 && (ofs & 4) && irref_isk(ref)) { |
| 620 | /* Split stores for unaligned FP consts. */ | 625 | /* Split stores for unaligned FP consts. */ |
| 621 | emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); | 626 | emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo); |
| @@ -691,7 +696,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) | |||
| 691 | ra_destpair(as, ir); | 696 | ra_destpair(as, ir); |
| 692 | #endif | 697 | #endif |
| 693 | } else { | 698 | } else { |
| 694 | lua_assert(!irt_ispri(ir->t)); | 699 | lj_assertA(!irt_ispri(ir->t), "PRI dest"); |
| 695 | ra_destreg(as, ir, RID_RET); | 700 | ra_destreg(as, ir, RID_RET); |
| 696 | } | 701 | } |
| 697 | } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) { | 702 | } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) { |
| @@ -810,8 +815,10 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 810 | int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); | 815 | int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64)); |
| 811 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); | 816 | int stfp = (st == IRT_NUM || st == IRT_FLOAT); |
| 812 | IRRef lref = ir->op1; | 817 | IRRef lref = ir->op1; |
| 813 | lua_assert(irt_type(ir->t) != st); | 818 | lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV"); |
| 814 | lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */ | 819 | lj_assertA(!(LJ_32 && (irt_isint64(ir->t) || st64)), |
| 820 | "IR %04d has unsplit 64 bit type", | ||
| 821 | (int)(ir - as->ir) - REF_BIAS); | ||
| 815 | if (irt_isfp(ir->t)) { | 822 | if (irt_isfp(ir->t)) { |
| 816 | Reg dest = ra_dest(as, ir, RSET_FPR); | 823 | Reg dest = ra_dest(as, ir, RSET_FPR); |
| 817 | if (stfp) { /* FP to FP conversion. */ | 824 | if (stfp) { /* FP to FP conversion. */ |
| @@ -847,7 +854,8 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 847 | } else if (stfp) { /* FP to integer conversion. */ | 854 | } else if (stfp) { /* FP to integer conversion. */ |
| 848 | if (irt_isguard(ir->t)) { | 855 | if (irt_isguard(ir->t)) { |
| 849 | /* Checked conversions are only supported from number to int. */ | 856 | /* Checked conversions are only supported from number to int. */ |
| 850 | lua_assert(irt_isint(ir->t) && st == IRT_NUM); | 857 | lj_assertA(irt_isint(ir->t) && st == IRT_NUM, |
| 858 | "bad type for checked CONV"); | ||
| 851 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); | 859 | asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); |
| 852 | } else { | 860 | } else { |
| 853 | Reg dest = ra_dest(as, ir, RSET_GPR); | 861 | Reg dest = ra_dest(as, ir, RSET_GPR); |
| @@ -882,7 +890,7 @@ static void asm_conv(ASMState *as, IRIns *ir) | |||
| 882 | Reg left, dest = ra_dest(as, ir, RSET_GPR); | 890 | Reg left, dest = ra_dest(as, ir, RSET_GPR); |
| 883 | RegSet allow = RSET_GPR; | 891 | RegSet allow = RSET_GPR; |
| 884 | x86Op op; | 892 | x86Op op; |
| 885 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); | 893 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT"); |
| 886 | if (st == IRT_I8) { | 894 | if (st == IRT_I8) { |
| 887 | op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; | 895 | op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX; |
| 888 | } else if (st == IRT_U8) { | 896 | } else if (st == IRT_U8) { |
| @@ -953,7 +961,7 @@ static void asm_conv_fp_int64(ASMState *as, IRIns *ir) | |||
| 953 | emit_sjcc(as, CC_NS, l_end); | 961 | emit_sjcc(as, CC_NS, l_end); |
| 954 | emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ | 962 | emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */ |
| 955 | } else { | 963 | } else { |
| 956 | lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64); | 964 | lj_assertA(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64, "bad type for CONV"); |
| 957 | } | 965 | } |
| 958 | emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); | 966 | emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0); |
| 959 | /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ | 967 | /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */ |
| @@ -967,8 +975,8 @@ static void asm_conv_int64_fp(ASMState *as, IRIns *ir) | |||
| 967 | IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); | 975 | IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK); |
| 968 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); | 976 | IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH); |
| 969 | Reg lo, hi; | 977 | Reg lo, hi; |
| 970 | lua_assert(st == IRT_NUM || st == IRT_FLOAT); | 978 | lj_assertA(st == IRT_NUM || st == IRT_FLOAT, "bad type for CONV"); |
| 971 | lua_assert(dt == IRT_I64 || dt == IRT_U64); | 979 | lj_assertA(dt == IRT_I64 || dt == IRT_U64, "bad type for CONV"); |
| 972 | hi = ra_dest(as, ir, RSET_GPR); | 980 | hi = ra_dest(as, ir, RSET_GPR); |
| 973 | lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); | 981 | lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi)); |
| 974 | if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); | 982 | if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0); |
| @@ -1180,13 +1188,13 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 1180 | emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64)); | 1188 | emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64)); |
| 1181 | } | 1189 | } |
| 1182 | } else { | 1190 | } else { |
| 1183 | lua_assert(irt_ispri(kt) && !irt_isnil(kt)); | 1191 | lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type"); |
| 1184 | emit_u32(as, (irt_toitype(kt)<<15)|0x7fff); | 1192 | emit_u32(as, (irt_toitype(kt)<<15)|0x7fff); |
| 1185 | emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); | 1193 | emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it)); |
| 1186 | #else | 1194 | #else |
| 1187 | } else { | 1195 | } else { |
| 1188 | if (!irt_ispri(kt)) { | 1196 | if (!irt_ispri(kt)) { |
| 1189 | lua_assert(irt_isaddr(kt)); | 1197 | lj_assertA(irt_isaddr(kt), "bad HREF key type"); |
| 1190 | if (isk) | 1198 | if (isk) |
| 1191 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), | 1199 | emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr), |
| 1192 | ptr2addr(ir_kgc(irkey))); | 1200 | ptr2addr(ir_kgc(irkey))); |
| @@ -1194,7 +1202,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 1194 | emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); | 1202 | emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr)); |
| 1195 | emit_sjcc(as, CC_NE, l_next); | 1203 | emit_sjcc(as, CC_NE, l_next); |
| 1196 | } | 1204 | } |
| 1197 | lua_assert(!irt_isnil(kt)); | 1205 | lj_assertA(!irt_isnil(kt), "bad HREF key type"); |
| 1198 | emit_i8(as, irt_toitype(kt)); | 1206 | emit_i8(as, irt_toitype(kt)); |
| 1199 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); | 1207 | emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it)); |
| 1200 | #endif | 1208 | #endif |
| @@ -1209,7 +1217,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge) | |||
| 1209 | #endif | 1217 | #endif |
| 1210 | 1218 | ||
| 1211 | /* Load main position relative to tab->node into dest. */ | 1219 | /* Load main position relative to tab->node into dest. */ |
| 1212 | khash = isk ? ir_khash(irkey) : 1; | 1220 | khash = isk ? ir_khash(as, irkey) : 1; |
| 1213 | if (khash == 0) { | 1221 | if (khash == 0) { |
| 1214 | emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node)); | 1222 | emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node)); |
| 1215 | } else { | 1223 | } else { |
| @@ -1271,7 +1279,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 1271 | #if !LJ_64 | 1279 | #if !LJ_64 |
| 1272 | MCLabel l_exit; | 1280 | MCLabel l_exit; |
| 1273 | #endif | 1281 | #endif |
| 1274 | lua_assert(ofs % sizeof(Node) == 0); | 1282 | lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot"); |
| 1275 | if (ra_hasreg(dest)) { | 1283 | if (ra_hasreg(dest)) { |
| 1276 | if (ofs != 0) { | 1284 | if (ofs != 0) { |
| 1277 | if (dest == node) | 1285 | if (dest == node) |
| @@ -1288,7 +1296,8 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 1288 | Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); | 1296 | Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node)); |
| 1289 | emit_rmro(as, XO_CMP, key|REX_64, node, | 1297 | emit_rmro(as, XO_CMP, key|REX_64, node, |
| 1290 | ofs + (int32_t)offsetof(Node, key.u64)); | 1298 | ofs + (int32_t)offsetof(Node, key.u64)); |
| 1291 | lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t)); | 1299 | lj_assertA(irt_isnum(irkey->t) || irt_isgcv(irkey->t), |
| 1300 | "bad HREFK key type"); | ||
| 1292 | /* Assumes -0.0 is already canonicalized to +0.0. */ | 1301 | /* Assumes -0.0 is already canonicalized to +0.0. */ |
| 1293 | emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : | 1302 | emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 : |
| 1294 | #if LJ_GC64 | 1303 | #if LJ_GC64 |
| @@ -1299,7 +1308,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 1299 | (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); | 1308 | (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey))); |
| 1300 | #endif | 1309 | #endif |
| 1301 | } else { | 1310 | } else { |
| 1302 | lua_assert(!irt_isnil(irkey->t)); | 1311 | lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type"); |
| 1303 | #if LJ_GC64 | 1312 | #if LJ_GC64 |
| 1304 | emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff); | 1313 | emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff); |
| 1305 | emit_rmro(as, XO_ARITHi, XOg_CMP, node, | 1314 | emit_rmro(as, XO_ARITHi, XOg_CMP, node, |
| @@ -1323,13 +1332,13 @@ static void asm_hrefk(ASMState *as, IRIns *ir) | |||
| 1323 | (int32_t)ir_knum(irkey)->u32.hi); | 1332 | (int32_t)ir_knum(irkey)->u32.hi); |
| 1324 | } else { | 1333 | } else { |
| 1325 | if (!irt_ispri(irkey->t)) { | 1334 | if (!irt_ispri(irkey->t)) { |
| 1326 | lua_assert(irt_isgcv(irkey->t)); | 1335 | lj_assertA(irt_isgcv(irkey->t), "bad HREFK key type"); |
| 1327 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, | 1336 | emit_gmroi(as, XG_ARITHi(XOg_CMP), node, |
| 1328 | ofs + (int32_t)offsetof(Node, key.gcr), | 1337 | ofs + (int32_t)offsetof(Node, key.gcr), |
| 1329 | ptr2addr(ir_kgc(irkey))); | 1338 | ptr2addr(ir_kgc(irkey))); |
| 1330 | emit_sjcc(as, CC_NE, l_exit); | 1339 | emit_sjcc(as, CC_NE, l_exit); |
| 1331 | } | 1340 | } |
| 1332 | lua_assert(!irt_isnil(irkey->t)); | 1341 | lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type"); |
| 1333 | emit_i8(as, irt_toitype(irkey->t)); | 1342 | emit_i8(as, irt_toitype(irkey->t)); |
| 1334 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, | 1343 | emit_rmro(as, XO_ARITHi8, XOg_CMP, node, |
| 1335 | ofs + (int32_t)offsetof(Node, key.it)); | 1344 | ofs + (int32_t)offsetof(Node, key.it)); |
| @@ -1402,7 +1411,8 @@ static void asm_fxload(ASMState *as, IRIns *ir) | |||
| 1402 | if (LJ_64 && irt_is64(ir->t)) | 1411 | if (LJ_64 && irt_is64(ir->t)) |
| 1403 | dest |= REX_64; | 1412 | dest |= REX_64; |
| 1404 | else | 1413 | else |
| 1405 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | 1414 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t), |
| 1415 | "unsplit 64 bit load"); | ||
| 1406 | xo = XO_MOV; | 1416 | xo = XO_MOV; |
| 1407 | break; | 1417 | break; |
| 1408 | } | 1418 | } |
| @@ -1447,13 +1457,16 @@ static void asm_fxstore(ASMState *as, IRIns *ir) | |||
| 1447 | case IRT_NUM: xo = XO_MOVSDto; break; | 1457 | case IRT_NUM: xo = XO_MOVSDto; break; |
| 1448 | case IRT_FLOAT: xo = XO_MOVSSto; break; | 1458 | case IRT_FLOAT: xo = XO_MOVSSto; break; |
| 1449 | #if LJ_64 && !LJ_GC64 | 1459 | #if LJ_64 && !LJ_GC64 |
| 1450 | case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */ | 1460 | case IRT_LIGHTUD: |
| 1461 | /* NYI: mask 64 bit lightuserdata. */ | ||
| 1462 | lj_assertA(0, "store of lightuserdata"); | ||
| 1451 | #endif | 1463 | #endif |
| 1452 | default: | 1464 | default: |
| 1453 | if (LJ_64 && irt_is64(ir->t)) | 1465 | if (LJ_64 && irt_is64(ir->t)) |
| 1454 | src |= REX_64; | 1466 | src |= REX_64; |
| 1455 | else | 1467 | else |
| 1456 | lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)); | 1468 | lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t), |
| 1469 | "unsplit 64 bit store"); | ||
| 1457 | xo = XO_MOVto; | 1470 | xo = XO_MOVto; |
| 1458 | break; | 1471 | break; |
| 1459 | } | 1472 | } |
| @@ -1467,8 +1480,8 @@ static void asm_fxstore(ASMState *as, IRIns *ir) | |||
| 1467 | emit_i8(as, k); | 1480 | emit_i8(as, k); |
| 1468 | emit_mrm(as, XO_MOVmib, 0, RID_MRM); | 1481 | emit_mrm(as, XO_MOVmib, 0, RID_MRM); |
| 1469 | } else { | 1482 | } else { |
| 1470 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || | 1483 | lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || |
| 1471 | irt_isaddr(ir->t)); | 1484 | irt_isaddr(ir->t), "bad store type"); |
| 1472 | emit_i32(as, k); | 1485 | emit_i32(as, k); |
| 1473 | emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); | 1486 | emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); |
| 1474 | } | 1487 | } |
| @@ -1503,8 +1516,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1503 | #if LJ_GC64 | 1516 | #if LJ_GC64 |
| 1504 | Reg tmp = RID_NONE; | 1517 | Reg tmp = RID_NONE; |
| 1505 | #endif | 1518 | #endif |
| 1506 | lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | 1519 | lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || |
| 1507 | (LJ_DUALNUM && irt_isint(ir->t))); | 1520 | (LJ_DUALNUM && irt_isint(ir->t)), |
| 1521 | "bad load type %d", irt_type(ir->t)); | ||
| 1508 | #if LJ_64 && !LJ_GC64 | 1522 | #if LJ_64 && !LJ_GC64 |
| 1509 | if (irt_islightud(ir->t)) { | 1523 | if (irt_islightud(ir->t)) { |
| 1510 | Reg dest = asm_load_lightud64(as, ir, 1); | 1524 | Reg dest = asm_load_lightud64(as, ir, 1); |
| @@ -1551,7 +1565,8 @@ static void asm_ahuvload(ASMState *as, IRIns *ir) | |||
| 1551 | as->mrm.ofs += 4; | 1565 | as->mrm.ofs += 4; |
| 1552 | asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); | 1566 | asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); |
| 1553 | if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { | 1567 | if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { |
| 1554 | lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); | 1568 | lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t), |
| 1569 | "bad load type %d", irt_type(ir->t)); | ||
| 1555 | #if LJ_GC64 | 1570 | #if LJ_GC64 |
| 1556 | emit_u32(as, LJ_TISNUM << 15); | 1571 | emit_u32(as, LJ_TISNUM << 15); |
| 1557 | #else | 1572 | #else |
| @@ -1633,13 +1648,14 @@ static void asm_ahustore(ASMState *as, IRIns *ir) | |||
| 1633 | #endif | 1648 | #endif |
| 1634 | emit_mrm(as, XO_MOVto, src, RID_MRM); | 1649 | emit_mrm(as, XO_MOVto, src, RID_MRM); |
| 1635 | } else if (!irt_ispri(irr->t)) { | 1650 | } else if (!irt_ispri(irr->t)) { |
| 1636 | lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); | 1651 | lj_assertA(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)), |
| 1652 | "bad store type"); | ||
| 1637 | emit_i32(as, irr->i); | 1653 | emit_i32(as, irr->i); |
| 1638 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | 1654 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); |
| 1639 | } | 1655 | } |
| 1640 | as->mrm.ofs += 4; | 1656 | as->mrm.ofs += 4; |
| 1641 | #if LJ_GC64 | 1657 | #if LJ_GC64 |
| 1642 | lua_assert(LJ_DUALNUM && irt_isinteger(ir->t)); | 1658 | lj_assertA(LJ_DUALNUM && irt_isinteger(ir->t), "bad store type"); |
| 1643 | emit_i32(as, LJ_TNUMX << 15); | 1659 | emit_i32(as, LJ_TNUMX << 15); |
| 1644 | #else | 1660 | #else |
| 1645 | emit_i32(as, (int32_t)irt_toitype(ir->t)); | 1661 | emit_i32(as, (int32_t)irt_toitype(ir->t)); |
| @@ -1654,10 +1670,13 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1654 | (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0); | 1670 | (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0); |
| 1655 | IRType1 t = ir->t; | 1671 | IRType1 t = ir->t; |
| 1656 | Reg base; | 1672 | Reg base; |
| 1657 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | 1673 | lj_assertA(!(ir->op2 & IRSLOAD_PARENT), |
| 1658 | lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | 1674 | "bad parent SLOAD"); /* Handled by asm_head_side(). */ |
| 1659 | lua_assert(LJ_DUALNUM || | 1675 | lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK), |
| 1660 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); | 1676 | "inconsistent SLOAD variant"); |
| 1677 | lj_assertA(LJ_DUALNUM || | ||
| 1678 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)), | ||
| 1679 | "bad SLOAD type"); | ||
| 1661 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | 1680 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { |
| 1662 | Reg left = ra_scratch(as, RSET_FPR); | 1681 | Reg left = ra_scratch(as, RSET_FPR); |
| 1663 | asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ | 1682 | asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ |
| @@ -1677,7 +1696,8 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1677 | RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; | 1696 | RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; |
| 1678 | Reg dest = ra_dest(as, ir, allow); | 1697 | Reg dest = ra_dest(as, ir, allow); |
| 1679 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | 1698 | base = ra_alloc1(as, REF_BASE, RSET_GPR); |
| 1680 | lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); | 1699 | lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t), |
| 1700 | "bad SLOAD type %d", irt_type(t)); | ||
| 1681 | if ((ir->op2 & IRSLOAD_CONVERT)) { | 1701 | if ((ir->op2 & IRSLOAD_CONVERT)) { |
| 1682 | t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ | 1702 | t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ |
| 1683 | emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs); | 1703 | emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs); |
| @@ -1723,7 +1743,8 @@ static void asm_sload(ASMState *as, IRIns *ir) | |||
| 1723 | /* Need type check, even if the load result is unused. */ | 1743 | /* Need type check, even if the load result is unused. */ |
| 1724 | asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); | 1744 | asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); |
| 1725 | if (LJ_64 && irt_type(t) >= IRT_NUM) { | 1745 | if (LJ_64 && irt_type(t) >= IRT_NUM) { |
| 1726 | lua_assert(irt_isinteger(t) || irt_isnum(t)); | 1746 | lj_assertA(irt_isinteger(t) || irt_isnum(t), |
| 1747 | "bad SLOAD type %d", irt_type(t)); | ||
| 1727 | #if LJ_GC64 | 1748 | #if LJ_GC64 |
| 1728 | emit_u32(as, LJ_TISNUM << 15); | 1749 | emit_u32(as, LJ_TISNUM << 15); |
| 1729 | #else | 1750 | #else |
| @@ -1775,7 +1796,8 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1775 | CTInfo info = lj_ctype_info(cts, id, &sz); | 1796 | CTInfo info = lj_ctype_info(cts, id, &sz); |
| 1776 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | 1797 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; |
| 1777 | IRRef args[4]; | 1798 | IRRef args[4]; |
| 1778 | lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); | 1799 | lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), |
| 1800 | "bad CNEW/CNEWI operands"); | ||
| 1779 | 1801 | ||
| 1780 | as->gcsteps++; | 1802 | as->gcsteps++; |
| 1781 | asm_setupresult(as, ir, ci); /* GCcdata * */ | 1803 | asm_setupresult(as, ir, ci); /* GCcdata * */ |
| @@ -1805,7 +1827,7 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1805 | int32_t ofs = sizeof(GCcdata); | 1827 | int32_t ofs = sizeof(GCcdata); |
| 1806 | if (sz == 8) { | 1828 | if (sz == 8) { |
| 1807 | ofs += 4; ir++; | 1829 | ofs += 4; ir++; |
| 1808 | lua_assert(ir->o == IR_HIOP); | 1830 | lj_assertA(ir->o == IR_HIOP, "missing CNEWI HIOP"); |
| 1809 | } | 1831 | } |
| 1810 | do { | 1832 | do { |
| 1811 | if (irref_isk(ir->op2)) { | 1833 | if (irref_isk(ir->op2)) { |
| @@ -1819,7 +1841,7 @@ static void asm_cnew(ASMState *as, IRIns *ir) | |||
| 1819 | ofs -= 4; ir--; | 1841 | ofs -= 4; ir--; |
| 1820 | } while (1); | 1842 | } while (1); |
| 1821 | #endif | 1843 | #endif |
| 1822 | lua_assert(sz == 4 || sz == 8); | 1844 | lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); |
| 1823 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ | 1845 | } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ |
| 1824 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; | 1846 | ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; |
| 1825 | args[0] = ASMREF_L; /* lua_State *L */ | 1847 | args[0] = ASMREF_L; /* lua_State *L */ |
| @@ -1869,7 +1891,7 @@ static void asm_obar(ASMState *as, IRIns *ir) | |||
| 1869 | MCLabel l_end; | 1891 | MCLabel l_end; |
| 1870 | Reg obj; | 1892 | Reg obj; |
| 1871 | /* No need for other object barriers (yet). */ | 1893 | /* No need for other object barriers (yet). */ |
| 1872 | lua_assert(IR(ir->op1)->o == IR_UREFC); | 1894 | lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); |
| 1873 | ra_evictset(as, RSET_SCRATCH); | 1895 | ra_evictset(as, RSET_SCRATCH); |
| 1874 | l_end = emit_label(as); | 1896 | l_end = emit_label(as); |
| 1875 | args[0] = ASMREF_TMP1; /* global_State *g */ | 1897 | args[0] = ASMREF_TMP1; /* global_State *g */ |
| @@ -1986,7 +2008,7 @@ static int asm_swapops(ASMState *as, IRIns *ir) | |||
| 1986 | { | 2008 | { |
| 1987 | IRIns *irl = IR(ir->op1); | 2009 | IRIns *irl = IR(ir->op1); |
| 1988 | IRIns *irr = IR(ir->op2); | 2010 | IRIns *irr = IR(ir->op2); |
| 1989 | lua_assert(ra_noreg(irr->r)); | 2011 | lj_assertA(ra_noreg(irr->r), "bad usage"); |
| 1990 | if (!irm_iscomm(lj_ir_mode[ir->o])) | 2012 | if (!irm_iscomm(lj_ir_mode[ir->o])) |
| 1991 | return 0; /* Can't swap non-commutative operations. */ | 2013 | return 0; /* Can't swap non-commutative operations. */ |
| 1992 | if (irref_isk(ir->op2)) | 2014 | if (irref_isk(ir->op2)) |
| @@ -2376,8 +2398,9 @@ static void asm_comp(ASMState *as, IRIns *ir) | |||
| 2376 | IROp leftop = (IROp)(IR(lref)->o); | 2398 | IROp leftop = (IROp)(IR(lref)->o); |
| 2377 | Reg r64 = REX_64IR(ir, 0); | 2399 | Reg r64 = REX_64IR(ir, 0); |
| 2378 | int32_t imm = 0; | 2400 | int32_t imm = 0; |
| 2379 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || | 2401 | lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || |
| 2380 | irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t)); | 2402 | irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t), |
| 2403 | "bad comparison data type %d", irt_type(ir->t)); | ||
| 2381 | /* Swap constants (only for ABC) and fusable loads to the right. */ | 2404 | /* Swap constants (only for ABC) and fusable loads to the right. */ |
| 2382 | if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { | 2405 | if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { |
| 2383 | if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ | 2406 | if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ |
| @@ -2459,7 +2482,7 @@ static void asm_comp(ASMState *as, IRIns *ir) | |||
| 2459 | /* Use test r,r instead of cmp r,0. */ | 2482 | /* Use test r,r instead of cmp r,0. */ |
| 2460 | x86Op xo = XO_TEST; | 2483 | x86Op xo = XO_TEST; |
| 2461 | if (irt_isu8(ir->t)) { | 2484 | if (irt_isu8(ir->t)) { |
| 2462 | lua_assert(ir->o == IR_EQ || ir->o == IR_NE); | 2485 | lj_assertA(ir->o == IR_EQ || ir->o == IR_NE, "bad usage"); |
| 2463 | xo = XO_TESTb; | 2486 | xo = XO_TESTb; |
| 2464 | if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { | 2487 | if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { |
| 2465 | if (LJ_64) { | 2488 | if (LJ_64) { |
| @@ -2615,10 +2638,11 @@ static void asm_hiop(ASMState *as, IRIns *ir) | |||
| 2615 | case IR_CNEWI: | 2638 | case IR_CNEWI: |
| 2616 | /* Nothing to do here. Handled by CNEWI itself. */ | 2639 | /* Nothing to do here. Handled by CNEWI itself. */ |
| 2617 | break; | 2640 | break; |
| 2618 | default: lua_assert(0); break; | 2641 | default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break; |
| 2619 | } | 2642 | } |
| 2620 | #else | 2643 | #else |
| 2621 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ | 2644 | /* Unused on x64 or without FFI. */ |
| 2645 | UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP"); | ||
| 2622 | #endif | 2646 | #endif |
| 2623 | } | 2647 | } |
| 2624 | 2648 | ||
| @@ -2684,8 +2708,9 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2684 | Reg src = ra_alloc1(as, ref, RSET_FPR); | 2708 | Reg src = ra_alloc1(as, ref, RSET_FPR); |
| 2685 | emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); | 2709 | emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); |
| 2686 | } else { | 2710 | } else { |
| 2687 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || | 2711 | lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || |
| 2688 | (LJ_DUALNUM && irt_isinteger(ir->t))); | 2712 | (LJ_DUALNUM && irt_isinteger(ir->t)), |
| 2713 | "restore of IR type %d", irt_type(ir->t)); | ||
| 2689 | if (!irref_isk(ref)) { | 2714 | if (!irref_isk(ref)) { |
| 2690 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); | 2715 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); |
| 2691 | #if LJ_GC64 | 2716 | #if LJ_GC64 |
| @@ -2730,7 +2755,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap) | |||
| 2730 | } | 2755 | } |
| 2731 | checkmclim(as); | 2756 | checkmclim(as); |
| 2732 | } | 2757 | } |
| 2733 | lua_assert(map + nent == flinks); | 2758 | lj_assertA(map + nent == flinks, "inconsistent frames in snapshot"); |
| 2734 | } | 2759 | } |
| 2735 | 2760 | ||
| 2736 | /* -- GC handling --------------------------------------------------------- */ | 2761 | /* -- GC handling --------------------------------------------------------- */ |
| @@ -2774,16 +2799,16 @@ static void asm_loop_fixup(ASMState *as) | |||
| 2774 | MCode *target = as->mcp; | 2799 | MCode *target = as->mcp; |
| 2775 | if (as->realign) { /* Realigned loops use short jumps. */ | 2800 | if (as->realign) { /* Realigned loops use short jumps. */ |
| 2776 | as->realign = NULL; /* Stop another retry. */ | 2801 | as->realign = NULL; /* Stop another retry. */ |
| 2777 | lua_assert(((intptr_t)target & 15) == 0); | 2802 | lj_assertA(((intptr_t)target & 15) == 0, "loop realign failed"); |
| 2778 | if (as->loopinv) { /* Inverted loop branch? */ | 2803 | if (as->loopinv) { /* Inverted loop branch? */ |
| 2779 | p -= 5; | 2804 | p -= 5; |
| 2780 | p[0] = XI_JMP; | 2805 | p[0] = XI_JMP; |
| 2781 | lua_assert(target - p >= -128); | 2806 | lj_assertA(target - p >= -128, "loop realign failed"); |
| 2782 | p[-1] = (MCode)(target - p); /* Patch sjcc. */ | 2807 | p[-1] = (MCode)(target - p); /* Patch sjcc. */ |
| 2783 | if (as->loopinv == 2) | 2808 | if (as->loopinv == 2) |
| 2784 | p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ | 2809 | p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ |
| 2785 | } else { | 2810 | } else { |
| 2786 | lua_assert(target - p >= -128); | 2811 | lj_assertA(target - p >= -128, "loop realign failed"); |
| 2787 | p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ | 2812 | p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ |
| 2788 | p[-2] = XI_JMPs; | 2813 | p[-2] = XI_JMPs; |
| 2789 | } | 2814 | } |
| @@ -2880,7 +2905,7 @@ static void asm_tail_fixup(ASMState *as, TraceNo lnk) | |||
| 2880 | } | 2905 | } |
| 2881 | /* Patch exit branch. */ | 2906 | /* Patch exit branch. */ |
| 2882 | target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; | 2907 | target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; |
| 2883 | *(int32_t *)(p-4) = jmprel(p, target); | 2908 | *(int32_t *)(p-4) = jmprel(as->J, p, target); |
| 2884 | p[-5] = XI_JMP; | 2909 | p[-5] = XI_JMP; |
| 2885 | /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ | 2910 | /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ |
| 2886 | for (q = as->mctop-1; q >= p; q--) | 2911 | for (q = as->mctop-1; q >= p; q--) |
| @@ -3053,17 +3078,17 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | |||
| 3053 | uint32_t statei = u32ptr(&J2G(J)->vmstate); | 3078 | uint32_t statei = u32ptr(&J2G(J)->vmstate); |
| 3054 | #endif | 3079 | #endif |
| 3055 | if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) | 3080 | if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) |
| 3056 | *(int32_t *)(p+len-4) = jmprel(p+len, target); | 3081 | *(int32_t *)(p+len-4) = jmprel(J, p+len, target); |
| 3057 | /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ | 3082 | /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ |
| 3058 | for (; p < pe; p += asm_x86_inslen(p)) { | 3083 | for (; p < pe; p += asm_x86_inslen(p)) { |
| 3059 | intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64; | 3084 | intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64; |
| 3060 | if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi) | 3085 | if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi) |
| 3061 | break; | 3086 | break; |
| 3062 | } | 3087 | } |
| 3063 | lua_assert(p < pe); | 3088 | lj_assertJ(p < pe, "instruction length decoder failed"); |
| 3064 | for (; p < pe; p += asm_x86_inslen(p)) | 3089 | for (; p < pe; p += asm_x86_inslen(p)) |
| 3065 | if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) | 3090 | if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) |
| 3066 | *(int32_t *)(p+2) = jmprel(p+6, target); | 3091 | *(int32_t *)(p+2) = jmprel(J, p+6, target); |
| 3067 | lj_mcode_sync(T->mcode, T->mcode + T->szmcode); | 3092 | lj_mcode_sync(T->mcode, T->mcode + T->szmcode); |
| 3068 | lj_mcode_patch(J, mcarea, 1); | 3093 | lj_mcode_patch(J, mcarea, 1); |
| 3069 | } | 3094 | } |
diff --git a/src/lj_assert.c b/src/lj_assert.c new file mode 100644 index 00000000..7989dbe6 --- /dev/null +++ b/src/lj_assert.c | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | /* | ||
| 2 | ** Internal assertions. | ||
| 3 | ** Copyright (C) 2005-2020 Mike Pall. See Copyright Notice in luajit.h | ||
| 4 | */ | ||
| 5 | |||
| 6 | #define lj_assert_c | ||
| 7 | #define LUA_CORE | ||
| 8 | |||
| 9 | #if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) | ||
| 10 | |||
| 11 | #include <stdio.h> | ||
| 12 | |||
| 13 | #include "lj_obj.h" | ||
| 14 | |||
| 15 | void lj_assert_fail(global_State *g, const char *file, int line, | ||
| 16 | const char *func, const char *fmt, ...) | ||
| 17 | { | ||
| 18 | va_list argp; | ||
| 19 | va_start(argp, fmt); | ||
| 20 | fprintf(stderr, "LuaJIT ASSERT %s:%d: %s: ", file, line, func); | ||
| 21 | vfprintf(stderr, fmt, argp); | ||
| 22 | fputc('\n', stderr); | ||
| 23 | va_end(argp); | ||
| 24 | UNUSED(g); /* May be NULL. TODO: optionally dump state. */ | ||
| 25 | abort(); | ||
| 26 | } | ||
| 27 | |||
| 28 | #endif | ||
diff --git a/src/lj_bcread.c b/src/lj_bcread.c index 1585272f..1d9547be 100644 --- a/src/lj_bcread.c +++ b/src/lj_bcread.c | |||
| @@ -47,7 +47,7 @@ static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em) | |||
| 47 | /* Refill buffer. */ | 47 | /* Refill buffer. */ |
| 48 | static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) | 48 | static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) |
| 49 | { | 49 | { |
| 50 | lua_assert(len != 0); | 50 | lj_assertLS(len != 0, "empty refill"); |
| 51 | if (len > LJ_MAX_BUF || ls->c < 0) | 51 | if (len > LJ_MAX_BUF || ls->c < 0) |
| 52 | bcread_error(ls, LJ_ERR_BCBAD); | 52 | bcread_error(ls, LJ_ERR_BCBAD); |
| 53 | do { | 53 | do { |
| @@ -57,7 +57,7 @@ static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need) | |||
| 57 | MSize n = (MSize)(ls->pe - ls->p); | 57 | MSize n = (MSize)(ls->pe - ls->p); |
| 58 | if (n) { /* Copy remainder to buffer. */ | 58 | if (n) { /* Copy remainder to buffer. */ |
| 59 | if (sbuflen(&ls->sb)) { /* Move down in buffer. */ | 59 | if (sbuflen(&ls->sb)) { /* Move down in buffer. */ |
| 60 | lua_assert(ls->pe == sbufP(&ls->sb)); | 60 | lj_assertLS(ls->pe == sbufP(&ls->sb), "bad buffer pointer"); |
| 61 | if (ls->p != p) memmove(p, ls->p, n); | 61 | if (ls->p != p) memmove(p, ls->p, n); |
| 62 | } else { /* Copy from buffer provided by reader. */ | 62 | } else { /* Copy from buffer provided by reader. */ |
| 63 | p = lj_buf_need(&ls->sb, len); | 63 | p = lj_buf_need(&ls->sb, len); |
| @@ -107,7 +107,7 @@ static LJ_AINLINE uint8_t *bcread_mem(LexState *ls, MSize len) | |||
| 107 | { | 107 | { |
| 108 | uint8_t *p = (uint8_t *)ls->p; | 108 | uint8_t *p = (uint8_t *)ls->p; |
| 109 | ls->p += len; | 109 | ls->p += len; |
| 110 | lua_assert(ls->p <= ls->pe); | 110 | lj_assertLS(ls->p <= ls->pe, "buffer read overflow"); |
| 111 | return p; | 111 | return p; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| @@ -120,7 +120,7 @@ static void bcread_block(LexState *ls, void *q, MSize len) | |||
| 120 | /* Read byte from buffer. */ | 120 | /* Read byte from buffer. */ |
| 121 | static LJ_AINLINE uint32_t bcread_byte(LexState *ls) | 121 | static LJ_AINLINE uint32_t bcread_byte(LexState *ls) |
| 122 | { | 122 | { |
| 123 | lua_assert(ls->p < ls->pe); | 123 | lj_assertLS(ls->p < ls->pe, "buffer read overflow"); |
| 124 | return (uint32_t)(uint8_t)*ls->p++; | 124 | return (uint32_t)(uint8_t)*ls->p++; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| @@ -128,7 +128,7 @@ static LJ_AINLINE uint32_t bcread_byte(LexState *ls) | |||
| 128 | static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls) | 128 | static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls) |
| 129 | { | 129 | { |
| 130 | uint32_t v = lj_buf_ruleb128(&ls->p); | 130 | uint32_t v = lj_buf_ruleb128(&ls->p); |
| 131 | lua_assert(ls->p <= ls->pe); | 131 | lj_assertLS(ls->p <= ls->pe, "buffer read overflow"); |
| 132 | return v; | 132 | return v; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| @@ -145,7 +145,7 @@ static uint32_t bcread_uleb128_33(LexState *ls) | |||
| 145 | } while (*p++ >= 0x80); | 145 | } while (*p++ >= 0x80); |
| 146 | } | 146 | } |
| 147 | ls->p = (char *)p; | 147 | ls->p = (char *)p; |
| 148 | lua_assert(ls->p <= ls->pe); | 148 | lj_assertLS(ls->p <= ls->pe, "buffer read overflow"); |
| 149 | return v; | 149 | return v; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| @@ -192,7 +192,7 @@ static void bcread_ktabk(LexState *ls, TValue *o) | |||
| 192 | o->u32.lo = bcread_uleb128(ls); | 192 | o->u32.lo = bcread_uleb128(ls); |
| 193 | o->u32.hi = bcread_uleb128(ls); | 193 | o->u32.hi = bcread_uleb128(ls); |
| 194 | } else { | 194 | } else { |
| 195 | lua_assert(tp <= BCDUMP_KTAB_TRUE); | 195 | lj_assertLS(tp <= BCDUMP_KTAB_TRUE, "bad constant type %d", tp); |
| 196 | setpriV(o, ~tp); | 196 | setpriV(o, ~tp); |
| 197 | } | 197 | } |
| 198 | } | 198 | } |
| @@ -214,7 +214,7 @@ static GCtab *bcread_ktab(LexState *ls) | |||
| 214 | for (i = 0; i < nhash; i++) { | 214 | for (i = 0; i < nhash; i++) { |
| 215 | TValue key; | 215 | TValue key; |
| 216 | bcread_ktabk(ls, &key); | 216 | bcread_ktabk(ls, &key); |
| 217 | lua_assert(!tvisnil(&key)); | 217 | lj_assertLS(!tvisnil(&key), "nil key"); |
| 218 | bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); | 218 | bcread_ktabk(ls, lj_tab_set(ls->L, t, &key)); |
| 219 | } | 219 | } |
| 220 | } | 220 | } |
| @@ -251,7 +251,7 @@ static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc) | |||
| 251 | #endif | 251 | #endif |
| 252 | } else { | 252 | } else { |
| 253 | lua_State *L = ls->L; | 253 | lua_State *L = ls->L; |
| 254 | lua_assert(tp == BCDUMP_KGC_CHILD); | 254 | lj_assertLS(tp == BCDUMP_KGC_CHILD, "bad constant type %d", tp); |
| 255 | if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */ | 255 | if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */ |
| 256 | bcread_error(ls, LJ_ERR_BCBAD); | 256 | bcread_error(ls, LJ_ERR_BCBAD); |
| 257 | L->top--; | 257 | L->top--; |
| @@ -422,7 +422,7 @@ static int bcread_header(LexState *ls) | |||
| 422 | GCproto *lj_bcread(LexState *ls) | 422 | GCproto *lj_bcread(LexState *ls) |
| 423 | { | 423 | { |
| 424 | lua_State *L = ls->L; | 424 | lua_State *L = ls->L; |
| 425 | lua_assert(ls->c == BCDUMP_HEAD1); | 425 | lj_assertLS(ls->c == BCDUMP_HEAD1, "bad bytecode header"); |
| 426 | bcread_savetop(L, ls, L->top); | 426 | bcread_savetop(L, ls, L->top); |
| 427 | lj_buf_reset(&ls->sb); | 427 | lj_buf_reset(&ls->sb); |
| 428 | /* Check for a valid bytecode dump header. */ | 428 | /* Check for a valid bytecode dump header. */ |
diff --git a/src/lj_bcwrite.c b/src/lj_bcwrite.c index dd38289e..a8c310b8 100644 --- a/src/lj_bcwrite.c +++ b/src/lj_bcwrite.c | |||
| @@ -29,8 +29,17 @@ typedef struct BCWriteCtx { | |||
| 29 | void *wdata; /* Writer callback data. */ | 29 | void *wdata; /* Writer callback data. */ |
| 30 | int strip; /* Strip debug info. */ | 30 | int strip; /* Strip debug info. */ |
| 31 | int status; /* Status from writer callback. */ | 31 | int status; /* Status from writer callback. */ |
| 32 | #ifdef LUA_USE_ASSERT | ||
| 33 | global_State *g; | ||
| 34 | #endif | ||
| 32 | } BCWriteCtx; | 35 | } BCWriteCtx; |
| 33 | 36 | ||
| 37 | #ifdef LUA_USE_ASSERT | ||
| 38 | #define lj_assertBCW(c, ...) lj_assertG_(ctx->g, (c), __VA_ARGS__) | ||
| 39 | #else | ||
| 40 | #define lj_assertBCW(c, ...) ((void)ctx) | ||
| 41 | #endif | ||
| 42 | |||
| 34 | /* -- Bytecode writer ----------------------------------------------------- */ | 43 | /* -- Bytecode writer ----------------------------------------------------- */ |
| 35 | 44 | ||
| 36 | /* Write a single constant key/value of a template table. */ | 45 | /* Write a single constant key/value of a template table. */ |
| @@ -61,7 +70,7 @@ static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow) | |||
| 61 | p = lj_strfmt_wuleb128(p, o->u32.lo); | 70 | p = lj_strfmt_wuleb128(p, o->u32.lo); |
| 62 | p = lj_strfmt_wuleb128(p, o->u32.hi); | 71 | p = lj_strfmt_wuleb128(p, o->u32.hi); |
| 63 | } else { | 72 | } else { |
| 64 | lua_assert(tvispri(o)); | 73 | lj_assertBCW(tvispri(o), "unhandled type %d", itype(o)); |
| 65 | *p++ = BCDUMP_KTAB_NIL+~itype(o); | 74 | *p++ = BCDUMP_KTAB_NIL+~itype(o); |
| 66 | } | 75 | } |
| 67 | setsbufP(&ctx->sb, p); | 76 | setsbufP(&ctx->sb, p); |
| @@ -121,7 +130,7 @@ static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt) | |||
| 121 | tp = BCDUMP_KGC_STR + gco2str(o)->len; | 130 | tp = BCDUMP_KGC_STR + gco2str(o)->len; |
| 122 | need = 5+gco2str(o)->len; | 131 | need = 5+gco2str(o)->len; |
| 123 | } else if (o->gch.gct == ~LJ_TPROTO) { | 132 | } else if (o->gch.gct == ~LJ_TPROTO) { |
| 124 | lua_assert((pt->flags & PROTO_CHILD)); | 133 | lj_assertBCW((pt->flags & PROTO_CHILD), "prototype has unexpected child"); |
| 125 | tp = BCDUMP_KGC_CHILD; | 134 | tp = BCDUMP_KGC_CHILD; |
| 126 | #if LJ_HASFFI | 135 | #if LJ_HASFFI |
| 127 | } else if (o->gch.gct == ~LJ_TCDATA) { | 136 | } else if (o->gch.gct == ~LJ_TCDATA) { |
| @@ -132,12 +141,14 @@ static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt) | |||
| 132 | } else if (id == CTID_UINT64) { | 141 | } else if (id == CTID_UINT64) { |
| 133 | tp = BCDUMP_KGC_U64; | 142 | tp = BCDUMP_KGC_U64; |
| 134 | } else { | 143 | } else { |
| 135 | lua_assert(id == CTID_COMPLEX_DOUBLE); | 144 | lj_assertBCW(id == CTID_COMPLEX_DOUBLE, |
| 145 | "bad cdata constant CTID %d", id); | ||
| 136 | tp = BCDUMP_KGC_COMPLEX; | 146 | tp = BCDUMP_KGC_COMPLEX; |
| 137 | } | 147 | } |
| 138 | #endif | 148 | #endif |
| 139 | } else { | 149 | } else { |
| 140 | lua_assert(o->gch.gct == ~LJ_TTAB); | 150 | lj_assertBCW(o->gch.gct == ~LJ_TTAB, |
| 151 | "bad constant GC type %d", o->gch.gct); | ||
| 141 | tp = BCDUMP_KGC_TAB; | 152 | tp = BCDUMP_KGC_TAB; |
| 142 | need = 1+2*5; | 153 | need = 1+2*5; |
| 143 | } | 154 | } |
| @@ -289,7 +300,7 @@ static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt) | |||
| 289 | MSize nn = (lj_fls(n)+8)*9 >> 6; | 300 | MSize nn = (lj_fls(n)+8)*9 >> 6; |
| 290 | char *q = sbufB(&ctx->sb) + (5 - nn); | 301 | char *q = sbufB(&ctx->sb) + (5 - nn); |
| 291 | p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */ | 302 | p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */ |
| 292 | lua_assert(p == sbufB(&ctx->sb) + 5); | 303 | lj_assertBCW(p == sbufB(&ctx->sb) + 5, "bad ULEB128 write"); |
| 293 | ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata); | 304 | ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata); |
| 294 | } | 305 | } |
| 295 | } | 306 | } |
| @@ -349,6 +360,9 @@ int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data, | |||
| 349 | ctx.wdata = data; | 360 | ctx.wdata = data; |
| 350 | ctx.strip = strip; | 361 | ctx.strip = strip; |
| 351 | ctx.status = 0; | 362 | ctx.status = 0; |
| 363 | #ifdef LUA_USE_ASSERT | ||
| 364 | ctx.g = G(L); | ||
| 365 | #endif | ||
| 352 | lj_buf_init(L, &ctx.sb); | 366 | lj_buf_init(L, &ctx.sb); |
| 353 | status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); | 367 | status = lj_vm_cpcall(L, NULL, &ctx, cpwriter); |
| 354 | if (status == 0) status = ctx.status; | 368 | if (status == 0) status = ctx.status; |
diff --git a/src/lj_buf.c b/src/lj_buf.c index c8778016..935ae488 100644 --- a/src/lj_buf.c +++ b/src/lj_buf.c | |||
| @@ -30,7 +30,7 @@ static void buf_grow(SBuf *sb, MSize sz) | |||
| 30 | 30 | ||
| 31 | LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz) | 31 | LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz) |
| 32 | { | 32 | { |
| 33 | lua_assert(sz > sbufsz(sb)); | 33 | lj_assertG_(G(sbufL(sb)), sz > sbufsz(sb), "SBuf overflow"); |
| 34 | if (LJ_UNLIKELY(sz > LJ_MAX_BUF)) | 34 | if (LJ_UNLIKELY(sz > LJ_MAX_BUF)) |
| 35 | lj_err_mem(sbufL(sb)); | 35 | lj_err_mem(sbufL(sb)); |
| 36 | buf_grow(sb, sz); | 36 | buf_grow(sb, sz); |
| @@ -40,7 +40,7 @@ LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz) | |||
| 40 | LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz) | 40 | LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz) |
| 41 | { | 41 | { |
| 42 | MSize len = sbuflen(sb); | 42 | MSize len = sbuflen(sb); |
| 43 | lua_assert(sz > sbufleft(sb)); | 43 | lj_assertG_(G(sbufL(sb)), sz > sbufleft(sb), "SBuf overflow"); |
| 44 | if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF)) | 44 | if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF)) |
| 45 | lj_err_mem(sbufL(sb)); | 45 | lj_err_mem(sbufL(sb)); |
| 46 | buf_grow(sb, len + sz); | 46 | buf_grow(sb, len + sz); |
diff --git a/src/lj_carith.c b/src/lj_carith.c index cf71aaf5..65ad2c10 100644 --- a/src/lj_carith.c +++ b/src/lj_carith.c | |||
| @@ -122,7 +122,7 @@ static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm) | |||
| 122 | setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); | 122 | setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2)); |
| 123 | return 1; | 123 | return 1; |
| 124 | } else { | 124 | } else { |
| 125 | lua_assert(mm == MM_le); | 125 | lj_assertL(mm == MM_le, "bad metamethod %d", mm); |
| 126 | setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); | 126 | setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2)); |
| 127 | return 1; | 127 | return 1; |
| 128 | } | 128 | } |
| @@ -208,7 +208,9 @@ static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm) | |||
| 208 | *up = lj_carith_powu64(u0, u1); | 208 | *up = lj_carith_powu64(u0, u1); |
| 209 | break; | 209 | break; |
| 210 | case MM_unm: *up = (uint64_t)-(int64_t)u0; break; | 210 | case MM_unm: *up = (uint64_t)-(int64_t)u0; break; |
| 211 | default: lua_assert(0); break; | 211 | default: |
| 212 | lj_assertL(0, "bad metamethod %d", mm); | ||
| 213 | break; | ||
| 212 | } | 214 | } |
| 213 | lj_gc_check(L); | 215 | lj_gc_check(L); |
| 214 | return 1; | 216 | return 1; |
| @@ -301,7 +303,9 @@ uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op) | |||
| 301 | case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break; | 303 | case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break; |
| 302 | case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break; | 304 | case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break; |
| 303 | case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break; | 305 | case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break; |
| 304 | default: lua_assert(0); break; | 306 | default: |
| 307 | lj_assertX(0, "bad shift op %d", op); | ||
| 308 | break; | ||
| 305 | } | 309 | } |
| 306 | return x; | 310 | return x; |
| 307 | } | 311 | } |
diff --git a/src/lj_ccall.c b/src/lj_ccall.c index 499a01d8..5ac1b4da 100644 --- a/src/lj_ccall.c +++ b/src/lj_ccall.c | |||
| @@ -391,7 +391,8 @@ | |||
| 391 | #define CCALL_HANDLE_GPR \ | 391 | #define CCALL_HANDLE_GPR \ |
| 392 | /* Try to pass argument in GPRs. */ \ | 392 | /* Try to pass argument in GPRs. */ \ |
| 393 | if (n > 1) { \ | 393 | if (n > 1) { \ |
| 394 | lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \ | 394 | /* int64_t or complex (float). */ \ |
| 395 | lj_assertL(n == 2 || n == 4, "bad GPR size %d", n); \ | ||
| 395 | if (ctype_isinteger(d->info) || ctype_isfp(d->info)) \ | 396 | if (ctype_isinteger(d->info) || ctype_isfp(d->info)) \ |
| 396 | ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ | 397 | ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ |
| 397 | else if (ngpr + n > maxgpr) \ | 398 | else if (ngpr + n > maxgpr) \ |
| @@ -642,7 +643,8 @@ static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs) | |||
| 642 | ccall_classify_struct(cts, ct, rcl, ofs); | 643 | ccall_classify_struct(cts, ct, rcl, ofs); |
| 643 | } else { | 644 | } else { |
| 644 | int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; | 645 | int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT; |
| 645 | lua_assert(ctype_hassize(ct->info)); | 646 | lj_assertCTS(ctype_hassize(ct->info), |
| 647 | "classify ctype %08x without size", ct->info); | ||
| 646 | if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ | 648 | if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */ |
| 647 | rcl[(ofs >= 8)] |= cl; | 649 | rcl[(ofs >= 8)] |= cl; |
| 648 | } | 650 | } |
| @@ -667,12 +669,13 @@ static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs) | |||
| 667 | } | 669 | } |
| 668 | 670 | ||
| 669 | /* Try to split up a small struct into registers. */ | 671 | /* Try to split up a small struct into registers. */ |
| 670 | static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl) | 672 | static int ccall_struct_reg(CCallState *cc, CTState *cts, GPRArg *dp, int *rcl) |
| 671 | { | 673 | { |
| 672 | MSize ngpr = cc->ngpr, nfpr = cc->nfpr; | 674 | MSize ngpr = cc->ngpr, nfpr = cc->nfpr; |
| 673 | uint32_t i; | 675 | uint32_t i; |
| 676 | UNUSED(cts); | ||
| 674 | for (i = 0; i < 2; i++) { | 677 | for (i = 0; i < 2; i++) { |
| 675 | lua_assert(!(rcl[i] & CCALL_RCL_MEM)); | 678 | lj_assertCTS(!(rcl[i] & CCALL_RCL_MEM), "pass mem struct in reg"); |
| 676 | if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ | 679 | if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */ |
| 677 | if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */ | 680 | if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */ |
| 678 | cc->gpr[ngpr++] = dp[i]; | 681 | cc->gpr[ngpr++] = dp[i]; |
| @@ -693,7 +696,8 @@ static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl, | |||
| 693 | dp[0] = dp[1] = 0; | 696 | dp[0] = dp[1] = 0; |
| 694 | /* Convert to temp. struct. */ | 697 | /* Convert to temp. struct. */ |
| 695 | lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); | 698 | lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg)); |
| 696 | if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */ | 699 | if (ccall_struct_reg(cc, cts, dp, rcl)) { |
| 700 | /* Register overflow? Pass on stack. */ | ||
| 697 | MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; | 701 | MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1; |
| 698 | if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ | 702 | if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */ |
| 699 | cc->nsp = nsp + n; | 703 | cc->nsp = nsp + n; |
| @@ -990,7 +994,7 @@ static int ccall_set_args(lua_State *L, CTState *cts, CType *ct, | |||
| 990 | if (fid) { /* Get argument type from field. */ | 994 | if (fid) { /* Get argument type from field. */ |
| 991 | CType *ctf = ctype_get(cts, fid); | 995 | CType *ctf = ctype_get(cts, fid); |
| 992 | fid = ctf->sib; | 996 | fid = ctf->sib; |
| 993 | lua_assert(ctype_isfield(ctf->info)); | 997 | lj_assertL(ctype_isfield(ctf->info), "field expected"); |
| 994 | did = ctype_cid(ctf->info); | 998 | did = ctype_cid(ctf->info); |
| 995 | } else { | 999 | } else { |
| 996 | if (!(ct->info & CTF_VARARG)) | 1000 | if (!(ct->info & CTF_VARARG)) |
| @@ -1138,7 +1142,8 @@ static int ccall_get_results(lua_State *L, CTState *cts, CType *ct, | |||
| 1138 | CCALL_HANDLE_RET | 1142 | CCALL_HANDLE_RET |
| 1139 | #endif | 1143 | #endif |
| 1140 | /* No reference types end up here, so there's no need for the CTypeID. */ | 1144 | /* No reference types end up here, so there's no need for the CTypeID. */ |
| 1141 | lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info))); | 1145 | lj_assertL(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)), |
| 1146 | "unexpected reference ctype"); | ||
| 1142 | return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp); | 1147 | return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp); |
| 1143 | } | 1148 | } |
| 1144 | 1149 | ||
diff --git a/src/lj_ccallback.c b/src/lj_ccallback.c index 4edd8a35..49775d2b 100644 --- a/src/lj_ccallback.c +++ b/src/lj_ccallback.c | |||
| @@ -107,9 +107,9 @@ MSize lj_ccallback_ptr2slot(CTState *cts, void *p) | |||
| 107 | /* Initialize machine code for callback function pointers. */ | 107 | /* Initialize machine code for callback function pointers. */ |
| 108 | #if LJ_OS_NOJIT | 108 | #if LJ_OS_NOJIT |
| 109 | /* Disabled callback support. */ | 109 | /* Disabled callback support. */ |
| 110 | #define callback_mcode_init(g, p) UNUSED(p) | 110 | #define callback_mcode_init(g, p) (p) |
| 111 | #elif LJ_TARGET_X86ORX64 | 111 | #elif LJ_TARGET_X86ORX64 |
| 112 | static void callback_mcode_init(global_State *g, uint8_t *page) | 112 | static void *callback_mcode_init(global_State *g, uint8_t *page) |
| 113 | { | 113 | { |
| 114 | uint8_t *p = page; | 114 | uint8_t *p = page; |
| 115 | uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback; | 115 | uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback; |
| @@ -143,10 +143,10 @@ static void callback_mcode_init(global_State *g, uint8_t *page) | |||
| 143 | *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2); | 143 | *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2); |
| 144 | } | 144 | } |
| 145 | } | 145 | } |
| 146 | lua_assert(p - page <= CALLBACK_MCODE_SIZE); | 146 | return p; |
| 147 | } | 147 | } |
| 148 | #elif LJ_TARGET_ARM | 148 | #elif LJ_TARGET_ARM |
| 149 | static void callback_mcode_init(global_State *g, uint32_t *page) | 149 | static void *callback_mcode_init(global_State *g, uint32_t *page) |
| 150 | { | 150 | { |
| 151 | uint32_t *p = page; | 151 | uint32_t *p = page; |
| 152 | void *target = (void *)lj_vm_ffi_callback; | 152 | void *target = (void *)lj_vm_ffi_callback; |
| @@ -165,10 +165,10 @@ static void callback_mcode_init(global_State *g, uint32_t *page) | |||
| 165 | *p = ARMI_B | ((page-p-2) & 0x00ffffffu); | 165 | *p = ARMI_B | ((page-p-2) & 0x00ffffffu); |
| 166 | p++; | 166 | p++; |
| 167 | } | 167 | } |
| 168 | lua_assert(p - page <= CALLBACK_MCODE_SIZE); | 168 | return p; |
| 169 | } | 169 | } |
| 170 | #elif LJ_TARGET_ARM64 | 170 | #elif LJ_TARGET_ARM64 |
| 171 | static void callback_mcode_init(global_State *g, uint32_t *page) | 171 | static void *callback_mcode_init(global_State *g, uint32_t *page) |
| 172 | { | 172 | { |
| 173 | uint32_t *p = page; | 173 | uint32_t *p = page; |
| 174 | void *target = (void *)lj_vm_ffi_callback; | 174 | void *target = (void *)lj_vm_ffi_callback; |
| @@ -185,10 +185,10 @@ static void callback_mcode_init(global_State *g, uint32_t *page) | |||
| 185 | *p = A64I_LE(A64I_B | A64F_S26((page-p) & 0x03ffffffu)); | 185 | *p = A64I_LE(A64I_B | A64F_S26((page-p) & 0x03ffffffu)); |
| 186 | p++; | 186 | p++; |
| 187 | } | 187 | } |
| 188 | lua_assert(p - page <= CALLBACK_MCODE_SIZE); | 188 | return p; |
| 189 | } | 189 | } |
| 190 | #elif LJ_TARGET_PPC | 190 | #elif LJ_TARGET_PPC |
| 191 | static void callback_mcode_init(global_State *g, uint32_t *page) | 191 | static void *callback_mcode_init(global_State *g, uint32_t *page) |
| 192 | { | 192 | { |
| 193 | uint32_t *p = page; | 193 | uint32_t *p = page; |
| 194 | void *target = (void *)lj_vm_ffi_callback; | 194 | void *target = (void *)lj_vm_ffi_callback; |
| @@ -204,10 +204,10 @@ static void callback_mcode_init(global_State *g, uint32_t *page) | |||
| 204 | *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2); | 204 | *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2); |
| 205 | p++; | 205 | p++; |
| 206 | } | 206 | } |
| 207 | lua_assert(p - page <= CALLBACK_MCODE_SIZE); | 207 | return p; |
| 208 | } | 208 | } |
| 209 | #elif LJ_TARGET_MIPS | 209 | #elif LJ_TARGET_MIPS |
| 210 | static void callback_mcode_init(global_State *g, uint32_t *page) | 210 | static void *callback_mcode_init(global_State *g, uint32_t *page) |
| 211 | { | 211 | { |
| 212 | uint32_t *p = page; | 212 | uint32_t *p = page; |
| 213 | uintptr_t target = (uintptr_t)(void *)lj_vm_ffi_callback; | 213 | uintptr_t target = (uintptr_t)(void *)lj_vm_ffi_callback; |
| @@ -236,11 +236,11 @@ static void callback_mcode_init(global_State *g, uint32_t *page) | |||
| 236 | p++; | 236 | p++; |
| 237 | *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot; | 237 | *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot; |
| 238 | } | 238 | } |
| 239 | lua_assert(p - page <= CALLBACK_MCODE_SIZE); | 239 | return p; |
| 240 | } | 240 | } |
| 241 | #else | 241 | #else |
| 242 | /* Missing support for this architecture. */ | 242 | /* Missing support for this architecture. */ |
| 243 | #define callback_mcode_init(g, p) UNUSED(p) | 243 | #define callback_mcode_init(g, p) (p) |
| 244 | #endif | 244 | #endif |
| 245 | 245 | ||
| 246 | /* -- Machine code management --------------------------------------------- */ | 246 | /* -- Machine code management --------------------------------------------- */ |
| @@ -263,7 +263,7 @@ static void callback_mcode_init(global_State *g, uint32_t *page) | |||
| 263 | static void callback_mcode_new(CTState *cts) | 263 | static void callback_mcode_new(CTState *cts) |
| 264 | { | 264 | { |
| 265 | size_t sz = (size_t)CALLBACK_MCODE_SIZE; | 265 | size_t sz = (size_t)CALLBACK_MCODE_SIZE; |
| 266 | void *p; | 266 | void *p, *pe; |
| 267 | if (CALLBACK_MAX_SLOT == 0) | 267 | if (CALLBACK_MAX_SLOT == 0) |
| 268 | lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); | 268 | lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV); |
| 269 | #if LJ_TARGET_WINDOWS | 269 | #if LJ_TARGET_WINDOWS |
| @@ -280,7 +280,10 @@ static void callback_mcode_new(CTState *cts) | |||
| 280 | p = lj_mem_new(cts->L, sz); | 280 | p = lj_mem_new(cts->L, sz); |
| 281 | #endif | 281 | #endif |
| 282 | cts->cb.mcode = p; | 282 | cts->cb.mcode = p; |
| 283 | callback_mcode_init(cts->g, p); | 283 | pe = callback_mcode_init(cts->g, p); |
| 284 | UNUSED(pe); | ||
| 285 | lj_assertCTS((size_t)((char *)pe - (char *)p) <= sz, | ||
| 286 | "miscalculated CALLBACK_MAX_SLOT"); | ||
| 284 | lj_mcode_sync(p, (char *)p + sz); | 287 | lj_mcode_sync(p, (char *)p + sz); |
| 285 | #if LJ_TARGET_WINDOWS | 288 | #if LJ_TARGET_WINDOWS |
| 286 | { | 289 | { |
| @@ -421,8 +424,9 @@ void lj_ccallback_mcode_free(CTState *cts) | |||
| 421 | 424 | ||
| 422 | #define CALLBACK_HANDLE_GPR \ | 425 | #define CALLBACK_HANDLE_GPR \ |
| 423 | if (n > 1) { \ | 426 | if (n > 1) { \ |
| 424 | lua_assert(((LJ_ABI_SOFTFP && ctype_isnum(cta->info)) || /* double. */ \ | 427 | lj_assertCTS(((LJ_ABI_SOFTFP && ctype_isnum(cta->info)) || /* double. */ \ |
| 425 | ctype_isinteger(cta->info)) && n == 2); /* int64_t. */ \ | 428 | ctype_isinteger(cta->info)) && n == 2, /* int64_t. */ \ |
| 429 | "bad GPR type"); \ | ||
| 426 | ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ | 430 | ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \ |
| 427 | } \ | 431 | } \ |
| 428 | if (ngpr + n <= maxgpr) { \ | 432 | if (ngpr + n <= maxgpr) { \ |
| @@ -579,7 +583,7 @@ static void callback_conv_args(CTState *cts, lua_State *L) | |||
| 579 | CTSize sz; | 583 | CTSize sz; |
| 580 | int isfp; | 584 | int isfp; |
| 581 | MSize n; | 585 | MSize n; |
| 582 | lua_assert(ctype_isfield(ctf->info)); | 586 | lj_assertCTS(ctype_isfield(ctf->info), "field expected"); |
| 583 | cta = ctype_rawchild(cts, ctf); | 587 | cta = ctype_rawchild(cts, ctf); |
| 584 | isfp = ctype_isfp(cta->info); | 588 | isfp = ctype_isfp(cta->info); |
| 585 | sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); | 589 | sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1); |
| @@ -671,7 +675,7 @@ lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf) | |||
| 671 | { | 675 | { |
| 672 | lua_State *L = cts->L; | 676 | lua_State *L = cts->L; |
| 673 | global_State *g = cts->g; | 677 | global_State *g = cts->g; |
| 674 | lua_assert(L != NULL); | 678 | lj_assertG(L != NULL, "uninitialized cts->L in callback"); |
| 675 | if (tvref(g->jit_base)) { | 679 | if (tvref(g->jit_base)) { |
| 676 | setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK)); | 680 | setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK)); |
| 677 | if (g->panic) g->panic(L); | 681 | if (g->panic) g->panic(L); |
| @@ -756,7 +760,7 @@ static CType *callback_checkfunc(CTState *cts, CType *ct) | |||
| 756 | CType *ctf = ctype_get(cts, fid); | 760 | CType *ctf = ctype_get(cts, fid); |
| 757 | if (!ctype_isattrib(ctf->info)) { | 761 | if (!ctype_isattrib(ctf->info)) { |
| 758 | CType *cta; | 762 | CType *cta; |
| 759 | lua_assert(ctype_isfield(ctf->info)); | 763 | lj_assertCTS(ctype_isfield(ctf->info), "field expected"); |
| 760 | cta = ctype_rawchild(cts, ctf); | 764 | cta = ctype_rawchild(cts, ctf); |
| 761 | if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) || | 765 | if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) || |
| 762 | (ctype_isnum(cta->info) && cta->size <= 8)) || | 766 | (ctype_isnum(cta->info) && cta->size <= 8)) || |
diff --git a/src/lj_cconv.c b/src/lj_cconv.c index 03ed0ce2..400c2ae6 100644 --- a/src/lj_cconv.c +++ b/src/lj_cconv.c | |||
| @@ -122,19 +122,25 @@ void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, | |||
| 122 | CTInfo dinfo = d->info, sinfo = s->info; | 122 | CTInfo dinfo = d->info, sinfo = s->info; |
| 123 | void *tmpptr; | 123 | void *tmpptr; |
| 124 | 124 | ||
| 125 | lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo)); | 125 | lj_assertCTS(!ctype_isenum(dinfo) && !ctype_isenum(sinfo), |
| 126 | lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo)); | 126 | "unresolved enum"); |
| 127 | lj_assertCTS(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo), | ||
| 128 | "unstripped attribute"); | ||
| 127 | 129 | ||
| 128 | if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) | 130 | if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT) |
| 129 | goto err_conv; | 131 | goto err_conv; |
| 130 | 132 | ||
| 131 | /* Some basic sanity checks. */ | 133 | /* Some basic sanity checks. */ |
| 132 | lua_assert(!ctype_isnum(dinfo) || dsize > 0); | 134 | lj_assertCTS(!ctype_isnum(dinfo) || dsize > 0, "bad size for number type"); |
| 133 | lua_assert(!ctype_isnum(sinfo) || ssize > 0); | 135 | lj_assertCTS(!ctype_isnum(sinfo) || ssize > 0, "bad size for number type"); |
| 134 | lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4); | 136 | lj_assertCTS(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4, |
| 135 | lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4); | 137 | "bad size for bool type"); |
| 136 | lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize); | 138 | lj_assertCTS(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4, |
| 137 | lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize); | 139 | "bad size for bool type"); |
| 140 | lj_assertCTS(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize, | ||
| 141 | "bad size for integer type"); | ||
| 142 | lj_assertCTS(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize, | ||
| 143 | "bad size for integer type"); | ||
| 138 | 144 | ||
| 139 | switch (cconv_idx2(dinfo, sinfo)) { | 145 | switch (cconv_idx2(dinfo, sinfo)) { |
| 140 | /* Destination is a bool. */ | 146 | /* Destination is a bool. */ |
| @@ -357,7 +363,7 @@ void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s, | |||
| 357 | if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) | 363 | if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s) |
| 358 | goto err_conv; /* Must be exact same type. */ | 364 | goto err_conv; /* Must be exact same type. */ |
| 359 | copyval: /* Copy value. */ | 365 | copyval: /* Copy value. */ |
| 360 | lua_assert(dsize == ssize); | 366 | lj_assertCTS(dsize == ssize, "value copy with different sizes"); |
| 361 | memcpy(dp, sp, dsize); | 367 | memcpy(dp, sp, dsize); |
| 362 | break; | 368 | break; |
| 363 | 369 | ||
| @@ -389,7 +395,7 @@ int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, | |||
| 389 | lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, | 395 | lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s, |
| 390 | (uint8_t *)&o->n, sp, 0); | 396 | (uint8_t *)&o->n, sp, 0); |
| 391 | /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ | 397 | /* Numbers are NOT canonicalized here! Beware of uninitialized data. */ |
| 392 | lua_assert(tvisnum(o)); | 398 | lj_assertCTS(tvisnum(o), "non-canonical NaN passed"); |
| 393 | } | 399 | } |
| 394 | } else { | 400 | } else { |
| 395 | uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0); | 401 | uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0); |
| @@ -406,7 +412,7 @@ int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid, | |||
| 406 | CTSize sz; | 412 | CTSize sz; |
| 407 | copyval: /* Copy value. */ | 413 | copyval: /* Copy value. */ |
| 408 | sz = s->size; | 414 | sz = s->size; |
| 409 | lua_assert(sz != CTSIZE_INVALID); | 415 | lj_assertCTS(sz != CTSIZE_INVALID, "value copy with invalid size"); |
| 410 | /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ | 416 | /* Attributes are stripped, qualifiers are kept (but mostly ignored). */ |
| 411 | cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); | 417 | cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz); |
| 412 | setcdataV(cts->L, o, cd); | 418 | setcdataV(cts->L, o, cd); |
| @@ -421,19 +427,22 @@ int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp) | |||
| 421 | CTInfo info = s->info; | 427 | CTInfo info = s->info; |
| 422 | CTSize pos, bsz; | 428 | CTSize pos, bsz; |
| 423 | uint32_t val; | 429 | uint32_t val; |
| 424 | lua_assert(ctype_isbitfield(info)); | 430 | lj_assertCTS(ctype_isbitfield(info), "bitfield expected"); |
| 425 | /* NYI: packed bitfields may cause misaligned reads. */ | 431 | /* NYI: packed bitfields may cause misaligned reads. */ |
| 426 | switch (ctype_bitcsz(info)) { | 432 | switch (ctype_bitcsz(info)) { |
| 427 | case 4: val = *(uint32_t *)sp; break; | 433 | case 4: val = *(uint32_t *)sp; break; |
| 428 | case 2: val = *(uint16_t *)sp; break; | 434 | case 2: val = *(uint16_t *)sp; break; |
| 429 | case 1: val = *(uint8_t *)sp; break; | 435 | case 1: val = *(uint8_t *)sp; break; |
| 430 | default: lua_assert(0); val = 0; break; | 436 | default: |
| 437 | lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info)); | ||
| 438 | val = 0; | ||
| 439 | break; | ||
| 431 | } | 440 | } |
| 432 | /* Check if a packed bitfield crosses a container boundary. */ | 441 | /* Check if a packed bitfield crosses a container boundary. */ |
| 433 | pos = ctype_bitpos(info); | 442 | pos = ctype_bitpos(info); |
| 434 | bsz = ctype_bitbsz(info); | 443 | bsz = ctype_bitbsz(info); |
| 435 | lua_assert(pos < 8*ctype_bitcsz(info)); | 444 | lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position"); |
| 436 | lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); | 445 | lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size"); |
| 437 | if (pos + bsz > 8*ctype_bitcsz(info)) | 446 | if (pos + bsz > 8*ctype_bitcsz(info)) |
| 438 | lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); | 447 | lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); |
| 439 | if (!(info & CTF_BOOL)) { | 448 | if (!(info & CTF_BOOL)) { |
| @@ -449,7 +458,7 @@ int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp) | |||
| 449 | } | 458 | } |
| 450 | } else { | 459 | } else { |
| 451 | uint32_t b = (val >> pos) & 1; | 460 | uint32_t b = (val >> pos) & 1; |
| 452 | lua_assert(bsz == 1); | 461 | lj_assertCTS(bsz == 1, "bad bool bitfield size"); |
| 453 | setboolV(o, b); | 462 | setboolV(o, b); |
| 454 | setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */ | 463 | setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */ |
| 455 | } | 464 | } |
| @@ -553,7 +562,7 @@ void lj_cconv_ct_tv(CTState *cts, CType *d, | |||
| 553 | sid = cdataV(o)->ctypeid; | 562 | sid = cdataV(o)->ctypeid; |
| 554 | s = ctype_get(cts, sid); | 563 | s = ctype_get(cts, sid); |
| 555 | if (ctype_isref(s->info)) { /* Resolve reference for value. */ | 564 | if (ctype_isref(s->info)) { /* Resolve reference for value. */ |
| 556 | lua_assert(s->size == CTSIZE_PTR); | 565 | lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized"); |
| 557 | sp = *(void **)sp; | 566 | sp = *(void **)sp; |
| 558 | sid = ctype_cid(s->info); | 567 | sid = ctype_cid(s->info); |
| 559 | } | 568 | } |
| @@ -571,7 +580,7 @@ void lj_cconv_ct_tv(CTState *cts, CType *d, | |||
| 571 | CType *cct = lj_ctype_getfield(cts, d, str, &ofs); | 580 | CType *cct = lj_ctype_getfield(cts, d, str, &ofs); |
| 572 | if (!cct || !ctype_isconstval(cct->info)) | 581 | if (!cct || !ctype_isconstval(cct->info)) |
| 573 | goto err_conv; | 582 | goto err_conv; |
| 574 | lua_assert(d->size == 4); | 583 | lj_assertCTS(d->size == 4, "only 32 bit enum supported"); /* NYI */ |
| 575 | sp = (uint8_t *)&cct->size; | 584 | sp = (uint8_t *)&cct->size; |
| 576 | sid = ctype_cid(cct->info); | 585 | sid = ctype_cid(cct->info); |
| 577 | } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ | 586 | } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */ |
| @@ -635,10 +644,10 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) | |||
| 635 | CTInfo info = d->info; | 644 | CTInfo info = d->info; |
| 636 | CTSize pos, bsz; | 645 | CTSize pos, bsz; |
| 637 | uint32_t val, mask; | 646 | uint32_t val, mask; |
| 638 | lua_assert(ctype_isbitfield(info)); | 647 | lj_assertCTS(ctype_isbitfield(info), "bitfield expected"); |
| 639 | if ((info & CTF_BOOL)) { | 648 | if ((info & CTF_BOOL)) { |
| 640 | uint8_t tmpbool; | 649 | uint8_t tmpbool; |
| 641 | lua_assert(ctype_bitbsz(info) == 1); | 650 | lj_assertCTS(ctype_bitbsz(info) == 1, "bad bool bitfield size"); |
| 642 | lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); | 651 | lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0); |
| 643 | val = tmpbool; | 652 | val = tmpbool; |
| 644 | } else { | 653 | } else { |
| @@ -647,8 +656,8 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) | |||
| 647 | } | 656 | } |
| 648 | pos = ctype_bitpos(info); | 657 | pos = ctype_bitpos(info); |
| 649 | bsz = ctype_bitbsz(info); | 658 | bsz = ctype_bitbsz(info); |
| 650 | lua_assert(pos < 8*ctype_bitcsz(info)); | 659 | lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position"); |
| 651 | lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info)); | 660 | lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size"); |
| 652 | /* Check if a packed bitfield crosses a container boundary. */ | 661 | /* Check if a packed bitfield crosses a container boundary. */ |
| 653 | if (pos + bsz > 8*ctype_bitcsz(info)) | 662 | if (pos + bsz > 8*ctype_bitcsz(info)) |
| 654 | lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); | 663 | lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT); |
| @@ -659,7 +668,9 @@ void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o) | |||
| 659 | case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; | 668 | case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break; |
| 660 | case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; | 669 | case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break; |
| 661 | case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; | 670 | case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break; |
| 662 | default: lua_assert(0); break; | 671 | default: |
| 672 | lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info)); | ||
| 673 | break; | ||
| 663 | } | 674 | } |
| 664 | } | 675 | } |
| 665 | 676 | ||
diff --git a/src/lj_cconv.h b/src/lj_cconv.h index 2fd5a71c..1f716d2a 100644 --- a/src/lj_cconv.h +++ b/src/lj_cconv.h | |||
| @@ -27,13 +27,14 @@ enum { | |||
| 27 | static LJ_AINLINE uint32_t cconv_idx(CTInfo info) | 27 | static LJ_AINLINE uint32_t cconv_idx(CTInfo info) |
| 28 | { | 28 | { |
| 29 | uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ | 29 | uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */ |
| 30 | lua_assert(ctype_type(info) <= CT_MAYCONVERT); | 30 | lj_assertX(ctype_type(info) <= CT_MAYCONVERT, |
| 31 | "cannot convert ctype %08x", info); | ||
| 31 | #if LJ_64 | 32 | #if LJ_64 |
| 32 | idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u); | 33 | idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u); |
| 33 | #else | 34 | #else |
| 34 | idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); | 35 | idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u); |
| 35 | #endif | 36 | #endif |
| 36 | lua_assert(idx < 8); | 37 | lj_assertX(idx < 8, "cannot convert ctype %08x", info); |
| 37 | return idx; | 38 | return idx; |
| 38 | } | 39 | } |
| 39 | 40 | ||
diff --git a/src/lj_cdata.c b/src/lj_cdata.c index 10d9423d..a827d1ec 100644 --- a/src/lj_cdata.c +++ b/src/lj_cdata.c | |||
| @@ -35,7 +35,7 @@ GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align) | |||
| 35 | uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata); | 35 | uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata); |
| 36 | uintptr_t almask = (1u << align) - 1u; | 36 | uintptr_t almask = (1u << align) - 1u; |
| 37 | GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata)); | 37 | GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata)); |
| 38 | lua_assert((char *)cd - p < 65536); | 38 | lj_assertL((char *)cd - p < 65536, "excessive cdata alignment"); |
| 39 | cdatav(cd)->offset = (uint16_t)((char *)cd - p); | 39 | cdatav(cd)->offset = (uint16_t)((char *)cd - p); |
| 40 | cdatav(cd)->extra = extra; | 40 | cdatav(cd)->extra = extra; |
| 41 | cdatav(cd)->len = sz; | 41 | cdatav(cd)->len = sz; |
| @@ -76,8 +76,8 @@ void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd) | |||
| 76 | } else if (LJ_LIKELY(!cdataisv(cd))) { | 76 | } else if (LJ_LIKELY(!cdataisv(cd))) { |
| 77 | CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid); | 77 | CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid); |
| 78 | CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR; | 78 | CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR; |
| 79 | lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) || | 79 | lj_assertG(ctype_hassize(ct->info) || ctype_isfunc(ct->info) || |
| 80 | ctype_isextern(ct->info)); | 80 | ctype_isextern(ct->info), "free of ctype without a size"); |
| 81 | lj_mem_free(g, cd, sizeof(GCcdata) + sz); | 81 | lj_mem_free(g, cd, sizeof(GCcdata) + sz); |
| 82 | } else { | 82 | } else { |
| 83 | lj_mem_free(g, memcdatav(cd), sizecdatav(cd)); | 83 | lj_mem_free(g, memcdatav(cd), sizecdatav(cd)); |
| @@ -115,7 +115,7 @@ CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp, | |||
| 115 | 115 | ||
| 116 | /* Resolve reference for cdata object. */ | 116 | /* Resolve reference for cdata object. */ |
| 117 | if (ctype_isref(ct->info)) { | 117 | if (ctype_isref(ct->info)) { |
| 118 | lua_assert(ct->size == CTSIZE_PTR); | 118 | lj_assertCTS(ct->size == CTSIZE_PTR, "ref is not pointer-sized"); |
| 119 | p = *(uint8_t **)p; | 119 | p = *(uint8_t **)p; |
| 120 | ct = ctype_child(cts, ct); | 120 | ct = ctype_child(cts, ct); |
| 121 | } | 121 | } |
| @@ -126,7 +126,8 @@ collect_attrib: | |||
| 126 | if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; | 126 | if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size; |
| 127 | ct = ctype_child(cts, ct); | 127 | ct = ctype_child(cts, ct); |
| 128 | } | 128 | } |
| 129 | lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */ | 129 | /* Interning rejects refs to refs. */ |
| 130 | lj_assertCTS(!ctype_isref(ct->info), "bad ref of ref"); | ||
| 130 | 131 | ||
| 131 | if (tvisint(key)) { | 132 | if (tvisint(key)) { |
| 132 | idx = (ptrdiff_t)intV(key); | 133 | idx = (ptrdiff_t)intV(key); |
| @@ -212,7 +213,8 @@ collect_attrib: | |||
| 212 | static void cdata_getconst(CTState *cts, TValue *o, CType *ct) | 213 | static void cdata_getconst(CTState *cts, TValue *o, CType *ct) |
| 213 | { | 214 | { |
| 214 | CType *ctt = ctype_child(cts, ct); | 215 | CType *ctt = ctype_child(cts, ct); |
| 215 | lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); | 216 | lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4, |
| 217 | "only 32 bit const supported"); /* NYI */ | ||
| 216 | /* Constants are already zero-extended/sign-extended to 32 bits. */ | 218 | /* Constants are already zero-extended/sign-extended to 32 bits. */ |
| 217 | if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) | 219 | if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) |
| 218 | setnumV(o, (lua_Number)(uint32_t)ct->size); | 220 | setnumV(o, (lua_Number)(uint32_t)ct->size); |
| @@ -233,13 +235,14 @@ int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp) | |||
| 233 | } | 235 | } |
| 234 | 236 | ||
| 235 | /* Get child type of pointer/array/field. */ | 237 | /* Get child type of pointer/array/field. */ |
| 236 | lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info)); | 238 | lj_assertCTS(ctype_ispointer(s->info) || ctype_isfield(s->info), |
| 239 | "pointer or field expected"); | ||
| 237 | sid = ctype_cid(s->info); | 240 | sid = ctype_cid(s->info); |
| 238 | s = ctype_get(cts, sid); | 241 | s = ctype_get(cts, sid); |
| 239 | 242 | ||
| 240 | /* Resolve reference for field. */ | 243 | /* Resolve reference for field. */ |
| 241 | if (ctype_isref(s->info)) { | 244 | if (ctype_isref(s->info)) { |
| 242 | lua_assert(s->size == CTSIZE_PTR); | 245 | lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized"); |
| 243 | sp = *(uint8_t **)sp; | 246 | sp = *(uint8_t **)sp; |
| 244 | sid = ctype_cid(s->info); | 247 | sid = ctype_cid(s->info); |
| 245 | s = ctype_get(cts, sid); | 248 | s = ctype_get(cts, sid); |
| @@ -266,12 +269,13 @@ void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual) | |||
| 266 | } | 269 | } |
| 267 | 270 | ||
| 268 | /* Get child type of pointer/array/field. */ | 271 | /* Get child type of pointer/array/field. */ |
| 269 | lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info)); | 272 | lj_assertCTS(ctype_ispointer(d->info) || ctype_isfield(d->info), |
| 273 | "pointer or field expected"); | ||
| 270 | d = ctype_child(cts, d); | 274 | d = ctype_child(cts, d); |
| 271 | 275 | ||
| 272 | /* Resolve reference for field. */ | 276 | /* Resolve reference for field. */ |
| 273 | if (ctype_isref(d->info)) { | 277 | if (ctype_isref(d->info)) { |
| 274 | lua_assert(d->size == CTSIZE_PTR); | 278 | lj_assertCTS(d->size == CTSIZE_PTR, "ref is not pointer-sized"); |
| 275 | dp = *(uint8_t **)dp; | 279 | dp = *(uint8_t **)dp; |
| 276 | d = ctype_child(cts, d); | 280 | d = ctype_child(cts, d); |
| 277 | } | 281 | } |
| @@ -286,7 +290,8 @@ void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual) | |||
| 286 | d = ctype_child(cts, d); | 290 | d = ctype_child(cts, d); |
| 287 | } | 291 | } |
| 288 | 292 | ||
| 289 | lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info)); | 293 | lj_assertCTS(ctype_hassize(d->info), "store to ctype without size"); |
| 294 | lj_assertCTS(!ctype_isvoid(d->info), "store to void type"); | ||
| 290 | 295 | ||
| 291 | if (((d->info|qual) & CTF_CONST)) { | 296 | if (((d->info|qual) & CTF_CONST)) { |
| 292 | err_const: | 297 | err_const: |
diff --git a/src/lj_cdata.h b/src/lj_cdata.h index c1089e64..c3df8ba0 100644 --- a/src/lj_cdata.h +++ b/src/lj_cdata.h | |||
| @@ -18,7 +18,7 @@ static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz) | |||
| 18 | if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ | 18 | if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ |
| 19 | return ((void *)(uintptr_t)*(uint32_t *)p); | 19 | return ((void *)(uintptr_t)*(uint32_t *)p); |
| 20 | } else { | 20 | } else { |
| 21 | lua_assert(sz == CTSIZE_PTR); | 21 | lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz); |
| 22 | return *(void **)p; | 22 | return *(void **)p; |
| 23 | } | 23 | } |
| 24 | } | 24 | } |
| @@ -29,7 +29,7 @@ static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v) | |||
| 29 | if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ | 29 | if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */ |
| 30 | *(uint32_t *)p = (uint32_t)(uintptr_t)v; | 30 | *(uint32_t *)p = (uint32_t)(uintptr_t)v; |
| 31 | } else { | 31 | } else { |
| 32 | lua_assert(sz == CTSIZE_PTR); | 32 | lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz); |
| 33 | *(void **)p = (void *)v; | 33 | *(void **)p = (void *)v; |
| 34 | } | 34 | } |
| 35 | } | 35 | } |
| @@ -40,7 +40,8 @@ static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz) | |||
| 40 | GCcdata *cd; | 40 | GCcdata *cd; |
| 41 | #ifdef LUA_USE_ASSERT | 41 | #ifdef LUA_USE_ASSERT |
| 42 | CType *ct = ctype_raw(cts, id); | 42 | CType *ct = ctype_raw(cts, id); |
| 43 | lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz); | 43 | lj_assertCTS((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz, |
| 44 | "inconsistent size of fixed-size cdata alloc"); | ||
| 44 | #endif | 45 | #endif |
| 45 | cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); | 46 | cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz); |
| 46 | cd->gct = ~LJ_TCDATA; | 47 | cd->gct = ~LJ_TCDATA; |
diff --git a/src/lj_clib.c b/src/lj_clib.c index 2ea6ff45..8da41a83 100644 --- a/src/lj_clib.c +++ b/src/lj_clib.c | |||
| @@ -350,7 +350,8 @@ TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name) | |||
| 350 | lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); | 350 | lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name)); |
| 351 | if (ctype_isconstval(ct->info)) { | 351 | if (ctype_isconstval(ct->info)) { |
| 352 | CType *ctt = ctype_child(cts, ct); | 352 | CType *ctt = ctype_child(cts, ct); |
| 353 | lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4); | 353 | lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4, |
| 354 | "only 32 bit const supported"); /* NYI */ | ||
| 354 | if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) | 355 | if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0) |
| 355 | setnumV(tv, (lua_Number)(uint32_t)ct->size); | 356 | setnumV(tv, (lua_Number)(uint32_t)ct->size); |
| 356 | else | 357 | else |
| @@ -362,7 +363,8 @@ TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name) | |||
| 362 | #endif | 363 | #endif |
| 363 | void *p = clib_getsym(cl, sym); | 364 | void *p = clib_getsym(cl, sym); |
| 364 | GCcdata *cd; | 365 | GCcdata *cd; |
| 365 | lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info)); | 366 | lj_assertCTS(ctype_isfunc(ct->info) || ctype_isextern(ct->info), |
| 367 | "unexpected ctype %08x in clib", ct->info); | ||
| 366 | #if LJ_TARGET_X86 && LJ_ABI_WIN | 368 | #if LJ_TARGET_X86 && LJ_ABI_WIN |
| 367 | /* Retry with decorated name for fastcall/stdcall functions. */ | 369 | /* Retry with decorated name for fastcall/stdcall functions. */ |
| 368 | if (!p && ctype_isfunc(ct->info)) { | 370 | if (!p && ctype_isfunc(ct->info)) { |
diff --git a/src/lj_cparse.c b/src/lj_cparse.c index a393965e..78628bba 100644 --- a/src/lj_cparse.c +++ b/src/lj_cparse.c | |||
| @@ -28,6 +28,12 @@ | |||
| 28 | ** If in doubt, please check the input against your favorite C compiler. | 28 | ** If in doubt, please check the input against your favorite C compiler. |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #ifdef LUA_USE_ASSERT | ||
| 32 | #define lj_assertCP(c, ...) (lj_assertG_(G(cp->L), (c), __VA_ARGS__)) | ||
| 33 | #else | ||
| 34 | #define lj_assertCP(c, ...) ((void)cp) | ||
| 35 | #endif | ||
| 36 | |||
| 31 | /* -- Miscellaneous ------------------------------------------------------- */ | 37 | /* -- Miscellaneous ------------------------------------------------------- */ |
| 32 | 38 | ||
| 33 | /* Match string against a C literal. */ | 39 | /* Match string against a C literal. */ |
| @@ -61,7 +67,7 @@ LJ_NORET static void cp_err(CPState *cp, ErrMsg em); | |||
| 61 | 67 | ||
| 62 | static const char *cp_tok2str(CPState *cp, CPToken tok) | 68 | static const char *cp_tok2str(CPState *cp, CPToken tok) |
| 63 | { | 69 | { |
| 64 | lua_assert(tok < CTOK_FIRSTDECL); | 70 | lj_assertCP(tok < CTOK_FIRSTDECL, "bad CPToken %d", tok); |
| 65 | if (tok > CTOK_OFS) | 71 | if (tok > CTOK_OFS) |
| 66 | return ctoknames[tok-CTOK_OFS-1]; | 72 | return ctoknames[tok-CTOK_OFS-1]; |
| 67 | else if (!lj_char_iscntrl(tok)) | 73 | else if (!lj_char_iscntrl(tok)) |
| @@ -392,7 +398,7 @@ static void cp_init(CPState *cp) | |||
| 392 | cp->curpack = 0; | 398 | cp->curpack = 0; |
| 393 | cp->packstack[0] = 255; | 399 | cp->packstack[0] = 255; |
| 394 | lj_buf_init(cp->L, &cp->sb); | 400 | lj_buf_init(cp->L, &cp->sb); |
| 395 | lua_assert(cp->p != NULL); | 401 | lj_assertCP(cp->p != NULL, "uninitialized cp->p"); |
| 396 | cp_get(cp); /* Read-ahead first char. */ | 402 | cp_get(cp); /* Read-ahead first char. */ |
| 397 | cp->tok = 0; | 403 | cp->tok = 0; |
| 398 | cp->tmask = CPNS_DEFAULT; | 404 | cp->tmask = CPNS_DEFAULT; |
| @@ -853,12 +859,13 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) | |||
| 853 | /* The cid is already part of info for copies of pointers/functions. */ | 859 | /* The cid is already part of info for copies of pointers/functions. */ |
| 854 | idx = ct->next; | 860 | idx = ct->next; |
| 855 | if (ctype_istypedef(info)) { | 861 | if (ctype_istypedef(info)) { |
| 856 | lua_assert(id == 0); | 862 | lj_assertCP(id == 0, "typedef not at toplevel"); |
| 857 | id = ctype_cid(info); | 863 | id = ctype_cid(info); |
| 858 | /* Always refetch info/size, since struct/enum may have been completed. */ | 864 | /* Always refetch info/size, since struct/enum may have been completed. */ |
| 859 | cinfo = ctype_get(cp->cts, id)->info; | 865 | cinfo = ctype_get(cp->cts, id)->info; |
| 860 | csize = ctype_get(cp->cts, id)->size; | 866 | csize = ctype_get(cp->cts, id)->size; |
| 861 | lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo)); | 867 | lj_assertCP(ctype_isstruct(cinfo) || ctype_isenum(cinfo), |
| 868 | "typedef of bad type"); | ||
| 862 | } else if (ctype_isfunc(info)) { /* Intern function. */ | 869 | } else if (ctype_isfunc(info)) { /* Intern function. */ |
| 863 | CType *fct; | 870 | CType *fct; |
| 864 | CTypeID fid; | 871 | CTypeID fid; |
| @@ -891,7 +898,7 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) | |||
| 891 | /* Inherit csize/cinfo from original type. */ | 898 | /* Inherit csize/cinfo from original type. */ |
| 892 | } else { | 899 | } else { |
| 893 | if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ | 900 | if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */ |
| 894 | lua_assert(id == 0); | 901 | lj_assertCP(id == 0, "number not at toplevel"); |
| 895 | if (!(info & CTF_BOOL)) { | 902 | if (!(info & CTF_BOOL)) { |
| 896 | CTSize msize = ctype_msizeP(decl->attr); | 903 | CTSize msize = ctype_msizeP(decl->attr); |
| 897 | CTSize vsize = ctype_vsizeP(decl->attr); | 904 | CTSize vsize = ctype_vsizeP(decl->attr); |
| @@ -946,7 +953,7 @@ static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl) | |||
| 946 | info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN); | 953 | info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN); |
| 947 | info |= (cinfo & CTF_QUAL); /* Inherit qual. */ | 954 | info |= (cinfo & CTF_QUAL); /* Inherit qual. */ |
| 948 | } else { | 955 | } else { |
| 949 | lua_assert(ctype_isvoid(info)); | 956 | lj_assertCP(ctype_isvoid(info), "bad ctype %08x", info); |
| 950 | } | 957 | } |
| 951 | csize = size; | 958 | csize = size; |
| 952 | cinfo = info+id; | 959 | cinfo = info+id; |
| @@ -1585,7 +1592,7 @@ end_decl: | |||
| 1585 | cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); | 1592 | cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC); |
| 1586 | sz = sizeof(int); | 1593 | sz = sizeof(int); |
| 1587 | } | 1594 | } |
| 1588 | lua_assert(sz != 0); | 1595 | lj_assertCP(sz != 0, "basic ctype with zero size"); |
| 1589 | info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ | 1596 | info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */ |
| 1590 | info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ | 1597 | info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */ |
| 1591 | cp_push(decl, info, sz); | 1598 | cp_push(decl, info, sz); |
| @@ -1845,7 +1852,7 @@ static void cp_decl_multi(CPState *cp) | |||
| 1845 | /* Treat both static and extern function declarations as extern. */ | 1852 | /* Treat both static and extern function declarations as extern. */ |
| 1846 | ct = ctype_get(cp->cts, ctypeid); | 1853 | ct = ctype_get(cp->cts, ctypeid); |
| 1847 | /* We always get new anonymous functions (typedefs are copied). */ | 1854 | /* We always get new anonymous functions (typedefs are copied). */ |
| 1848 | lua_assert(gcref(ct->name) == NULL); | 1855 | lj_assertCP(gcref(ct->name) == NULL, "unexpected named function"); |
| 1849 | id = ctypeid; /* Just name it. */ | 1856 | id = ctypeid; /* Just name it. */ |
| 1850 | } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ | 1857 | } else if ((scl & CDF_STATIC)) { /* Accept static constants. */ |
| 1851 | id = cp_decl_constinit(cp, &ct, ctypeid); | 1858 | id = cp_decl_constinit(cp, &ct, ctypeid); |
| @@ -1902,7 +1909,7 @@ static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud) | |||
| 1902 | cp_decl_single(cp); | 1909 | cp_decl_single(cp); |
| 1903 | if (cp->param && cp->param != cp->L->top) | 1910 | if (cp->param && cp->param != cp->L->top) |
| 1904 | cp_err(cp, LJ_ERR_FFI_NUMPARAM); | 1911 | cp_err(cp, LJ_ERR_FFI_NUMPARAM); |
| 1905 | lua_assert(cp->depth == 0); | 1912 | lj_assertCP(cp->depth == 0, "unbalanced cparser declaration depth"); |
| 1906 | return NULL; | 1913 | return NULL; |
| 1907 | } | 1914 | } |
| 1908 | 1915 | ||
diff --git a/src/lj_crecord.c b/src/lj_crecord.c index e50fdbf7..7ae1479e 100644 --- a/src/lj_crecord.c +++ b/src/lj_crecord.c | |||
| @@ -61,7 +61,8 @@ static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o) | |||
| 61 | static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) | 61 | static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr) |
| 62 | { | 62 | { |
| 63 | CTypeID id; | 63 | CTypeID id; |
| 64 | lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID); | 64 | lj_assertJ(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID, |
| 65 | "expected CTypeID cdata"); | ||
| 65 | id = *(CTypeID *)cdataptr(cd); | 66 | id = *(CTypeID *)cdataptr(cd); |
| 66 | tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT); | 67 | tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT); |
| 67 | emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); | 68 | emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id)); |
| @@ -237,13 +238,14 @@ static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen, | |||
| 237 | if (len > CREC_COPY_MAXLEN) goto fallback; | 238 | if (len > CREC_COPY_MAXLEN) goto fallback; |
| 238 | if (ct) { | 239 | if (ct) { |
| 239 | CTState *cts = ctype_ctsG(J2G(J)); | 240 | CTState *cts = ctype_ctsG(J2G(J)); |
| 240 | lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info)); | 241 | lj_assertJ(ctype_isarray(ct->info) || ctype_isstruct(ct->info), |
| 242 | "copy of non-aggregate"); | ||
| 241 | if (ctype_isarray(ct->info)) { | 243 | if (ctype_isarray(ct->info)) { |
| 242 | CType *cct = ctype_rawchild(cts, ct); | 244 | CType *cct = ctype_rawchild(cts, ct); |
| 243 | tp = crec_ct2irt(cts, cct); | 245 | tp = crec_ct2irt(cts, cct); |
| 244 | if (tp == IRT_CDATA) goto rawcopy; | 246 | if (tp == IRT_CDATA) goto rawcopy; |
| 245 | step = lj_ir_type_size[tp]; | 247 | step = lj_ir_type_size[tp]; |
| 246 | lua_assert((len & (step-1)) == 0); | 248 | lj_assertJ((len & (step-1)) == 0, "copy of fractional size"); |
| 247 | } else if ((ct->info & CTF_UNION)) { | 249 | } else if ((ct->info & CTF_UNION)) { |
| 248 | step = (1u << ctype_align(ct->info)); | 250 | step = (1u << ctype_align(ct->info)); |
| 249 | goto rawcopy; | 251 | goto rawcopy; |
| @@ -629,7 +631,8 @@ static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval) | |||
| 629 | /* Specialize to the name of the enum constant. */ | 631 | /* Specialize to the name of the enum constant. */ |
| 630 | emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); | 632 | emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str)); |
| 631 | if (cct && ctype_isconstval(cct->info)) { | 633 | if (cct && ctype_isconstval(cct->info)) { |
| 632 | lua_assert(ctype_child(cts, cct)->size == 4); | 634 | lj_assertJ(ctype_child(cts, cct)->size == 4, |
| 635 | "only 32 bit const supported"); /* NYI */ | ||
| 633 | svisnz = (void *)(intptr_t)(ofs != 0); | 636 | svisnz = (void *)(intptr_t)(ofs != 0); |
| 634 | sp = lj_ir_kint(J, (int32_t)ofs); | 637 | sp = lj_ir_kint(J, (int32_t)ofs); |
| 635 | sid = ctype_cid(cct->info); | 638 | sid = ctype_cid(cct->info); |
| @@ -757,7 +760,7 @@ static void crec_index_bf(jit_State *J, RecordFFData *rd, TRef ptr, CTInfo info) | |||
| 757 | IRType t = IRT_I8 + 2*lj_fls(ctype_bitcsz(info)) + ((info&CTF_UNSIGNED)?1:0); | 760 | IRType t = IRT_I8 + 2*lj_fls(ctype_bitcsz(info)) + ((info&CTF_UNSIGNED)?1:0); |
| 758 | TRef tr = emitir(IRT(IR_XLOAD, t), ptr, 0); | 761 | TRef tr = emitir(IRT(IR_XLOAD, t), ptr, 0); |
| 759 | CTSize pos = ctype_bitpos(info), bsz = ctype_bitbsz(info), shift = 32 - bsz; | 762 | CTSize pos = ctype_bitpos(info), bsz = ctype_bitbsz(info), shift = 32 - bsz; |
| 760 | lua_assert(t <= IRT_U32); /* NYI: 64 bit bitfields. */ | 763 | lj_assertJ(t <= IRT_U32, "only 32 bit bitfields supported"); /* NYI */ |
| 761 | if (rd->data == 0) { /* __index metamethod. */ | 764 | if (rd->data == 0) { /* __index metamethod. */ |
| 762 | if ((info & CTF_BOOL)) { | 765 | if ((info & CTF_BOOL)) { |
| 763 | tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << pos)))); | 766 | tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << pos)))); |
| @@ -769,7 +772,7 @@ static void crec_index_bf(jit_State *J, RecordFFData *rd, TRef ptr, CTInfo info) | |||
| 769 | tr = emitir(IRTI(IR_BSHL), tr, lj_ir_kint(J, shift - pos)); | 772 | tr = emitir(IRTI(IR_BSHL), tr, lj_ir_kint(J, shift - pos)); |
| 770 | tr = emitir(IRTI(IR_BSAR), tr, lj_ir_kint(J, shift)); | 773 | tr = emitir(IRTI(IR_BSAR), tr, lj_ir_kint(J, shift)); |
| 771 | } else { | 774 | } else { |
| 772 | lua_assert(bsz < 32); /* Full-size fields cannot end up here. */ | 775 | lj_assertJ(bsz < 32, "unexpected full bitfield index"); |
| 773 | tr = emitir(IRTI(IR_BSHR), tr, lj_ir_kint(J, pos)); | 776 | tr = emitir(IRTI(IR_BSHR), tr, lj_ir_kint(J, pos)); |
| 774 | tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << bsz)-1))); | 777 | tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << bsz)-1))); |
| 775 | /* We can omit the U32 to NUM conversion, since bsz < 32. */ | 778 | /* We can omit the U32 to NUM conversion, since bsz < 32. */ |
| @@ -884,7 +887,7 @@ again: | |||
| 884 | crec_index_bf(J, rd, ptr, fct->info); | 887 | crec_index_bf(J, rd, ptr, fct->info); |
| 885 | return; | 888 | return; |
| 886 | } else { | 889 | } else { |
| 887 | lua_assert(ctype_isfield(fct->info)); | 890 | lj_assertJ(ctype_isfield(fct->info), "field expected"); |
| 888 | sid = ctype_cid(fct->info); | 891 | sid = ctype_cid(fct->info); |
| 889 | } | 892 | } |
| 890 | } | 893 | } |
| @@ -1111,7 +1114,7 @@ static TRef crec_call_args(jit_State *J, RecordFFData *rd, | |||
| 1111 | if (fid) { /* Get argument type from field. */ | 1114 | if (fid) { /* Get argument type from field. */ |
| 1112 | CType *ctf = ctype_get(cts, fid); | 1115 | CType *ctf = ctype_get(cts, fid); |
| 1113 | fid = ctf->sib; | 1116 | fid = ctf->sib; |
| 1114 | lua_assert(ctype_isfield(ctf->info)); | 1117 | lj_assertJ(ctype_isfield(ctf->info), "field expected"); |
| 1115 | did = ctype_cid(ctf->info); | 1118 | did = ctype_cid(ctf->info); |
| 1116 | } else { | 1119 | } else { |
| 1117 | if (!(ct->info & CTF_VARARG)) | 1120 | if (!(ct->info & CTF_VARARG)) |
diff --git a/src/lj_ctype.c b/src/lj_ctype.c index 7e96e1bc..0e3f8855 100644 --- a/src/lj_ctype.c +++ b/src/lj_ctype.c | |||
| @@ -153,7 +153,7 @@ CTypeID lj_ctype_new(CTState *cts, CType **ctp) | |||
| 153 | { | 153 | { |
| 154 | CTypeID id = cts->top; | 154 | CTypeID id = cts->top; |
| 155 | CType *ct; | 155 | CType *ct; |
| 156 | lua_assert(cts->L); | 156 | lj_assertCTS(cts->L, "uninitialized cts->L"); |
| 157 | if (LJ_UNLIKELY(id >= cts->sizetab)) { | 157 | if (LJ_UNLIKELY(id >= cts->sizetab)) { |
| 158 | if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); | 158 | if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV); |
| 159 | #ifdef LUAJIT_CTYPE_CHECK_ANCHOR | 159 | #ifdef LUAJIT_CTYPE_CHECK_ANCHOR |
| @@ -182,7 +182,7 @@ CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size) | |||
| 182 | { | 182 | { |
| 183 | uint32_t h = ct_hashtype(info, size); | 183 | uint32_t h = ct_hashtype(info, size); |
| 184 | CTypeID id = cts->hash[h]; | 184 | CTypeID id = cts->hash[h]; |
| 185 | lua_assert(cts->L); | 185 | lj_assertCTS(cts->L, "uninitialized cts->L"); |
| 186 | while (id) { | 186 | while (id) { |
| 187 | CType *ct = ctype_get(cts, id); | 187 | CType *ct = ctype_get(cts, id); |
| 188 | if (ct->info == info && ct->size == size) | 188 | if (ct->info == info && ct->size == size) |
| @@ -298,9 +298,9 @@ CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem) | |||
| 298 | } | 298 | } |
| 299 | ct = ctype_raw(cts, arrid); | 299 | ct = ctype_raw(cts, arrid); |
| 300 | } | 300 | } |
| 301 | lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */ | 301 | lj_assertCTS(ctype_isvlarray(ct->info), "VLA expected"); |
| 302 | ct = ctype_rawchild(cts, ct); /* Get array element. */ | 302 | ct = ctype_rawchild(cts, ct); /* Get array element. */ |
| 303 | lua_assert(ctype_hassize(ct->info)); | 303 | lj_assertCTS(ctype_hassize(ct->info), "bad VLA without size"); |
| 304 | /* Calculate actual size of VLA and check for overflow. */ | 304 | /* Calculate actual size of VLA and check for overflow. */ |
| 305 | xsz += (uint64_t)ct->size * nelem; | 305 | xsz += (uint64_t)ct->size * nelem; |
| 306 | return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; | 306 | return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID; |
| @@ -323,7 +323,8 @@ CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp) | |||
| 323 | } else { | 323 | } else { |
| 324 | if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); | 324 | if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN); |
| 325 | qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); | 325 | qual |= (info & ~(CTF_ALIGN|CTMASK_CID)); |
| 326 | lua_assert(ctype_hassize(info) || ctype_isfunc(info)); | 326 | lj_assertCTS(ctype_hassize(info) || ctype_isfunc(info), |
| 327 | "ctype without size"); | ||
| 327 | *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; | 328 | *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size; |
| 328 | break; | 329 | break; |
| 329 | } | 330 | } |
| @@ -528,7 +529,7 @@ static void ctype_repr(CTRepr *ctr, CTypeID id) | |||
| 528 | ctype_appc(ctr, ')'); | 529 | ctype_appc(ctr, ')'); |
| 529 | break; | 530 | break; |
| 530 | default: | 531 | default: |
| 531 | lua_assert(0); | 532 | lj_assertG_(ctr->cts->g, 0, "bad ctype %08x", info); |
| 532 | break; | 533 | break; |
| 533 | } | 534 | } |
| 534 | ct = ctype_get(ctr->cts, ctype_cid(info)); | 535 | ct = ctype_get(ctr->cts, ctype_cid(info)); |
diff --git a/src/lj_ctype.h b/src/lj_ctype.h index 73cefef8..8c8fc790 100644 --- a/src/lj_ctype.h +++ b/src/lj_ctype.h | |||
| @@ -260,6 +260,12 @@ typedef struct CTState { | |||
| 260 | 260 | ||
| 261 | #define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */ | 261 | #define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */ |
| 262 | 262 | ||
| 263 | #ifdef LUA_USE_ASSERT | ||
| 264 | #define lj_assertCTS(c, ...) (lj_assertG_(cts->g, (c), __VA_ARGS__)) | ||
| 265 | #else | ||
| 266 | #define lj_assertCTS(c, ...) ((void)cts) | ||
| 267 | #endif | ||
| 268 | |||
| 263 | /* -- Predefined types ---------------------------------------------------- */ | 269 | /* -- Predefined types ---------------------------------------------------- */ |
| 264 | 270 | ||
| 265 | /* Target-dependent types. */ | 271 | /* Target-dependent types. */ |
| @@ -392,7 +398,8 @@ static LJ_AINLINE CTState *ctype_cts(lua_State *L) | |||
| 392 | /* Check C type ID for validity when assertions are enabled. */ | 398 | /* Check C type ID for validity when assertions are enabled. */ |
| 393 | static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) | 399 | static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id) |
| 394 | { | 400 | { |
| 395 | lua_assert(id > 0 && id < cts->top); UNUSED(cts); | 401 | UNUSED(cts); |
| 402 | lj_assertCTS(id > 0 && id < cts->top, "bad CTID %d", id); | ||
| 396 | return id; | 403 | return id; |
| 397 | } | 404 | } |
| 398 | 405 | ||
| @@ -408,8 +415,9 @@ static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id) | |||
| 408 | /* Get child C type. */ | 415 | /* Get child C type. */ |
| 409 | static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) | 416 | static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct) |
| 410 | { | 417 | { |
| 411 | lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || | 418 | lj_assertCTS(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) || |
| 412 | ctype_isbitfield(ct->info))); /* These don't have children. */ | 419 | ctype_isbitfield(ct->info)), |
| 420 | "ctype %08x has no children", ct->info); | ||
| 413 | return ctype_get(cts, ctype_cid(ct->info)); | 421 | return ctype_get(cts, ctype_cid(ct->info)); |
| 414 | } | 422 | } |
| 415 | 423 | ||
diff --git a/src/lj_debug.c b/src/lj_debug.c index 2f2ea9f0..c1f0f314 100644 --- a/src/lj_debug.c +++ b/src/lj_debug.c | |||
| @@ -55,7 +55,8 @@ static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe) | |||
| 55 | const BCIns *ins; | 55 | const BCIns *ins; |
| 56 | GCproto *pt; | 56 | GCproto *pt; |
| 57 | BCPos pos; | 57 | BCPos pos; |
| 58 | lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD); | 58 | lj_assertL(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD, |
| 59 | "function or frame expected"); | ||
| 59 | if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ | 60 | if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */ |
| 60 | return NO_BCPOS; | 61 | return NO_BCPOS; |
| 61 | } else if (nextframe == NULL) { /* Lua function on top. */ | 62 | } else if (nextframe == NULL) { /* Lua function on top. */ |
| @@ -100,7 +101,7 @@ static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe) | |||
| 100 | #if LJ_HASJIT | 101 | #if LJ_HASJIT |
| 101 | if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */ | 102 | if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */ |
| 102 | GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins)); | 103 | GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins)); |
| 103 | lua_assert(bc_isret(bc_op(ins[-1]))); | 104 | lj_assertL(bc_isret(bc_op(ins[-1])), "return bytecode expected"); |
| 104 | pos = proto_bcpos(pt, mref(T->startpc, const BCIns)); | 105 | pos = proto_bcpos(pt, mref(T->startpc, const BCIns)); |
| 105 | } | 106 | } |
| 106 | #endif | 107 | #endif |
| @@ -133,7 +134,7 @@ static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe) | |||
| 133 | BCPos pc = debug_framepc(L, fn, nextframe); | 134 | BCPos pc = debug_framepc(L, fn, nextframe); |
| 134 | if (pc != NO_BCPOS) { | 135 | if (pc != NO_BCPOS) { |
| 135 | GCproto *pt = funcproto(fn); | 136 | GCproto *pt = funcproto(fn); |
| 136 | lua_assert(pc <= pt->sizebc); | 137 | lj_assertL(pc <= pt->sizebc, "PC out of range"); |
| 137 | return lj_debug_line(pt, pc); | 138 | return lj_debug_line(pt, pc); |
| 138 | } | 139 | } |
| 139 | return -1; | 140 | return -1; |
| @@ -214,7 +215,7 @@ static TValue *debug_localname(lua_State *L, const lua_Debug *ar, | |||
| 214 | const char *lj_debug_uvname(GCproto *pt, uint32_t idx) | 215 | const char *lj_debug_uvname(GCproto *pt, uint32_t idx) |
| 215 | { | 216 | { |
| 216 | const uint8_t *p = proto_uvinfo(pt); | 217 | const uint8_t *p = proto_uvinfo(pt); |
| 217 | lua_assert(idx < pt->sizeuv); | 218 | lj_assertX(idx < pt->sizeuv, "bad upvalue index"); |
| 218 | if (!p) return ""; | 219 | if (!p) return ""; |
| 219 | if (idx) while (*p++ || --idx) ; | 220 | if (idx) while (*p++ || --idx) ; |
| 220 | return (const char *)p; | 221 | return (const char *)p; |
| @@ -439,13 +440,14 @@ int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext) | |||
| 439 | } else { | 440 | } else { |
| 440 | uint32_t offset = (uint32_t)ar->i_ci & 0xffff; | 441 | uint32_t offset = (uint32_t)ar->i_ci & 0xffff; |
| 441 | uint32_t size = (uint32_t)ar->i_ci >> 16; | 442 | uint32_t size = (uint32_t)ar->i_ci >> 16; |
| 442 | lua_assert(offset != 0); | 443 | lj_assertL(offset != 0, "bad frame offset"); |
| 443 | frame = tvref(L->stack) + offset; | 444 | frame = tvref(L->stack) + offset; |
| 444 | if (size) nextframe = frame + size; | 445 | if (size) nextframe = frame + size; |
| 445 | lua_assert(frame <= tvref(L->maxstack) && | 446 | lj_assertL(frame <= tvref(L->maxstack) && |
| 446 | (!nextframe || nextframe <= tvref(L->maxstack))); | 447 | (!nextframe || nextframe <= tvref(L->maxstack)), |
| 448 | "broken frame chain"); | ||
| 447 | fn = frame_func(frame); | 449 | fn = frame_func(frame); |
| 448 | lua_assert(fn->c.gct == ~LJ_TFUNC); | 450 | lj_assertL(fn->c.gct == ~LJ_TFUNC, "bad frame function"); |
| 449 | } | 451 | } |
| 450 | for (; *what; what++) { | 452 | for (; *what; what++) { |
| 451 | if (*what == 'S') { | 453 | if (*what == 'S') { |
diff --git a/src/lj_def.h b/src/lj_def.h index 75aaeb79..af0687c4 100644 --- a/src/lj_def.h +++ b/src/lj_def.h | |||
| @@ -337,14 +337,28 @@ static LJ_AINLINE uint32_t lj_getu32(const void *v) | |||
| 337 | #define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET | 337 | #define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET |
| 338 | #define LJ_ASMF_NORET LJ_ASMF LJ_NORET | 338 | #define LJ_ASMF_NORET LJ_ASMF LJ_NORET |
| 339 | 339 | ||
| 340 | /* Runtime assertions. */ | 340 | /* Internal assertions. */ |
| 341 | #ifdef lua_assert | 341 | #if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) |
| 342 | #define check_exp(c, e) (lua_assert(c), (e)) | 342 | #define lj_assert_check(g, c, ...) \ |
| 343 | #define api_check(l, e) lua_assert(e) | 343 | ((c) ? (void)0 : \ |
| 344 | (lj_assert_fail((g), __FILE__, __LINE__, __func__, __VA_ARGS__), 0)) | ||
| 345 | #define lj_checkapi(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__) | ||
| 344 | #else | 346 | #else |
| 345 | #define lua_assert(c) ((void)0) | 347 | #define lj_checkapi(c, ...) ((void)L) |
| 348 | #endif | ||
| 349 | |||
| 350 | #ifdef LUA_USE_ASSERT | ||
| 351 | #define lj_assertG_(g, c, ...) lj_assert_check((g), (c), __VA_ARGS__) | ||
| 352 | #define lj_assertG(c, ...) lj_assert_check(g, (c), __VA_ARGS__) | ||
| 353 | #define lj_assertL(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__) | ||
| 354 | #define lj_assertX(c, ...) lj_assert_check(NULL, (c), __VA_ARGS__) | ||
| 355 | #define check_exp(c, e) (lj_assertX((c), #c), (e)) | ||
| 356 | #else | ||
| 357 | #define lj_assertG_(g, c, ...) ((void)0) | ||
| 358 | #define lj_assertG(c, ...) ((void)g) | ||
| 359 | #define lj_assertL(c, ...) ((void)L) | ||
| 360 | #define lj_assertX(c, ...) ((void)0) | ||
| 346 | #define check_exp(c, e) (e) | 361 | #define check_exp(c, e) (e) |
| 347 | #define api_check luai_apicheck | ||
| 348 | #endif | 362 | #endif |
| 349 | 363 | ||
| 350 | /* Static assertions. */ | 364 | /* Static assertions. */ |
diff --git a/src/lj_dispatch.c b/src/lj_dispatch.c index 39416d00..c608e223 100644 --- a/src/lj_dispatch.c +++ b/src/lj_dispatch.c | |||
| @@ -367,7 +367,7 @@ static void callhook(lua_State *L, int event, BCLine line) | |||
| 367 | hook_enter(g); | 367 | hook_enter(g); |
| 368 | #endif | 368 | #endif |
| 369 | hookf(L, &ar); | 369 | hookf(L, &ar); |
| 370 | lua_assert(hook_active(g)); | 370 | lj_assertG(hook_active(g), "active hook flag removed"); |
| 371 | setgcref(g->cur_L, obj2gco(L)); | 371 | setgcref(g->cur_L, obj2gco(L)); |
| 372 | #if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF | 372 | #if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF |
| 373 | lj_profile_hook_leave(g); | 373 | lj_profile_hook_leave(g); |
| @@ -415,7 +415,8 @@ void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc) | |||
| 415 | #endif | 415 | #endif |
| 416 | J->L = L; | 416 | J->L = L; |
| 417 | lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ | 417 | lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ |
| 418 | lua_assert(L->top - L->base == delta); | 418 | lj_assertG(L->top - L->base == delta, |
| 419 | "unbalanced stack after tracing of instruction"); | ||
| 419 | } | 420 | } |
| 420 | } | 421 | } |
| 421 | #endif | 422 | #endif |
| @@ -475,7 +476,8 @@ ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc) | |||
| 475 | #endif | 476 | #endif |
| 476 | pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); | 477 | pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1); |
| 477 | lj_trace_hot(J, pc); | 478 | lj_trace_hot(J, pc); |
| 478 | lua_assert(L->top - L->base == delta); | 479 | lj_assertG(L->top - L->base == delta, |
| 480 | "unbalanced stack after hot call"); | ||
| 479 | goto out; | 481 | goto out; |
| 480 | } else if (J->state != LJ_TRACE_IDLE && | 482 | } else if (J->state != LJ_TRACE_IDLE && |
| 481 | !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { | 483 | !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) { |
| @@ -484,7 +486,8 @@ ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc) | |||
| 484 | #endif | 486 | #endif |
| 485 | /* Record the FUNC* bytecodes, too. */ | 487 | /* Record the FUNC* bytecodes, too. */ |
| 486 | lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ | 488 | lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */ |
| 487 | lua_assert(L->top - L->base == delta); | 489 | lj_assertG(L->top - L->base == delta, |
| 490 | "unbalanced stack after hot instruction"); | ||
| 488 | } | 491 | } |
| 489 | #endif | 492 | #endif |
| 490 | if ((g->hookmask & LUA_MASKCALL)) { | 493 | if ((g->hookmask & LUA_MASKCALL)) { |
diff --git a/src/lj_emit_arm.h b/src/lj_emit_arm.h index 25561549..165d546d 100644 --- a/src/lj_emit_arm.h +++ b/src/lj_emit_arm.h | |||
| @@ -81,7 +81,8 @@ static void emit_m(ASMState *as, ARMIns ai, Reg rm) | |||
| 81 | 81 | ||
| 82 | static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) | 82 | static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) |
| 83 | { | 83 | { |
| 84 | lua_assert(ofs >= -255 && ofs <= 255); | 84 | lj_assertA(ofs >= -255 && ofs <= 255, |
| 85 | "load/store offset %d out of range", ofs); | ||
| 85 | if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; | 86 | if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; |
| 86 | *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | | 87 | *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) | |
| 87 | ((ofs & 0xf0) << 4) | (ofs & 0x0f); | 88 | ((ofs & 0xf0) << 4) | (ofs & 0x0f); |
| @@ -89,7 +90,8 @@ static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) | |||
| 89 | 90 | ||
| 90 | static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) | 91 | static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) |
| 91 | { | 92 | { |
| 92 | lua_assert(ofs >= -4095 && ofs <= 4095); | 93 | lj_assertA(ofs >= -4095 && ofs <= 4095, |
| 94 | "load/store offset %d out of range", ofs); | ||
| 93 | /* Combine LDR/STR pairs to LDRD/STRD. */ | 95 | /* Combine LDR/STR pairs to LDRD/STRD. */ |
| 94 | if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && | 96 | if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) && |
| 95 | (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && | 97 | (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn && |
| @@ -106,7 +108,8 @@ static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) | |||
| 106 | #if !LJ_SOFTFP | 108 | #if !LJ_SOFTFP |
| 107 | static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) | 109 | static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs) |
| 108 | { | 110 | { |
| 109 | lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0); | 111 | lj_assertA(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0, |
| 112 | "load/store offset %d out of range", ofs); | ||
| 110 | if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; | 113 | if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U; |
| 111 | *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2); | 114 | *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2); |
| 112 | } | 115 | } |
| @@ -124,7 +127,7 @@ static int emit_kdelta1(ASMState *as, Reg d, int32_t i) | |||
| 124 | while (work) { | 127 | while (work) { |
| 125 | Reg r = rset_picktop(work); | 128 | Reg r = rset_picktop(work); |
| 126 | IRRef ref = regcost_ref(as->cost[r]); | 129 | IRRef ref = regcost_ref(as->cost[r]); |
| 127 | lua_assert(r != d); | 130 | lj_assertA(r != d, "dest reg not free"); |
| 128 | if (emit_canremat(ref)) { | 131 | if (emit_canremat(ref)) { |
| 129 | int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); | 132 | int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); |
| 130 | uint32_t k = emit_isk12(ARMI_ADD, delta); | 133 | uint32_t k = emit_isk12(ARMI_ADD, delta); |
| @@ -142,13 +145,13 @@ static int emit_kdelta1(ASMState *as, Reg d, int32_t i) | |||
| 142 | } | 145 | } |
| 143 | 146 | ||
| 144 | /* Try to find a two step delta relative to another constant. */ | 147 | /* Try to find a two step delta relative to another constant. */ |
| 145 | static int emit_kdelta2(ASMState *as, Reg d, int32_t i) | 148 | static int emit_kdelta2(ASMState *as, Reg rd, int32_t i) |
| 146 | { | 149 | { |
| 147 | RegSet work = ~as->freeset & RSET_GPR; | 150 | RegSet work = ~as->freeset & RSET_GPR; |
| 148 | while (work) { | 151 | while (work) { |
| 149 | Reg r = rset_picktop(work); | 152 | Reg r = rset_picktop(work); |
| 150 | IRRef ref = regcost_ref(as->cost[r]); | 153 | IRRef ref = regcost_ref(as->cost[r]); |
| 151 | lua_assert(r != d); | 154 | lj_assertA(r != rd, "dest reg %d not free", rd); |
| 152 | if (emit_canremat(ref)) { | 155 | if (emit_canremat(ref)) { |
| 153 | int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i; | 156 | int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i; |
| 154 | if (other) { | 157 | if (other) { |
| @@ -159,8 +162,8 @@ static int emit_kdelta2(ASMState *as, Reg d, int32_t i) | |||
| 159 | k2 = emit_isk12(0, delta & (255 << sh)); | 162 | k2 = emit_isk12(0, delta & (255 << sh)); |
| 160 | k = emit_isk12(0, delta & ~(255 << sh)); | 163 | k = emit_isk12(0, delta & ~(255 << sh)); |
| 161 | if (k) { | 164 | if (k) { |
| 162 | emit_dn(as, ARMI_ADD^k2^inv, d, d); | 165 | emit_dn(as, ARMI_ADD^k2^inv, rd, rd); |
| 163 | emit_dn(as, ARMI_ADD^k^inv, d, r); | 166 | emit_dn(as, ARMI_ADD^k^inv, rd, r); |
| 164 | return 1; | 167 | return 1; |
| 165 | } | 168 | } |
| 166 | } | 169 | } |
| @@ -171,23 +174,24 @@ static int emit_kdelta2(ASMState *as, Reg d, int32_t i) | |||
| 171 | } | 174 | } |
| 172 | 175 | ||
| 173 | /* Load a 32 bit constant into a GPR. */ | 176 | /* Load a 32 bit constant into a GPR. */ |
| 174 | static void emit_loadi(ASMState *as, Reg r, int32_t i) | 177 | static void emit_loadi(ASMState *as, Reg rd, int32_t i) |
| 175 | { | 178 | { |
| 176 | uint32_t k = emit_isk12(ARMI_MOV, i); | 179 | uint32_t k = emit_isk12(ARMI_MOV, i); |
| 177 | lua_assert(rset_test(as->freeset, r) || r == RID_TMP); | 180 | lj_assertA(rset_test(as->freeset, rd) || rd == RID_TMP, |
| 181 | "dest reg %d not free", rd); | ||
| 178 | if (k) { | 182 | if (k) { |
| 179 | /* Standard K12 constant. */ | 183 | /* Standard K12 constant. */ |
| 180 | emit_d(as, ARMI_MOV^k, r); | 184 | emit_d(as, ARMI_MOV^k, rd); |
| 181 | } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { | 185 | } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) { |
| 182 | /* 16 bit loword constant for ARMv6T2. */ | 186 | /* 16 bit loword constant for ARMv6T2. */ |
| 183 | emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); | 187 | emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd); |
| 184 | } else if (emit_kdelta1(as, r, i)) { | 188 | } else if (emit_kdelta1(as, rd, i)) { |
| 185 | /* One step delta relative to another constant. */ | 189 | /* One step delta relative to another constant. */ |
| 186 | } else if ((as->flags & JIT_F_ARMV6T2)) { | 190 | } else if ((as->flags & JIT_F_ARMV6T2)) { |
| 187 | /* 32 bit hiword/loword constant for ARMv6T2. */ | 191 | /* 32 bit hiword/loword constant for ARMv6T2. */ |
| 188 | emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r); | 192 | emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), rd); |
| 189 | emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r); | 193 | emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd); |
| 190 | } else if (emit_kdelta2(as, r, i)) { | 194 | } else if (emit_kdelta2(as, rd, i)) { |
| 191 | /* Two step delta relative to another constant. */ | 195 | /* Two step delta relative to another constant. */ |
| 192 | } else { | 196 | } else { |
| 193 | /* Otherwise construct the constant with up to 4 instructions. */ | 197 | /* Otherwise construct the constant with up to 4 instructions. */ |
| @@ -197,15 +201,15 @@ static void emit_loadi(ASMState *as, Reg r, int32_t i) | |||
| 197 | int32_t m = i & (255 << sh); | 201 | int32_t m = i & (255 << sh); |
| 198 | i &= ~(255 << sh); | 202 | i &= ~(255 << sh); |
| 199 | if (i == 0) { | 203 | if (i == 0) { |
| 200 | emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r); | 204 | emit_d(as, ARMI_MOV ^ emit_isk12(0, m), rd); |
| 201 | break; | 205 | break; |
| 202 | } | 206 | } |
| 203 | emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r); | 207 | emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), rd, rd); |
| 204 | } | 208 | } |
| 205 | } | 209 | } |
| 206 | } | 210 | } |
| 207 | 211 | ||
| 208 | #define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr))) | 212 | #define emit_loada(as, rd, addr) emit_loadi(as, (rd), i32ptr((addr))) |
| 209 | 213 | ||
| 210 | static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow); | 214 | static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow); |
| 211 | 215 | ||
| @@ -261,7 +265,7 @@ static void emit_branch(ASMState *as, ARMIns ai, MCode *target) | |||
| 261 | { | 265 | { |
| 262 | MCode *p = as->mcp; | 266 | MCode *p = as->mcp; |
| 263 | ptrdiff_t delta = (target - p) - 1; | 267 | ptrdiff_t delta = (target - p) - 1; |
| 264 | lua_assert(((delta + 0x00800000) >> 24) == 0); | 268 | lj_assertA(((delta + 0x00800000) >> 24) == 0, "branch target out of range"); |
| 265 | *--p = ai | ((uint32_t)delta & 0x00ffffffu); | 269 | *--p = ai | ((uint32_t)delta & 0x00ffffffu); |
| 266 | as->mcp = p; | 270 | as->mcp = p; |
| 267 | } | 271 | } |
| @@ -289,7 +293,7 @@ static void emit_call(ASMState *as, void *target) | |||
| 289 | static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) | 293 | static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) |
| 290 | { | 294 | { |
| 291 | #if LJ_SOFTFP | 295 | #if LJ_SOFTFP |
| 292 | lua_assert(!irt_isnum(ir->t)); UNUSED(ir); | 296 | lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir); |
| 293 | #else | 297 | #else |
| 294 | if (dst >= RID_MAX_GPR) { | 298 | if (dst >= RID_MAX_GPR) { |
| 295 | emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S, | 299 | emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S, |
| @@ -313,7 +317,7 @@ static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src) | |||
| 313 | static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) | 317 | static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) |
| 314 | { | 318 | { |
| 315 | #if LJ_SOFTFP | 319 | #if LJ_SOFTFP |
| 316 | lua_assert(!irt_isnum(ir->t)); UNUSED(ir); | 320 | lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir); |
| 317 | #else | 321 | #else |
| 318 | if (r >= RID_MAX_GPR) | 322 | if (r >= RID_MAX_GPR) |
| 319 | emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs); | 323 | emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs); |
| @@ -326,7 +330,7 @@ static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) | |||
| 326 | static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) | 330 | static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) |
| 327 | { | 331 | { |
| 328 | #if LJ_SOFTFP | 332 | #if LJ_SOFTFP |
| 329 | lua_assert(!irt_isnum(ir->t)); UNUSED(ir); | 333 | lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir); |
| 330 | #else | 334 | #else |
| 331 | if (r >= RID_MAX_GPR) | 335 | if (r >= RID_MAX_GPR) |
| 332 | emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs); | 336 | emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs); |
diff --git a/src/lj_emit_arm64.h b/src/lj_emit_arm64.h index f09c0f3a..61a2df82 100644 --- a/src/lj_emit_arm64.h +++ b/src/lj_emit_arm64.h | |||
| @@ -8,8 +8,9 @@ | |||
| 8 | 8 | ||
| 9 | /* -- Constant encoding --------------------------------------------------- */ | 9 | /* -- Constant encoding --------------------------------------------------- */ |
| 10 | 10 | ||
| 11 | static uint64_t get_k64val(IRIns *ir) | 11 | static uint64_t get_k64val(ASMState *as, IRRef ref) |
| 12 | { | 12 | { |
| 13 | IRIns *ir = IR(ref); | ||
| 13 | if (ir->o == IR_KINT64) { | 14 | if (ir->o == IR_KINT64) { |
| 14 | return ir_kint64(ir)->u64; | 15 | return ir_kint64(ir)->u64; |
| 15 | } else if (ir->o == IR_KGC) { | 16 | } else if (ir->o == IR_KGC) { |
| @@ -17,7 +18,8 @@ static uint64_t get_k64val(IRIns *ir) | |||
| 17 | } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { | 18 | } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) { |
| 18 | return (uint64_t)ir_kptr(ir); | 19 | return (uint64_t)ir_kptr(ir); |
| 19 | } else { | 20 | } else { |
| 20 | lua_assert(ir->o == IR_KINT || ir->o == IR_KNULL); | 21 | lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL, |
| 22 | "bad 64 bit const IR op %d", ir->o); | ||
| 21 | return ir->i; /* Sign-extended. */ | 23 | return ir->i; /* Sign-extended. */ |
| 22 | } | 24 | } |
| 23 | } | 25 | } |
| @@ -122,7 +124,7 @@ static int emit_checkofs(A64Ins ai, int64_t ofs) | |||
| 122 | static void emit_lso(ASMState *as, A64Ins ai, Reg rd, Reg rn, int64_t ofs) | 124 | static void emit_lso(ASMState *as, A64Ins ai, Reg rd, Reg rn, int64_t ofs) |
| 123 | { | 125 | { |
| 124 | int ot = emit_checkofs(ai, ofs), sc = (ai >> 30) & 3; | 126 | int ot = emit_checkofs(ai, ofs), sc = (ai >> 30) & 3; |
| 125 | lua_assert(ot); | 127 | lj_assertA(ot, "load/store offset %d out of range", ofs); |
| 126 | /* Combine LDR/STR pairs to LDP/STP. */ | 128 | /* Combine LDR/STR pairs to LDP/STP. */ |
| 127 | if ((sc == 2 || sc == 3) && | 129 | if ((sc == 2 || sc == 3) && |
| 128 | (!(ai & 0x400000) || rd != rn) && | 130 | (!(ai & 0x400000) || rd != rn) && |
| @@ -166,10 +168,10 @@ static int emit_kdelta(ASMState *as, Reg rd, uint64_t k, int lim) | |||
| 166 | while (work) { | 168 | while (work) { |
| 167 | Reg r = rset_picktop(work); | 169 | Reg r = rset_picktop(work); |
| 168 | IRRef ref = regcost_ref(as->cost[r]); | 170 | IRRef ref = regcost_ref(as->cost[r]); |
| 169 | lua_assert(r != rd); | 171 | lj_assertA(r != rd, "dest reg %d not free", rd); |
| 170 | if (ref < REF_TRUE) { | 172 | if (ref < REF_TRUE) { |
| 171 | uint64_t kx = ra_iskref(ref) ? (uint64_t)ra_krefk(as, ref) : | 173 | uint64_t kx = ra_iskref(ref) ? (uint64_t)ra_krefk(as, ref) : |
| 172 | get_k64val(IR(ref)); | 174 | get_k64val(as, ref); |
| 173 | int64_t delta = (int64_t)(k - kx); | 175 | int64_t delta = (int64_t)(k - kx); |
| 174 | if (delta == 0) { | 176 | if (delta == 0) { |
| 175 | emit_dm(as, A64I_MOVx, rd, r); | 177 | emit_dm(as, A64I_MOVx, rd, r); |
| @@ -312,7 +314,7 @@ static void emit_cond_branch(ASMState *as, A64CC cond, MCode *target) | |||
| 312 | { | 314 | { |
| 313 | MCode *p = --as->mcp; | 315 | MCode *p = --as->mcp; |
| 314 | ptrdiff_t delta = target - p; | 316 | ptrdiff_t delta = target - p; |
| 315 | lua_assert(A64F_S_OK(delta, 19)); | 317 | lj_assertA(A64F_S_OK(delta, 19), "branch target out of range"); |
| 316 | *p = A64I_BCC | A64F_S19(delta) | cond; | 318 | *p = A64I_BCC | A64F_S19(delta) | cond; |
| 317 | } | 319 | } |
| 318 | 320 | ||
| @@ -320,7 +322,7 @@ static void emit_branch(ASMState *as, A64Ins ai, MCode *target) | |||
| 320 | { | 322 | { |
| 321 | MCode *p = --as->mcp; | 323 | MCode *p = --as->mcp; |
| 322 | ptrdiff_t delta = target - p; | 324 | ptrdiff_t delta = target - p; |
| 323 | lua_assert(A64F_S_OK(delta, 26)); | 325 | lj_assertA(A64F_S_OK(delta, 26), "branch target out of range"); |
| 324 | *p = ai | A64F_S26(delta); | 326 | *p = ai | A64F_S26(delta); |
| 325 | } | 327 | } |
| 326 | 328 | ||
| @@ -328,7 +330,8 @@ static void emit_tnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit, MCode *target | |||
| 328 | { | 330 | { |
| 329 | MCode *p = --as->mcp; | 331 | MCode *p = --as->mcp; |
| 330 | ptrdiff_t delta = target - p; | 332 | ptrdiff_t delta = target - p; |
| 331 | lua_assert(bit < 63 && A64F_S_OK(delta, 14)); | 333 | lj_assertA(bit < 63, "bit number out of range"); |
| 334 | lj_assertA(A64F_S_OK(delta, 14), "branch target out of range"); | ||
| 332 | if (bit > 31) ai |= A64I_X; | 335 | if (bit > 31) ai |= A64I_X; |
| 333 | *p = ai | A64F_BIT(bit & 31) | A64F_S14(delta) | r; | 336 | *p = ai | A64F_BIT(bit & 31) | A64F_S14(delta) | r; |
| 334 | } | 337 | } |
| @@ -337,7 +340,7 @@ static void emit_cnb(ASMState *as, A64Ins ai, Reg r, MCode *target) | |||
| 337 | { | 340 | { |
| 338 | MCode *p = --as->mcp; | 341 | MCode *p = --as->mcp; |
| 339 | ptrdiff_t delta = target - p; | 342 | ptrdiff_t delta = target - p; |
| 340 | lua_assert(A64F_S_OK(delta, 19)); | 343 | lj_assertA(A64F_S_OK(delta, 19), "branch target out of range"); |
| 341 | *p = ai | A64F_S19(delta) | r; | 344 | *p = ai | A64F_S19(delta) | r; |
| 342 | } | 345 | } |
| 343 | 346 | ||
diff --git a/src/lj_emit_mips.h b/src/lj_emit_mips.h index bdabcf16..3de5ff18 100644 --- a/src/lj_emit_mips.h +++ b/src/lj_emit_mips.h | |||
| @@ -4,8 +4,9 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #if LJ_64 | 6 | #if LJ_64 |
| 7 | static intptr_t get_k64val(IRIns *ir) | 7 | static intptr_t get_k64val(ASMState *as, IRRef ref) |
| 8 | { | 8 | { |
| 9 | IRIns *ir = IR(ref); | ||
| 9 | if (ir->o == IR_KINT64) { | 10 | if (ir->o == IR_KINT64) { |
| 10 | return (intptr_t)ir_kint64(ir)->u64; | 11 | return (intptr_t)ir_kint64(ir)->u64; |
| 11 | } else if (ir->o == IR_KGC) { | 12 | } else if (ir->o == IR_KGC) { |
| @@ -15,16 +16,17 @@ static intptr_t get_k64val(IRIns *ir) | |||
| 15 | } else if (LJ_SOFTFP && ir->o == IR_KNUM) { | 16 | } else if (LJ_SOFTFP && ir->o == IR_KNUM) { |
| 16 | return (intptr_t)ir_knum(ir)->u64; | 17 | return (intptr_t)ir_knum(ir)->u64; |
| 17 | } else { | 18 | } else { |
| 18 | lua_assert(ir->o == IR_KINT || ir->o == IR_KNULL); | 19 | lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL, |
| 20 | "bad 64 bit const IR op %d", ir->o); | ||
| 19 | return ir->i; /* Sign-extended. */ | 21 | return ir->i; /* Sign-extended. */ |
| 20 | } | 22 | } |
| 21 | } | 23 | } |
| 22 | #endif | 24 | #endif |
| 23 | 25 | ||
| 24 | #if LJ_64 | 26 | #if LJ_64 |
| 25 | #define get_kval(ir) get_k64val(ir) | 27 | #define get_kval(as, ref) get_k64val(as, ref) |
| 26 | #else | 28 | #else |
| 27 | #define get_kval(ir) ((ir)->i) | 29 | #define get_kval(as, ref) (IR((ref))->i) |
| 28 | #endif | 30 | #endif |
| 29 | 31 | ||
| 30 | /* -- Emit basic instructions --------------------------------------------- */ | 32 | /* -- Emit basic instructions --------------------------------------------- */ |
| @@ -82,18 +84,18 @@ static void emit_tsml(ASMState *as, MIPSIns mi, Reg rt, Reg rs, uint32_t msb, | |||
| 82 | #define emit_canremat(ref) ((ref) <= REF_BASE) | 84 | #define emit_canremat(ref) ((ref) <= REF_BASE) |
| 83 | 85 | ||
| 84 | /* Try to find a one step delta relative to another constant. */ | 86 | /* Try to find a one step delta relative to another constant. */ |
| 85 | static int emit_kdelta1(ASMState *as, Reg t, intptr_t i) | 87 | static int emit_kdelta1(ASMState *as, Reg rd, intptr_t i) |
| 86 | { | 88 | { |
| 87 | RegSet work = ~as->freeset & RSET_GPR; | 89 | RegSet work = ~as->freeset & RSET_GPR; |
| 88 | while (work) { | 90 | while (work) { |
| 89 | Reg r = rset_picktop(work); | 91 | Reg r = rset_picktop(work); |
| 90 | IRRef ref = regcost_ref(as->cost[r]); | 92 | IRRef ref = regcost_ref(as->cost[r]); |
| 91 | lua_assert(r != t); | 93 | lj_assertA(r != rd, "dest reg %d not free", rd); |
| 92 | if (ref < ASMREF_L) { | 94 | if (ref < ASMREF_L) { |
| 93 | intptr_t delta = (intptr_t)((uintptr_t)i - | 95 | intptr_t delta = (intptr_t)((uintptr_t)i - |
| 94 | (uintptr_t)(ra_iskref(ref) ? ra_krefk(as, ref) : get_kval(IR(ref)))); | 96 | (uintptr_t)(ra_iskref(ref) ? ra_krefk(as, ref) : get_kval(as, ref))); |
| 95 | if (checki16(delta)) { | 97 | if (checki16(delta)) { |
| 96 | emit_tsi(as, MIPSI_AADDIU, t, r, delta); | 98 | emit_tsi(as, MIPSI_AADDIU, rd, r, delta); |
| 97 | return 1; | 99 | return 1; |
| 98 | } | 100 | } |
| 99 | } | 101 | } |
| @@ -223,7 +225,7 @@ static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target) | |||
| 223 | { | 225 | { |
| 224 | MCode *p = as->mcp; | 226 | MCode *p = as->mcp; |
| 225 | ptrdiff_t delta = target - p; | 227 | ptrdiff_t delta = target - p; |
| 226 | lua_assert(((delta + 0x8000) >> 16) == 0); | 228 | lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range"); |
| 227 | *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu); | 229 | *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu); |
| 228 | as->mcp = p; | 230 | as->mcp = p; |
| 229 | } | 231 | } |
| @@ -299,7 +301,7 @@ static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs) | |||
| 299 | static void emit_addptr(ASMState *as, Reg r, int32_t ofs) | 301 | static void emit_addptr(ASMState *as, Reg r, int32_t ofs) |
| 300 | { | 302 | { |
| 301 | if (ofs) { | 303 | if (ofs) { |
| 302 | lua_assert(checki16(ofs)); | 304 | lj_assertA(checki16(ofs), "offset %d out of range", ofs); |
| 303 | emit_tsi(as, MIPSI_AADDIU, r, r, ofs); | 305 | emit_tsi(as, MIPSI_AADDIU, r, r, ofs); |
| 304 | } | 306 | } |
| 305 | } | 307 | } |
diff --git a/src/lj_emit_ppc.h b/src/lj_emit_ppc.h index 69765528..6bc74c04 100644 --- a/src/lj_emit_ppc.h +++ b/src/lj_emit_ppc.h | |||
| @@ -41,13 +41,13 @@ static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs, | |||
| 41 | 41 | ||
| 42 | static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n) | 42 | static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n) |
| 43 | { | 43 | { |
| 44 | lua_assert(n >= 0 && n < 32); | 44 | lj_assertA(n >= 0 && n < 32, "shift out of range"); | 44 | 
| 45 | emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n); | 45 | emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) | 48 | static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) |
| 49 | { | 49 | { |
| 50 | lua_assert(n >= 0 && n < 32); | 50 | lj_assertA(n >= 0 && n < 32, "shift out of range"); | 50 | 
| 51 | emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31); | 51 | emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| @@ -57,17 +57,17 @@ static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n) | |||
| 57 | #define emit_canremat(ref) ((ref) <= REF_BASE) | 57 | #define emit_canremat(ref) ((ref) <= REF_BASE) |
| 58 | 58 | ||
| 59 | /* Try to find a one step delta relative to another constant. */ | 59 | /* Try to find a one step delta relative to another constant. */ |
| 60 | static int emit_kdelta1(ASMState *as, Reg t, int32_t i) | 60 | static int emit_kdelta1(ASMState *as, Reg rd, int32_t i) |
| 61 | { | 61 | { |
| 62 | RegSet work = ~as->freeset & RSET_GPR; | 62 | RegSet work = ~as->freeset & RSET_GPR; |
| 63 | while (work) { | 63 | while (work) { |
| 64 | Reg r = rset_picktop(work); | 64 | Reg r = rset_picktop(work); |
| 65 | IRRef ref = regcost_ref(as->cost[r]); | 65 | IRRef ref = regcost_ref(as->cost[r]); |
| 66 | lua_assert(r != t); | 66 | lj_assertA(r != rd, "dest reg %d not free", rd); |
| 67 | if (ref < ASMREF_L) { | 67 | if (ref < ASMREF_L) { |
| 68 | int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); | 68 | int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i); |
| 69 | if (checki16(delta)) { | 69 | if (checki16(delta)) { |
| 70 | emit_tai(as, PPCI_ADDI, t, r, delta); | 70 | emit_tai(as, PPCI_ADDI, rd, r, delta); |
| 71 | return 1; | 71 | return 1; |
| 72 | } | 72 | } |
| 73 | } | 73 | } |
| @@ -144,7 +144,7 @@ static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target) | |||
| 144 | { | 144 | { |
| 145 | MCode *p = --as->mcp; | 145 | MCode *p = --as->mcp; |
| 146 | ptrdiff_t delta = (char *)target - (char *)p; | 146 | ptrdiff_t delta = (char *)target - (char *)p; |
| 147 | lua_assert(((delta + 0x8000) >> 16) == 0); | 147 | lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range"); |
| 148 | pi ^= (delta & 0x8000) * (PPCF_Y/0x8000); | 148 | pi ^= (delta & 0x8000) * (PPCF_Y/0x8000); |
| 149 | *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu); | 149 | *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu); |
| 150 | } | 150 | } |
diff --git a/src/lj_emit_x86.h b/src/lj_emit_x86.h index b17e28a5..66750a96 100644 --- a/src/lj_emit_x86.h +++ b/src/lj_emit_x86.h | |||
| @@ -92,7 +92,7 @@ static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2) | |||
| 92 | /* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */ | 92 | /* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */ |
| 93 | static int32_t ptr2addr(const void *p) | 93 | static int32_t ptr2addr(const void *p) |
| 94 | { | 94 | { |
| 95 | lua_assert((uintptr_t)p < (uintptr_t)0x80000000); | 95 | lj_assertX((uintptr_t)p < (uintptr_t)0x80000000, "pointer outside 2G range"); |
| 96 | return i32ptr(p); | 96 | return i32ptr(p); |
| 97 | } | 97 | } |
| 98 | #else | 98 | #else |
| @@ -208,7 +208,7 @@ static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb) | |||
| 208 | rb = RID_ESP; | 208 | rb = RID_ESP; |
| 209 | #endif | 209 | #endif |
| 210 | } else if (LJ_GC64 && rb == RID_RIP) { | 210 | } else if (LJ_GC64 && rb == RID_RIP) { |
| 211 | lua_assert(as->mrm.idx == RID_NONE); | 211 | lj_assertA(as->mrm.idx == RID_NONE, "RIP-rel mrm cannot have index"); |
| 212 | mode = XM_OFS0; | 212 | mode = XM_OFS0; |
| 213 | p -= 4; | 213 | p -= 4; |
| 214 | *(int32_t *)p = as->mrm.ofs; | 214 | *(int32_t *)p = as->mrm.ofs; |
| @@ -401,7 +401,8 @@ static void emit_loadk64(ASMState *as, Reg r, IRIns *ir) | |||
| 401 | emit_rma(as, xo, r64, k); | 401 | emit_rma(as, xo, r64, k); |
| 402 | } else { | 402 | } else { |
| 403 | if (ir->i) { | 403 | if (ir->i) { |
| 404 | lua_assert(*k == *(uint64_t*)(as->mctop - ir->i)); | 404 | lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i), |
| 405 | "bad interned 64 bit constant"); | ||
| 405 | } else if (as->curins <= as->stopins && rset_test(RSET_GPR, r)) { | 406 | } else if (as->curins <= as->stopins && rset_test(RSET_GPR, r)) { |
| 406 | emit_loadu64(as, r, *k); | 407 | emit_loadu64(as, r, *k); |
| 407 | return; | 408 | return; |
| @@ -433,7 +434,7 @@ static void emit_sjmp(ASMState *as, MCLabel target) | |||
| 433 | { | 434 | { |
| 434 | MCode *p = as->mcp; | 435 | MCode *p = as->mcp; |
| 435 | ptrdiff_t delta = target - p; | 436 | ptrdiff_t delta = target - p; |
| 436 | lua_assert(delta == (int8_t)delta); | 437 | lj_assertA(delta == (int8_t)delta, "short jump target out of range"); |
| 437 | p[-1] = (MCode)(int8_t)delta; | 438 | p[-1] = (MCode)(int8_t)delta; |
| 438 | p[-2] = XI_JMPs; | 439 | p[-2] = XI_JMPs; |
| 439 | as->mcp = p - 2; | 440 | as->mcp = p - 2; |
| @@ -445,7 +446,7 @@ static void emit_sjcc(ASMState *as, int cc, MCLabel target) | |||
| 445 | { | 446 | { |
| 446 | MCode *p = as->mcp; | 447 | MCode *p = as->mcp; |
| 447 | ptrdiff_t delta = target - p; | 448 | ptrdiff_t delta = target - p; |
| 448 | lua_assert(delta == (int8_t)delta); | 449 | lj_assertA(delta == (int8_t)delta, "short jump target out of range"); |
| 449 | p[-1] = (MCode)(int8_t)delta; | 450 | p[-1] = (MCode)(int8_t)delta; |
| 450 | p[-2] = (MCode)(XI_JCCs+(cc&15)); | 451 | p[-2] = (MCode)(XI_JCCs+(cc&15)); |
| 451 | as->mcp = p - 2; | 452 | as->mcp = p - 2; |
| @@ -471,10 +472,11 @@ static void emit_sfixup(ASMState *as, MCLabel source) | |||
| 471 | #define emit_label(as) ((as)->mcp) | 472 | #define emit_label(as) ((as)->mcp) |
| 472 | 473 | ||
| 473 | /* Compute relative 32 bit offset for jump and call instructions. */ | 474 | /* Compute relative 32 bit offset for jump and call instructions. */ |
| 474 | static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) | 475 | static LJ_AINLINE int32_t jmprel(jit_State *J, MCode *p, MCode *target) |
| 475 | { | 476 | { |
| 476 | ptrdiff_t delta = target - p; | 477 | ptrdiff_t delta = target - p; |
| 477 | lua_assert(delta == (int32_t)delta); | 478 | UNUSED(J); |
| 479 | lj_assertJ(delta == (int32_t)delta, "jump target out of range"); | ||
| 478 | return (int32_t)delta; | 480 | return (int32_t)delta; |
| 479 | } | 481 | } |
| 480 | 482 | ||
| @@ -482,7 +484,7 @@ static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target) | |||
| 482 | static void emit_jcc(ASMState *as, int cc, MCode *target) | 484 | static void emit_jcc(ASMState *as, int cc, MCode *target) |
| 483 | { | 485 | { |
| 484 | MCode *p = as->mcp; | 486 | MCode *p = as->mcp; |
| 485 | *(int32_t *)(p-4) = jmprel(p, target); | 487 | *(int32_t *)(p-4) = jmprel(as->J, p, target); |
| 486 | p[-5] = (MCode)(XI_JCCn+(cc&15)); | 488 | p[-5] = (MCode)(XI_JCCn+(cc&15)); |
| 487 | p[-6] = 0x0f; | 489 | p[-6] = 0x0f; |
| 488 | as->mcp = p - 6; | 490 | as->mcp = p - 6; |
| @@ -492,7 +494,7 @@ static void emit_jcc(ASMState *as, int cc, MCode *target) | |||
| 492 | static void emit_jmp(ASMState *as, MCode *target) | 494 | static void emit_jmp(ASMState *as, MCode *target) |
| 493 | { | 495 | { |
| 494 | MCode *p = as->mcp; | 496 | MCode *p = as->mcp; |
| 495 | *(int32_t *)(p-4) = jmprel(p, target); | 497 | *(int32_t *)(p-4) = jmprel(as->J, p, target); |
| 496 | p[-5] = XI_JMP; | 498 | p[-5] = XI_JMP; |
| 497 | as->mcp = p - 5; | 499 | as->mcp = p - 5; |
| 498 | } | 500 | } |
| @@ -509,7 +511,7 @@ static void emit_call_(ASMState *as, MCode *target) | |||
| 509 | return; | 511 | return; |
| 510 | } | 512 | } |
| 511 | #endif | 513 | #endif |
| 512 | *(int32_t *)(p-4) = jmprel(p, target); | 514 | *(int32_t *)(p-4) = jmprel(as->J, p, target); |
| 513 | p[-5] = XI_CALL; | 515 | p[-5] = XI_CALL; |
| 514 | as->mcp = p - 5; | 516 | as->mcp = p - 5; |
| 515 | } | 517 | } |
diff --git a/src/lj_err.c b/src/lj_err.c index 52498932..41fbf5c7 100644 --- a/src/lj_err.c +++ b/src/lj_err.c | |||
| @@ -589,7 +589,7 @@ static ptrdiff_t finderrfunc(lua_State *L) | |||
| 589 | return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */ | 589 | return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */ |
| 590 | return 0; | 590 | return 0; |
| 591 | default: | 591 | default: |
| 592 | lua_assert(0); | 592 | lj_assertL(0, "bad frame type"); |
| 593 | return 0; | 593 | return 0; |
| 594 | } | 594 | } |
| 595 | } | 595 | } |
diff --git a/src/lj_func.c b/src/lj_func.c index 9afdb638..fb267885 100644 --- a/src/lj_func.c +++ b/src/lj_func.c | |||
| @@ -24,9 +24,11 @@ void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt) | |||
| 24 | 24 | ||
| 25 | /* -- Upvalues ------------------------------------------------------------ */ | 25 | /* -- Upvalues ------------------------------------------------------------ */ |
| 26 | 26 | ||
| 27 | static void unlinkuv(GCupval *uv) | 27 | static void unlinkuv(global_State *g, GCupval *uv) |
| 28 | { | 28 | { |
| 29 | lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); | 29 | UNUSED(g); |
| 30 | lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv, | ||
| 31 | "broken upvalue chain"); | ||
| 30 | setgcrefr(uvnext(uv)->prev, uv->prev); | 32 | setgcrefr(uvnext(uv)->prev, uv->prev); |
| 31 | setgcrefr(uvprev(uv)->next, uv->next); | 33 | setgcrefr(uvprev(uv)->next, uv->next); |
| 32 | } | 34 | } |
| @@ -40,7 +42,7 @@ static GCupval *func_finduv(lua_State *L, TValue *slot) | |||
| 40 | GCupval *uv; | 42 | GCupval *uv; |
| 41 | /* Search the sorted list of open upvalues. */ | 43 | /* Search the sorted list of open upvalues. */ |
| 42 | while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { | 44 | while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) { |
| 43 | lua_assert(!p->closed && uvval(p) != &p->tv); | 45 | lj_assertG(!p->closed && uvval(p) != &p->tv, "closed upvalue in chain"); |
| 44 | if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ | 46 | if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */ |
| 45 | if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ | 47 | if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */ |
| 46 | flipwhite(obj2gco(p)); | 48 | flipwhite(obj2gco(p)); |
| @@ -61,7 +63,8 @@ static GCupval *func_finduv(lua_State *L, TValue *slot) | |||
| 61 | setgcrefr(uv->next, g->uvhead.next); | 63 | setgcrefr(uv->next, g->uvhead.next); |
| 62 | setgcref(uvnext(uv)->prev, obj2gco(uv)); | 64 | setgcref(uvnext(uv)->prev, obj2gco(uv)); |
| 63 | setgcref(g->uvhead.next, obj2gco(uv)); | 65 | setgcref(g->uvhead.next, obj2gco(uv)); |
| 64 | lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); | 66 | lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv, |
| 67 | "broken upvalue chain"); | ||
| 65 | return uv; | 68 | return uv; |
| 66 | } | 69 | } |
| 67 | 70 | ||
| @@ -84,12 +87,13 @@ void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level) | |||
| 84 | while (gcref(L->openupval) != NULL && | 87 | while (gcref(L->openupval) != NULL && |
| 85 | uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { | 88 | uvval((uv = gco2uv(gcref(L->openupval)))) >= level) { |
| 86 | GCobj *o = obj2gco(uv); | 89 | GCobj *o = obj2gco(uv); |
| 87 | lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv); | 90 | lj_assertG(!isblack(o), "bad black upvalue"); |
| 91 | lj_assertG(!uv->closed && uvval(uv) != &uv->tv, "closed upvalue in chain"); | ||
| 88 | setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ | 92 | setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */ |
| 89 | if (isdead(g, o)) { | 93 | if (isdead(g, o)) { |
| 90 | lj_func_freeuv(g, uv); | 94 | lj_func_freeuv(g, uv); |
| 91 | } else { | 95 | } else { |
| 92 | unlinkuv(uv); | 96 | unlinkuv(g, uv); |
| 93 | lj_gc_closeuv(g, uv); | 97 | lj_gc_closeuv(g, uv); |
| 94 | } | 98 | } |
| 95 | } | 99 | } |
| @@ -98,7 +102,7 @@ void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level) | |||
| 98 | void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) | 102 | void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv) |
| 99 | { | 103 | { |
| 100 | if (!uv->closed) | 104 | if (!uv->closed) |
| 101 | unlinkuv(uv); | 105 | unlinkuv(g, uv); |
| 102 | lj_mem_freet(g, uv); | 106 | lj_mem_freet(g, uv); |
| 103 | } | 107 | } |
| 104 | 108 | ||
diff --git a/src/lj_gc.c b/src/lj_gc.c index 81439aab..671b5983 100644 --- a/src/lj_gc.c +++ b/src/lj_gc.c | |||
| @@ -42,7 +42,8 @@ | |||
| 42 | 42 | ||
| 43 | /* Mark a TValue (if needed). */ | 43 | /* Mark a TValue (if needed). */ |
| 44 | #define gc_marktv(g, tv) \ | 44 | #define gc_marktv(g, tv) \ |
| 45 | { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \ | 45 | { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \ |
| 46 | "TValue and GC type mismatch"); \ | ||
| 46 | if (tviswhite(tv)) gc_mark(g, gcV(tv)); } | 47 | if (tviswhite(tv)) gc_mark(g, gcV(tv)); } |
| 47 | 48 | ||
| 48 | /* Mark a GCobj (if needed). */ | 49 | /* Mark a GCobj (if needed). */ |
| @@ -56,7 +57,8 @@ | |||
| 56 | static void gc_mark(global_State *g, GCobj *o) | 57 | static void gc_mark(global_State *g, GCobj *o) |
| 57 | { | 58 | { |
| 58 | int gct = o->gch.gct; | 59 | int gct = o->gch.gct; |
| 59 | lua_assert(iswhite(o) && !isdead(g, o)); | 60 | lj_assertG(iswhite(o), "mark of non-white object"); |
| 61 | lj_assertG(!isdead(g, o), "mark of dead object"); | ||
| 60 | white2gray(o); | 62 | white2gray(o); |
| 61 | if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { | 63 | if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { |
| 62 | GCtab *mt = tabref(gco2ud(o)->metatable); | 64 | GCtab *mt = tabref(gco2ud(o)->metatable); |
| @@ -69,8 +71,9 @@ static void gc_mark(global_State *g, GCobj *o) | |||
| 69 | if (uv->closed) | 71 | if (uv->closed) |
| 70 | gray2black(o); /* Closed upvalues are never gray. */ | 72 | gray2black(o); /* Closed upvalues are never gray. */ |
| 71 | } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { | 73 | } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { |
| 72 | lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || | 74 | lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || |
| 73 | gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE); | 75 | gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE, |
| 76 | "bad GC type %d", gct); | ||
| 74 | setgcrefr(o->gch.gclist, g->gc.gray); | 77 | setgcrefr(o->gch.gclist, g->gc.gray); |
| 75 | setgcref(g->gc.gray, o); | 78 | setgcref(g->gc.gray, o); |
| 76 | } | 79 | } |
| @@ -103,7 +106,8 @@ static void gc_mark_uv(global_State *g) | |||
| 103 | { | 106 | { |
| 104 | GCupval *uv; | 107 | GCupval *uv; |
| 105 | for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { | 108 | for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { |
| 106 | lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv); | 109 | lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv, |
| 110 | "broken upvalue chain"); | ||
| 107 | if (isgray(obj2gco(uv))) | 111 | if (isgray(obj2gco(uv))) |
| 108 | gc_marktv(g, uvval(uv)); | 112 | gc_marktv(g, uvval(uv)); |
| 109 | } | 113 | } |
| @@ -198,7 +202,7 @@ static int gc_traverse_tab(global_State *g, GCtab *t) | |||
| 198 | for (i = 0; i <= hmask; i++) { | 202 | for (i = 0; i <= hmask; i++) { |
| 199 | Node *n = &node[i]; | 203 | Node *n = &node[i]; |
| 200 | if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ | 204 | if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ |
| 201 | lua_assert(!tvisnil(&n->key)); | 205 | lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot"); |
| 202 | if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); | 206 | if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); |
| 203 | if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); | 207 | if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); |
| 204 | } | 208 | } |
| @@ -213,7 +217,8 @@ static void gc_traverse_func(global_State *g, GCfunc *fn) | |||
| 213 | gc_markobj(g, tabref(fn->c.env)); | 217 | gc_markobj(g, tabref(fn->c.env)); |
| 214 | if (isluafunc(fn)) { | 218 | if (isluafunc(fn)) { |
| 215 | uint32_t i; | 219 | uint32_t i; |
| 216 | lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv); | 220 | lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv, |
| 221 | "function upvalues out of range"); | ||
| 217 | gc_markobj(g, funcproto(fn)); | 222 | gc_markobj(g, funcproto(fn)); |
| 218 | for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ | 223 | for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ |
| 219 | gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); | 224 | gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); |
| @@ -229,7 +234,7 @@ static void gc_traverse_func(global_State *g, GCfunc *fn) | |||
| 229 | static void gc_marktrace(global_State *g, TraceNo traceno) | 234 | static void gc_marktrace(global_State *g, TraceNo traceno) |
| 230 | { | 235 | { |
| 231 | GCobj *o = obj2gco(traceref(G2J(g), traceno)); | 236 | GCobj *o = obj2gco(traceref(G2J(g), traceno)); |
| 232 | lua_assert(traceno != G2J(g)->cur.traceno); | 237 | lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped"); |
| 233 | if (iswhite(o)) { | 238 | if (iswhite(o)) { |
| 234 | white2gray(o); | 239 | white2gray(o); |
| 235 | setgcrefr(o->gch.gclist, g->gc.gray); | 240 | setgcrefr(o->gch.gclist, g->gc.gray); |
| @@ -310,7 +315,7 @@ static size_t propagatemark(global_State *g) | |||
| 310 | { | 315 | { |
| 311 | GCobj *o = gcref(g->gc.gray); | 316 | GCobj *o = gcref(g->gc.gray); |
| 312 | int gct = o->gch.gct; | 317 | int gct = o->gch.gct; |
| 313 | lua_assert(isgray(o)); | 318 | lj_assertG(isgray(o), "propagation of non-gray object"); |
| 314 | gray2black(o); | 319 | gray2black(o); |
| 315 | setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ | 320 | setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ |
| 316 | if (LJ_LIKELY(gct == ~LJ_TTAB)) { | 321 | if (LJ_LIKELY(gct == ~LJ_TTAB)) { |
| @@ -342,7 +347,7 @@ static size_t propagatemark(global_State *g) | |||
| 342 | return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + | 347 | return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + |
| 343 | T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); | 348 | T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); |
| 344 | #else | 349 | #else |
| 345 | lua_assert(0); | 350 | lj_assertG(0, "bad GC type %d", gct); |
| 346 | return 0; | 351 | return 0; |
| 347 | #endif | 352 | #endif |
| 348 | } | 353 | } |
| @@ -396,11 +401,13 @@ static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) | |||
| 396 | if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ | 401 | if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ |
| 397 | gc_fullsweep(g, &gco2th(o)->openupval); | 402 | gc_fullsweep(g, &gco2th(o)->openupval); |
| 398 | if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ | 403 | if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ |
| 399 | lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED)); | 404 | lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED), |
| 405 | "sweep of undead object"); | ||
| 400 | makewhite(g, o); /* Value is alive, change to the current white. */ | 406 | makewhite(g, o); /* Value is alive, change to the current white. */ |
| 401 | p = &o->gch.nextgc; | 407 | p = &o->gch.nextgc; |
| 402 | } else { /* Otherwise value is dead, free it. */ | 408 | } else { /* Otherwise value is dead, free it. */ |
| 403 | lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED); | 409 | lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED, |
| 410 | "sweep of unlive object"); | ||
| 404 | setgcrefr(*p, o->gch.nextgc); | 411 | setgcrefr(*p, o->gch.nextgc); |
| 405 | if (o == gcref(g->gc.root)) | 412 | if (o == gcref(g->gc.root)) |
| 406 | setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */ | 413 | setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */ |
| @@ -427,11 +434,12 @@ static int gc_mayclear(cTValue *o, int val) | |||
| 427 | } | 434 | } |
| 428 | 435 | ||
| 429 | /* Clear collected entries from weak tables. */ | 436 | /* Clear collected entries from weak tables. */ |
| 430 | static void gc_clearweak(GCobj *o) | 437 | static void gc_clearweak(global_State *g, GCobj *o) |
| 431 | { | 438 | { |
| 439 | UNUSED(g); | ||
| 432 | while (o) { | 440 | while (o) { |
| 433 | GCtab *t = gco2tab(o); | 441 | GCtab *t = gco2tab(o); |
| 434 | lua_assert((t->marked & LJ_GC_WEAK)); | 442 | lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table"); |
| 435 | if ((t->marked & LJ_GC_WEAKVAL)) { | 443 | if ((t->marked & LJ_GC_WEAKVAL)) { |
| 436 | MSize i, asize = t->asize; | 444 | MSize i, asize = t->asize; |
| 437 | for (i = 0; i < asize; i++) { | 445 | for (i = 0; i < asize; i++) { |
| @@ -488,7 +496,7 @@ static void gc_finalize(lua_State *L) | |||
| 488 | global_State *g = G(L); | 496 | global_State *g = G(L); |
| 489 | GCobj *o = gcnext(gcref(g->gc.mmudata)); | 497 | GCobj *o = gcnext(gcref(g->gc.mmudata)); |
| 490 | cTValue *mo; | 498 | cTValue *mo; |
| 491 | lua_assert(tvref(g->jit_base) == NULL); /* Must not be called on trace. */ | 499 | lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace"); |
| 492 | /* Unchain from list of userdata to be finalized. */ | 500 | /* Unchain from list of userdata to be finalized. */ |
| 493 | if (o == gcref(g->gc.mmudata)) | 501 | if (o == gcref(g->gc.mmudata)) |
| 494 | setgcrefnull(g->gc.mmudata); | 502 | setgcrefnull(g->gc.mmudata); |
| @@ -580,7 +588,7 @@ static void atomic(global_State *g, lua_State *L) | |||
| 580 | 588 | ||
| 581 | setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ | 589 | setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ |
| 582 | setgcrefnull(g->gc.weak); | 590 | setgcrefnull(g->gc.weak); |
| 583 | lua_assert(!iswhite(obj2gco(mainthread(g)))); | 591 | lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white"); |
| 584 | gc_markobj(g, L); /* Mark running thread. */ | 592 | gc_markobj(g, L); /* Mark running thread. */ |
| 585 | gc_traverse_curtrace(g); /* Traverse current trace. */ | 593 | gc_traverse_curtrace(g); /* Traverse current trace. */ |
| 586 | gc_mark_gcroot(g); /* Mark GC roots (again). */ | 594 | gc_mark_gcroot(g); /* Mark GC roots (again). */ |
| @@ -595,7 +603,7 @@ static void atomic(global_State *g, lua_State *L) | |||
| 595 | udsize += gc_propagate_gray(g); /* And propagate the marks. */ | 603 | udsize += gc_propagate_gray(g); /* And propagate the marks. */ |
| 596 | 604 | ||
| 597 | /* All marking done, clear weak tables. */ | 605 | /* All marking done, clear weak tables. */ |
| 598 | gc_clearweak(gcref(g->gc.weak)); | 606 | gc_clearweak(g, gcref(g->gc.weak)); |
| 599 | 607 | ||
| 600 | lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */ | 608 | lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */ |
| 601 | 609 | ||
| @@ -631,14 +639,14 @@ static size_t gc_onestep(lua_State *L) | |||
| 631 | gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ | 639 | gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */ |
| 632 | if (g->gc.sweepstr > g->strmask) | 640 | if (g->gc.sweepstr > g->strmask) |
| 633 | g->gc.state = GCSsweep; /* All string hash chains sweeped. */ | 641 | g->gc.state = GCSsweep; /* All string hash chains sweeped. */ |
| 634 | lua_assert(old >= g->gc.total); | 642 | lj_assertG(old >= g->gc.total, "sweep increased memory"); |
| 635 | g->gc.estimate -= old - g->gc.total; | 643 | g->gc.estimate -= old - g->gc.total; |
| 636 | return GCSWEEPCOST; | 644 | return GCSWEEPCOST; |
| 637 | } | 645 | } |
| 638 | case GCSsweep: { | 646 | case GCSsweep: { |
| 639 | GCSize old = g->gc.total; | 647 | GCSize old = g->gc.total; |
| 640 | setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); | 648 | setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); |
| 641 | lua_assert(old >= g->gc.total); | 649 | lj_assertG(old >= g->gc.total, "sweep increased memory"); |
| 642 | g->gc.estimate -= old - g->gc.total; | 650 | g->gc.estimate -= old - g->gc.total; |
| 643 | if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { | 651 | if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { |
| 644 | if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1) | 652 | if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1) |
| @@ -671,7 +679,7 @@ static size_t gc_onestep(lua_State *L) | |||
| 671 | g->gc.debt = 0; | 679 | g->gc.debt = 0; |
| 672 | return 0; | 680 | return 0; |
| 673 | default: | 681 | default: |
| 674 | lua_assert(0); | 682 | lj_assertG(0, "bad GC state"); |
| 675 | return 0; | 683 | return 0; |
| 676 | } | 684 | } |
| 677 | } | 685 | } |
| @@ -745,7 +753,8 @@ void lj_gc_fullgc(lua_State *L) | |||
| 745 | } | 753 | } |
| 746 | while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) | 754 | while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) |
| 747 | gc_onestep(L); /* Finish sweep. */ | 755 | gc_onestep(L); /* Finish sweep. */ |
| 748 | lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause); | 756 | lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause, |
| 757 | "bad GC state"); | ||
| 749 | /* Now perform a full GC. */ | 758 | /* Now perform a full GC. */ |
| 750 | g->gc.state = GCSpause; | 759 | g->gc.state = GCSpause; |
| 751 | do { gc_onestep(L); } while (g->gc.state != GCSpause); | 760 | do { gc_onestep(L); } while (g->gc.state != GCSpause); |
| @@ -758,9 +767,11 @@ void lj_gc_fullgc(lua_State *L) | |||
| 758 | /* Move the GC propagation frontier forward. */ | 767 | /* Move the GC propagation frontier forward. */ |
| 759 | void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) | 768 | void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) |
| 760 | { | 769 | { |
| 761 | lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); | 770 | lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o), |
| 762 | lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); | 771 | "bad object states for forward barrier"); |
| 763 | lua_assert(o->gch.gct != ~LJ_TTAB); | 772 | lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause, |
| 773 | "bad GC state"); | ||
| 774 | lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table"); | ||
| 764 | /* Preserve invariant during propagation. Otherwise it doesn't matter. */ | 775 | /* Preserve invariant during propagation. Otherwise it doesn't matter. */ |
| 765 | if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) | 776 | if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) |
| 766 | gc_mark(g, v); /* Move frontier forward. */ | 777 | gc_mark(g, v); /* Move frontier forward. */ |
| @@ -797,7 +808,8 @@ void lj_gc_closeuv(global_State *g, GCupval *uv) | |||
| 797 | lj_gc_barrierf(g, o, gcV(&uv->tv)); | 808 | lj_gc_barrierf(g, o, gcV(&uv->tv)); |
| 798 | } else { | 809 | } else { |
| 799 | makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ | 810 | makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */ |
| 800 | lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); | 811 | lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause, |
| 812 | "bad GC state"); | ||
| 801 | } | 813 | } |
| 802 | } | 814 | } |
| 803 | } | 815 | } |
| @@ -817,12 +829,13 @@ void lj_gc_barriertrace(global_State *g, uint32_t traceno) | |||
| 817 | void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz) | 829 | void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz) |
| 818 | { | 830 | { |
| 819 | global_State *g = G(L); | 831 | global_State *g = G(L); |
| 820 | lua_assert((osz == 0) == (p == NULL)); | 832 | lj_assertG((osz == 0) == (p == NULL), "realloc API violation"); |
| 821 | p = g->allocf(g->allocd, p, osz, nsz); | 833 | p = g->allocf(g->allocd, p, osz, nsz); |
| 822 | if (p == NULL && nsz > 0) | 834 | if (p == NULL && nsz > 0) |
| 823 | lj_err_mem(L); | 835 | lj_err_mem(L); |
| 824 | lua_assert((nsz == 0) == (p == NULL)); | 836 | lj_assertG((nsz == 0) == (p == NULL), "allocf API violation"); |
| 825 | lua_assert(checkptrGC(p)); | 837 | lj_assertG(checkptrGC(p), |
| 838 | "allocated memory address %p outside required range", p); | ||
| 826 | g->gc.total = (g->gc.total - osz) + nsz; | 839 | g->gc.total = (g->gc.total - osz) + nsz; |
| 827 | return p; | 840 | return p; |
| 828 | } | 841 | } |
| @@ -834,7 +847,8 @@ void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size) | |||
| 834 | GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); | 847 | GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size); |
| 835 | if (o == NULL) | 848 | if (o == NULL) |
| 836 | lj_err_mem(L); | 849 | lj_err_mem(L); |
| 837 | lua_assert(checkptrGC(o)); | 850 | lj_assertG(checkptrGC(o), |
| 851 | "allocated memory address %p outside required range", o); | ||
| 838 | g->gc.total += size; | 852 | g->gc.total += size; |
| 839 | setgcrefr(o->gch.nextgc, g->gc.root); | 853 | setgcrefr(o->gch.nextgc, g->gc.root); |
| 840 | setgcref(g->gc.root, o); | 854 | setgcref(g->gc.root, o); |
diff --git a/src/lj_gc.h b/src/lj_gc.h index 1725c639..6fc88cf9 100644 --- a/src/lj_gc.h +++ b/src/lj_gc.h | |||
| @@ -81,8 +81,10 @@ LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno); | |||
| 81 | static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) | 81 | static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t) |
| 82 | { | 82 | { |
| 83 | GCobj *o = obj2gco(t); | 83 | GCobj *o = obj2gco(t); |
| 84 | lua_assert(isblack(o) && !isdead(g, o)); | 84 | lj_assertG(isblack(o) && !isdead(g, o), |
| 85 | lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause); | 85 | "bad object states for backward barrier"); |
| 86 | lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause, | ||
| 87 | "bad GC state"); | ||
| 86 | black2gray(o); | 88 | black2gray(o); |
| 87 | setgcrefr(t->gclist, g->gc.grayagain); | 89 | setgcrefr(t->gclist, g->gc.grayagain); |
| 88 | setgcref(g->gc.grayagain, o); | 90 | setgcref(g->gc.grayagain, o); |
diff --git a/src/lj_gdbjit.c b/src/lj_gdbjit.c index a20d9ae2..69585e51 100644 --- a/src/lj_gdbjit.c +++ b/src/lj_gdbjit.c | |||
| @@ -724,7 +724,7 @@ static void gdbjit_buildobj(GDBJITctx *ctx) | |||
| 724 | SECTALIGN(ctx->p, sizeof(uintptr_t)); | 724 | SECTALIGN(ctx->p, sizeof(uintptr_t)); |
| 725 | gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); | 725 | gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe); |
| 726 | ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); | 726 | ctx->objsize = (size_t)((char *)ctx->p - (char *)obj); |
| 727 | lua_assert(ctx->objsize < sizeof(GDBJITobj)); | 727 | lj_assertX(ctx->objsize < sizeof(GDBJITobj), "GDBJITobj overflow"); |
| 728 | } | 728 | } |
| 729 | 729 | ||
| 730 | #undef SECTALIGN | 730 | #undef SECTALIGN |
| @@ -782,7 +782,8 @@ void lj_gdbjit_addtrace(jit_State *J, GCtrace *T) | |||
| 782 | ctx.spadjp = CFRAME_SIZE_JIT + | 782 | ctx.spadjp = CFRAME_SIZE_JIT + |
| 783 | (MSize)(parent ? traceref(J, parent)->spadjust : 0); | 783 | (MSize)(parent ? traceref(J, parent)->spadjust : 0); |
| 784 | ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; | 784 | ctx.spadj = CFRAME_SIZE_JIT + T->spadjust; |
| 785 | lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); | 785 | lj_assertJ(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc, |
| 786 | "start PC out of range"); | ||
| 786 | ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); | 787 | ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); |
| 787 | ctx.filename = proto_chunknamestr(pt); | 788 | ctx.filename = proto_chunknamestr(pt); |
| 788 | if (*ctx.filename == '@' || *ctx.filename == '=') | 789 | if (*ctx.filename == '@' || *ctx.filename == '=') |
diff --git a/src/lj_ir.c b/src/lj_ir.c index 1dd25f23..600e432c 100644 --- a/src/lj_ir.c +++ b/src/lj_ir.c | |||
| @@ -38,7 +38,7 @@ | |||
| 38 | #define fins (&J->fold.ins) | 38 | #define fins (&J->fold.ins) |
| 39 | 39 | ||
| 40 | /* Pass IR on to next optimization in chain (FOLD). */ | 40 | /* Pass IR on to next optimization in chain (FOLD). */ |
| 41 | #define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) | 41 | #define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J)) |
| 42 | 42 | ||
| 43 | /* -- IR tables ----------------------------------------------------------- */ | 43 | /* -- IR tables ----------------------------------------------------------- */ |
| 44 | 44 | ||
| @@ -90,8 +90,9 @@ static void lj_ir_growbot(jit_State *J) | |||
| 90 | { | 90 | { |
| 91 | IRIns *baseir = J->irbuf + J->irbotlim; | 91 | IRIns *baseir = J->irbuf + J->irbotlim; |
| 92 | MSize szins = J->irtoplim - J->irbotlim; | 92 | MSize szins = J->irtoplim - J->irbotlim; |
| 93 | lua_assert(szins != 0); | 93 | lj_assertJ(szins != 0, "zero IR size"); |
| 94 | lua_assert(J->cur.nk == J->irbotlim || J->cur.nk-1 == J->irbotlim); | 94 | lj_assertJ(J->cur.nk == J->irbotlim || J->cur.nk-1 == J->irbotlim, |
| 95 | "unexpected IR growth"); | ||
| 95 | if (J->cur.nins + (szins >> 1) < J->irtoplim) { | 96 | if (J->cur.nins + (szins >> 1) < J->irtoplim) { |
| 96 | /* More than half of the buffer is free on top: shift up by a quarter. */ | 97 | /* More than half of the buffer is free on top: shift up by a quarter. */ |
| 97 | MSize ofs = szins >> 2; | 98 | MSize ofs = szins >> 2; |
| @@ -148,9 +149,10 @@ TRef lj_ir_call(jit_State *J, IRCallID id, ...) | |||
| 148 | /* Load field of type t from GG_State + offset. Must be 32 bit aligned. */ | 149 | /* Load field of type t from GG_State + offset. Must be 32 bit aligned. */ |
| 149 | LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs) | 150 | LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs) |
| 150 | { | 151 | { |
| 151 | lua_assert((ofs & 3) == 0); | 152 | lj_assertJ((ofs & 3) == 0, "unaligned GG_State field offset"); |
| 152 | ofs >>= 2; | 153 | ofs >>= 2; |
| 153 | lua_assert(ofs >= IRFL__MAX && ofs <= 0x3ff); /* 10 bit FOLD key limit. */ | 154 | lj_assertJ(ofs >= IRFL__MAX && ofs <= 0x3ff, |
| 155 | "GG_State field offset breaks 10 bit FOLD key limit"); | ||
| 154 | lj_ir_set(J, IRT(IR_FLOAD, t), REF_NIL, ofs); | 156 | lj_ir_set(J, IRT(IR_FLOAD, t), REF_NIL, ofs); |
| 155 | return lj_opt_fold(J); | 157 | return lj_opt_fold(J); |
| 156 | } | 158 | } |
| @@ -181,7 +183,7 @@ static LJ_AINLINE IRRef ir_nextk(jit_State *J) | |||
| 181 | static LJ_AINLINE IRRef ir_nextk64(jit_State *J) | 183 | static LJ_AINLINE IRRef ir_nextk64(jit_State *J) |
| 182 | { | 184 | { |
| 183 | IRRef ref = J->cur.nk - 2; | 185 | IRRef ref = J->cur.nk - 2; |
| 184 | lua_assert(J->state != LJ_TRACE_ASM); | 186 | lj_assertJ(J->state != LJ_TRACE_ASM, "bad JIT state"); |
| 185 | if (LJ_UNLIKELY(ref < J->irbotlim)) lj_ir_growbot(J); | 187 | if (LJ_UNLIKELY(ref < J->irbotlim)) lj_ir_growbot(J); |
| 186 | J->cur.nk = ref; | 188 | J->cur.nk = ref; |
| 187 | return ref; | 189 | return ref; |
| @@ -277,7 +279,7 @@ TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t) | |||
| 277 | { | 279 | { |
| 278 | IRIns *ir, *cir = J->cur.ir; | 280 | IRIns *ir, *cir = J->cur.ir; |
| 279 | IRRef ref; | 281 | IRRef ref; |
| 280 | lua_assert(!isdead(J2G(J), o)); | 282 | lj_assertJ(!isdead(J2G(J), o), "interning of dead GC object"); |
| 281 | for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) | 283 | for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) |
| 282 | if (ir_kgc(&cir[ref]) == o) | 284 | if (ir_kgc(&cir[ref]) == o) |
| 283 | goto found; | 285 | goto found; |
| @@ -299,7 +301,7 @@ TRef lj_ir_ktrace(jit_State *J) | |||
| 299 | { | 301 | { |
| 300 | IRRef ref = ir_nextkgc(J); | 302 | IRRef ref = ir_nextkgc(J); |
| 301 | IRIns *ir = IR(ref); | 303 | IRIns *ir = IR(ref); |
| 302 | lua_assert(irt_toitype_(IRT_P64) == LJ_TTRACE); | 304 | lj_assertJ(irt_toitype_(IRT_P64) == LJ_TTRACE, "mismatched type mapping"); |
| 303 | ir->t.irt = IRT_P64; | 305 | ir->t.irt = IRT_P64; |
| 304 | ir->o = LJ_GC64 ? IR_KNUM : IR_KNULL; /* Not IR_KGC yet, but same size. */ | 306 | ir->o = LJ_GC64 ? IR_KNUM : IR_KNULL; /* Not IR_KGC yet, but same size. */ |
| 305 | ir->op12 = 0; | 307 | ir->op12 = 0; |
| @@ -313,7 +315,7 @@ TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) | |||
| 313 | IRIns *ir, *cir = J->cur.ir; | 315 | IRIns *ir, *cir = J->cur.ir; |
| 314 | IRRef ref; | 316 | IRRef ref; |
| 315 | #if LJ_64 && !LJ_GC64 | 317 | #if LJ_64 && !LJ_GC64 |
| 316 | lua_assert((void *)(uintptr_t)u32ptr(ptr) == ptr); | 318 | lj_assertJ((void *)(uintptr_t)u32ptr(ptr) == ptr, "out-of-range GC pointer"); |
| 317 | #endif | 319 | #endif |
| 318 | for (ref = J->chain[op]; ref; ref = cir[ref].prev) | 320 | for (ref = J->chain[op]; ref; ref = cir[ref].prev) |
| 319 | if (ir_kptr(&cir[ref]) == ptr) | 321 | if (ir_kptr(&cir[ref]) == ptr) |
| @@ -360,7 +362,8 @@ TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot) | |||
| 360 | IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); | 362 | IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot); |
| 361 | IRRef ref; | 363 | IRRef ref; |
| 362 | /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */ | 364 | /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */ |
| 363 | lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot); | 365 | lj_assertJ(tref_isk(key) && slot == (IRRef)(IRRef1)slot, |
| 366 | "out-of-range key/slot"); | ||
| 364 | for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) | 367 | for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev) |
| 365 | if (cir[ref].op12 == op12) | 368 | if (cir[ref].op12 == op12) |
| 366 | goto found; | 369 | goto found; |
| @@ -381,7 +384,7 @@ found: | |||
| 381 | void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) | 384 | void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) |
| 382 | { | 385 | { |
| 383 | UNUSED(L); | 386 | UNUSED(L); |
| 384 | lua_assert(ir->o != IR_KSLOT); /* Common mistake. */ | 387 | lj_assertL(ir->o != IR_KSLOT, "unexpected KSLOT"); /* Common mistake. */ |
| 385 | switch (ir->o) { | 388 | switch (ir->o) { |
| 386 | case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break; | 389 | case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break; |
| 387 | case IR_KINT: setintV(tv, ir->i); break; | 390 | case IR_KINT: setintV(tv, ir->i); break; |
| @@ -397,7 +400,7 @@ void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir) | |||
| 397 | break; | 400 | break; |
| 398 | } | 401 | } |
| 399 | #endif | 402 | #endif |
| 400 | default: lua_assert(0); break; | 403 | default: lj_assertL(0, "bad IR constant op %d", ir->o); break; |
| 401 | } | 404 | } |
| 402 | } | 405 | } |
| 403 | 406 | ||
| @@ -457,7 +460,7 @@ int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op) | |||
| 457 | case IR_UGE: return !(a < b); | 460 | case IR_UGE: return !(a < b); |
| 458 | case IR_ULE: return !(a > b); | 461 | case IR_ULE: return !(a > b); |
| 459 | case IR_UGT: return !(a <= b); | 462 | case IR_UGT: return !(a <= b); |
| 460 | default: lua_assert(0); return 0; | 463 | default: lj_assertX(0, "bad IR op %d", op); return 0; |
| 461 | } | 464 | } |
| 462 | } | 465 | } |
| 463 | 466 | ||
| @@ -470,7 +473,7 @@ int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op) | |||
| 470 | case IR_GE: return (res >= 0); | 473 | case IR_GE: return (res >= 0); |
| 471 | case IR_LE: return (res <= 0); | 474 | case IR_LE: return (res <= 0); |
| 472 | case IR_GT: return (res > 0); | 475 | case IR_GT: return (res > 0); |
| 473 | default: lua_assert(0); return 0; | 476 | default: lj_assertX(0, "bad IR op %d", op); return 0; |
| 474 | } | 477 | } |
| 475 | } | 478 | } |
| 476 | 479 | ||
diff --git a/src/lj_ir.h b/src/lj_ir.h index a801d5d0..6116f7e5 100644 --- a/src/lj_ir.h +++ b/src/lj_ir.h | |||
| @@ -413,11 +413,12 @@ static LJ_AINLINE IRType itype2irt(const TValue *tv) | |||
| 413 | 413 | ||
| 414 | static LJ_AINLINE uint32_t irt_toitype_(IRType t) | 414 | static LJ_AINLINE uint32_t irt_toitype_(IRType t) |
| 415 | { | 415 | { |
| 416 | lua_assert(!LJ_64 || LJ_GC64 || t != IRT_LIGHTUD); | 416 | lj_assertX(!LJ_64 || LJ_GC64 || t != IRT_LIGHTUD, |
| 417 | "no plain type tag for lightuserdata"); | ||
| 417 | if (LJ_DUALNUM && t > IRT_NUM) { | 418 | if (LJ_DUALNUM && t > IRT_NUM) { |
| 418 | return LJ_TISNUM; | 419 | return LJ_TISNUM; |
| 419 | } else { | 420 | } else { |
| 420 | lua_assert(t <= IRT_NUM); | 421 | lj_assertX(t <= IRT_NUM, "no plain type tag for IR type %d", t); |
| 421 | return ~(uint32_t)t; | 422 | return ~(uint32_t)t; |
| 422 | } | 423 | } |
| 423 | } | 424 | } |
diff --git a/src/lj_jit.h b/src/lj_jit.h index a9c602f0..fa754b64 100644 --- a/src/lj_jit.h +++ b/src/lj_jit.h | |||
| @@ -510,6 +510,12 @@ LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */ | |||
| 510 | #endif | 510 | #endif |
| 511 | jit_State; | 511 | jit_State; |
| 512 | 512 | ||
| 513 | #ifdef LUA_USE_ASSERT | ||
| 514 | #define lj_assertJ(c, ...) lj_assertG_(J2G(J), (c), __VA_ARGS__) | ||
| 515 | #else | ||
| 516 | #define lj_assertJ(c, ...) ((void)J) | ||
| 517 | #endif | ||
| 518 | |||
| 513 | /* Trivial PRNG e.g. used for penalty randomization. */ | 519 | /* Trivial PRNG e.g. used for penalty randomization. */ |
| 514 | static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) | 520 | static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits) |
| 515 | { | 521 | { |
diff --git a/src/lj_lex.c b/src/lj_lex.c index ada0876e..61c7ff43 100644 --- a/src/lj_lex.c +++ b/src/lj_lex.c | |||
| @@ -82,7 +82,7 @@ static LJ_AINLINE LexChar lex_savenext(LexState *ls) | |||
| 82 | static void lex_newline(LexState *ls) | 82 | static void lex_newline(LexState *ls) |
| 83 | { | 83 | { |
| 84 | LexChar old = ls->c; | 84 | LexChar old = ls->c; |
| 85 | lua_assert(lex_iseol(ls)); | 85 | lj_assertLS(lex_iseol(ls), "bad usage"); |
| 86 | lex_next(ls); /* Skip "\n" or "\r". */ | 86 | lex_next(ls); /* Skip "\n" or "\r". */ |
| 87 | if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */ | 87 | if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */ |
| 88 | if (++ls->linenumber >= LJ_MAX_LINE) | 88 | if (++ls->linenumber >= LJ_MAX_LINE) |
| @@ -96,7 +96,7 @@ static void lex_number(LexState *ls, TValue *tv) | |||
| 96 | { | 96 | { |
| 97 | StrScanFmt fmt; | 97 | StrScanFmt fmt; |
| 98 | LexChar c, xp = 'e'; | 98 | LexChar c, xp = 'e'; |
| 99 | lua_assert(lj_char_isdigit(ls->c)); | 99 | lj_assertLS(lj_char_isdigit(ls->c), "bad usage"); |
| 100 | if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x') | 100 | if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x') |
| 101 | xp = 'p'; | 101 | xp = 'p'; |
| 102 | while (lj_char_isident(ls->c) || ls->c == '.' || | 102 | while (lj_char_isident(ls->c) || ls->c == '.' || |
| @@ -116,7 +116,8 @@ static void lex_number(LexState *ls, TValue *tv) | |||
| 116 | } else if (fmt != STRSCAN_ERROR) { | 116 | } else if (fmt != STRSCAN_ERROR) { |
| 117 | lua_State *L = ls->L; | 117 | lua_State *L = ls->L; |
| 118 | GCcdata *cd; | 118 | GCcdata *cd; |
| 119 | lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG); | 119 | lj_assertLS(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG, |
| 120 | "unexpected number format %d", fmt); | ||
| 120 | if (!ctype_ctsG(G(L))) { | 121 | if (!ctype_ctsG(G(L))) { |
| 121 | ptrdiff_t oldtop = savestack(L, L->top); | 122 | ptrdiff_t oldtop = savestack(L, L->top); |
| 122 | luaopen_ffi(L); /* Load FFI library on-demand. */ | 123 | luaopen_ffi(L); /* Load FFI library on-demand. */ |
| @@ -133,7 +134,8 @@ static void lex_number(LexState *ls, TValue *tv) | |||
| 133 | lj_parse_keepcdata(ls, tv, cd); | 134 | lj_parse_keepcdata(ls, tv, cd); |
| 134 | #endif | 135 | #endif |
| 135 | } else { | 136 | } else { |
| 136 | lua_assert(fmt == STRSCAN_ERROR); | 137 | lj_assertLS(fmt == STRSCAN_ERROR, |
| 138 | "unexpected number format %d", fmt); | ||
| 137 | lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); | 139 | lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER); |
| 138 | } | 140 | } |
| 139 | } | 141 | } |
| @@ -143,7 +145,7 @@ static int lex_skipeq(LexState *ls) | |||
| 143 | { | 145 | { |
| 144 | int count = 0; | 146 | int count = 0; |
| 145 | LexChar s = ls->c; | 147 | LexChar s = ls->c; |
| 146 | lua_assert(s == '[' || s == ']'); | 148 | lj_assertLS(s == '[' || s == ']', "bad usage"); |
| 147 | while (lex_savenext(ls) == '=' && count < 0x20000000) | 149 | while (lex_savenext(ls) == '=' && count < 0x20000000) |
| 148 | count++; | 150 | count++; |
| 149 | return (ls->c == s) ? count : (-count) - 1; | 151 | return (ls->c == s) ? count : (-count) - 1; |
| @@ -469,7 +471,7 @@ void lj_lex_next(LexState *ls) | |||
| 469 | /* Look ahead for the next token. */ | 471 | /* Look ahead for the next token. */ |
| 470 | LexToken lj_lex_lookahead(LexState *ls) | 472 | LexToken lj_lex_lookahead(LexState *ls) |
| 471 | { | 473 | { |
| 472 | lua_assert(ls->lookahead == TK_eof); | 474 | lj_assertLS(ls->lookahead == TK_eof, "double lookahead"); |
| 473 | ls->lookahead = lex_scan(ls, &ls->lookaheadval); | 475 | ls->lookahead = lex_scan(ls, &ls->lookaheadval); |
| 474 | return ls->lookahead; | 476 | return ls->lookahead; |
| 475 | } | 477 | } |
diff --git a/src/lj_lex.h b/src/lj_lex.h index 8665aa2a..e1b5610b 100644 --- a/src/lj_lex.h +++ b/src/lj_lex.h | |||
| @@ -84,4 +84,10 @@ LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken tok); | |||
| 84 | LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...); | 84 | LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...); |
| 85 | LJ_FUNC void lj_lex_init(lua_State *L); | 85 | LJ_FUNC void lj_lex_init(lua_State *L); |
| 86 | 86 | ||
| 87 | #ifdef LUA_USE_ASSERT | ||
| 88 | #define lj_assertLS(c, ...) (lj_assertG_(G(ls->L), (c), __VA_ARGS__)) | ||
| 89 | #else | ||
| 90 | #define lj_assertLS(c, ...) ((void)ls) | ||
| 91 | #endif | ||
| 92 | |||
| 87 | #endif | 93 | #endif |
diff --git a/src/lj_load.c b/src/lj_load.c index 746bf428..e5918c04 100644 --- a/src/lj_load.c +++ b/src/lj_load.c | |||
| @@ -159,7 +159,7 @@ LUALIB_API int luaL_loadstring(lua_State *L, const char *s) | |||
| 159 | LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) | 159 | LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data) |
| 160 | { | 160 | { |
| 161 | cTValue *o = L->top-1; | 161 | cTValue *o = L->top-1; |
| 162 | api_check(L, L->top > L->base); | 162 | lj_checkapi(L->top > L->base, "top slot empty"); |
| 163 | if (tvisfunc(o) && isluafunc(funcV(o))) | 163 | if (tvisfunc(o) && isluafunc(funcV(o))) |
| 164 | return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); | 164 | return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0); |
| 165 | else | 165 | else |
diff --git a/src/lj_mcode.c b/src/lj_mcode.c index bc3e922f..e64c5878 100644 --- a/src/lj_mcode.c +++ b/src/lj_mcode.c | |||
| @@ -351,7 +351,7 @@ MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish) | |||
| 351 | /* Otherwise search through the list of MCode areas. */ | 351 | /* Otherwise search through the list of MCode areas. */ |
| 352 | for (;;) { | 352 | for (;;) { |
| 353 | mc = ((MCLink *)mc)->next; | 353 | mc = ((MCLink *)mc)->next; |
| 354 | lua_assert(mc != NULL); | 354 | lj_assertJ(mc != NULL, "broken MCode area chain"); |
| 355 | if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { | 355 | if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) { |
| 356 | if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN))) | 356 | if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN))) |
| 357 | mcode_protfail(J); | 357 | mcode_protfail(J); |
diff --git a/src/lj_meta.c b/src/lj_meta.c index 7391ff00..2cdb6a0f 100644 --- a/src/lj_meta.c +++ b/src/lj_meta.c | |||
| @@ -47,7 +47,7 @@ void lj_meta_init(lua_State *L) | |||
| 47 | cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) | 47 | cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name) |
| 48 | { | 48 | { |
| 49 | cTValue *mo = lj_tab_getstr(mt, name); | 49 | cTValue *mo = lj_tab_getstr(mt, name); |
| 50 | lua_assert(mm <= MM_FAST); | 50 | lj_assertX(mm <= MM_FAST, "bad metamethod %d", mm); |
| 51 | if (!mo || tvisnil(mo)) { /* No metamethod? */ | 51 | if (!mo || tvisnil(mo)) { /* No metamethod? */ |
| 52 | mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */ | 52 | mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */ |
| 53 | return NULL; | 53 | return NULL; |
| @@ -363,7 +363,7 @@ TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins) | |||
| 363 | } else if (op == BC_ISEQN) { | 363 | } else if (op == BC_ISEQN) { |
| 364 | o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; | 364 | o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)]; |
| 365 | } else { | 365 | } else { |
| 366 | lua_assert(op == BC_ISEQP); | 366 | lj_assertL(op == BC_ISEQP, "bad bytecode op %d", op); |
| 367 | setpriV(&tv, ~bc_d(ins)); | 367 | setpriV(&tv, ~bc_d(ins)); |
| 368 | o2 = &tv; | 368 | o2 = &tv; |
| 369 | } | 369 | } |
| @@ -426,7 +426,7 @@ void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp) | |||
| 426 | { | 426 | { |
| 427 | L->top = curr_topL(L); | 427 | L->top = curr_topL(L); |
| 428 | ra++; tp--; | 428 | ra++; tp--; |
| 429 | lua_assert(LJ_DUALNUM || tp != ~LJ_TNUMX); /* ISTYPE -> ISNUM broken. */ | 429 | lj_assertL(LJ_DUALNUM || tp != ~LJ_TNUMX, "bad type for ISTYPE"); |
| 430 | if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra); | 430 | if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra); |
| 431 | else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra); | 431 | else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra); |
| 432 | else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra); | 432 | else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra); |
diff --git a/src/lj_obj.h b/src/lj_obj.h index 7d582949..c0ae6892 100644 --- a/src/lj_obj.h +++ b/src/lj_obj.h | |||
| @@ -679,6 +679,11 @@ struct lua_State { | |||
| 679 | #define curr_topL(L) (L->base + curr_proto(L)->framesize) | 679 | #define curr_topL(L) (L->base + curr_proto(L)->framesize) |
| 680 | #define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) | 680 | #define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top) |
| 681 | 681 | ||
| 682 | #if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) | ||
| 683 | LJ_FUNC_NORET void lj_assert_fail(global_State *g, const char *file, int line, | ||
| 684 | const char *func, const char *fmt, ...); | ||
| 685 | #endif | ||
| 686 | |||
| 682 | /* -- GC object definition and conversions -------------------------------- */ | 687 | /* -- GC object definition and conversions -------------------------------- */ |
| 683 | 688 | ||
| 684 | /* GC header for generic access to common fields of GC objects. */ | 689 | /* GC header for generic access to common fields of GC objects. */ |
| @@ -732,10 +737,6 @@ typedef union GCobj { | |||
| 732 | 737 | ||
| 733 | /* -- TValue getters/setters ---------------------------------------------- */ | 738 | /* -- TValue getters/setters ---------------------------------------------- */ |
| 734 | 739 | ||
| 735 | #ifdef LUA_USE_ASSERT | ||
| 736 | #include "lj_gc.h" | ||
| 737 | #endif | ||
| 738 | |||
| 739 | /* Macros to test types. */ | 740 | /* Macros to test types. */ |
| 740 | #if LJ_GC64 | 741 | #if LJ_GC64 |
| 741 | #define itype(o) ((uint32_t)((o)->it64 >> 47)) | 742 | #define itype(o) ((uint32_t)((o)->it64 >> 47)) |
| @@ -856,9 +857,19 @@ static LJ_AINLINE void setlightudV(TValue *o, void *p) | |||
| 856 | #define setcont(o, f) setlightudV((o), contptr(f)) | 857 | #define setcont(o, f) setlightudV((o), contptr(f)) |
| 857 | #endif | 858 | #endif |
| 858 | 859 | ||
| 859 | #define tvchecklive(L, o) \ | 860 | static LJ_AINLINE void checklivetv(lua_State *L, TValue *o, const char *msg) |
| 860 | UNUSED(L), lua_assert(!tvisgcv(o) || \ | 861 | { |
| 861 | ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o)))) | 862 | UNUSED(L); UNUSED(o); UNUSED(msg); |
| 863 | #if LUA_USE_ASSERT | ||
| 864 | if (tvisgcv(o)) { | ||
| 865 | lj_assertL(~itype(o) == gcval(o)->gch.gct, | ||
| 866 | "mismatch of TValue type %d vs GC type %d", | ||
| 867 | ~itype(o), gcval(o)->gch.gct); | ||
| 868 | /* Copy of isdead check from lj_gc.h to avoid circular include. */ | ||
| 869 | lj_assertL(!(gcval(o)->gch.marked & (G(L)->gc.currentwhite ^ 3) & 3), msg); | ||
| 870 | } | ||
| 871 | #endif | ||
| 872 | } | ||
| 862 | 873 | ||
| 863 | static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype) | 874 | static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype) |
| 864 | { | 875 | { |
| @@ -871,7 +882,8 @@ static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype) | |||
| 871 | 882 | ||
| 872 | static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it) | 883 | static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it) |
| 873 | { | 884 | { |
| 874 | setgcVraw(o, v, it); tvchecklive(L, o); | 885 | setgcVraw(o, v, it); |
| 886 | checklivetv(L, o, "store to dead GC object"); | ||
| 875 | } | 887 | } |
| 876 | 888 | ||
| 877 | #define define_setV(name, type, tag) \ | 889 | #define define_setV(name, type, tag) \ |
| @@ -918,7 +930,8 @@ static LJ_AINLINE void setint64V(TValue *o, int64_t i) | |||
| 918 | /* Copy tagged values. */ | 930 | /* Copy tagged values. */ |
| 919 | static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) | 931 | static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2) |
| 920 | { | 932 | { |
| 921 | *o1 = *o2; tvchecklive(L, o1); | 933 | *o1 = *o2; |
| 934 | checklivetv(L, o1, "copy of dead GC object"); | ||
| 922 | } | 935 | } |
| 923 | 936 | ||
| 924 | /* -- Number to integer conversion ---------------------------------------- */ | 937 | /* -- Number to integer conversion ---------------------------------------- */ |
diff --git a/src/lj_opt_fold.c b/src/lj_opt_fold.c index 42c57c9b..96f272b8 100644 --- a/src/lj_opt_fold.c +++ b/src/lj_opt_fold.c | |||
| @@ -282,7 +282,7 @@ static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op) | |||
| 282 | case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; | 282 | case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break; |
| 283 | case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; | 283 | case IR_MIN: k1 = k1 < k2 ? k1 : k2; break; |
| 284 | case IR_MAX: k1 = k1 > k2 ? k1 : k2; break; | 284 | case IR_MAX: k1 = k1 > k2 ? k1 : k2; break; |
| 285 | default: lua_assert(0); break; | 285 | default: lj_assertX(0, "bad IR op %d", op); break; |
| 286 | } | 286 | } |
| 287 | return k1; | 287 | return k1; |
| 288 | } | 288 | } |
| @@ -354,7 +354,7 @@ LJFOLDF(kfold_intcomp) | |||
| 354 | case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); | 354 | case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b); |
| 355 | case IR_ABC: | 355 | case IR_ABC: |
| 356 | case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); | 356 | case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b); |
| 357 | default: lua_assert(0); return FAILFOLD; | 357 | default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD; |
| 358 | } | 358 | } |
| 359 | } | 359 | } |
| 360 | 360 | ||
| @@ -368,10 +368,12 @@ LJFOLDF(kfold_intcomp0) | |||
| 368 | 368 | ||
| 369 | /* -- Constant folding for 64 bit integers -------------------------------- */ | 369 | /* -- Constant folding for 64 bit integers -------------------------------- */ |
| 370 | 370 | ||
| 371 | static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) | 371 | static uint64_t kfold_int64arith(jit_State *J, uint64_t k1, uint64_t k2, |
| 372 | IROp op) | ||
| 372 | { | 373 | { |
| 373 | switch (op) { | 374 | UNUSED(J); |
| 374 | #if LJ_HASFFI | 375 | #if LJ_HASFFI |
| 376 | switch (op) { | ||
| 375 | case IR_ADD: k1 += k2; break; | 377 | case IR_ADD: k1 += k2; break; |
| 376 | case IR_SUB: k1 -= k2; break; | 378 | case IR_SUB: k1 -= k2; break; |
| 377 | case IR_MUL: k1 *= k2; break; | 379 | case IR_MUL: k1 *= k2; break; |
| @@ -383,9 +385,12 @@ static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op) | |||
| 383 | case IR_BSAR: k1 >>= (k2 & 63); break; | 385 | case IR_BSAR: k1 >>= (k2 & 63); break; |
| 384 | case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 63)); break; | 386 | case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 63)); break; |
| 385 | case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 63)); break; | 387 | case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 63)); break; |
| 386 | #endif | 388 | default: lj_assertJ(0, "bad IR op %d", op); break; |
| 387 | default: UNUSED(k2); lua_assert(0); break; | ||
| 388 | } | 389 | } |
| 390 | #else | ||
| 391 | UNUSED(k2); UNUSED(op); | ||
| 392 | lj_assertJ(0, "FFI IR op without FFI"); | ||
| 393 | #endif | ||
| 389 | return k1; | 394 | return k1; |
| 390 | } | 395 | } |
| 391 | 396 | ||
| @@ -397,7 +402,7 @@ LJFOLD(BOR KINT64 KINT64) | |||
| 397 | LJFOLD(BXOR KINT64 KINT64) | 402 | LJFOLD(BXOR KINT64 KINT64) |
| 398 | LJFOLDF(kfold_int64arith) | 403 | LJFOLDF(kfold_int64arith) |
| 399 | { | 404 | { |
| 400 | return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64, | 405 | return INT64FOLD(kfold_int64arith(J, ir_k64(fleft)->u64, |
| 401 | ir_k64(fright)->u64, (IROp)fins->o)); | 406 | ir_k64(fright)->u64, (IROp)fins->o)); |
| 402 | } | 407 | } |
| 403 | 408 | ||
| @@ -419,7 +424,7 @@ LJFOLDF(kfold_int64arith2) | |||
| 419 | } | 424 | } |
| 420 | return INT64FOLD(k1); | 425 | return INT64FOLD(k1); |
| 421 | #else | 426 | #else |
| 422 | UNUSED(J); lua_assert(0); return FAILFOLD; | 427 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 423 | #endif | 428 | #endif |
| 424 | } | 429 | } |
| 425 | 430 | ||
| @@ -435,7 +440,7 @@ LJFOLDF(kfold_int64shift) | |||
| 435 | int32_t sh = (fright->i & 63); | 440 | int32_t sh = (fright->i & 63); |
| 436 | return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL)); | 441 | return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL)); |
| 437 | #else | 442 | #else |
| 438 | UNUSED(J); lua_assert(0); return FAILFOLD; | 443 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 439 | #endif | 444 | #endif |
| 440 | } | 445 | } |
| 441 | 446 | ||
| @@ -445,7 +450,7 @@ LJFOLDF(kfold_bnot64) | |||
| 445 | #if LJ_HASFFI | 450 | #if LJ_HASFFI |
| 446 | return INT64FOLD(~ir_k64(fleft)->u64); | 451 | return INT64FOLD(~ir_k64(fleft)->u64); |
| 447 | #else | 452 | #else |
| 448 | UNUSED(J); lua_assert(0); return FAILFOLD; | 453 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 449 | #endif | 454 | #endif |
| 450 | } | 455 | } |
| 451 | 456 | ||
| @@ -455,7 +460,7 @@ LJFOLDF(kfold_bswap64) | |||
| 455 | #if LJ_HASFFI | 460 | #if LJ_HASFFI |
| 456 | return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); | 461 | return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64)); |
| 457 | #else | 462 | #else |
| 458 | UNUSED(J); lua_assert(0); return FAILFOLD; | 463 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 459 | #endif | 464 | #endif |
| 460 | } | 465 | } |
| 461 | 466 | ||
| @@ -480,10 +485,10 @@ LJFOLDF(kfold_int64comp) | |||
| 480 | case IR_UGE: return CONDFOLD(a >= b); | 485 | case IR_UGE: return CONDFOLD(a >= b); |
| 481 | case IR_ULE: return CONDFOLD(a <= b); | 486 | case IR_ULE: return CONDFOLD(a <= b); |
| 482 | case IR_UGT: return CONDFOLD(a > b); | 487 | case IR_UGT: return CONDFOLD(a > b); |
| 483 | default: lua_assert(0); return FAILFOLD; | 488 | default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD; |
| 484 | } | 489 | } |
| 485 | #else | 490 | #else |
| 486 | UNUSED(J); lua_assert(0); return FAILFOLD; | 491 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 487 | #endif | 492 | #endif |
| 488 | } | 493 | } |
| 489 | 494 | ||
| @@ -495,7 +500,7 @@ LJFOLDF(kfold_int64comp0) | |||
| 495 | return DROPFOLD; | 500 | return DROPFOLD; |
| 496 | return NEXTFOLD; | 501 | return NEXTFOLD; |
| 497 | #else | 502 | #else |
| 498 | UNUSED(J); lua_assert(0); return FAILFOLD; | 503 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 499 | #endif | 504 | #endif |
| 500 | } | 505 | } |
| 501 | 506 | ||
| @@ -520,7 +525,7 @@ LJFOLD(STRREF KGC KINT) | |||
| 520 | LJFOLDF(kfold_strref) | 525 | LJFOLDF(kfold_strref) |
| 521 | { | 526 | { |
| 522 | GCstr *str = ir_kstr(fleft); | 527 | GCstr *str = ir_kstr(fleft); |
| 523 | lua_assert((MSize)fright->i <= str->len); | 528 | lj_assertJ((MSize)fright->i <= str->len, "bad string ref"); |
| 524 | return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); | 529 | return lj_ir_kkptr(J, (char *)strdata(str) + fright->i); |
| 525 | } | 530 | } |
| 526 | 531 | ||
| @@ -616,8 +621,9 @@ LJFOLDF(bufput_kgc) | |||
| 616 | LJFOLD(BUFSTR any any) | 621 | LJFOLD(BUFSTR any any) |
| 617 | LJFOLDF(bufstr_kfold_cse) | 622 | LJFOLDF(bufstr_kfold_cse) |
| 618 | { | 623 | { |
| 619 | lua_assert(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT || | 624 | lj_assertJ(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT || |
| 620 | fleft->o == IR_CALLL); | 625 | fleft->o == IR_CALLL, |
| 626 | "bad buffer constructor IR op %d", fleft->o); | ||
| 621 | if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { | 627 | if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) { |
| 622 | if (fleft->o == IR_BUFHDR) { /* No put operations? */ | 628 | if (fleft->o == IR_BUFHDR) { /* No put operations? */ |
| 623 | if (!(fleft->op2 & IRBUFHDR_APPEND)) /* Empty buffer? */ | 629 | if (!(fleft->op2 & IRBUFHDR_APPEND)) /* Empty buffer? */ |
| @@ -637,8 +643,9 @@ LJFOLDF(bufstr_kfold_cse) | |||
| 637 | while (ref) { | 643 | while (ref) { |
| 638 | IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1); | 644 | IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1); |
| 639 | while (ira->o == irb->o && ira->op2 == irb->op2) { | 645 | while (ira->o == irb->o && ira->op2 == irb->op2) { |
| 640 | lua_assert(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT || | 646 | lj_assertJ(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT || |
| 641 | ira->o == IR_CALLL || ira->o == IR_CARG); | 647 | ira->o == IR_CALLL || ira->o == IR_CARG, |
| 648 | "bad buffer constructor IR op %d", ira->o); | ||
| 642 | if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND)) | 649 | if (ira->o == IR_BUFHDR && !(ira->op2 & IRBUFHDR_APPEND)) |
| 643 | return ref; /* CSE succeeded. */ | 650 | return ref; /* CSE succeeded. */ |
| 644 | if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab) | 651 | if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab) |
| @@ -697,7 +704,7 @@ LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar) | |||
| 697 | LJFOLDF(bufput_kfold_fmt) | 704 | LJFOLDF(bufput_kfold_fmt) |
| 698 | { | 705 | { |
| 699 | IRIns *irc = IR(fleft->op1); | 706 | IRIns *irc = IR(fleft->op1); |
| 700 | lua_assert(irref_isk(irc->op2)); /* SFormat must be const. */ | 707 | lj_assertJ(irref_isk(irc->op2), "SFormat must be const"); |
| 701 | if (irref_isk(fleft->op2)) { | 708 | if (irref_isk(fleft->op2)) { |
| 702 | SFormat sf = (SFormat)IR(irc->op2)->i; | 709 | SFormat sf = (SFormat)IR(irc->op2)->i; |
| 703 | IRIns *ira = IR(fleft->op2); | 710 | IRIns *ira = IR(fleft->op2); |
| @@ -1216,10 +1223,10 @@ LJFOLDF(simplify_tobit_conv) | |||
| 1216 | { | 1223 | { |
| 1217 | /* Fold even across PHI to avoid expensive num->int conversions in loop. */ | 1224 | /* Fold even across PHI to avoid expensive num->int conversions in loop. */ |
| 1218 | if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { | 1225 | if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) { |
| 1219 | lua_assert(irt_isnum(fleft->t)); | 1226 | lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg"); |
| 1220 | return fleft->op1; | 1227 | return fleft->op1; |
| 1221 | } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { | 1228 | } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) { |
| 1222 | lua_assert(irt_isnum(fleft->t)); | 1229 | lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg"); |
| 1223 | fins->o = IR_CONV; | 1230 | fins->o = IR_CONV; |
| 1224 | fins->op1 = fleft->op1; | 1231 | fins->op1 = fleft->op1; |
| 1225 | fins->op2 = (IRT_INT<<5)|IRT_U32; | 1232 | fins->op2 = (IRT_INT<<5)|IRT_U32; |
| @@ -1259,7 +1266,7 @@ LJFOLDF(simplify_conv_sext) | |||
| 1259 | /* Use scalar evolution analysis results to strength-reduce sign-extension. */ | 1266 | /* Use scalar evolution analysis results to strength-reduce sign-extension. */ |
| 1260 | if (ref == J->scev.idx) { | 1267 | if (ref == J->scev.idx) { |
| 1261 | IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; | 1268 | IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop; |
| 1262 | lua_assert(irt_isint(J->scev.t)); | 1269 | lj_assertJ(irt_isint(J->scev.t), "only int SCEV supported"); |
| 1263 | if (lo && IR(lo)->o == IR_KINT && IR(lo)->i + ofs >= 0) { | 1270 | if (lo && IR(lo)->o == IR_KINT && IR(lo)->i + ofs >= 0) { |
| 1264 | ok_reduce: | 1271 | ok_reduce: |
| 1265 | #if LJ_TARGET_X64 | 1272 | #if LJ_TARGET_X64 |
| @@ -1335,7 +1342,8 @@ LJFOLDF(narrow_convert) | |||
| 1335 | /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */ | 1342 | /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */ |
| 1336 | if (J->chain[IR_LOOP]) | 1343 | if (J->chain[IR_LOOP]) |
| 1337 | return NEXTFOLD; | 1344 | return NEXTFOLD; |
| 1338 | lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT); | 1345 | lj_assertJ(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT, |
| 1346 | "unexpected CONV TOBIT"); | ||
| 1339 | return lj_opt_narrow_convert(J); | 1347 | return lj_opt_narrow_convert(J); |
| 1340 | } | 1348 | } |
| 1341 | 1349 | ||
| @@ -1441,7 +1449,7 @@ LJFOLDF(simplify_intmul_k64) | |||
| 1441 | return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); | 1449 | return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64); |
| 1442 | return NEXTFOLD; | 1450 | return NEXTFOLD; |
| 1443 | #else | 1451 | #else |
| 1444 | UNUSED(J); lua_assert(0); return FAILFOLD; | 1452 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 1445 | #endif | 1453 | #endif |
| 1446 | } | 1454 | } |
| 1447 | 1455 | ||
| @@ -1449,7 +1457,7 @@ LJFOLD(MOD any KINT) | |||
| 1449 | LJFOLDF(simplify_intmod_k) | 1457 | LJFOLDF(simplify_intmod_k) |
| 1450 | { | 1458 | { |
| 1451 | int32_t k = fright->i; | 1459 | int32_t k = fright->i; |
| 1452 | lua_assert(k != 0); | 1460 | lj_assertJ(k != 0, "integer mod 0"); |
| 1453 | if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ | 1461 | if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */ |
| 1454 | fins->o = IR_BAND; | 1462 | fins->o = IR_BAND; |
| 1455 | fins->op2 = lj_ir_kint(J, k-1); | 1463 | fins->op2 = lj_ir_kint(J, k-1); |
| @@ -1699,7 +1707,8 @@ LJFOLDF(simplify_shiftk_andk) | |||
| 1699 | fins->ot = IRTI(IR_BAND); | 1707 | fins->ot = IRTI(IR_BAND); |
| 1700 | return RETRYFOLD; | 1708 | return RETRYFOLD; |
| 1701 | } else if (irk->o == IR_KINT64) { | 1709 | } else if (irk->o == IR_KINT64) { |
| 1702 | uint64_t k = kfold_int64arith(ir_k64(irk)->u64, fright->i, (IROp)fins->o); | 1710 | uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, fright->i, |
| 1711 | (IROp)fins->o); | ||
| 1703 | IROpT ot = fleft->ot; | 1712 | IROpT ot = fleft->ot; |
| 1704 | fins->op1 = fleft->op1; | 1713 | fins->op1 = fleft->op1; |
| 1705 | fins->op1 = (IRRef1)lj_opt_fold(J); | 1714 | fins->op1 = (IRRef1)lj_opt_fold(J); |
| @@ -1747,8 +1756,8 @@ LJFOLDF(simplify_andor_k64) | |||
| 1747 | IRIns *irk = IR(fleft->op2); | 1756 | IRIns *irk = IR(fleft->op2); |
| 1748 | PHIBARRIER(fleft); | 1757 | PHIBARRIER(fleft); |
| 1749 | if (irk->o == IR_KINT64) { | 1758 | if (irk->o == IR_KINT64) { |
| 1750 | uint64_t k = kfold_int64arith(ir_k64(irk)->u64, | 1759 | uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64, |
| 1751 | ir_k64(fright)->u64, (IROp)fins->o); | 1760 | (IROp)fins->o); |
| 1752 | /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */ | 1761 | /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */ |
| 1753 | /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */ | 1762 | /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */ |
| 1754 | if (k == (fins->o == IR_BAND ? (uint64_t)0 : ~(uint64_t)0)) { | 1763 | if (k == (fins->o == IR_BAND ? (uint64_t)0 : ~(uint64_t)0)) { |
| @@ -1758,7 +1767,7 @@ LJFOLDF(simplify_andor_k64) | |||
| 1758 | } | 1767 | } |
| 1759 | return NEXTFOLD; | 1768 | return NEXTFOLD; |
| 1760 | #else | 1769 | #else |
| 1761 | UNUSED(J); lua_assert(0); return FAILFOLD; | 1770 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 1762 | #endif | 1771 | #endif |
| 1763 | } | 1772 | } |
| 1764 | 1773 | ||
| @@ -1794,8 +1803,8 @@ LJFOLDF(reassoc_intarith_k64) | |||
| 1794 | #if LJ_HASFFI | 1803 | #if LJ_HASFFI |
| 1795 | IRIns *irk = IR(fleft->op2); | 1804 | IRIns *irk = IR(fleft->op2); |
| 1796 | if (irk->o == IR_KINT64) { | 1805 | if (irk->o == IR_KINT64) { |
| 1797 | uint64_t k = kfold_int64arith(ir_k64(irk)->u64, | 1806 | uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64, |
| 1798 | ir_k64(fright)->u64, (IROp)fins->o); | 1807 | (IROp)fins->o); |
| 1799 | PHIBARRIER(fleft); | 1808 | PHIBARRIER(fleft); |
| 1800 | fins->op1 = fleft->op1; | 1809 | fins->op1 = fleft->op1; |
| 1801 | fins->op2 = (IRRef1)lj_ir_kint64(J, k); | 1810 | fins->op2 = (IRRef1)lj_ir_kint64(J, k); |
| @@ -1803,7 +1812,7 @@ LJFOLDF(reassoc_intarith_k64) | |||
| 1803 | } | 1812 | } |
| 1804 | return NEXTFOLD; | 1813 | return NEXTFOLD; |
| 1805 | #else | 1814 | #else |
| 1806 | UNUSED(J); lua_assert(0); return FAILFOLD; | 1815 | UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD; |
| 1807 | #endif | 1816 | #endif |
| 1808 | } | 1817 | } |
| 1809 | 1818 | ||
| @@ -2058,7 +2067,7 @@ LJFOLDF(merge_eqne_snew_kgc) | |||
| 2058 | { | 2067 | { |
| 2059 | GCstr *kstr = ir_kstr(fright); | 2068 | GCstr *kstr = ir_kstr(fright); |
| 2060 | int32_t len = (int32_t)kstr->len; | 2069 | int32_t len = (int32_t)kstr->len; |
| 2061 | lua_assert(irt_isstr(fins->t)); | 2070 | lj_assertJ(irt_isstr(fins->t), "bad equality IR type"); |
| 2062 | 2071 | ||
| 2063 | #if LJ_TARGET_UNALIGNED | 2072 | #if LJ_TARGET_UNALIGNED |
| 2064 | #define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ | 2073 | #define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */ |
| @@ -2122,7 +2131,7 @@ LJFOLD(HLOAD KKPTR) | |||
| 2122 | LJFOLDF(kfold_hload_kkptr) | 2131 | LJFOLDF(kfold_hload_kkptr) |
| 2123 | { | 2132 | { |
| 2124 | UNUSED(J); | 2133 | UNUSED(J); |
| 2125 | lua_assert(ir_kptr(fleft) == niltvg(J2G(J))); | 2134 | lj_assertJ(ir_kptr(fleft) == niltvg(J2G(J)), "expected niltv"); |
| 2126 | return TREF_NIL; | 2135 | return TREF_NIL; |
| 2127 | } | 2136 | } |
| 2128 | 2137 | ||
| @@ -2333,7 +2342,7 @@ LJFOLDF(fwd_sload) | |||
| 2333 | TRef tr = lj_opt_cse(J); | 2342 | TRef tr = lj_opt_cse(J); |
| 2334 | return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; | 2343 | return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr; |
| 2335 | } else { | 2344 | } else { |
| 2336 | lua_assert(J->slot[fins->op1] != 0); | 2345 | lj_assertJ(J->slot[fins->op1] != 0, "uninitialized slot accessed"); |
| 2337 | return J->slot[fins->op1]; | 2346 | return J->slot[fins->op1]; |
| 2338 | } | 2347 | } |
| 2339 | } | 2348 | } |
| @@ -2448,8 +2457,9 @@ TRef LJ_FASTCALL lj_opt_fold(jit_State *J) | |||
| 2448 | IRRef ref; | 2457 | IRRef ref; |
| 2449 | 2458 | ||
| 2450 | if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { | 2459 | if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) { |
| 2451 | lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | | 2460 | lj_assertJ(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) | |
| 2452 | JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT); | 2461 | JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT, |
| 2462 | "bad JIT_F_OPT_DEFAULT"); | ||
| 2453 | /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ | 2463 | /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */ |
| 2454 | if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) | 2464 | if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N) |
| 2455 | return lj_opt_cse(J); | 2465 | return lj_opt_cse(J); |
| @@ -2511,7 +2521,7 @@ retry: | |||
| 2511 | return lj_ir_kint(J, fins->i); | 2521 | return lj_ir_kint(J, fins->i); |
| 2512 | if (ref == FAILFOLD) | 2522 | if (ref == FAILFOLD) |
| 2513 | lj_trace_err(J, LJ_TRERR_GFAIL); | 2523 | lj_trace_err(J, LJ_TRERR_GFAIL); |
| 2514 | lua_assert(ref == DROPFOLD); | 2524 | lj_assertJ(ref == DROPFOLD, "bad fold result"); |
| 2515 | return REF_DROP; | 2525 | return REF_DROP; |
| 2516 | } | 2526 | } |
| 2517 | 2527 | ||
diff --git a/src/lj_opt_loop.c b/src/lj_opt_loop.c index 2eacb7d7..0e5189cd 100644 --- a/src/lj_opt_loop.c +++ b/src/lj_opt_loop.c | |||
| @@ -299,7 +299,8 @@ static void loop_unroll(LoopState *lps) | |||
| 299 | loopmap = &J->cur.snapmap[loopsnap->mapofs]; | 299 | loopmap = &J->cur.snapmap[loopsnap->mapofs]; |
| 300 | /* The PC of snapshot #0 and the loop snapshot must match. */ | 300 | /* The PC of snapshot #0 and the loop snapshot must match. */ |
| 301 | psentinel = &loopmap[loopsnap->nent]; | 301 | psentinel = &loopmap[loopsnap->nent]; |
| 302 | lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]); | 302 | lj_assertJ(*psentinel == J->cur.snapmap[J->cur.snap[0].nent], |
| 303 | "mismatched PC for loop snapshot"); | ||
| 303 | *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ | 304 | *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */ |
| 304 | 305 | ||
| 305 | /* Start substitution with snapshot #1 (#0 is empty for root traces). */ | 306 | /* Start substitution with snapshot #1 (#0 is empty for root traces). */ |
| @@ -372,7 +373,7 @@ static void loop_unroll(LoopState *lps) | |||
| 372 | } | 373 | } |
| 373 | if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ | 374 | if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */ |
| 374 | J->cur.nsnapmap = (uint32_t)J->cur.snap[--J->cur.nsnap].mapofs; | 375 | J->cur.nsnapmap = (uint32_t)J->cur.snap[--J->cur.nsnap].mapofs; |
| 375 | lua_assert(J->cur.nsnapmap <= J->sizesnapmap); | 376 | lj_assertJ(J->cur.nsnapmap <= J->sizesnapmap, "bad snapshot map index"); |
| 376 | *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ | 377 | *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */ |
| 377 | 378 | ||
| 378 | loop_emit_phi(J, subst, phi, nphi, onsnap); | 379 | loop_emit_phi(J, subst, phi, nphi, onsnap); |
diff --git a/src/lj_opt_mem.c b/src/lj_opt_mem.c index 4c2c05fe..80517f16 100644 --- a/src/lj_opt_mem.c +++ b/src/lj_opt_mem.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include "lj_jit.h" | 18 | #include "lj_jit.h" |
| 19 | #include "lj_iropt.h" | 19 | #include "lj_iropt.h" |
| 20 | #include "lj_ircall.h" | 20 | #include "lj_ircall.h" |
| 21 | #include "lj_dispatch.h" | ||
| 21 | 22 | ||
| 22 | /* Some local macros to save typing. Undef'd at the end. */ | 23 | /* Some local macros to save typing. Undef'd at the end. */ |
| 23 | #define IR(ref) (&J->cur.ir[(ref)]) | 24 | #define IR(ref) (&J->cur.ir[(ref)]) |
| @@ -56,8 +57,8 @@ static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb) | |||
| 56 | { | 57 | { |
| 57 | IRIns *taba = IR(ta), *tabb = IR(tb); | 58 | IRIns *taba = IR(ta), *tabb = IR(tb); |
| 58 | int newa, newb; | 59 | int newa, newb; |
| 59 | lua_assert(ta != tb); | 60 | lj_assertJ(ta != tb, "bad usage"); |
| 60 | lua_assert(irt_istab(taba->t) && irt_istab(tabb->t)); | 61 | lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage"); |
| 61 | /* Disambiguate new allocations. */ | 62 | /* Disambiguate new allocations. */ |
| 62 | newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); | 63 | newa = (taba->o == IR_TNEW || taba->o == IR_TDUP); |
| 63 | newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); | 64 | newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP); |
| @@ -99,7 +100,7 @@ static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb) | |||
| 99 | /* Disambiguate array references based on index arithmetic. */ | 100 | /* Disambiguate array references based on index arithmetic. */ |
| 100 | int32_t ofsa = 0, ofsb = 0; | 101 | int32_t ofsa = 0, ofsb = 0; |
| 101 | IRRef basea = ka, baseb = kb; | 102 | IRRef basea = ka, baseb = kb; |
| 102 | lua_assert(refb->o == IR_AREF); | 103 | lj_assertJ(refb->o == IR_AREF, "expected AREF"); |
| 103 | /* Gather base and offset from t[base] or t[base+-ofs]. */ | 104 | /* Gather base and offset from t[base] or t[base+-ofs]. */ |
| 104 | if (keya->o == IR_ADD && irref_isk(keya->op2)) { | 105 | if (keya->o == IR_ADD && irref_isk(keya->op2)) { |
| 105 | basea = keya->op1; | 106 | basea = keya->op1; |
| @@ -117,8 +118,9 @@ static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb) | |||
| 117 | return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ | 118 | return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */ |
| 118 | } else { | 119 | } else { |
| 119 | /* Disambiguate hash references based on the type of their keys. */ | 120 | /* Disambiguate hash references based on the type of their keys. */ |
| 120 | lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && | 121 | lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) && |
| 121 | (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF)); | 122 | (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF), |
| 123 | "bad xREF IR op %d or %d", refa->o, refb->o); | ||
| 122 | if (!irt_sametype(keya->t, keyb->t)) | 124 | if (!irt_sametype(keya->t, keyb->t)) |
| 123 | return ALIAS_NO; /* Different key types. */ | 125 | return ALIAS_NO; /* Different key types. */ |
| 124 | } | 126 | } |
| @@ -192,7 +194,8 @@ static TRef fwd_ahload(jit_State *J, IRRef xref) | |||
| 192 | if (key->o == IR_KSLOT) key = IR(key->op1); | 194 | if (key->o == IR_KSLOT) key = IR(key->op1); |
| 193 | lj_ir_kvalue(J->L, &keyv, key); | 195 | lj_ir_kvalue(J->L, &keyv, key); |
| 194 | tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); | 196 | tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv); |
| 195 | lua_assert(itype2irt(tv) == irt_type(fins->t)); | 197 | lj_assertJ(itype2irt(tv) == irt_type(fins->t), |
| 198 | "mismatched type in constant table"); | ||
| 196 | if (irt_isnum(fins->t)) | 199 | if (irt_isnum(fins->t)) |
| 197 | return lj_ir_knum_u64(J, tv->u64); | 200 | return lj_ir_knum_u64(J, tv->u64); |
| 198 | else if (LJ_DUALNUM && irt_isint(fins->t)) | 201 | else if (LJ_DUALNUM && irt_isint(fins->t)) |
diff --git a/src/lj_opt_narrow.c b/src/lj_opt_narrow.c index 94cce582..a381d8d8 100644 --- a/src/lj_opt_narrow.c +++ b/src/lj_opt_narrow.c | |||
| @@ -372,17 +372,17 @@ static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc) | |||
| 372 | } else if (op == NARROW_CONV) { | 372 | } else if (op == NARROW_CONV) { |
| 373 | *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */ | 373 | *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */ |
| 374 | } else if (op == NARROW_SEXT) { | 374 | } else if (op == NARROW_SEXT) { |
| 375 | lua_assert(sp >= nc->stack+1); | 375 | lj_assertJ(sp >= nc->stack+1, "stack underflow"); |
| 376 | sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], | 376 | sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1], |
| 377 | (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); | 377 | (IRT_I64<<5)|IRT_INT|IRCONV_SEXT); |
| 378 | } else if (op == NARROW_INT) { | 378 | } else if (op == NARROW_INT) { |
| 379 | lua_assert(next < last); | 379 | lj_assertJ(next < last, "missing arg to NARROW_INT"); |
| 380 | *sp++ = nc->t == IRT_I64 ? | 380 | *sp++ = nc->t == IRT_I64 ? |
| 381 | lj_ir_kint64(J, (int64_t)(int32_t)*next++) : | 381 | lj_ir_kint64(J, (int64_t)(int32_t)*next++) : |
| 382 | lj_ir_kint(J, *next++); | 382 | lj_ir_kint(J, *next++); |
| 383 | } else { /* Regular IROpT. Pops two operands and pushes one result. */ | 383 | } else { /* Regular IROpT. Pops two operands and pushes one result. */ |
| 384 | IRRef mode = nc->mode; | 384 | IRRef mode = nc->mode; |
| 385 | lua_assert(sp >= nc->stack+2); | 385 | lj_assertJ(sp >= nc->stack+2, "stack underflow"); |
| 386 | sp--; | 386 | sp--; |
| 387 | /* Omit some overflow checks for array indexing. See comments above. */ | 387 | /* Omit some overflow checks for array indexing. See comments above. */ |
| 388 | if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { | 388 | if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) { |
| @@ -398,7 +398,7 @@ static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc) | |||
| 398 | narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); | 398 | narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode); |
| 399 | } | 399 | } |
| 400 | } | 400 | } |
| 401 | lua_assert(sp == nc->stack+1); | 401 | lj_assertJ(sp == nc->stack+1, "stack misalignment"); |
| 402 | return nc->stack[0]; | 402 | return nc->stack[0]; |
| 403 | } | 403 | } |
| 404 | 404 | ||
| @@ -452,7 +452,7 @@ static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode) | |||
| 452 | TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) | 452 | TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr) |
| 453 | { | 453 | { |
| 454 | IRIns *ir; | 454 | IRIns *ir; |
| 455 | lua_assert(tref_isnumber(tr)); | 455 | lj_assertJ(tref_isnumber(tr), "expected number type"); |
| 456 | if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ | 456 | if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */ |
| 457 | return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); | 457 | return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX); |
| 458 | /* Omit some overflow checks for array indexing. See comments above. */ | 458 | /* Omit some overflow checks for array indexing. See comments above. */ |
| @@ -499,7 +499,7 @@ TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr) | |||
| 499 | /* Narrow C array index (overflow undefined). */ | 499 | /* Narrow C array index (overflow undefined). */ |
| 500 | TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) | 500 | TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr) |
| 501 | { | 501 | { |
| 502 | lua_assert(tref_isnumber(tr)); | 502 | lj_assertJ(tref_isnumber(tr), "expected number type"); |
| 503 | if (tref_isnum(tr)) | 503 | if (tref_isnum(tr)) |
| 504 | return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY); | 504 | return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY); |
| 505 | /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ | 505 | /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */ |
| @@ -627,9 +627,10 @@ static int narrow_forl(jit_State *J, cTValue *o) | |||
| 627 | /* Narrow the FORL index type by looking at the runtime values. */ | 627 | /* Narrow the FORL index type by looking at the runtime values. */ |
| 628 | IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) | 628 | IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv) |
| 629 | { | 629 | { |
| 630 | lua_assert(tvisnumber(&tv[FORL_IDX]) && | 630 | lj_assertJ(tvisnumber(&tv[FORL_IDX]) && |
| 631 | tvisnumber(&tv[FORL_STOP]) && | 631 | tvisnumber(&tv[FORL_STOP]) && |
| 632 | tvisnumber(&tv[FORL_STEP])); | 632 | tvisnumber(&tv[FORL_STEP]), |
| 633 | "expected number types"); | ||
| 633 | /* Narrow only if the runtime values of start/stop/step are all integers. */ | 634 | /* Narrow only if the runtime values of start/stop/step are all integers. */ |
| 634 | if (narrow_forl(J, &tv[FORL_IDX]) && | 635 | if (narrow_forl(J, &tv[FORL_IDX]) && |
| 635 | narrow_forl(J, &tv[FORL_STOP]) && | 636 | narrow_forl(J, &tv[FORL_STOP]) && |
diff --git a/src/lj_opt_split.c b/src/lj_opt_split.c index 7925cfa5..798a02cc 100644 --- a/src/lj_opt_split.c +++ b/src/lj_opt_split.c | |||
| @@ -235,7 +235,7 @@ static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst, | |||
| 235 | return split_emit(J, IRTI(IR_BOR), t1, t2); | 235 | return split_emit(J, IRTI(IR_BOR), t1, t2); |
| 236 | } else { | 236 | } else { |
| 237 | IRRef t1 = ir->prev, t2; | 237 | IRRef t1 = ir->prev, t2; |
| 238 | lua_assert(op == IR_BSHR || op == IR_BSAR); | 238 | lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage"); |
| 239 | nir->o = IR_BSHR; | 239 | nir->o = IR_BSHR; |
| 240 | t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31))); | 240 | t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31))); |
| 241 | ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2); | 241 | ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2); |
| @@ -250,7 +250,7 @@ static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst, | |||
| 250 | ir->prev = lj_ir_kint(J, 0); | 250 | ir->prev = lj_ir_kint(J, 0); |
| 251 | return lo; | 251 | return lo; |
| 252 | } else { | 252 | } else { |
| 253 | lua_assert(op == IR_BSHR || op == IR_BSAR); | 253 | lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage"); |
| 254 | if (k == 32) { | 254 | if (k == 32) { |
| 255 | J->cur.nins--; | 255 | J->cur.nins--; |
| 256 | ir->prev = hi; | 256 | ir->prev = hi; |
| @@ -429,7 +429,7 @@ static void split_ir(jit_State *J) | |||
| 429 | hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); | 429 | hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref); |
| 430 | break; | 430 | break; |
| 431 | case IR_FLOAD: | 431 | case IR_FLOAD: |
| 432 | lua_assert(ir->op1 == REF_NIL); | 432 | lj_assertJ(ir->op1 == REF_NIL, "expected FLOAD from GG_State"); |
| 433 | hi = lj_ir_kint(J, *(int32_t*)((char*)J2GG(J) + ir->op2 + LJ_LE*4)); | 433 | hi = lj_ir_kint(J, *(int32_t*)((char*)J2GG(J) + ir->op2 + LJ_LE*4)); |
| 434 | nir->op2 += LJ_BE*4; | 434 | nir->op2 += LJ_BE*4; |
| 435 | break; | 435 | break; |
| @@ -465,8 +465,9 @@ static void split_ir(jit_State *J) | |||
| 465 | break; | 465 | break; |
| 466 | } | 466 | } |
| 467 | #endif | 467 | #endif |
| 468 | lua_assert(st == IRT_INT || | 468 | lj_assertJ(st == IRT_INT || |
| 469 | (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT))); | 469 | (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)), |
| 470 | "bad source type for CONV"); | ||
| 470 | nir->o = IR_CALLN; | 471 | nir->o = IR_CALLN; |
| 471 | #if LJ_32 && LJ_HASFFI | 472 | #if LJ_32 && LJ_HASFFI |
| 472 | nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : | 473 | nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d : |
| @@ -496,7 +497,8 @@ static void split_ir(jit_State *J) | |||
| 496 | hi = nir->op2; | 497 | hi = nir->op2; |
| 497 | break; | 498 | break; |
| 498 | default: | 499 | default: |
| 499 | lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX); | 500 | lj_assertJ(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX, |
| 501 | "bad IR op %d", ir->o); | ||
| 500 | hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), | 502 | hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), |
| 501 | hisubst[ir->op1], hisubst[ir->op2]); | 503 | hisubst[ir->op1], hisubst[ir->op2]); |
| 502 | break; | 504 | break; |
| @@ -553,7 +555,7 @@ static void split_ir(jit_State *J) | |||
| 553 | hi = split_bitshift(J, hisubst, oir, nir, ir); | 555 | hi = split_bitshift(J, hisubst, oir, nir, ir); |
| 554 | break; | 556 | break; |
| 555 | case IR_FLOAD: | 557 | case IR_FLOAD: |
| 556 | lua_assert(ir->op2 == IRFL_CDATA_INT64); | 558 | lj_assertJ(ir->op2 == IRFL_CDATA_INT64, "only INT64 supported"); |
| 557 | hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4); | 559 | hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4); |
| 558 | #if LJ_BE | 560 | #if LJ_BE |
| 559 | ir->prev = hi; hi = nref; | 561 | ir->prev = hi; hi = nref; |
| @@ -619,7 +621,7 @@ static void split_ir(jit_State *J) | |||
| 619 | hi = nir->op2; | 621 | hi = nir->op2; |
| 620 | break; | 622 | break; |
| 621 | default: | 623 | default: |
| 622 | lua_assert(ir->o <= IR_NE); /* Comparisons. */ | 624 | lj_assertJ(ir->o <= IR_NE, "bad IR op %d", ir->o); /* Comparisons. */ |
| 623 | split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); | 625 | split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]); |
| 624 | break; | 626 | break; |
| 625 | } | 627 | } |
| @@ -697,7 +699,7 @@ static void split_ir(jit_State *J) | |||
| 697 | #if LJ_SOFTFP | 699 | #if LJ_SOFTFP |
| 698 | if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { | 700 | if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) { |
| 699 | if (irt_isguard(ir->t)) { | 701 | if (irt_isguard(ir->t)) { |
| 700 | lua_assert(st == IRT_NUM && irt_isint(ir->t)); | 702 | lj_assertJ(st == IRT_NUM && irt_isint(ir->t), "bad CONV types"); |
| 701 | J->cur.nins--; | 703 | J->cur.nins--; |
| 702 | ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); | 704 | ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1); |
| 703 | } else { | 705 | } else { |
| @@ -828,7 +830,7 @@ void lj_opt_split(jit_State *J) | |||
| 828 | if (!J->needsplit) | 830 | if (!J->needsplit) |
| 829 | J->needsplit = split_needsplit(J); | 831 | J->needsplit = split_needsplit(J); |
| 830 | #else | 832 | #else |
| 831 | lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */ | 833 | lj_assertJ(J->needsplit >= split_needsplit(J), "bad SPLIT state"); |
| 832 | #endif | 834 | #endif |
| 833 | if (J->needsplit) { | 835 | if (J->needsplit) { |
| 834 | int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); | 836 | int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit); |
diff --git a/src/lj_parse.c b/src/lj_parse.c index 33955ab8..3ae05446 100644 --- a/src/lj_parse.c +++ b/src/lj_parse.c | |||
| @@ -163,6 +163,12 @@ LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD); | |||
| 163 | LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); | 163 | LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); |
| 164 | LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); | 164 | LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); |
| 165 | 165 | ||
| 166 | #ifdef LUA_USE_ASSERT | ||
| 167 | #define lj_assertFS(c, ...) (lj_assertG_(G(fs->L), (c), __VA_ARGS__)) | ||
| 168 | #else | ||
| 169 | #define lj_assertFS(c, ...) ((void)fs) | ||
| 170 | #endif | ||
| 171 | |||
| 166 | /* -- Error handling ------------------------------------------------------ */ | 172 | /* -- Error handling ------------------------------------------------------ */ |
| 167 | 173 | ||
| 168 | LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) | 174 | LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) |
| @@ -200,7 +206,7 @@ static BCReg const_num(FuncState *fs, ExpDesc *e) | |||
| 200 | { | 206 | { |
| 201 | lua_State *L = fs->L; | 207 | lua_State *L = fs->L; |
| 202 | TValue *o; | 208 | TValue *o; |
| 203 | lua_assert(expr_isnumk(e)); | 209 | lj_assertFS(expr_isnumk(e), "bad usage"); |
| 204 | o = lj_tab_set(L, fs->kt, &e->u.nval); | 210 | o = lj_tab_set(L, fs->kt, &e->u.nval); |
| 205 | if (tvhaskslot(o)) | 211 | if (tvhaskslot(o)) |
| 206 | return tvkslot(o); | 212 | return tvkslot(o); |
| @@ -225,7 +231,7 @@ static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype) | |||
| 225 | /* Add a string constant. */ | 231 | /* Add a string constant. */ |
| 226 | static BCReg const_str(FuncState *fs, ExpDesc *e) | 232 | static BCReg const_str(FuncState *fs, ExpDesc *e) |
| 227 | { | 233 | { |
| 228 | lua_assert(expr_isstrk(e) || e->k == VGLOBAL); | 234 | lj_assertFS(expr_isstrk(e) || e->k == VGLOBAL, "bad usage"); |
| 229 | return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); | 235 | return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR); |
| 230 | } | 236 | } |
| 231 | 237 | ||
| @@ -313,7 +319,7 @@ static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest) | |||
| 313 | { | 319 | { |
| 314 | BCIns *jmp = &fs->bcbase[pc].ins; | 320 | BCIns *jmp = &fs->bcbase[pc].ins; |
| 315 | BCPos offset = dest-(pc+1)+BCBIAS_J; | 321 | BCPos offset = dest-(pc+1)+BCBIAS_J; |
| 316 | lua_assert(dest != NO_JMP); | 322 | lj_assertFS(dest != NO_JMP, "uninitialized jump target"); |
| 317 | if (offset > BCMAX_D) | 323 | if (offset > BCMAX_D) |
| 318 | err_syntax(fs->ls, LJ_ERR_XJUMP); | 324 | err_syntax(fs->ls, LJ_ERR_XJUMP); |
| 319 | setbc_d(jmp, offset); | 325 | setbc_d(jmp, offset); |
| @@ -362,7 +368,7 @@ static void jmp_patch(FuncState *fs, BCPos list, BCPos target) | |||
| 362 | if (target == fs->pc) { | 368 | if (target == fs->pc) { |
| 363 | jmp_tohere(fs, list); | 369 | jmp_tohere(fs, list); |
| 364 | } else { | 370 | } else { |
| 365 | lua_assert(target < fs->pc); | 371 | lj_assertFS(target < fs->pc, "bad jump target"); |
| 366 | jmp_patchval(fs, list, target, NO_REG, target); | 372 | jmp_patchval(fs, list, target, NO_REG, target); |
| 367 | } | 373 | } |
| 368 | } | 374 | } |
| @@ -392,7 +398,7 @@ static void bcreg_free(FuncState *fs, BCReg reg) | |||
| 392 | { | 398 | { |
| 393 | if (reg >= fs->nactvar) { | 399 | if (reg >= fs->nactvar) { |
| 394 | fs->freereg--; | 400 | fs->freereg--; |
| 395 | lua_assert(reg == fs->freereg); | 401 | lj_assertFS(reg == fs->freereg, "bad regfree"); |
| 396 | } | 402 | } |
| 397 | } | 403 | } |
| 398 | 404 | ||
| @@ -542,7 +548,7 @@ static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg) | |||
| 542 | } else if (e->k <= VKTRUE) { | 548 | } else if (e->k <= VKTRUE) { |
| 543 | ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); | 549 | ins = BCINS_AD(BC_KPRI, reg, const_pri(e)); |
| 544 | } else { | 550 | } else { |
| 545 | lua_assert(e->k == VVOID || e->k == VJMP); | 551 | lj_assertFS(e->k == VVOID || e->k == VJMP, "bad expr type %d", e->k); |
| 546 | return; | 552 | return; |
| 547 | } | 553 | } |
| 548 | bcemit_INS(fs, ins); | 554 | bcemit_INS(fs, ins); |
| @@ -637,7 +643,7 @@ static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) | |||
| 637 | ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); | 643 | ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); |
| 638 | } else { | 644 | } else { |
| 639 | BCReg ra, rc; | 645 | BCReg ra, rc; |
| 640 | lua_assert(var->k == VINDEXED); | 646 | lj_assertFS(var->k == VINDEXED, "bad expr type %d", var->k); |
| 641 | ra = expr_toanyreg(fs, e); | 647 | ra = expr_toanyreg(fs, e); |
| 642 | rc = var->u.s.aux; | 648 | rc = var->u.s.aux; |
| 643 | if ((int32_t)rc < 0) { | 649 | if ((int32_t)rc < 0) { |
| @@ -645,10 +651,12 @@ static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) | |||
| 645 | } else if (rc > BCMAX_C) { | 651 | } else if (rc > BCMAX_C) { |
| 646 | ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); | 652 | ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); |
| 647 | } else { | 653 | } else { |
| 654 | #ifdef LUA_USE_ASSERT | ||
| 648 | /* Free late alloced key reg to avoid assert on free of value reg. */ | 655 | /* Free late alloced key reg to avoid assert on free of value reg. */ |
| 649 | /* This can only happen when called from expr_table(). */ | 656 | /* This can only happen when called from expr_table(). */ |
| 650 | lua_assert(e->k != VNONRELOC || ra < fs->nactvar || | 657 | if (e->k == VNONRELOC && ra >= fs->nactvar && rc >= ra) |
| 651 | rc < ra || (bcreg_free(fs, rc),1)); | 658 | bcreg_free(fs, rc); |
| 659 | #endif | ||
| 652 | ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); | 660 | ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); |
| 653 | } | 661 | } |
| 654 | } | 662 | } |
| @@ -663,7 +671,7 @@ static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key) | |||
| 663 | expr_free(fs, e); | 671 | expr_free(fs, e); |
| 664 | func = fs->freereg; | 672 | func = fs->freereg; |
| 665 | bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */ | 673 | bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */ |
| 666 | lua_assert(expr_isstrk(key)); | 674 | lj_assertFS(expr_isstrk(key), "bad usage"); |
| 667 | idx = const_str(fs, key); | 675 | idx = const_str(fs, key); |
| 668 | if (idx <= BCMAX_C) { | 676 | if (idx <= BCMAX_C) { |
| 669 | bcreg_reserve(fs, 2+LJ_FR2); | 677 | bcreg_reserve(fs, 2+LJ_FR2); |
| @@ -803,7 +811,8 @@ static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) | |||
| 803 | else | 811 | else |
| 804 | rc = expr_toanyreg(fs, e2); | 812 | rc = expr_toanyreg(fs, e2); |
| 805 | /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ | 813 | /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ |
| 806 | lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); | 814 | lj_assertFS(expr_isnumk(e1) || e1->k == VNONRELOC, |
| 815 | "bad expr type %d", e1->k); | ||
| 807 | expr_toval(fs, e1); | 816 | expr_toval(fs, e1); |
| 808 | /* Avoid two consts to satisfy bytecode constraints. */ | 817 | /* Avoid two consts to satisfy bytecode constraints. */ |
| 809 | if (expr_isnumk(e1) && !expr_isnumk(e2) && | 818 | if (expr_isnumk(e1) && !expr_isnumk(e2) && |
| @@ -891,19 +900,20 @@ static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) | |||
| 891 | if (op <= OPR_POW) { | 900 | if (op <= OPR_POW) { |
| 892 | bcemit_arith(fs, op, e1, e2); | 901 | bcemit_arith(fs, op, e1, e2); |
| 893 | } else if (op == OPR_AND) { | 902 | } else if (op == OPR_AND) { |
| 894 | lua_assert(e1->t == NO_JMP); /* List must be closed. */ | 903 | lj_assertFS(e1->t == NO_JMP, "jump list not closed"); |
| 895 | expr_discharge(fs, e2); | 904 | expr_discharge(fs, e2); |
| 896 | jmp_append(fs, &e2->f, e1->f); | 905 | jmp_append(fs, &e2->f, e1->f); |
| 897 | *e1 = *e2; | 906 | *e1 = *e2; |
| 898 | } else if (op == OPR_OR) { | 907 | } else if (op == OPR_OR) { |
| 899 | lua_assert(e1->f == NO_JMP); /* List must be closed. */ | 908 | lj_assertFS(e1->f == NO_JMP, "jump list not closed"); |
| 900 | expr_discharge(fs, e2); | 909 | expr_discharge(fs, e2); |
| 901 | jmp_append(fs, &e2->t, e1->t); | 910 | jmp_append(fs, &e2->t, e1->t); |
| 902 | *e1 = *e2; | 911 | *e1 = *e2; |
| 903 | } else if (op == OPR_CONCAT) { | 912 | } else if (op == OPR_CONCAT) { |
| 904 | expr_toval(fs, e2); | 913 | expr_toval(fs, e2); |
| 905 | if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { | 914 | if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { |
| 906 | lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); | 915 | lj_assertFS(e1->u.s.info == bc_b(*bcptr(fs, e2))-1, |
| 916 | "bad CAT stack layout"); | ||
| 907 | expr_free(fs, e1); | 917 | expr_free(fs, e1); |
| 908 | setbc_b(bcptr(fs, e2), e1->u.s.info); | 918 | setbc_b(bcptr(fs, e2), e1->u.s.info); |
| 909 | e1->u.s.info = e2->u.s.info; | 919 | e1->u.s.info = e2->u.s.info; |
| @@ -915,8 +925,9 @@ static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) | |||
| 915 | } | 925 | } |
| 916 | e1->k = VRELOCABLE; | 926 | e1->k = VRELOCABLE; |
| 917 | } else { | 927 | } else { |
| 918 | lua_assert(op == OPR_NE || op == OPR_EQ || | 928 | lj_assertFS(op == OPR_NE || op == OPR_EQ || |
| 919 | op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); | 929 | op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT, |
| 930 | "bad binop %d", op); | ||
| 920 | bcemit_comp(fs, op, e1, e2); | 931 | bcemit_comp(fs, op, e1, e2); |
| 921 | } | 932 | } |
| 922 | } | 933 | } |
| @@ -945,10 +956,10 @@ static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e) | |||
| 945 | e->u.s.info = fs->freereg-1; | 956 | e->u.s.info = fs->freereg-1; |
| 946 | e->k = VNONRELOC; | 957 | e->k = VNONRELOC; |
| 947 | } else { | 958 | } else { |
| 948 | lua_assert(e->k == VNONRELOC); | 959 | lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k); |
| 949 | } | 960 | } |
| 950 | } else { | 961 | } else { |
| 951 | lua_assert(op == BC_UNM || op == BC_LEN); | 962 | lj_assertFS(op == BC_UNM || op == BC_LEN, "bad unop %d", op); |
| 952 | if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ | 963 | if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ |
| 953 | #if LJ_HASFFI | 964 | #if LJ_HASFFI |
| 954 | if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */ | 965 | if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */ |
| @@ -1043,8 +1054,9 @@ static void var_new(LexState *ls, BCReg n, GCstr *name) | |||
| 1043 | lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); | 1054 | lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); |
| 1044 | lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); | 1055 | lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); |
| 1045 | } | 1056 | } |
| 1046 | lua_assert((uintptr_t)name < VARNAME__MAX || | 1057 | lj_assertFS((uintptr_t)name < VARNAME__MAX || |
| 1047 | lj_tab_getstr(fs->kt, name) != NULL); | 1058 | lj_tab_getstr(fs->kt, name) != NULL, |
| 1059 | "unanchored variable name"); | ||
| 1048 | /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ | 1060 | /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ |
| 1049 | setgcref(ls->vstack[vtop].name, obj2gco(name)); | 1061 | setgcref(ls->vstack[vtop].name, obj2gco(name)); |
| 1050 | fs->varmap[fs->nactvar+n] = (uint16_t)vtop; | 1062 | fs->varmap[fs->nactvar+n] = (uint16_t)vtop; |
| @@ -1099,7 +1111,7 @@ static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e) | |||
| 1099 | return i; /* Already exists. */ | 1111 | return i; /* Already exists. */ |
| 1100 | /* Otherwise create a new one. */ | 1112 | /* Otherwise create a new one. */ |
| 1101 | checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); | 1113 | checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); |
| 1102 | lua_assert(e->k == VLOCAL || e->k == VUPVAL); | 1114 | lj_assertFS(e->k == VLOCAL || e->k == VUPVAL, "bad expr type %d", e->k); |
| 1103 | fs->uvmap[n] = (uint16_t)vidx; | 1115 | fs->uvmap[n] = (uint16_t)vidx; |
| 1104 | fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info); | 1116 | fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info); |
| 1105 | fs->nuv = n+1; | 1117 | fs->nuv = n+1; |
| @@ -1150,7 +1162,8 @@ static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc) | |||
| 1150 | lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); | 1162 | lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); |
| 1151 | lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); | 1163 | lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); |
| 1152 | } | 1164 | } |
| 1153 | lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL); | 1165 | lj_assertFS(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL, |
| 1166 | "unanchored label name"); | ||
| 1154 | /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ | 1167 | /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ |
| 1155 | setgcref(ls->vstack[vtop].name, obj2gco(name)); | 1168 | setgcref(ls->vstack[vtop].name, obj2gco(name)); |
| 1156 | ls->vstack[vtop].startpc = pc; | 1169 | ls->vstack[vtop].startpc = pc; |
| @@ -1180,8 +1193,9 @@ static void gola_close(LexState *ls, VarInfo *vg) | |||
| 1180 | FuncState *fs = ls->fs; | 1193 | FuncState *fs = ls->fs; |
| 1181 | BCPos pc = vg->startpc; | 1194 | BCPos pc = vg->startpc; |
| 1182 | BCIns *ip = &fs->bcbase[pc].ins; | 1195 | BCIns *ip = &fs->bcbase[pc].ins; |
| 1183 | lua_assert(gola_isgoto(vg)); | 1196 | lj_assertFS(gola_isgoto(vg), "expected goto"); |
| 1184 | lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO); | 1197 | lj_assertFS(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO, |
| 1198 | "bad bytecode op %d", bc_op(*ip)); | ||
| 1185 | setbc_a(ip, vg->slot); | 1199 | setbc_a(ip, vg->slot); |
| 1186 | if (bc_op(*ip) == BC_JMP) { | 1200 | if (bc_op(*ip) == BC_JMP) { |
| 1187 | BCPos next = jmp_next(fs, pc); | 1201 | BCPos next = jmp_next(fs, pc); |
| @@ -1200,9 +1214,9 @@ static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx) | |||
| 1200 | if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) { | 1214 | if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) { |
| 1201 | if (vg->slot < vl->slot) { | 1215 | if (vg->slot < vl->slot) { |
| 1202 | GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name); | 1216 | GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name); |
| 1203 | lua_assert((uintptr_t)name >= VARNAME__MAX); | 1217 | lj_assertLS((uintptr_t)name >= VARNAME__MAX, "expected goto name"); |
| 1204 | ls->linenumber = ls->fs->bcbase[vg->startpc].line; | 1218 | ls->linenumber = ls->fs->bcbase[vg->startpc].line; |
| 1205 | lua_assert(strref(vg->name) != NAME_BREAK); | 1219 | lj_assertLS(strref(vg->name) != NAME_BREAK, "unexpected break"); |
| 1206 | lj_lex_error(ls, 0, LJ_ERR_XGSCOPE, | 1220 | lj_lex_error(ls, 0, LJ_ERR_XGSCOPE, |
| 1207 | strdata(strref(vg->name)), strdata(name)); | 1221 | strdata(strref(vg->name)), strdata(name)); |
| 1208 | } | 1222 | } |
| @@ -1266,7 +1280,7 @@ static void fscope_begin(FuncState *fs, FuncScope *bl, int flags) | |||
| 1266 | bl->vstart = fs->ls->vtop; | 1280 | bl->vstart = fs->ls->vtop; |
| 1267 | bl->prev = fs->bl; | 1281 | bl->prev = fs->bl; |
| 1268 | fs->bl = bl; | 1282 | fs->bl = bl; |
| 1269 | lua_assert(fs->freereg == fs->nactvar); | 1283 | lj_assertFS(fs->freereg == fs->nactvar, "bad regalloc"); |
| 1270 | } | 1284 | } |
| 1271 | 1285 | ||
| 1272 | /* End a scope. */ | 1286 | /* End a scope. */ |
| @@ -1277,7 +1291,7 @@ static void fscope_end(FuncState *fs) | |||
| 1277 | fs->bl = bl->prev; | 1291 | fs->bl = bl->prev; |
| 1278 | var_remove(ls, bl->nactvar); | 1292 | var_remove(ls, bl->nactvar); |
| 1279 | fs->freereg = fs->nactvar; | 1293 | fs->freereg = fs->nactvar; |
| 1280 | lua_assert(bl->nactvar == fs->nactvar); | 1294 | lj_assertFS(bl->nactvar == fs->nactvar, "bad regalloc"); |
| 1281 | if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL) | 1295 | if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL) |
| 1282 | bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); | 1296 | bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); |
| 1283 | if ((bl->flags & FSCOPE_BREAK)) { | 1297 | if ((bl->flags & FSCOPE_BREAK)) { |
| @@ -1364,13 +1378,13 @@ static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr) | |||
| 1364 | Node *n = &node[i]; | 1378 | Node *n = &node[i]; |
| 1365 | if (tvhaskslot(&n->val)) { | 1379 | if (tvhaskslot(&n->val)) { |
| 1366 | ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); | 1380 | ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val); |
| 1367 | lua_assert(!tvisint(&n->key)); | 1381 | lj_assertFS(!tvisint(&n->key), "unexpected integer key"); |
| 1368 | if (tvisnum(&n->key)) { | 1382 | if (tvisnum(&n->key)) { |
| 1369 | TValue *tv = &((TValue *)kptr)[kidx]; | 1383 | TValue *tv = &((TValue *)kptr)[kidx]; |
| 1370 | if (LJ_DUALNUM) { | 1384 | if (LJ_DUALNUM) { |
| 1371 | lua_Number nn = numV(&n->key); | 1385 | lua_Number nn = numV(&n->key); |
| 1372 | int32_t k = lj_num2int(nn); | 1386 | int32_t k = lj_num2int(nn); |
| 1373 | lua_assert(!tvismzero(&n->key)); | 1387 | lj_assertFS(!tvismzero(&n->key), "unexpected -0 key"); |
| 1374 | if ((lua_Number)k == nn) | 1388 | if ((lua_Number)k == nn) |
| 1375 | setintV(tv, k); | 1389 | setintV(tv, k); |
| 1376 | else | 1390 | else |
| @@ -1418,21 +1432,21 @@ static void fs_fixup_line(FuncState *fs, GCproto *pt, | |||
| 1418 | uint8_t *li = (uint8_t *)lineinfo; | 1432 | uint8_t *li = (uint8_t *)lineinfo; |
| 1419 | do { | 1433 | do { |
| 1420 | BCLine delta = base[i].line - first; | 1434 | BCLine delta = base[i].line - first; |
| 1421 | lua_assert(delta >= 0 && delta < 256); | 1435 | lj_assertFS(delta >= 0 && delta < 256, "bad line delta"); |
| 1422 | li[i] = (uint8_t)delta; | 1436 | li[i] = (uint8_t)delta; |
| 1423 | } while (++i < n); | 1437 | } while (++i < n); |
| 1424 | } else if (LJ_LIKELY(numline < 65536)) { | 1438 | } else if (LJ_LIKELY(numline < 65536)) { |
| 1425 | uint16_t *li = (uint16_t *)lineinfo; | 1439 | uint16_t *li = (uint16_t *)lineinfo; |
| 1426 | do { | 1440 | do { |
| 1427 | BCLine delta = base[i].line - first; | 1441 | BCLine delta = base[i].line - first; |
| 1428 | lua_assert(delta >= 0 && delta < 65536); | 1442 | lj_assertFS(delta >= 0 && delta < 65536, "bad line delta"); |
| 1429 | li[i] = (uint16_t)delta; | 1443 | li[i] = (uint16_t)delta; |
| 1430 | } while (++i < n); | 1444 | } while (++i < n); |
| 1431 | } else { | 1445 | } else { |
| 1432 | uint32_t *li = (uint32_t *)lineinfo; | 1446 | uint32_t *li = (uint32_t *)lineinfo; |
| 1433 | do { | 1447 | do { |
| 1434 | BCLine delta = base[i].line - first; | 1448 | BCLine delta = base[i].line - first; |
| 1435 | lua_assert(delta >= 0); | 1449 | lj_assertFS(delta >= 0, "bad line delta"); |
| 1436 | li[i] = (uint32_t)delta; | 1450 | li[i] = (uint32_t)delta; |
| 1437 | } while (++i < n); | 1451 | } while (++i < n); |
| 1438 | } | 1452 | } |
| @@ -1522,7 +1536,7 @@ static void fs_fixup_ret(FuncState *fs) | |||
| 1522 | } | 1536 | } |
| 1523 | fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */ | 1537 | fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */ |
| 1524 | fscope_end(fs); | 1538 | fscope_end(fs); |
| 1525 | lua_assert(fs->bl == NULL); | 1539 | lj_assertFS(fs->bl == NULL, "bad scope nesting"); |
| 1526 | /* May need to fixup returns encoded before first function was created. */ | 1540 | /* May need to fixup returns encoded before first function was created. */ |
| 1527 | if (fs->flags & PROTO_FIXUP_RETURN) { | 1541 | if (fs->flags & PROTO_FIXUP_RETURN) { |
| 1528 | BCPos pc; | 1542 | BCPos pc; |
| @@ -1594,7 +1608,7 @@ static GCproto *fs_finish(LexState *ls, BCLine line) | |||
| 1594 | L->top--; /* Pop table of constants. */ | 1608 | L->top--; /* Pop table of constants. */ |
| 1595 | ls->vtop = fs->vbase; /* Reset variable stack. */ | 1609 | ls->vtop = fs->vbase; /* Reset variable stack. */ |
| 1596 | ls->fs = fs->prev; | 1610 | ls->fs = fs->prev; |
| 1597 | lua_assert(ls->fs != NULL || ls->tok == TK_eof); | 1611 | lj_assertL(ls->fs != NULL || ls->tok == TK_eof, "bad parser state"); |
| 1598 | return pt; | 1612 | return pt; |
| 1599 | } | 1613 | } |
| 1600 | 1614 | ||
| @@ -1688,14 +1702,15 @@ static void expr_bracket(LexState *ls, ExpDesc *v) | |||
| 1688 | } | 1702 | } |
| 1689 | 1703 | ||
| 1690 | /* Get value of constant expression. */ | 1704 | /* Get value of constant expression. */ |
| 1691 | static void expr_kvalue(TValue *v, ExpDesc *e) | 1705 | static void expr_kvalue(FuncState *fs, TValue *v, ExpDesc *e) |
| 1692 | { | 1706 | { |
| 1707 | UNUSED(fs); | ||
| 1693 | if (e->k <= VKTRUE) { | 1708 | if (e->k <= VKTRUE) { |
| 1694 | setpriV(v, ~(uint32_t)e->k); | 1709 | setpriV(v, ~(uint32_t)e->k); |
| 1695 | } else if (e->k == VKSTR) { | 1710 | } else if (e->k == VKSTR) { |
| 1696 | setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR); | 1711 | setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR); |
| 1697 | } else { | 1712 | } else { |
| 1698 | lua_assert(tvisnumber(expr_numtv(e))); | 1713 | lj_assertFS(tvisnumber(expr_numtv(e)), "bad number constant"); |
| 1699 | *v = *expr_numtv(e); | 1714 | *v = *expr_numtv(e); |
| 1700 | } | 1715 | } |
| 1701 | } | 1716 | } |
| @@ -1745,11 +1760,11 @@ static void expr_table(LexState *ls, ExpDesc *e) | |||
| 1745 | fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); | 1760 | fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx); |
| 1746 | } | 1761 | } |
| 1747 | vcall = 0; | 1762 | vcall = 0; |
| 1748 | expr_kvalue(&k, &key); | 1763 | expr_kvalue(fs, &k, &key); |
| 1749 | v = lj_tab_set(fs->L, t, &k); | 1764 | v = lj_tab_set(fs->L, t, &k); |
| 1750 | lj_gc_anybarriert(fs->L, t); | 1765 | lj_gc_anybarriert(fs->L, t); |
| 1751 | if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */ | 1766 | if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */ |
| 1752 | expr_kvalue(v, &val); | 1767 | expr_kvalue(fs, v, &val); |
| 1753 | } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */ | 1768 | } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */ |
| 1754 | settabV(fs->L, v, t); /* Preserve key with table itself as value. */ | 1769 | settabV(fs->L, v, t); /* Preserve key with table itself as value. */ |
| 1755 | fixt = 1; /* Fix this later, after all resizes. */ | 1770 | fixt = 1; /* Fix this later, after all resizes. */ |
| @@ -1768,8 +1783,9 @@ static void expr_table(LexState *ls, ExpDesc *e) | |||
| 1768 | if (vcall) { | 1783 | if (vcall) { |
| 1769 | BCInsLine *ilp = &fs->bcbase[fs->pc-1]; | 1784 | BCInsLine *ilp = &fs->bcbase[fs->pc-1]; |
| 1770 | ExpDesc en; | 1785 | ExpDesc en; |
| 1771 | lua_assert(bc_a(ilp->ins) == freg && | 1786 | lj_assertFS(bc_a(ilp->ins) == freg && |
| 1772 | bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB)); | 1787 | bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB), |
| 1788 | "bad CALL code generation"); | ||
| 1773 | expr_init(&en, VKNUM, 0); | 1789 | expr_init(&en, VKNUM, 0); |
| 1774 | en.u.nval.u32.lo = narr-1; | 1790 | en.u.nval.u32.lo = narr-1; |
| 1775 | en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ | 1791 | en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */ |
| @@ -1799,7 +1815,7 @@ static void expr_table(LexState *ls, ExpDesc *e) | |||
| 1799 | for (i = 0; i <= hmask; i++) { | 1815 | for (i = 0; i <= hmask; i++) { |
| 1800 | Node *n = &node[i]; | 1816 | Node *n = &node[i]; |
| 1801 | if (tvistab(&n->val)) { | 1817 | if (tvistab(&n->val)) { |
| 1802 | lua_assert(tabV(&n->val) == t); | 1818 | lj_assertFS(tabV(&n->val) == t, "bad dummy key in template table"); |
| 1803 | setnilV(&n->val); /* Turn value into nil. */ | 1819 | setnilV(&n->val); /* Turn value into nil. */ |
| 1804 | } | 1820 | } |
| 1805 | } | 1821 | } |
| @@ -1830,7 +1846,7 @@ static BCReg parse_params(LexState *ls, int needself) | |||
| 1830 | } while (lex_opt(ls, ',')); | 1846 | } while (lex_opt(ls, ',')); |
| 1831 | } | 1847 | } |
| 1832 | var_add(ls, nparams); | 1848 | var_add(ls, nparams); |
| 1833 | lua_assert(fs->nactvar == nparams); | 1849 | lj_assertFS(fs->nactvar == nparams, "bad regalloc"); |
| 1834 | bcreg_reserve(fs, nparams); | 1850 | bcreg_reserve(fs, nparams); |
| 1835 | lex_check(ls, ')'); | 1851 | lex_check(ls, ')'); |
| 1836 | return nparams; | 1852 | return nparams; |
| @@ -1917,7 +1933,7 @@ static void parse_args(LexState *ls, ExpDesc *e) | |||
| 1917 | err_syntax(ls, LJ_ERR_XFUNARG); | 1933 | err_syntax(ls, LJ_ERR_XFUNARG); |
| 1918 | return; /* Silence compiler. */ | 1934 | return; /* Silence compiler. */ |
| 1919 | } | 1935 | } |
| 1920 | lua_assert(e->k == VNONRELOC); | 1936 | lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k); |
| 1921 | base = e->u.s.info; /* Base register for call. */ | 1937 | base = e->u.s.info; /* Base register for call. */ |
| 1922 | if (args.k == VCALL) { | 1938 | if (args.k == VCALL) { |
| 1923 | ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2); | 1939 | ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2); |
| @@ -2687,8 +2703,9 @@ static void parse_chunk(LexState *ls) | |||
| 2687 | while (!islast && !parse_isend(ls->tok)) { | 2703 | while (!islast && !parse_isend(ls->tok)) { |
| 2688 | islast = parse_stmt(ls); | 2704 | islast = parse_stmt(ls); |
| 2689 | lex_opt(ls, ';'); | 2705 | lex_opt(ls, ';'); |
| 2690 | lua_assert(ls->fs->framesize >= ls->fs->freereg && | 2706 | lj_assertLS(ls->fs->framesize >= ls->fs->freereg && |
| 2691 | ls->fs->freereg >= ls->fs->nactvar); | 2707 | ls->fs->freereg >= ls->fs->nactvar, |
| 2708 | "bad regalloc"); | ||
| 2692 | ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ | 2709 | ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ |
| 2693 | } | 2710 | } |
| 2694 | synlevel_end(ls); | 2711 | synlevel_end(ls); |
| @@ -2723,9 +2740,8 @@ GCproto *lj_parse(LexState *ls) | |||
| 2723 | err_token(ls, TK_eof); | 2740 | err_token(ls, TK_eof); |
| 2724 | pt = fs_finish(ls, ls->linenumber); | 2741 | pt = fs_finish(ls, ls->linenumber); |
| 2725 | L->top--; /* Drop chunkname. */ | 2742 | L->top--; /* Drop chunkname. */ |
| 2726 | lua_assert(fs.prev == NULL); | 2743 | lj_assertL(fs.prev == NULL && ls->fs == NULL, "mismatched frame nesting"); |
| 2727 | lua_assert(ls->fs == NULL); | 2744 | lj_assertL(pt->sizeuv == 0, "toplevel proto has upvalues"); |
| 2728 | lua_assert(pt->sizeuv == 0); | ||
| 2729 | return pt; | 2745 | return pt; |
| 2730 | } | 2746 | } |
| 2731 | 2747 | ||
diff --git a/src/lj_record.c b/src/lj_record.c index 4fc22742..2a4a766e 100644 --- a/src/lj_record.c +++ b/src/lj_record.c | |||
| @@ -50,34 +50,52 @@ | |||
| 50 | static void rec_check_ir(jit_State *J) | 50 | static void rec_check_ir(jit_State *J) |
| 51 | { | 51 | { |
| 52 | IRRef i, nins = J->cur.nins, nk = J->cur.nk; | 52 | IRRef i, nins = J->cur.nins, nk = J->cur.nk; |
| 53 | lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536); | 53 | lj_assertJ(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536, |
| 54 | "inconsistent IR layout"); | ||
| 54 | for (i = nk; i < nins; i++) { | 55 | for (i = nk; i < nins; i++) { |
| 55 | IRIns *ir = IR(i); | 56 | IRIns *ir = IR(i); |
| 56 | uint32_t mode = lj_ir_mode[ir->o]; | 57 | uint32_t mode = lj_ir_mode[ir->o]; |
| 57 | IRRef op1 = ir->op1; | 58 | IRRef op1 = ir->op1; |
| 58 | IRRef op2 = ir->op2; | 59 | IRRef op2 = ir->op2; |
| 60 | const char *err = NULL; | ||
| 59 | switch (irm_op1(mode)) { | 61 | switch (irm_op1(mode)) { |
| 60 | case IRMnone: lua_assert(op1 == 0); break; | 62 | case IRMnone: |
| 61 | case IRMref: lua_assert(op1 >= nk); | 63 | if (op1 != 0) err = "IRMnone op1 used"; |
| 62 | lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break; | 64 | break; |
| 65 | case IRMref: | ||
| 66 | if (op1 < nk || (i >= REF_BIAS ? op1 >= i : op1 <= i)) | ||
| 67 | err = "IRMref op1 out of range"; | ||
| 68 | break; | ||
| 63 | case IRMlit: break; | 69 | case IRMlit: break; |
| 64 | case IRMcst: lua_assert(i < REF_BIAS); | 70 | case IRMcst: |
| 71 | if (i >= REF_BIAS) { err = "constant in IR range"; break; } | ||
| 65 | if (irt_is64(ir->t) && ir->o != IR_KNULL) | 72 | if (irt_is64(ir->t) && ir->o != IR_KNULL) |
| 66 | i++; | 73 | i++; |
| 67 | continue; | 74 | continue; |
| 68 | } | 75 | } |
| 69 | switch (irm_op2(mode)) { | 76 | switch (irm_op2(mode)) { |
| 70 | case IRMnone: lua_assert(op2 == 0); break; | 77 | case IRMnone: |
| 71 | case IRMref: lua_assert(op2 >= nk); | 78 | if (op2) err = "IRMnone op2 used"; |
| 72 | lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break; | 79 | break; |
| 80 | case IRMref: | ||
| 81 | if (op2 < nk || (i >= REF_BIAS ? op2 >= i : op2 <= i)) | ||
| 82 | err = "IRMref op2 out of range"; | ||
| 83 | break; | ||
| 73 | case IRMlit: break; | 84 | case IRMlit: break; |
| 74 | case IRMcst: lua_assert(0); break; | 85 | case IRMcst: err = "IRMcst op2"; break; |
| 75 | } | 86 | } |
| 76 | if (ir->prev) { | 87 | if (!err && ir->prev) { |
| 77 | lua_assert(ir->prev >= nk); | 88 | if (ir->prev < nk || (i >= REF_BIAS ? ir->prev >= i : ir->prev <= i)) |
| 78 | lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i); | 89 | err = "chain out of range"; |
| 79 | lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o); | 90 | else if (ir->o != IR_NOP && IR(ir->prev)->o != ir->o) |
| 91 | err = "chain to different op"; | ||
| 80 | } | 92 | } |
| 93 | lj_assertJ(!err, "bad IR %04d op %d(%04d,%04d): %s", | ||
| 94 | i-REF_BIAS, | ||
| 95 | ir->o, | ||
| 96 | irm_op1(mode) == IRMref ? op1-REF_BIAS : op1, | ||
| 97 | irm_op2(mode) == IRMref ? op2-REF_BIAS : op2, | ||
| 98 | err); | ||
| 81 | } | 99 | } |
| 82 | } | 100 | } |
| 83 | 101 | ||
| @@ -87,9 +105,10 @@ static void rec_check_slots(jit_State *J) | |||
| 87 | BCReg s, nslots = J->baseslot + J->maxslot; | 105 | BCReg s, nslots = J->baseslot + J->maxslot; |
| 88 | int32_t depth = 0; | 106 | int32_t depth = 0; |
| 89 | cTValue *base = J->L->base - J->baseslot; | 107 | cTValue *base = J->L->base - J->baseslot; |
| 90 | lua_assert(J->baseslot >= 1+LJ_FR2); | 108 | lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot"); |
| 91 | lua_assert(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME)); | 109 | lj_assertJ(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME), |
| 92 | lua_assert(nslots <= LJ_MAX_JSLOTS); | 110 | "baseslot does not point to frame"); |
| 111 | lj_assertJ(nslots <= LJ_MAX_JSLOTS, "slot overflow"); | ||
| 93 | for (s = 0; s < nslots; s++) { | 112 | for (s = 0; s < nslots; s++) { |
| 94 | TRef tr = J->slot[s]; | 113 | TRef tr = J->slot[s]; |
| 95 | if (tr) { | 114 | if (tr) { |
| @@ -97,56 +116,65 @@ static void rec_check_slots(jit_State *J) | |||
| 97 | IRRef ref = tref_ref(tr); | 116 | IRRef ref = tref_ref(tr); |
| 98 | IRIns *ir = NULL; /* Silence compiler. */ | 117 | IRIns *ir = NULL; /* Silence compiler. */ |
| 99 | if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) { | 118 | if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) { |
| 100 | lua_assert(ref >= J->cur.nk && ref < J->cur.nins); | 119 | lj_assertJ(ref >= J->cur.nk && ref < J->cur.nins, |
| 120 | "slot %d ref %04d out of range", s, ref - REF_BIAS); | ||
| 101 | ir = IR(ref); | 121 | ir = IR(ref); |
| 102 | lua_assert(irt_t(ir->t) == tref_t(tr)); | 122 | lj_assertJ(irt_t(ir->t) == tref_t(tr), "slot %d IR type mismatch", s); |
| 103 | } | 123 | } |
| 104 | if (s == 0) { | 124 | if (s == 0) { |
| 105 | lua_assert(tref_isfunc(tr)); | 125 | lj_assertJ(tref_isfunc(tr), "frame slot 0 is not a function"); |
| 106 | #if LJ_FR2 | 126 | #if LJ_FR2 |
| 107 | } else if (s == 1) { | 127 | } else if (s == 1) { |
| 108 | lua_assert((tr & ~TREF_FRAME) == 0); | 128 | lj_assertJ((tr & ~TREF_FRAME) == 0, "bad frame slot 1"); |
| 109 | #endif | 129 | #endif |
| 110 | } else if ((tr & TREF_FRAME)) { | 130 | } else if ((tr & TREF_FRAME)) { |
| 111 | GCfunc *fn = gco2func(frame_gc(tv)); | 131 | GCfunc *fn = gco2func(frame_gc(tv)); |
| 112 | BCReg delta = (BCReg)(tv - frame_prev(tv)); | 132 | BCReg delta = (BCReg)(tv - frame_prev(tv)); |
| 113 | #if LJ_FR2 | 133 | #if LJ_FR2 |
| 114 | if (ref) | 134 | lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64, |
| 115 | lua_assert(ir_knum(ir)->u64 == tv->u64); | 135 | "frame slot %d PC mismatch", s); |
| 116 | tr = J->slot[s-1]; | 136 | tr = J->slot[s-1]; |
| 117 | ir = IR(tref_ref(tr)); | 137 | ir = IR(tref_ref(tr)); |
| 118 | #endif | 138 | #endif |
| 119 | lua_assert(tref_isfunc(tr)); | 139 | lj_assertJ(tref_isfunc(tr), |
| 120 | if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir)); | 140 | "frame slot %d is not a function", s-LJ_FR2); |
| 121 | lua_assert(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME) | 141 | lj_assertJ(!tref_isk(tr) || fn == ir_kfunc(ir), |
| 122 | : (s == delta + LJ_FR2)); | 142 | "frame slot %d function mismatch", s-LJ_FR2); |
| 143 | lj_assertJ(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME) | ||
| 144 | : (s == delta + LJ_FR2), | ||
| 145 | "frame slot %d broken chain", s-LJ_FR2); | ||
| 123 | depth++; | 146 | depth++; |
| 124 | } else if ((tr & TREF_CONT)) { | 147 | } else if ((tr & TREF_CONT)) { |
| 125 | #if LJ_FR2 | 148 | #if LJ_FR2 |
| 126 | if (ref) | 149 | lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64, |
| 127 | lua_assert(ir_knum(ir)->u64 == tv->u64); | 150 | "cont slot %d continuation mismatch", s); |
| 128 | #else | 151 | #else |
| 129 | lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void)); | 152 | lj_assertJ(ir_kptr(ir) == gcrefp(tv->gcr, void), |
| 153 | "cont slot %d continuation mismatch", s); | ||
| 130 | #endif | 154 | #endif |
| 131 | lua_assert((J->slot[s+1+LJ_FR2] & TREF_FRAME)); | 155 | lj_assertJ((J->slot[s+1+LJ_FR2] & TREF_FRAME), |
| 156 | "cont slot %d not followed by frame", s); | ||
| 132 | depth++; | 157 | depth++; |
| 133 | } else { | 158 | } else { |
| 134 | if (tvisnumber(tv)) | 159 | /* Number repr. may differ, but other types must be the same. */ |
| 135 | lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */ | 160 | lj_assertJ(tvisnumber(tv) ? tref_isnumber(tr) : |
| 136 | else | 161 | itype2irt(tv) == tref_type(tr), |
| 137 | lua_assert(itype2irt(tv) == tref_type(tr)); | 162 | "slot %d type mismatch: stack type %d vs IR type %d", |
| 163 | s, itypemap(tv), tref_type(tr)); | ||
| 138 | if (tref_isk(tr)) { /* Compare constants. */ | 164 | if (tref_isk(tr)) { /* Compare constants. */ |
| 139 | TValue tvk; | 165 | TValue tvk; |
| 140 | lj_ir_kvalue(J->L, &tvk, ir); | 166 | lj_ir_kvalue(J->L, &tvk, ir); |
| 141 | if (!(tvisnum(&tvk) && tvisnan(&tvk))) | 167 | lj_assertJ((tvisnum(&tvk) && tvisnan(&tvk)) ? |
| 142 | lua_assert(lj_obj_equal(tv, &tvk)); | 168 | (tvisnum(tv) && tvisnan(tv)) : |
| 143 | else | 169 | lj_obj_equal(tv, &tvk), |
| 144 | lua_assert(tvisnum(tv) && tvisnan(tv)); | 170 | "slot %d const mismatch: stack %016llx vs IR %016llx", |
| 171 | s, tv->u64, tvk.u64); | ||
| 145 | } | 172 | } |
| 146 | } | 173 | } |
| 147 | } | 174 | } |
| 148 | } | 175 | } |
| 149 | lua_assert(J->framedepth == depth); | 176 | lj_assertJ(J->framedepth == depth, |
| 177 | "frame depth mismatch %d vs %d", J->framedepth, depth); | ||
| 150 | } | 178 | } |
| 151 | #endif | 179 | #endif |
| 152 | 180 | ||
| @@ -182,7 +210,7 @@ static TRef getcurrf(jit_State *J) | |||
| 182 | { | 210 | { |
| 183 | if (J->base[-1-LJ_FR2]) | 211 | if (J->base[-1-LJ_FR2]) |
| 184 | return J->base[-1-LJ_FR2]; | 212 | return J->base[-1-LJ_FR2]; |
| 185 | lua_assert(J->baseslot == 1+LJ_FR2); | 213 | lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot"); |
| 186 | return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY); | 214 | return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY); |
| 187 | } | 215 | } |
| 188 | 216 | ||
| @@ -427,7 +455,8 @@ static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev, | |||
| 427 | TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); | 455 | TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode); |
| 428 | TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); | 456 | TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode); |
| 429 | int tc, dir = rec_for_direction(&tv[FORL_STEP]); | 457 | int tc, dir = rec_for_direction(&tv[FORL_STEP]); |
| 430 | lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI); | 458 | lj_assertJ(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI, |
| 459 | "bad bytecode %d instead of FORI/JFORI", bc_op(*fori)); | ||
| 431 | scev->t.irt = t; | 460 | scev->t.irt = t; |
| 432 | scev->dir = dir; | 461 | scev->dir = dir; |
| 433 | scev->stop = tref_ref(stop); | 462 | scev->stop = tref_ref(stop); |
| @@ -483,7 +512,7 @@ static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl) | |||
| 483 | IRT_NUM; | 512 | IRT_NUM; |
| 484 | for (i = FORL_IDX; i <= FORL_STEP; i++) { | 513 | for (i = FORL_IDX; i <= FORL_STEP; i++) { |
| 485 | if (!tr[i]) sload(J, ra+i); | 514 | if (!tr[i]) sload(J, ra+i); |
| 486 | lua_assert(tref_isnumber_str(tr[i])); | 515 | lj_assertJ(tref_isnumber_str(tr[i]), "bad FORI argument type"); |
| 487 | if (tref_isstr(tr[i])) | 516 | if (tref_isstr(tr[i])) |
| 488 | tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); | 517 | tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0); |
| 489 | if (t == IRT_INT) { | 518 | if (t == IRT_INT) { |
| @@ -615,7 +644,8 @@ static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev) | |||
| 615 | static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc) | 644 | static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc) |
| 616 | { | 645 | { |
| 617 | GCproto *ppt; | 646 | GCproto *ppt; |
| 618 | lua_assert(J->prof_mode == 'f' || J->prof_mode == 'l'); | 647 | lj_assertJ(J->prof_mode == 'f' || J->prof_mode == 'l', |
| 648 | "bad profiler mode %c", J->prof_mode); | ||
| 619 | if (!pt) | 649 | if (!pt) |
| 620 | return 0; | 650 | return 0; |
| 621 | ppt = J->prev_pt; | 651 | ppt = J->prev_pt; |
| @@ -793,7 +823,7 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) | |||
| 793 | BCReg cbase = (BCReg)frame_delta(frame); | 823 | BCReg cbase = (BCReg)frame_delta(frame); |
| 794 | if (--J->framedepth <= 0) | 824 | if (--J->framedepth <= 0) |
| 795 | lj_trace_err(J, LJ_TRERR_NYIRETL); | 825 | lj_trace_err(J, LJ_TRERR_NYIRETL); |
| 796 | lua_assert(J->baseslot > 1+LJ_FR2); | 826 | lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return"); |
| 797 | gotresults++; | 827 | gotresults++; |
| 798 | rbase += cbase; | 828 | rbase += cbase; |
| 799 | J->baseslot -= (BCReg)cbase; | 829 | J->baseslot -= (BCReg)cbase; |
| @@ -817,7 +847,7 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) | |||
| 817 | BCReg cbase = (BCReg)frame_delta(frame); | 847 | BCReg cbase = (BCReg)frame_delta(frame); |
| 818 | if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ | 848 | if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */ |
| 819 | lj_trace_err(J, LJ_TRERR_NYIRETL); | 849 | lj_trace_err(J, LJ_TRERR_NYIRETL); |
| 820 | lua_assert(J->baseslot > 1+LJ_FR2); | 850 | lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return"); |
| 821 | rbase += cbase; | 851 | rbase += cbase; |
| 822 | J->baseslot -= (BCReg)cbase; | 852 | J->baseslot -= (BCReg)cbase; |
| 823 | J->base -= cbase; | 853 | J->base -= cbase; |
| @@ -844,7 +874,7 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) | |||
| 844 | J->maxslot = cbase+(BCReg)nresults; | 874 | J->maxslot = cbase+(BCReg)nresults; |
| 845 | if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ | 875 | if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */ |
| 846 | J->framedepth--; | 876 | J->framedepth--; |
| 847 | lua_assert(J->baseslot > cbase+1+LJ_FR2); | 877 | lj_assertJ(J->baseslot > cbase+1+LJ_FR2, "bad baseslot for return"); |
| 848 | J->baseslot -= cbase+1+LJ_FR2; | 878 | J->baseslot -= cbase+1+LJ_FR2; |
| 849 | J->base -= cbase+1+LJ_FR2; | 879 | J->base -= cbase+1+LJ_FR2; |
| 850 | } else if (J->parent == 0 && J->exitno == 0 && | 880 | } else if (J->parent == 0 && J->exitno == 0 && |
| @@ -859,7 +889,7 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) | |||
| 859 | emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc); | 889 | emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc); |
| 860 | J->retdepth++; | 890 | J->retdepth++; |
| 861 | J->needsnap = 1; | 891 | J->needsnap = 1; |
| 862 | lua_assert(J->baseslot == 1+LJ_FR2); | 892 | lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot for return"); |
| 863 | /* Shift result slots up and clear the slots of the new frame below. */ | 893 | /* Shift result slots up and clear the slots of the new frame below. */ |
| 864 | memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults); | 894 | memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults); |
| 865 | memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2)); | 895 | memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2)); |
| @@ -907,12 +937,13 @@ void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults) | |||
| 907 | } /* Otherwise continue with another __concat call. */ | 937 | } /* Otherwise continue with another __concat call. */ |
| 908 | } else { | 938 | } else { |
| 909 | /* Result type already specialized. */ | 939 | /* Result type already specialized. */ |
| 910 | lua_assert(cont == lj_cont_condf || cont == lj_cont_condt); | 940 | lj_assertJ(cont == lj_cont_condf || cont == lj_cont_condt, |
| 941 | "bad continuation type"); | ||
| 911 | } | 942 | } |
| 912 | } else { | 943 | } else { |
| 913 | lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ | 944 | lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */ |
| 914 | } | 945 | } |
| 915 | lua_assert(J->baseslot >= 1+LJ_FR2); | 946 | lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot for return"); |
| 916 | } | 947 | } |
| 917 | 948 | ||
| 918 | /* -- Metamethod handling ------------------------------------------------- */ | 949 | /* -- Metamethod handling ------------------------------------------------- */ |
| @@ -1167,7 +1198,7 @@ static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm) | |||
| 1167 | ix->tab = ix->val; | 1198 | ix->tab = ix->val; |
| 1168 | copyTV(J->L, &ix->tabv, &ix->valv); | 1199 | copyTV(J->L, &ix->tabv, &ix->valv); |
| 1169 | } else { | 1200 | } else { |
| 1170 | lua_assert(tref_iscdata(ix->key)); | 1201 | lj_assertJ(tref_iscdata(ix->key), "cdata expected"); |
| 1171 | ix->tab = ix->key; | 1202 | ix->tab = ix->key; |
| 1172 | copyTV(J->L, &ix->tabv, &ix->keyv); | 1203 | copyTV(J->L, &ix->tabv, &ix->keyv); |
| 1173 | } | 1204 | } |
| @@ -1264,7 +1295,8 @@ static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize) | |||
| 1264 | /* Got scalar evolution analysis results for this reference? */ | 1295 | /* Got scalar evolution analysis results for this reference? */ |
| 1265 | if (ref == J->scev.idx) { | 1296 | if (ref == J->scev.idx) { |
| 1266 | int32_t stop; | 1297 | int32_t stop; |
| 1267 | lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD); | 1298 | lj_assertJ(irt_isint(J->scev.t) && ir->o == IR_SLOAD, |
| 1299 | "only int SCEV supported"); | ||
| 1268 | stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); | 1300 | stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]); |
| 1269 | /* Runtime value for stop of loop is within bounds? */ | 1301 | /* Runtime value for stop of loop is within bounds? */ |
| 1270 | if ((uint64_t)stop + ofs < (uint64_t)asize) { | 1302 | if ((uint64_t)stop + ofs < (uint64_t)asize) { |
| @@ -1382,7 +1414,7 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix) | |||
| 1382 | 1414 | ||
| 1383 | while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ | 1415 | while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */ |
| 1384 | /* Never call raw lj_record_idx() on non-table. */ | 1416 | /* Never call raw lj_record_idx() on non-table. */ |
| 1385 | lua_assert(ix->idxchain != 0); | 1417 | lj_assertJ(ix->idxchain != 0, "bad usage"); |
| 1386 | if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) | 1418 | if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index)) |
| 1387 | lj_trace_err(J, LJ_TRERR_NOMM); | 1419 | lj_trace_err(J, LJ_TRERR_NOMM); |
| 1388 | handlemm: | 1420 | handlemm: |
| @@ -1466,10 +1498,10 @@ TRef lj_record_idx(jit_State *J, RecordIndex *ix) | |||
| 1466 | emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC), | 1498 | emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC), |
| 1467 | xref, lj_ir_kkptr(J, niltvg(J2G(J)))); | 1499 | xref, lj_ir_kkptr(J, niltvg(J2G(J)))); |
| 1468 | if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { | 1500 | if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) { |
| 1469 | lua_assert(hasmm); | 1501 | lj_assertJ(hasmm, "inconsistent metamethod handling"); |
| 1470 | goto handlemm; | 1502 | goto handlemm; |
| 1471 | } | 1503 | } |
| 1472 | lua_assert(!hasmm); | 1504 | lj_assertJ(!hasmm, "inconsistent metamethod handling"); |
| 1473 | if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ | 1505 | if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */ |
| 1474 | TRef key = ix->key; | 1506 | TRef key = ix->key; |
| 1475 | if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ | 1507 | if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */ |
| @@ -1575,7 +1607,7 @@ static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val) | |||
| 1575 | int needbarrier = 0; | 1607 | int needbarrier = 0; |
| 1576 | if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */ | 1608 | if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */ |
| 1577 | TRef tr, kfunc; | 1609 | TRef tr, kfunc; |
| 1578 | lua_assert(val == 0); | 1610 | lj_assertJ(val == 0, "bad usage"); |
| 1579 | if (!tref_isk(fn)) { /* Late specialization of current function. */ | 1611 | if (!tref_isk(fn)) { /* Late specialization of current function. */ |
| 1580 | if (J->pt->flags >= PROTO_CLC_POLY) | 1612 | if (J->pt->flags >= PROTO_CLC_POLY) |
| 1581 | goto noconstify; | 1613 | goto noconstify; |
| @@ -1697,7 +1729,7 @@ static void rec_func_vararg(jit_State *J) | |||
| 1697 | { | 1729 | { |
| 1698 | GCproto *pt = J->pt; | 1730 | GCproto *pt = J->pt; |
| 1699 | BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2; | 1731 | BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2; |
| 1700 | lua_assert((pt->flags & PROTO_VARARG)); | 1732 | lj_assertJ((pt->flags & PROTO_VARARG), "FUNCV in non-vararg function"); |
| 1701 | if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) | 1733 | if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS) |
| 1702 | lj_trace_err(J, LJ_TRERR_STACKOV); | 1734 | lj_trace_err(J, LJ_TRERR_STACKOV); |
| 1703 | J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2]; /* Copy function up. */ | 1735 | J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2]; /* Copy function up. */ |
| @@ -1766,7 +1798,7 @@ static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults) | |||
| 1766 | { | 1798 | { |
| 1767 | int32_t numparams = J->pt->numparams; | 1799 | int32_t numparams = J->pt->numparams; |
| 1768 | ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2; | 1800 | ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2; |
| 1769 | lua_assert(frame_isvarg(J->L->base-1)); | 1801 | lj_assertJ(frame_isvarg(J->L->base-1), "VARG in non-vararg frame"); |
| 1770 | if (LJ_FR2 && dst > J->maxslot) | 1802 | if (LJ_FR2 && dst > J->maxslot) |
| 1771 | J->base[dst-1] = 0; /* Prevent resurrection of unrelated slot. */ | 1803 | J->base[dst-1] = 0; /* Prevent resurrection of unrelated slot. */ |
| 1772 | if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ | 1804 | if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */ |
| @@ -1889,7 +1921,7 @@ static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot) | |||
| 1889 | TValue savetv[5]; | 1921 | TValue savetv[5]; |
| 1890 | BCReg s; | 1922 | BCReg s; |
| 1891 | RecordIndex ix; | 1923 | RecordIndex ix; |
| 1892 | lua_assert(baseslot < topslot); | 1924 | lj_assertJ(baseslot < topslot, "bad CAT arg"); |
| 1893 | for (s = baseslot; s <= topslot; s++) | 1925 | for (s = baseslot; s <= topslot; s++) |
| 1894 | (void)getslot(J, s); /* Ensure all arguments have a reference. */ | 1926 | (void)getslot(J, s); /* Ensure all arguments have a reference. */ |
| 1895 | if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) { | 1927 | if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) { |
| @@ -2013,7 +2045,7 @@ void lj_record_ins(jit_State *J) | |||
| 2013 | if (bc_op(*J->pc) >= BC__MAX) | 2045 | if (bc_op(*J->pc) >= BC__MAX) |
| 2014 | return; | 2046 | return; |
| 2015 | break; | 2047 | break; |
| 2016 | default: lua_assert(0); break; | 2048 | default: lj_assertJ(0, "bad post-processing mode"); break; |
| 2017 | } | 2049 | } |
| 2018 | J->postproc = LJ_POST_NONE; | 2050 | J->postproc = LJ_POST_NONE; |
| 2019 | } | 2051 | } |
| @@ -2381,7 +2413,8 @@ void lj_record_ins(jit_State *J) | |||
| 2381 | J->loopref = J->cur.nins; | 2413 | J->loopref = J->cur.nins; |
| 2382 | break; | 2414 | break; |
| 2383 | case BC_JFORI: | 2415 | case BC_JFORI: |
| 2384 | lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL); | 2416 | lj_assertJ(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL, |
| 2417 | "JFORI does not point to JFORL"); | ||
| 2385 | if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */ | 2418 | if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */ |
| 2386 | lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); | 2419 | lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J])); |
| 2387 | /* Continue tracing if the loop is not entered. */ | 2420 | /* Continue tracing if the loop is not entered. */ |
| @@ -2434,7 +2467,8 @@ void lj_record_ins(jit_State *J) | |||
| 2434 | rec_func_lua(J); | 2467 | rec_func_lua(J); |
| 2435 | break; | 2468 | break; |
| 2436 | case BC_JFUNCV: | 2469 | case BC_JFUNCV: |
| 2437 | lua_assert(0); /* Cannot happen. No hotcall counting for varag funcs. */ | 2470 | /* Cannot happen. No hotcall counting for varag funcs. */ |
| 2471 | lj_assertJ(0, "unsupported vararg hotcall"); | ||
| 2438 | break; | 2472 | break; |
| 2439 | 2473 | ||
| 2440 | case BC_FUNCC: | 2474 | case BC_FUNCC: |
| @@ -2494,11 +2528,11 @@ static const BCIns *rec_setup_root(jit_State *J) | |||
| 2494 | J->bc_min = pc; | 2528 | J->bc_min = pc; |
| 2495 | break; | 2529 | break; |
| 2496 | case BC_ITERL: | 2530 | case BC_ITERL: |
| 2497 | lua_assert(bc_op(pc[-1]) == BC_ITERC); | 2531 | lj_assertJ(bc_op(pc[-1]) == BC_ITERC, "no ITERC before ITERL"); |
| 2498 | J->maxslot = ra + bc_b(pc[-1]) - 1; | 2532 | J->maxslot = ra + bc_b(pc[-1]) - 1; |
| 2499 | J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); | 2533 | J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns); |
| 2500 | pc += 1+bc_j(ins); | 2534 | pc += 1+bc_j(ins); |
| 2501 | lua_assert(bc_op(pc[-1]) == BC_JMP); | 2535 | lj_assertJ(bc_op(pc[-1]) == BC_JMP, "ITERL does not point to JMP+1"); |
| 2502 | J->bc_min = pc; | 2536 | J->bc_min = pc; |
| 2503 | break; | 2537 | break; |
| 2504 | case BC_LOOP: | 2538 | case BC_LOOP: |
| @@ -2530,7 +2564,7 @@ static const BCIns *rec_setup_root(jit_State *J) | |||
| 2530 | pc++; | 2564 | pc++; |
| 2531 | break; | 2565 | break; |
| 2532 | default: | 2566 | default: |
| 2533 | lua_assert(0); | 2567 | lj_assertJ(0, "bad root trace start bytecode %d", bc_op(ins)); |
| 2534 | break; | 2568 | break; |
| 2535 | } | 2569 | } |
| 2536 | return pc; | 2570 | return pc; |
diff --git a/src/lj_snap.c b/src/lj_snap.c index a47c0e3e..a21894f6 100644 --- a/src/lj_snap.c +++ b/src/lj_snap.c | |||
| @@ -110,7 +110,7 @@ static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot) | |||
| 110 | cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top; | 110 | cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top; |
| 111 | #if LJ_FR2 | 111 | #if LJ_FR2 |
| 112 | uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2); | 112 | uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2); |
| 113 | lua_assert(2 <= J->baseslot && J->baseslot <= 257); | 113 | lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot"); |
| 114 | memcpy(map, &pcbase, sizeof(uint64_t)); | 114 | memcpy(map, &pcbase, sizeof(uint64_t)); |
| 115 | #else | 115 | #else |
| 116 | MSize f = 0; | 116 | MSize f = 0; |
| @@ -129,7 +129,7 @@ static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot) | |||
| 129 | #endif | 129 | #endif |
| 130 | frame = frame_prevd(frame); | 130 | frame = frame_prevd(frame); |
| 131 | } else { | 131 | } else { |
| 132 | lua_assert(!frame_isc(frame)); | 132 | lj_assertJ(!frame_isc(frame), "broken frame chain"); |
| 133 | #if !LJ_FR2 | 133 | #if !LJ_FR2 |
| 134 | map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); | 134 | map[f++] = SNAP_MKFTSZ(frame_ftsz(frame)); |
| 135 | #endif | 135 | #endif |
| @@ -141,10 +141,10 @@ static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot) | |||
| 141 | } | 141 | } |
| 142 | *topslot = (uint8_t)(ftop - lim); | 142 | *topslot = (uint8_t)(ftop - lim); |
| 143 | #if LJ_FR2 | 143 | #if LJ_FR2 |
| 144 | lua_assert(sizeof(SnapEntry) * 2 == sizeof(uint64_t)); | 144 | lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def"); |
| 145 | return 2; | 145 | return 2; |
| 146 | #else | 146 | #else |
| 147 | lua_assert(f == (MSize)(1 + J->framedepth)); | 147 | lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size"); |
| 148 | return f; | 148 | return f; |
| 149 | #endif | 149 | #endif |
| 150 | } | 150 | } |
| @@ -222,7 +222,8 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf, | |||
| 222 | #define DEF_SLOT(s) udf[(s)] *= 3 | 222 | #define DEF_SLOT(s) udf[(s)] *= 3 |
| 223 | 223 | ||
| 224 | /* Scan through following bytecode and check for uses/defs. */ | 224 | /* Scan through following bytecode and check for uses/defs. */ |
| 225 | lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); | 225 | lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc, |
| 226 | "snapshot PC out of range"); | ||
| 226 | for (;;) { | 227 | for (;;) { |
| 227 | BCIns ins = *pc++; | 228 | BCIns ins = *pc++; |
| 228 | BCOp op = bc_op(ins); | 229 | BCOp op = bc_op(ins); |
| @@ -233,7 +234,7 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf, | |||
| 233 | switch (bcmode_c(op)) { | 234 | switch (bcmode_c(op)) { |
| 234 | case BCMvar: USE_SLOT(bc_c(ins)); break; | 235 | case BCMvar: USE_SLOT(bc_c(ins)); break; |
| 235 | case BCMrbase: | 236 | case BCMrbase: |
| 236 | lua_assert(op == BC_CAT); | 237 | lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op); |
| 237 | for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); | 238 | for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s); |
| 238 | for (; s < maxslot; s++) DEF_SLOT(s); | 239 | for (; s < maxslot; s++) DEF_SLOT(s); |
| 239 | break; | 240 | break; |
| @@ -285,7 +286,8 @@ static BCReg snap_usedef(jit_State *J, uint8_t *udf, | |||
| 285 | break; | 286 | break; |
| 286 | default: break; | 287 | default: break; |
| 287 | } | 288 | } |
| 288 | lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc); | 289 | lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc, |
| 290 | "use/def analysis PC out of range"); | ||
| 289 | } | 291 | } |
| 290 | 292 | ||
| 291 | #undef USE_SLOT | 293 | #undef USE_SLOT |
| @@ -356,19 +358,20 @@ static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs) | |||
| 356 | } | 358 | } |
| 357 | 359 | ||
| 358 | /* Copy RegSP from parent snapshot to the parent links of the IR. */ | 360 | /* Copy RegSP from parent snapshot to the parent links of the IR. */ |
| 359 | IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir) | 361 | IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir) |
| 360 | { | 362 | { |
| 361 | SnapShot *snap = &T->snap[snapno]; | 363 | SnapShot *snap = &T->snap[snapno]; |
| 362 | SnapEntry *map = &T->snapmap[snap->mapofs]; | 364 | SnapEntry *map = &T->snapmap[snap->mapofs]; |
| 363 | BloomFilter rfilt = snap_renamefilter(T, snapno); | 365 | BloomFilter rfilt = snap_renamefilter(T, snapno); |
| 364 | MSize n = 0; | 366 | MSize n = 0; |
| 365 | IRRef ref = 0; | 367 | IRRef ref = 0; |
| 368 | UNUSED(J); | ||
| 366 | for ( ; ; ir++) { | 369 | for ( ; ; ir++) { |
| 367 | uint32_t rs; | 370 | uint32_t rs; |
| 368 | if (ir->o == IR_SLOAD) { | 371 | if (ir->o == IR_SLOAD) { |
| 369 | if (!(ir->op2 & IRSLOAD_PARENT)) break; | 372 | if (!(ir->op2 & IRSLOAD_PARENT)) break; |
| 370 | for ( ; ; n++) { | 373 | for ( ; ; n++) { |
| 371 | lua_assert(n < snap->nent); | 374 | lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1); |
| 372 | if (snap_slot(map[n]) == ir->op1) { | 375 | if (snap_slot(map[n]) == ir->op1) { |
| 373 | ref = snap_ref(map[n++]); | 376 | ref = snap_ref(map[n++]); |
| 374 | break; | 377 | break; |
| @@ -385,7 +388,7 @@ IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir) | |||
| 385 | if (bloomtest(rfilt, ref)) | 388 | if (bloomtest(rfilt, ref)) |
| 386 | rs = snap_renameref(T, snapno, ref, rs); | 389 | rs = snap_renameref(T, snapno, ref, rs); |
| 387 | ir->prev = (uint16_t)rs; | 390 | ir->prev = (uint16_t)rs; |
| 388 | lua_assert(regsp_used(rs)); | 391 | lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS); |
| 389 | } | 392 | } |
| 390 | return ir; | 393 | return ir; |
| 391 | } | 394 | } |
| @@ -403,7 +406,7 @@ static TRef snap_replay_const(jit_State *J, IRIns *ir) | |||
| 403 | case IR_KNUM: case IR_KINT64: | 406 | case IR_KNUM: case IR_KINT64: |
| 404 | return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64); | 407 | return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64); |
| 405 | case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */ | 408 | case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */ |
| 406 | default: lua_assert(0); return TREF_NIL; break; | 409 | default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL; |
| 407 | } | 410 | } |
| 408 | } | 411 | } |
| 409 | 412 | ||
| @@ -481,7 +484,7 @@ void lj_snap_replay(jit_State *J, GCtrace *T) | |||
| 481 | tr = snap_replay_const(J, ir); | 484 | tr = snap_replay_const(J, ir); |
| 482 | } else if (!regsp_used(ir->prev)) { | 485 | } else if (!regsp_used(ir->prev)) { |
| 483 | pass23 = 1; | 486 | pass23 = 1; |
| 484 | lua_assert(s != 0); | 487 | lj_assertJ(s != 0, "unused slot 0 in snapshot"); |
| 485 | tr = s; | 488 | tr = s; |
| 486 | } else { | 489 | } else { |
| 487 | IRType t = irt_type(ir->t); | 490 | IRType t = irt_type(ir->t); |
| @@ -507,8 +510,9 @@ void lj_snap_replay(jit_State *J, GCtrace *T) | |||
| 507 | if (regsp_reg(ir->r) == RID_SUNK) { | 510 | if (regsp_reg(ir->r) == RID_SUNK) { |
| 508 | if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue; | 511 | if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue; |
| 509 | pass23 = 1; | 512 | pass23 = 1; |
| 510 | lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || | 513 | lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP || |
| 511 | ir->o == IR_CNEW || ir->o == IR_CNEWI); | 514 | ir->o == IR_CNEW || ir->o == IR_CNEWI, |
| 515 | "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o); | ||
| 512 | if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1); | 516 | if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1); |
| 513 | if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2); | 517 | if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2); |
| 514 | if (LJ_HASFFI && ir->o == IR_CNEWI) { | 518 | if (LJ_HASFFI && ir->o == IR_CNEWI) { |
| @@ -526,7 +530,8 @@ void lj_snap_replay(jit_State *J, GCtrace *T) | |||
| 526 | } | 530 | } |
| 527 | } | 531 | } |
| 528 | } else if (!irref_isk(refp) && !regsp_used(ir->prev)) { | 532 | } else if (!irref_isk(refp) && !regsp_used(ir->prev)) { |
| 529 | lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); | 533 | lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT, |
| 534 | "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o); | ||
| 530 | J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1); | 535 | J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1); |
| 531 | } | 536 | } |
| 532 | } | 537 | } |
| @@ -576,7 +581,9 @@ void lj_snap_replay(jit_State *J, GCtrace *T) | |||
| 576 | val = snap_pref(J, T, map, nent, seen, irs->op2); | 581 | val = snap_pref(J, T, map, nent, seen, irs->op2); |
| 577 | if (val == 0) { | 582 | if (val == 0) { |
| 578 | IRIns *irc = &T->ir[irs->op2]; | 583 | IRIns *irc = &T->ir[irs->op2]; |
| 579 | lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT); | 584 | lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT, |
| 585 | "sunk store for parent IR %04d with bad op %d", | ||
| 586 | refp - REF_BIAS, irc->o); | ||
| 580 | val = snap_pref(J, T, map, nent, seen, irc->op1); | 587 | val = snap_pref(J, T, map, nent, seen, irc->op1); |
| 581 | val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); | 588 | val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT); |
| 582 | } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) && | 589 | } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) && |
| @@ -645,13 +652,14 @@ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 645 | o->u64 = *(uint64_t *)sps; | 652 | o->u64 = *(uint64_t *)sps; |
| 646 | #endif | 653 | #endif |
| 647 | } else { | 654 | } else { |
| 648 | lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */ | 655 | lj_assertJ(!irt_ispri(t), "PRI ref with spill slot"); |
| 649 | setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t)); | 656 | setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t)); |
| 650 | } | 657 | } |
| 651 | } else { /* Restore from register. */ | 658 | } else { /* Restore from register. */ |
| 652 | Reg r = regsp_reg(rs); | 659 | Reg r = regsp_reg(rs); |
| 653 | if (ra_noreg(r)) { | 660 | if (ra_noreg(r)) { |
| 654 | lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); | 661 | lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT, |
| 662 | "restore from IR %04d has no reg", ref - REF_BIAS); | ||
| 655 | snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o); | 663 | snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o); |
| 656 | if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o)); | 664 | if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o)); |
| 657 | return; | 665 | return; |
| @@ -679,7 +687,7 @@ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 679 | 687 | ||
| 680 | #if LJ_HASFFI | 688 | #if LJ_HASFFI |
| 681 | /* Restore raw data from the trace exit state. */ | 689 | /* Restore raw data from the trace exit state. */ |
| 682 | static void snap_restoredata(GCtrace *T, ExitState *ex, | 690 | static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex, |
| 683 | SnapNo snapno, BloomFilter rfilt, | 691 | SnapNo snapno, BloomFilter rfilt, |
| 684 | IRRef ref, void *dst, CTSize sz) | 692 | IRRef ref, void *dst, CTSize sz) |
| 685 | { | 693 | { |
| @@ -687,6 +695,7 @@ static void snap_restoredata(GCtrace *T, ExitState *ex, | |||
| 687 | RegSP rs = ir->prev; | 695 | RegSP rs = ir->prev; |
| 688 | int32_t *src; | 696 | int32_t *src; |
| 689 | uint64_t tmp; | 697 | uint64_t tmp; |
| 698 | UNUSED(J); | ||
| 690 | if (irref_isk(ref)) { | 699 | if (irref_isk(ref)) { |
| 691 | if (ir_isk64(ir)) { | 700 | if (ir_isk64(ir)) { |
| 692 | src = (int32_t *)&ir[1]; | 701 | src = (int32_t *)&ir[1]; |
| @@ -709,8 +718,9 @@ static void snap_restoredata(GCtrace *T, ExitState *ex, | |||
| 709 | Reg r = regsp_reg(rs); | 718 | Reg r = regsp_reg(rs); |
| 710 | if (ra_noreg(r)) { | 719 | if (ra_noreg(r)) { |
| 711 | /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */ | 720 | /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */ |
| 712 | lua_assert(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT); | 721 | lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT, |
| 713 | snap_restoredata(T, ex, snapno, rfilt, ir->op1, dst, 4); | 722 | "restore from IR %04d has no reg", ref - REF_BIAS); |
| 723 | snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4); | ||
| 714 | *(lua_Number *)dst = (lua_Number)*(int32_t *)dst; | 724 | *(lua_Number *)dst = (lua_Number)*(int32_t *)dst; |
| 715 | return; | 725 | return; |
| 716 | } | 726 | } |
| @@ -731,7 +741,8 @@ static void snap_restoredata(GCtrace *T, ExitState *ex, | |||
| 731 | if (LJ_64 && LJ_BE && sz == 4) src++; | 741 | if (LJ_64 && LJ_BE && sz == 4) src++; |
| 732 | } | 742 | } |
| 733 | } | 743 | } |
| 734 | lua_assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); | 744 | lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8, |
| 745 | "restore from IR %04d with bad size %d", ref - REF_BIAS, sz); | ||
| 735 | if (sz == 4) *(int32_t *)dst = *src; | 746 | if (sz == 4) *(int32_t *)dst = *src; |
| 736 | else if (sz == 8) *(int64_t *)dst = *(int64_t *)src; | 747 | else if (sz == 8) *(int64_t *)dst = *(int64_t *)src; |
| 737 | else if (sz == 1) *(int8_t *)dst = (int8_t)*src; | 748 | else if (sz == 1) *(int8_t *)dst = (int8_t)*src; |
| @@ -744,8 +755,9 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 744 | SnapNo snapno, BloomFilter rfilt, | 755 | SnapNo snapno, BloomFilter rfilt, |
| 745 | IRIns *ir, TValue *o) | 756 | IRIns *ir, TValue *o) |
| 746 | { | 757 | { |
| 747 | lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || | 758 | lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP || |
| 748 | ir->o == IR_CNEW || ir->o == IR_CNEWI); | 759 | ir->o == IR_CNEW || ir->o == IR_CNEWI, |
| 760 | "sunk allocation with bad op %d", ir->o); | ||
| 749 | #if LJ_HASFFI | 761 | #if LJ_HASFFI |
| 750 | if (ir->o == IR_CNEW || ir->o == IR_CNEWI) { | 762 | if (ir->o == IR_CNEW || ir->o == IR_CNEWI) { |
| 751 | CTState *cts = ctype_cts(J->L); | 763 | CTState *cts = ctype_cts(J->L); |
| @@ -756,13 +768,14 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 756 | setcdataV(J->L, o, cd); | 768 | setcdataV(J->L, o, cd); |
| 757 | if (ir->o == IR_CNEWI) { | 769 | if (ir->o == IR_CNEWI) { |
| 758 | uint8_t *p = (uint8_t *)cdataptr(cd); | 770 | uint8_t *p = (uint8_t *)cdataptr(cd); |
| 759 | lua_assert(sz == 4 || sz == 8); | 771 | lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz); |
| 760 | if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) { | 772 | if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) { |
| 761 | snap_restoredata(T, ex, snapno, rfilt, (ir+1)->op2, LJ_LE?p+4:p, 4); | 773 | snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2, |
| 774 | LJ_LE ? p+4 : p, 4); | ||
| 762 | if (LJ_BE) p += 4; | 775 | if (LJ_BE) p += 4; |
| 763 | sz = 4; | 776 | sz = 4; |
| 764 | } | 777 | } |
| 765 | snap_restoredata(T, ex, snapno, rfilt, ir->op2, p, sz); | 778 | snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz); |
| 766 | } else { | 779 | } else { |
| 767 | IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; | 780 | IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref]; |
| 768 | for (irs = ir+1; irs < irlast; irs++) | 781 | for (irs = ir+1; irs < irlast; irs++) |
| @@ -770,8 +783,11 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 770 | IRIns *iro = &T->ir[T->ir[irs->op1].op2]; | 783 | IRIns *iro = &T->ir[T->ir[irs->op1].op2]; |
| 771 | uint8_t *p = (uint8_t *)cd; | 784 | uint8_t *p = (uint8_t *)cd; |
| 772 | CTSize szs; | 785 | CTSize szs; |
| 773 | lua_assert(irs->o == IR_XSTORE && T->ir[irs->op1].o == IR_ADD); | 786 | lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o); |
| 774 | lua_assert(iro->o == IR_KINT || iro->o == IR_KINT64); | 787 | lj_assertJ(T->ir[irs->op1].o == IR_ADD, |
| 788 | "sunk store with bad add op %d", T->ir[irs->op1].o); | ||
| 789 | lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64, | ||
| 790 | "sunk store with bad const offset op %d", iro->o); | ||
| 775 | if (irt_is64(irs->t)) szs = 8; | 791 | if (irt_is64(irs->t)) szs = 8; |
| 776 | else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1; | 792 | else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1; |
| 777 | else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2; | 793 | else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2; |
| @@ -780,14 +796,16 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 780 | p += (int64_t)ir_k64(iro)->u64; | 796 | p += (int64_t)ir_k64(iro)->u64; |
| 781 | else | 797 | else |
| 782 | p += iro->i; | 798 | p += iro->i; |
| 783 | lua_assert(p >= (uint8_t *)cdataptr(cd) && | 799 | lj_assertJ(p >= (uint8_t *)cdataptr(cd) && |
| 784 | p + szs <= (uint8_t *)cdataptr(cd) + sz); | 800 | p + szs <= (uint8_t *)cdataptr(cd) + sz, |
| 801 | "sunk store with offset out of range"); | ||
| 785 | if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { | 802 | if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) { |
| 786 | lua_assert(szs == 4); | 803 | lj_assertJ(szs == 4, "sunk store with bad size %d", szs); |
| 787 | snap_restoredata(T, ex, snapno, rfilt, (irs+1)->op2, LJ_LE?p+4:p,4); | 804 | snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2, |
| 805 | LJ_LE ? p+4 : p, 4); | ||
| 788 | if (LJ_BE) p += 4; | 806 | if (LJ_BE) p += 4; |
| 789 | } | 807 | } |
| 790 | snap_restoredata(T, ex, snapno, rfilt, irs->op2, p, szs); | 808 | snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs); |
| 791 | } | 809 | } |
| 792 | } | 810 | } |
| 793 | } else | 811 | } else |
| @@ -802,10 +820,12 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex, | |||
| 802 | if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) { | 820 | if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) { |
| 803 | IRIns *irk = &T->ir[irs->op1]; | 821 | IRIns *irk = &T->ir[irs->op1]; |
| 804 | TValue tmp, *val; | 822 | TValue tmp, *val; |
| 805 | lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE || | 823 | lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE || |
| 806 | irs->o == IR_FSTORE); | 824 | irs->o == IR_FSTORE, |
| 825 | "sunk store with bad op %d", irs->o); | ||
| 807 | if (irk->o == IR_FREF) { | 826 | if (irk->o == IR_FREF) { |
| 808 | lua_assert(irk->op2 == IRFL_TAB_META); | 827 | lj_assertJ(irk->op2 == IRFL_TAB_META, |
| 828 | "sunk store with bad field %d", irk->op2); | ||
| 809 | snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp); | 829 | snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp); |
| 810 | /* NOBARRIER: The table is new (marked white). */ | 830 | /* NOBARRIER: The table is new (marked white). */ |
| 811 | setgcref(t->metatable, obj2gco(tabV(&tmp))); | 831 | setgcref(t->metatable, obj2gco(tabV(&tmp))); |
| @@ -893,7 +913,7 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr) | |||
| 893 | #if LJ_FR2 | 913 | #if LJ_FR2 |
| 894 | L->base += (map[nent+LJ_BE] & 0xff); | 914 | L->base += (map[nent+LJ_BE] & 0xff); |
| 895 | #endif | 915 | #endif |
| 896 | lua_assert(map + nent == flinks); | 916 | lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot"); |
| 897 | 917 | ||
| 898 | /* Compute current stack top. */ | 918 | /* Compute current stack top. */ |
| 899 | switch (bc_op(*pc)) { | 919 | switch (bc_op(*pc)) { |
diff --git a/src/lj_snap.h b/src/lj_snap.h index 816a9b79..f1760b05 100644 --- a/src/lj_snap.h +++ b/src/lj_snap.h | |||
| @@ -13,7 +13,8 @@ | |||
| 13 | LJ_FUNC void lj_snap_add(jit_State *J); | 13 | LJ_FUNC void lj_snap_add(jit_State *J); |
| 14 | LJ_FUNC void lj_snap_purge(jit_State *J); | 14 | LJ_FUNC void lj_snap_purge(jit_State *J); |
| 15 | LJ_FUNC void lj_snap_shrink(jit_State *J); | 15 | LJ_FUNC void lj_snap_shrink(jit_State *J); |
| 16 | LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir); | 16 | LJ_FUNC IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, |
| 17 | IRIns *ir); | ||
| 17 | LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T); | 18 | LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T); |
| 18 | LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); | 19 | LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr); |
| 19 | LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); | 20 | LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need); |
diff --git a/src/lj_state.c b/src/lj_state.c index dc82e260..7081a474 100644 --- a/src/lj_state.c +++ b/src/lj_state.c | |||
| @@ -60,7 +60,8 @@ static void resizestack(lua_State *L, MSize n) | |||
| 60 | MSize oldsize = L->stacksize; | 60 | MSize oldsize = L->stacksize; |
| 61 | MSize realsize = n + 1 + LJ_STACK_EXTRA; | 61 | MSize realsize = n + 1 + LJ_STACK_EXTRA; |
| 62 | GCobj *up; | 62 | GCobj *up; |
| 63 | lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1); | 63 | lj_assertL((MSize)(tvref(L->maxstack)-oldst) == L->stacksize-LJ_STACK_EXTRA-1, |
| 64 | "inconsistent stack size"); | ||
| 64 | st = (TValue *)lj_mem_realloc(L, tvref(L->stack), | 65 | st = (TValue *)lj_mem_realloc(L, tvref(L->stack), |
| 65 | (MSize)(oldsize*sizeof(TValue)), | 66 | (MSize)(oldsize*sizeof(TValue)), |
| 66 | (MSize)(realsize*sizeof(TValue))); | 67 | (MSize)(realsize*sizeof(TValue))); |
| @@ -162,8 +163,9 @@ static void close_state(lua_State *L) | |||
| 162 | global_State *g = G(L); | 163 | global_State *g = G(L); |
| 163 | lj_func_closeuv(L, tvref(L->stack)); | 164 | lj_func_closeuv(L, tvref(L->stack)); |
| 164 | lj_gc_freeall(g); | 165 | lj_gc_freeall(g); |
| 165 | lua_assert(gcref(g->gc.root) == obj2gco(L)); | 166 | lj_assertG(gcref(g->gc.root) == obj2gco(L), |
| 166 | lua_assert(g->strnum == 0); | 167 | "main thread is not first GC object"); |
| 168 | lj_assertG(g->strnum == 0, "leaked %d strings", g->strnum); | ||
| 167 | lj_trace_freestate(g); | 169 | lj_trace_freestate(g); |
| 168 | #if LJ_HASFFI | 170 | #if LJ_HASFFI |
| 169 | lj_ctype_freestate(g); | 171 | lj_ctype_freestate(g); |
| @@ -171,7 +173,9 @@ static void close_state(lua_State *L) | |||
| 171 | lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); | 173 | lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef); |
| 172 | lj_buf_free(g, &g->tmpbuf); | 174 | lj_buf_free(g, &g->tmpbuf); |
| 173 | lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); | 175 | lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); |
| 174 | lua_assert(g->gc.total == sizeof(GG_State)); | 176 | lj_assertG(g->gc.total == sizeof(GG_State), |
| 177 | "memory leak of %lld bytes", | ||
| 178 | (long long)(g->gc.total - sizeof(GG_State))); | ||
| 175 | #ifndef LUAJIT_USE_SYSMALLOC | 179 | #ifndef LUAJIT_USE_SYSMALLOC |
| 176 | if (g->allocf == lj_alloc_f) | 180 | if (g->allocf == lj_alloc_f) |
| 177 | lj_alloc_destroy(g->allocd); | 181 | lj_alloc_destroy(g->allocd); |
| @@ -283,17 +287,17 @@ lua_State *lj_state_new(lua_State *L) | |||
| 283 | setmrefr(L1->glref, L->glref); | 287 | setmrefr(L1->glref, L->glref); |
| 284 | setgcrefr(L1->env, L->env); | 288 | setgcrefr(L1->env, L->env); |
| 285 | stack_init(L1, L); /* init stack */ | 289 | stack_init(L1, L); /* init stack */ |
| 286 | lua_assert(iswhite(obj2gco(L1))); | 290 | lj_assertL(iswhite(obj2gco(L1)), "new thread object is not white"); |
| 287 | return L1; | 291 | return L1; |
| 288 | } | 292 | } |
| 289 | 293 | ||
| 290 | void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) | 294 | void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L) |
| 291 | { | 295 | { |
| 292 | lua_assert(L != mainthread(g)); | 296 | lj_assertG(L != mainthread(g), "free of main thread"); |
| 293 | if (obj2gco(L) == gcref(g->cur_L)) | 297 | if (obj2gco(L) == gcref(g->cur_L)) |
| 294 | setgcrefnull(g->cur_L); | 298 | setgcrefnull(g->cur_L); |
| 295 | lj_func_closeuv(L, tvref(L->stack)); | 299 | lj_func_closeuv(L, tvref(L->stack)); |
| 296 | lua_assert(gcref(L->openupval) == NULL); | 300 | lj_assertG(gcref(L->openupval) == NULL, "stale open upvalues"); |
| 297 | lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); | 301 | lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue); |
| 298 | lj_mem_freet(g, L); | 302 | lj_mem_freet(g, L); |
| 299 | } | 303 | } |
diff --git a/src/lj_str.c b/src/lj_str.c index ec74afa5..0253c15e 100644 --- a/src/lj_str.c +++ b/src/lj_str.c | |||
| @@ -41,8 +41,9 @@ int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b) | |||
| 41 | static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) | 41 | static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len) |
| 42 | { | 42 | { |
| 43 | MSize i = 0; | 43 | MSize i = 0; |
| 44 | lua_assert(len > 0); | 44 | lj_assertX(len > 0, "fast string compare with zero length"); |
| 45 | lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4); | 45 | lj_assertX((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4, |
| 46 | "fast string compare crossing page boundary"); | ||
| 46 | do { /* Note: innocuous access up to end of string + 3. */ | 47 | do { /* Note: innocuous access up to end of string + 3. */ |
| 47 | uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); | 48 | uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i); |
| 48 | if (v) { | 49 | if (v) { |
diff --git a/src/lj_strfmt.c b/src/lj_strfmt.c index 8f968d32..331d9474 100644 --- a/src/lj_strfmt.c +++ b/src/lj_strfmt.c | |||
| @@ -320,7 +320,7 @@ SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k) | |||
| 320 | if ((sf & STRFMT_F_LEFT)) | 320 | if ((sf & STRFMT_F_LEFT)) |
| 321 | while (width-- > pprec) *p++ = ' '; | 321 | while (width-- > pprec) *p++ = ' '; |
| 322 | 322 | ||
| 323 | lua_assert(need == (MSize)(p - ps)); | 323 | lj_assertX(need == (MSize)(p - ps), "miscalculated format size"); |
| 324 | setsbufP(sb, p); | 324 | setsbufP(sb, p); |
| 325 | return sb; | 325 | return sb; |
| 326 | } | 326 | } |
| @@ -449,7 +449,7 @@ const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, va_list argp) | |||
| 449 | case STRFMT_ERR: | 449 | case STRFMT_ERR: |
| 450 | default: | 450 | default: |
| 451 | lj_buf_putb(sb, '?'); | 451 | lj_buf_putb(sb, '?'); |
| 452 | lua_assert(0); | 452 | lj_assertL(0, "bad string format near offset %d", fs.len); |
| 453 | break; | 453 | break; |
| 454 | } | 454 | } |
| 455 | } | 455 | } |
diff --git a/src/lj_strfmt.h b/src/lj_strfmt.h index 339f8e15..b4fbbb94 100644 --- a/src/lj_strfmt.h +++ b/src/lj_strfmt.h | |||
| @@ -79,7 +79,8 @@ static LJ_AINLINE void lj_strfmt_init(FormatState *fs, const char *p, MSize len) | |||
| 79 | { | 79 | { |
| 80 | fs->p = (const uint8_t *)p; | 80 | fs->p = (const uint8_t *)p; |
| 81 | fs->e = (const uint8_t *)p + len; | 81 | fs->e = (const uint8_t *)p + len; |
| 82 | lua_assert(*fs->e == 0); /* Must be NUL-terminated (may have NULs inside). */ | 82 | /* Must be NUL-terminated. May have NULs inside, too. */ |
| 83 | lj_assertX(*fs->e == 0, "format not NUL-terminated"); | ||
| 83 | } | 84 | } |
| 84 | 85 | ||
| 85 | /* Raw conversions. */ | 86 | /* Raw conversions. */ |
diff --git a/src/lj_strfmt_num.c b/src/lj_strfmt_num.c index 36b11dc0..8cb5d47f 100644 --- a/src/lj_strfmt_num.c +++ b/src/lj_strfmt_num.c | |||
| @@ -257,7 +257,7 @@ static int nd_similar(uint32_t* nd, uint32_t ndhi, uint32_t* ref, MSize hilen, | |||
| 257 | } else { | 257 | } else { |
| 258 | prec -= hilen - 9; | 258 | prec -= hilen - 9; |
| 259 | } | 259 | } |
| 260 | lua_assert(prec < 9); | 260 | lj_assertX(prec < 9, "bad precision %d", prec); |
| 261 | lj_strfmt_wuint9(nd9, nd[ndhi]); | 261 | lj_strfmt_wuint9(nd9, nd[ndhi]); |
| 262 | lj_strfmt_wuint9(ref9, *ref); | 262 | lj_strfmt_wuint9(ref9, *ref); |
| 263 | return !memcmp(nd9, ref9, prec) && (nd9[prec] < '5') == (ref9[prec] < '5'); | 263 | return !memcmp(nd9, ref9, prec) && (nd9[prec] < '5') == (ref9[prec] < '5'); |
| @@ -414,14 +414,14 @@ static char *lj_strfmt_wfnum(SBuf *sb, SFormat sf, lua_Number n, char *p) | |||
| 414 | ** Rescaling was performed, but this introduced some error, and might | 414 | ** Rescaling was performed, but this introduced some error, and might |
| 415 | ** have pushed us across a rounding boundary. We check whether this | 415 | ** have pushed us across a rounding boundary. We check whether this |
| 416 | ** error affected the result by introducing even more error (2ulp in | 416 | ** error affected the result by introducing even more error (2ulp in |
| 417 | ** either direction), and seeing whether a roundary boundary was | 417 | ** either direction), and seeing whether a rounding boundary was |
| 418 | ** crossed. Having already converted the -2ulp case, we save off its | 418 | ** crossed. Having already converted the -2ulp case, we save off its |
| 419 | ** most significant digits, convert the +2ulp case, and compare them. | 419 | ** most significant digits, convert the +2ulp case, and compare them. |
| 420 | */ | 420 | */ |
| 421 | int32_t eidx = e + 70 + (ND_MUL2K_MAX_SHIFT < 29) | 421 | int32_t eidx = e + 70 + (ND_MUL2K_MAX_SHIFT < 29) |
| 422 | + (t.u32.lo >= 0xfffffffe && !(~t.u32.hi << 12)); | 422 | + (t.u32.lo >= 0xfffffffe && !(~t.u32.hi << 12)); |
| 423 | const int8_t *m_e = four_ulp_m_e + eidx * 2; | 423 | const int8_t *m_e = four_ulp_m_e + eidx * 2; |
| 424 | lua_assert(0 <= eidx && eidx < 128); | 424 | lj_assertG_(G(sbufL(sb)), 0 <= eidx && eidx < 128, "bad eidx %d", eidx); |
| 425 | nd[33] = nd[ndhi]; | 425 | nd[33] = nd[ndhi]; |
| 426 | nd[32] = nd[(ndhi - 1) & 0x3f]; | 426 | nd[32] = nd[(ndhi - 1) & 0x3f]; |
| 427 | nd[31] = nd[(ndhi - 2) & 0x3f]; | 427 | nd[31] = nd[(ndhi - 2) & 0x3f]; |
diff --git a/src/lj_strscan.c b/src/lj_strscan.c index 433b33a3..0e37a4f6 100644 --- a/src/lj_strscan.c +++ b/src/lj_strscan.c | |||
| @@ -93,7 +93,7 @@ static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg) | |||
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | /* Convert to double using a signed int64_t conversion, then rescale. */ | 95 | /* Convert to double using a signed int64_t conversion, then rescale. */ |
| 96 | lua_assert((int64_t)x >= 0); | 96 | lj_assertX((int64_t)x >= 0, "bad double conversion"); |
| 97 | n = (double)(int64_t)x; | 97 | n = (double)(int64_t)x; |
| 98 | if (neg) n = -n; | 98 | if (neg) n = -n; |
| 99 | if (ex2) n = ldexp(n, ex2); | 99 | if (ex2) n = ldexp(n, ex2); |
| @@ -262,7 +262,7 @@ static StrScanFmt strscan_dec(const uint8_t *p, TValue *o, | |||
| 262 | uint32_t hi = 0, lo = (uint32_t)(xip-xi); | 262 | uint32_t hi = 0, lo = (uint32_t)(xip-xi); |
| 263 | int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1); | 263 | int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1); |
| 264 | 264 | ||
| 265 | lua_assert(lo > 0 && (ex10 & 1) == 0); | 265 | lj_assertX(lo > 0 && (ex10 & 1) == 0, "bad lo %d ex10 %d", lo, ex10); |
| 266 | 266 | ||
| 267 | /* Handle simple overflow/underflow. */ | 267 | /* Handle simple overflow/underflow. */ |
| 268 | if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; } | 268 | if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; } |
| @@ -528,7 +528,7 @@ int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o) | |||
| 528 | { | 528 | { |
| 529 | StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o, | 529 | StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o, |
| 530 | STRSCAN_OPT_TONUM); | 530 | STRSCAN_OPT_TONUM); |
| 531 | lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM); | 531 | lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM, "bad scan format"); |
| 532 | return (fmt != STRSCAN_ERROR); | 532 | return (fmt != STRSCAN_ERROR); |
| 533 | } | 533 | } |
| 534 | 534 | ||
| @@ -537,7 +537,8 @@ int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o) | |||
| 537 | { | 537 | { |
| 538 | StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o, | 538 | StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o, |
| 539 | STRSCAN_OPT_TOINT); | 539 | STRSCAN_OPT_TOINT); |
| 540 | lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT); | 540 | lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT, |
| 541 | "bad scan format"); | ||
| 541 | if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM); | 542 | if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM); |
| 542 | return (fmt != STRSCAN_ERROR); | 543 | return (fmt != STRSCAN_ERROR); |
| 543 | } | 544 | } |
diff --git a/src/lj_tab.c b/src/lj_tab.c index eb9ef4af..efc423cb 100644 --- a/src/lj_tab.c +++ b/src/lj_tab.c | |||
| @@ -38,7 +38,7 @@ static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash) | |||
| 38 | /* Hash an arbitrary key and return its anchor position in the hash table. */ | 38 | /* Hash an arbitrary key and return its anchor position in the hash table. */ |
| 39 | static Node *hashkey(const GCtab *t, cTValue *key) | 39 | static Node *hashkey(const GCtab *t, cTValue *key) |
| 40 | { | 40 | { |
| 41 | lua_assert(!tvisint(key)); | 41 | lj_assertX(!tvisint(key), "attempt to hash integer"); |
| 42 | if (tvisstr(key)) | 42 | if (tvisstr(key)) |
| 43 | return hashstr(t, strV(key)); | 43 | return hashstr(t, strV(key)); |
| 44 | else if (tvisnum(key)) | 44 | else if (tvisnum(key)) |
| @@ -57,7 +57,7 @@ static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits) | |||
| 57 | { | 57 | { |
| 58 | uint32_t hsize; | 58 | uint32_t hsize; |
| 59 | Node *node; | 59 | Node *node; |
| 60 | lua_assert(hbits != 0); | 60 | lj_assertL(hbits != 0, "zero hash size"); |
| 61 | if (hbits > LJ_MAX_HBITS) | 61 | if (hbits > LJ_MAX_HBITS) |
| 62 | lj_err_msg(L, LJ_ERR_TABOV); | 62 | lj_err_msg(L, LJ_ERR_TABOV); |
| 63 | hsize = 1u << hbits; | 63 | hsize = 1u << hbits; |
| @@ -78,7 +78,7 @@ static LJ_AINLINE void clearhpart(GCtab *t) | |||
| 78 | { | 78 | { |
| 79 | uint32_t i, hmask = t->hmask; | 79 | uint32_t i, hmask = t->hmask; |
| 80 | Node *node = noderef(t->node); | 80 | Node *node = noderef(t->node); |
| 81 | lua_assert(t->hmask != 0); | 81 | lj_assertX(t->hmask != 0, "empty hash part"); |
| 82 | for (i = 0; i <= hmask; i++) { | 82 | for (i = 0; i <= hmask; i++) { |
| 83 | Node *n = &node[i]; | 83 | Node *n = &node[i]; |
| 84 | setmref(n->next, NULL); | 84 | setmref(n->next, NULL); |
| @@ -103,7 +103,7 @@ static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits) | |||
| 103 | /* First try to colocate the array part. */ | 103 | /* First try to colocate the array part. */ |
| 104 | if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { | 104 | if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) { |
| 105 | Node *nilnode; | 105 | Node *nilnode; |
| 106 | lua_assert((sizeof(GCtab) & 7) == 0); | 106 | lj_assertL((sizeof(GCtab) & 7) == 0, "bad GCtab size"); |
| 107 | t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); | 107 | t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize)); |
| 108 | t->gct = ~LJ_TTAB; | 108 | t->gct = ~LJ_TTAB; |
| 109 | t->nomm = (uint8_t)~0; | 109 | t->nomm = (uint8_t)~0; |
| @@ -185,7 +185,8 @@ GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt) | |||
| 185 | GCtab *t; | 185 | GCtab *t; |
| 186 | uint32_t asize, hmask; | 186 | uint32_t asize, hmask; |
| 187 | t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); | 187 | t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0); |
| 188 | lua_assert(kt->asize == t->asize && kt->hmask == t->hmask); | 188 | lj_assertL(kt->asize == t->asize && kt->hmask == t->hmask, |
| 189 | "mismatched size of table and template"); | ||
| 189 | t->nomm = 0; /* Keys with metamethod names may be present. */ | 190 | t->nomm = 0; /* Keys with metamethod names may be present. */ |
| 190 | asize = kt->asize; | 191 | asize = kt->asize; |
| 191 | if (asize > 0) { | 192 | if (asize > 0) { |
| @@ -310,7 +311,7 @@ void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits) | |||
| 310 | 311 | ||
| 311 | static uint32_t countint(cTValue *key, uint32_t *bins) | 312 | static uint32_t countint(cTValue *key, uint32_t *bins) |
| 312 | { | 313 | { |
| 313 | lua_assert(!tvisint(key)); | 314 | lj_assertX(!tvisint(key), "bad integer key"); |
| 314 | if (tvisnum(key)) { | 315 | if (tvisnum(key)) { |
| 315 | lua_Number nk = numV(key); | 316 | lua_Number nk = numV(key); |
| 316 | int32_t k = lj_num2int(nk); | 317 | int32_t k = lj_num2int(nk); |
| @@ -463,7 +464,8 @@ TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) | |||
| 463 | if (!tvisnil(&n->val) || t->hmask == 0) { | 464 | if (!tvisnil(&n->val) || t->hmask == 0) { |
| 464 | Node *nodebase = noderef(t->node); | 465 | Node *nodebase = noderef(t->node); |
| 465 | Node *collide, *freenode = getfreetop(t, nodebase); | 466 | Node *collide, *freenode = getfreetop(t, nodebase); |
| 466 | lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1); | 467 | lj_assertL(freenode >= nodebase && freenode <= nodebase+t->hmask+1, |
| 468 | "bad freenode"); | ||
| 467 | do { | 469 | do { |
| 468 | if (freenode == nodebase) { /* No free node found? */ | 470 | if (freenode == nodebase) { /* No free node found? */ |
| 469 | rehashtab(L, t, key); /* Rehash table. */ | 471 | rehashtab(L, t, key); /* Rehash table. */ |
| @@ -471,7 +473,7 @@ TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) | |||
| 471 | } | 473 | } |
| 472 | } while (!tvisnil(&(--freenode)->key)); | 474 | } while (!tvisnil(&(--freenode)->key)); |
| 473 | setfreetop(t, nodebase, freenode); | 475 | setfreetop(t, nodebase, freenode); |
| 474 | lua_assert(freenode != &G(L)->nilnode); | 476 | lj_assertL(freenode != &G(L)->nilnode, "store to fallback hash"); |
| 475 | collide = hashkey(t, &n->key); | 477 | collide = hashkey(t, &n->key); |
| 476 | if (collide != n) { /* Colliding node not the main node? */ | 478 | if (collide != n) { /* Colliding node not the main node? */ |
| 477 | while (noderef(collide->next) != n) /* Find predecessor. */ | 479 | while (noderef(collide->next) != n) /* Find predecessor. */ |
| @@ -527,7 +529,7 @@ TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key) | |||
| 527 | if (LJ_UNLIKELY(tvismzero(&n->key))) | 529 | if (LJ_UNLIKELY(tvismzero(&n->key))) |
| 528 | n->key.u64 = 0; | 530 | n->key.u64 = 0; |
| 529 | lj_gc_anybarriert(L, t); | 531 | lj_gc_anybarriert(L, t); |
| 530 | lua_assert(tvisnil(&n->val)); | 532 | lj_assertL(tvisnil(&n->val), "new hash slot is not empty"); |
| 531 | return &n->val; | 533 | return &n->val; |
| 532 | } | 534 | } |
| 533 | 535 | ||
diff --git a/src/lj_target.h b/src/lj_target.h index 47c960bc..ce67d000 100644 --- a/src/lj_target.h +++ b/src/lj_target.h | |||
| @@ -152,7 +152,8 @@ typedef uint32_t RegCost; | |||
| 152 | /* Return the address of an exit stub. */ | 152 | /* Return the address of an exit stub. */ |
| 153 | static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno) | 153 | static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno) |
| 154 | { | 154 | { |
| 155 | lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL); | 155 | lj_assertX(group[exitno / EXITSTUBS_PER_GROUP] != NULL, |
| 156 | "exit stub group for exit %d uninitialized", exitno); | ||
| 156 | return (char *)group[exitno / EXITSTUBS_PER_GROUP] + | 157 | return (char *)group[exitno / EXITSTUBS_PER_GROUP] + |
| 157 | EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP); | 158 | EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP); |
| 158 | } | 159 | } |
diff --git a/src/lj_trace.c b/src/lj_trace.c index a43c8c4e..c4e728c6 100644 --- a/src/lj_trace.c +++ b/src/lj_trace.c | |||
| @@ -104,7 +104,8 @@ static void perftools_addtrace(GCtrace *T) | |||
| 104 | name++; | 104 | name++; |
| 105 | else | 105 | else |
| 106 | name = "(string)"; | 106 | name = "(string)"; |
| 107 | lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc); | 107 | lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc, |
| 108 | "trace PC out of range"); | ||
| 108 | lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); | 109 | lineno = lj_debug_line(pt, proto_bcpos(pt, startpc)); |
| 109 | if (!fp) { | 110 | if (!fp) { |
| 110 | char fname[40]; | 111 | char fname[40]; |
| @@ -183,7 +184,7 @@ void lj_trace_reenableproto(GCproto *pt) | |||
| 183 | { | 184 | { |
| 184 | if ((pt->flags & PROTO_ILOOP)) { | 185 | if ((pt->flags & PROTO_ILOOP)) { |
| 185 | BCIns *bc = proto_bc(pt); | 186 | BCIns *bc = proto_bc(pt); |
| 186 | BCPos i, sizebc = pt->sizebc;; | 187 | BCPos i, sizebc = pt->sizebc; |
| 187 | pt->flags &= ~PROTO_ILOOP; | 188 | pt->flags &= ~PROTO_ILOOP; |
| 188 | if (bc_op(bc[0]) == BC_IFUNCF) | 189 | if (bc_op(bc[0]) == BC_IFUNCF) |
| 189 | setbc_op(&bc[0], BC_FUNCF); | 190 | setbc_op(&bc[0], BC_FUNCF); |
| @@ -205,27 +206,28 @@ static void trace_unpatch(jit_State *J, GCtrace *T) | |||
| 205 | return; /* No need to unpatch branches in parent traces (yet). */ | 206 | return; /* No need to unpatch branches in parent traces (yet). */ |
| 206 | switch (bc_op(*pc)) { | 207 | switch (bc_op(*pc)) { |
| 207 | case BC_JFORL: | 208 | case BC_JFORL: |
| 208 | lua_assert(traceref(J, bc_d(*pc)) == T); | 209 | lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace"); |
| 209 | *pc = T->startins; | 210 | *pc = T->startins; |
| 210 | pc += bc_j(T->startins); | 211 | pc += bc_j(T->startins); |
| 211 | lua_assert(bc_op(*pc) == BC_JFORI); | 212 | lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI"); |
| 212 | setbc_op(pc, BC_FORI); | 213 | setbc_op(pc, BC_FORI); |
| 213 | break; | 214 | break; |
| 214 | case BC_JITERL: | 215 | case BC_JITERL: |
| 215 | case BC_JLOOP: | 216 | case BC_JLOOP: |
| 216 | lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op)); | 217 | lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op), |
| 218 | "bad original bytecode %d", op); | ||
| 217 | *pc = T->startins; | 219 | *pc = T->startins; |
| 218 | break; | 220 | break; |
| 219 | case BC_JMP: | 221 | case BC_JMP: |
| 220 | lua_assert(op == BC_ITERL); | 222 | lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op); |
| 221 | pc += bc_j(*pc)+2; | 223 | pc += bc_j(*pc)+2; |
| 222 | if (bc_op(*pc) == BC_JITERL) { | 224 | if (bc_op(*pc) == BC_JITERL) { |
| 223 | lua_assert(traceref(J, bc_d(*pc)) == T); | 225 | lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace"); |
| 224 | *pc = T->startins; | 226 | *pc = T->startins; |
| 225 | } | 227 | } |
| 226 | break; | 228 | break; |
| 227 | case BC_JFUNCF: | 229 | case BC_JFUNCF: |
| 228 | lua_assert(op == BC_FUNCF); | 230 | lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op); |
| 229 | *pc = T->startins; | 231 | *pc = T->startins; |
| 230 | break; | 232 | break; |
| 231 | default: /* Already unpatched. */ | 233 | default: /* Already unpatched. */ |
| @@ -237,7 +239,8 @@ static void trace_unpatch(jit_State *J, GCtrace *T) | |||
| 237 | static void trace_flushroot(jit_State *J, GCtrace *T) | 239 | static void trace_flushroot(jit_State *J, GCtrace *T) |
| 238 | { | 240 | { |
| 239 | GCproto *pt = &gcref(T->startpt)->pt; | 241 | GCproto *pt = &gcref(T->startpt)->pt; |
| 240 | lua_assert(T->root == 0 && pt != NULL); | 242 | lj_assertJ(T->root == 0, "not a root trace"); |
| 243 | lj_assertJ(pt != NULL, "trace has no prototype"); | ||
| 241 | /* First unpatch any modified bytecode. */ | 244 | /* First unpatch any modified bytecode. */ |
| 242 | trace_unpatch(J, T); | 245 | trace_unpatch(J, T); |
| 243 | /* Unlink root trace from chain anchored in prototype. */ | 246 | /* Unlink root trace from chain anchored in prototype. */ |
| @@ -353,7 +356,8 @@ void lj_trace_freestate(global_State *g) | |||
| 353 | { /* This assumes all traces have already been freed. */ | 356 | { /* This assumes all traces have already been freed. */ |
| 354 | ptrdiff_t i; | 357 | ptrdiff_t i; |
| 355 | for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) | 358 | for (i = 1; i < (ptrdiff_t)J->sizetrace; i++) |
| 356 | lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL); | 359 | lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL, |
| 360 | "trace still allocated"); | ||
| 357 | } | 361 | } |
| 358 | #endif | 362 | #endif |
| 359 | lj_mcode_free(J); | 363 | lj_mcode_free(J); |
| @@ -408,8 +412,9 @@ static void trace_start(jit_State *J) | |||
| 408 | if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */ | 412 | if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */ |
| 409 | if (J->parent == 0 && J->exitno == 0) { | 413 | if (J->parent == 0 && J->exitno == 0) { |
| 410 | /* Lazy bytecode patching to disable hotcount events. */ | 414 | /* Lazy bytecode patching to disable hotcount events. */ |
| 411 | lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || | 415 | lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL || |
| 412 | bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF); | 416 | bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF, |
| 417 | "bad hot bytecode %d", bc_op(*J->pc)); | ||
| 413 | setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); | 418 | setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP); |
| 414 | J->pt->flags |= PROTO_ILOOP; | 419 | J->pt->flags |= PROTO_ILOOP; |
| 415 | } | 420 | } |
| @@ -420,7 +425,8 @@ static void trace_start(jit_State *J) | |||
| 420 | /* Get a new trace number. */ | 425 | /* Get a new trace number. */ |
| 421 | traceno = trace_findfree(J); | 426 | traceno = trace_findfree(J); |
| 422 | if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ | 427 | if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */ |
| 423 | lua_assert((J2G(J)->hookmask & HOOK_GC) == 0); | 428 | lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0, |
| 429 | "recorder called from GC hook"); | ||
| 424 | lj_trace_flushall(J->L); | 430 | lj_trace_flushall(J->L); |
| 425 | J->state = LJ_TRACE_IDLE; /* Silently ignored. */ | 431 | J->state = LJ_TRACE_IDLE; /* Silently ignored. */ |
| 426 | return; | 432 | return; |
| @@ -496,7 +502,7 @@ static void trace_stop(jit_State *J) | |||
| 496 | goto addroot; | 502 | goto addroot; |
| 497 | case BC_JMP: | 503 | case BC_JMP: |
| 498 | /* Patch exit branch in parent to side trace entry. */ | 504 | /* Patch exit branch in parent to side trace entry. */ |
| 499 | lua_assert(J->parent != 0 && J->cur.root != 0); | 505 | lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace"); |
| 500 | lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); | 506 | lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode); |
| 501 | /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ | 507 | /* Avoid compiling a side trace twice (stack resizing uses parent exit). */ |
| 502 | traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; | 508 | traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE; |
| @@ -515,7 +521,7 @@ static void trace_stop(jit_State *J) | |||
| 515 | traceref(J, J->exitno)->link = traceno; | 521 | traceref(J, J->exitno)->link = traceno; |
| 516 | break; | 522 | break; |
| 517 | default: | 523 | default: |
| 518 | lua_assert(0); | 524 | lj_assertJ(0, "bad stop bytecode %d", op); |
| 519 | break; | 525 | break; |
| 520 | } | 526 | } |
| 521 | 527 | ||
| @@ -536,8 +542,8 @@ static void trace_stop(jit_State *J) | |||
| 536 | static int trace_downrec(jit_State *J) | 542 | static int trace_downrec(jit_State *J) |
| 537 | { | 543 | { |
| 538 | /* Restart recording at the return instruction. */ | 544 | /* Restart recording at the return instruction. */ |
| 539 | lua_assert(J->pt != NULL); | 545 | lj_assertJ(J->pt != NULL, "no active prototype"); |
| 540 | lua_assert(bc_isret(bc_op(*J->pc))); | 546 | lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode"); |
| 541 | if (bc_op(*J->pc) == BC_RETM) | 547 | if (bc_op(*J->pc) == BC_RETM) |
| 542 | return 0; /* NYI: down-recursion with RETM. */ | 548 | return 0; /* NYI: down-recursion with RETM. */ |
| 543 | J->parent = 0; | 549 | J->parent = 0; |
| @@ -750,7 +756,7 @@ static void trace_hotside(jit_State *J, const BCIns *pc) | |||
| 750 | isluafunc(curr_func(J->L)) && | 756 | isluafunc(curr_func(J->L)) && |
| 751 | snap->count != SNAPCOUNT_DONE && | 757 | snap->count != SNAPCOUNT_DONE && |
| 752 | ++snap->count >= J->param[JIT_P_hotexit]) { | 758 | ++snap->count >= J->param[JIT_P_hotexit]) { |
| 753 | lua_assert(J->state == LJ_TRACE_IDLE); | 759 | lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording"); |
| 754 | /* J->parent is non-zero for a side trace. */ | 760 | /* J->parent is non-zero for a side trace. */ |
| 755 | J->state = LJ_TRACE_START; | 761 | J->state = LJ_TRACE_START; |
| 756 | lj_trace_ins(J, pc); | 762 | lj_trace_ins(J, pc); |
| @@ -822,7 +828,7 @@ static TraceNo trace_exit_find(jit_State *J, MCode *pc) | |||
| 822 | if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) | 828 | if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode)) |
| 823 | return traceno; | 829 | return traceno; |
| 824 | } | 830 | } |
| 825 | lua_assert(0); | 831 | lj_assertJ(0, "bad exit pc"); |
| 826 | return 0; | 832 | return 0; |
| 827 | } | 833 | } |
| 828 | #endif | 834 | #endif |
| @@ -844,13 +850,13 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr) | |||
| 844 | T = traceref(J, J->parent); UNUSED(T); | 850 | T = traceref(J, J->parent); UNUSED(T); |
| 845 | #ifdef EXITSTATE_CHECKEXIT | 851 | #ifdef EXITSTATE_CHECKEXIT |
| 846 | if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */ | 852 | if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */ |
| 847 | lua_assert(T->root != 0); | 853 | lj_assertJ(T->root != 0, "stack check in root trace"); |
| 848 | J->exitno = T->ir[REF_BASE].op2; | 854 | J->exitno = T->ir[REF_BASE].op2; |
| 849 | J->parent = T->ir[REF_BASE].op1; | 855 | J->parent = T->ir[REF_BASE].op1; |
| 850 | T = traceref(J, J->parent); | 856 | T = traceref(J, J->parent); |
| 851 | } | 857 | } |
| 852 | #endif | 858 | #endif |
| 853 | lua_assert(T != NULL && J->exitno < T->nsnap); | 859 | lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number"); |
| 854 | exd.J = J; | 860 | exd.J = J; |
| 855 | exd.exptr = exptr; | 861 | exd.exptr = exptr; |
| 856 | errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); | 862 | errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp); |
diff --git a/src/lj_vmmath.c b/src/lj_vmmath.c index 623a686d..9ed37bf2 100644 --- a/src/lj_vmmath.c +++ b/src/lj_vmmath.c | |||
| @@ -60,7 +60,8 @@ double lj_vm_foldarith(double x, double y, int op) | |||
| 60 | int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) | 60 | int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b) |
| 61 | { | 61 | { |
| 62 | uint32_t y, ua, ub; | 62 | uint32_t y, ua, ub; |
| 63 | lua_assert(b != 0); /* This must be checked before using this function. */ | 63 | /* This must be checked before using this function. */ |
| 64 | lj_assertX(b != 0, "modulo with zero divisor"); | ||
| 64 | ua = a < 0 ? (uint32_t)-a : (uint32_t)a; | 65 | ua = a < 0 ? (uint32_t)-a : (uint32_t)a; |
| 65 | ub = b < 0 ? (uint32_t)-b : (uint32_t)b; | 66 | ub = b < 0 ? (uint32_t)-b : (uint32_t)b; |
| 66 | y = ua % ub; | 67 | y = ua % ub; |
| @@ -84,7 +85,7 @@ double lj_vm_log2(double a) | |||
| 84 | static double lj_vm_powui(double x, uint32_t k) | 85 | static double lj_vm_powui(double x, uint32_t k) |
| 85 | { | 86 | { |
| 86 | double y; | 87 | double y; |
| 87 | lua_assert(k != 0); | 88 | lj_assertX(k != 0, "pow with zero exponent"); |
| 88 | for (; (k & 1) == 0; k >>= 1) x *= x; | 89 | for (; (k & 1) == 0; k >>= 1) x *= x; |
| 89 | y = x; | 90 | y = x; |
| 90 | if ((k >>= 1) != 0) { | 91 | if ((k >>= 1) != 0) { |
| @@ -123,7 +124,7 @@ double lj_vm_foldfpm(double x, int fpm) | |||
| 123 | case IRFPM_SQRT: return sqrt(x); | 124 | case IRFPM_SQRT: return sqrt(x); |
| 124 | case IRFPM_LOG: return log(x); | 125 | case IRFPM_LOG: return log(x); |
| 125 | case IRFPM_LOG2: return lj_vm_log2(x); | 126 | case IRFPM_LOG2: return lj_vm_log2(x); |
| 126 | default: lua_assert(0); | 127 | default: lj_assertX(0, "bad fpm %d", fpm); |
| 127 | } | 128 | } |
| 128 | return 0; | 129 | return 0; |
| 129 | } | 130 | } |
diff --git a/src/ljamalg.c b/src/ljamalg.c index 6712d435..19980241 100644 --- a/src/ljamalg.c +++ b/src/ljamalg.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include "lua.h" | 18 | #include "lua.h" |
| 19 | #include "lauxlib.h" | 19 | #include "lauxlib.h" |
| 20 | 20 | ||
| 21 | #include "lj_assert.c" | ||
| 21 | #include "lj_gc.c" | 22 | #include "lj_gc.c" |
| 22 | #include "lj_err.c" | 23 | #include "lj_err.c" |
| 23 | #include "lj_char.c" | 24 | #include "lj_char.c" |
diff --git a/src/luaconf.h b/src/luaconf.h index d422827a..18fb961d 100644 --- a/src/luaconf.h +++ b/src/luaconf.h | |||
| @@ -136,7 +136,7 @@ | |||
| 136 | 136 | ||
| 137 | #define LUALIB_API LUA_API | 137 | #define LUALIB_API LUA_API |
| 138 | 138 | ||
| 139 | /* Support for internal assertions. */ | 139 | /* Compatibility support for assertions. */ |
| 140 | #if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) | 140 | #if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) |
| 141 | #include <assert.h> | 141 | #include <assert.h> |
| 142 | #endif | 142 | #endif |
