path: root/src/lj_iropt.h
author    Mike Pall <mike>  2009-12-08 19:46:35 +0100
committer Mike Pall <mike>  2009-12-08 19:46:35 +0100
commit    55b16959717084884fd4a0cbae6d19e3786c20c7 (patch)
tree      c8a07a43c13679751ed25a9d06796e9e7b2134a6  /src/lj_iropt.h
download  luajit-2.0.0-beta1.tar.gz
          luajit-2.0.0-beta1.tar.bz2
          luajit-2.0.0-beta1.zip
RELEASE LuaJIT-2.0.0-beta1 (tag: v2.0.0-beta1)
Diffstat
-rw-r--r--  src/lj_iropt.h  128
1 file changed, 128 insertions, 0 deletions
diff --git a/src/lj_iropt.h b/src/lj_iropt.h
new file mode 100644
index 00000000..69b0a955
--- /dev/null
+++ b/src/lj_iropt.h
@@ -0,0 +1,128 @@
/*
** Common header for IR emitter and optimizations.
** Copyright (C) 2005-2009 Mike Pall. See Copyright Notice in luajit.h
*/

#ifndef _LJ_IROPT_H
#define _LJ_IROPT_H

#include "lj_obj.h"
#include "lj_jit.h"

#if LJ_HASJIT
/* IR emitter. */
LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);

/* Save current IR in J->fold.ins, but do not emit it (yet). */
static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
{
  J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
}

#define lj_ir_set(J, ot, a, b) \
  lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))

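#if 0
/* Illustrative sketch, not part of the original header: a typical caller
** stages an instruction in J->fold.ins with lj_ir_set() and then routes it
** through the fold engine (declared further below) or lj_ir_emit(). The
** helper name example_emit_add is hypothetical; IRT(), IR_ADD and IRT_INT
** are assumed to come from lj_ir.h.
*/
static TRef example_emit_add(jit_State *J, TRef a, TRef b)
{
  lj_ir_set(J, IRT(IR_ADD, IRT_INT), a, b);  /* Stage opcode/type and operands. */
  return lj_opt_fold(J);  /* Fold, CSE or emit the staged instruction. */
}
#endif
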
/* Get ref of next IR instruction and optionally grow IR.
** Note: this may invalidate all IRIns*!
*/
static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
{
  IRRef ref = J->cur.nins;
  if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
  J->cur.nins = ref + 1;
  return ref;
}
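
#if 0
/* Illustrative sketch, not part of the original header: lj_ir_nextins() may
** call lj_ir_growtop() and reallocate the IR buffer, so any IRIns* must be
** (re)derived from the current IR only *after* the call. The helper name
** example_raw_emit is hypothetical; IR(), TREF() and irt_type() are assumed
** to come from lj_jit.h/lj_ir.h. lj_ir_emit() is expected to do this kind of
** work, with opcode chaining and bookkeeping on top.
*/
static TRef example_raw_emit(jit_State *J)
{
  IRRef ref = lj_ir_nextins(J);  /* May invalidate all IRIns*! */
  IRIns *ir = IR(ref);           /* Re-derive the pointer after the call. */
  ir->ot = J->fold.ins.ot;
  ir->op1 = J->fold.ins.op1;
  ir->op2 = J->fold.ins.op2;
  return TREF(ref, irt_type(ir->t));
}
#endif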

/* Interning of constants. */
LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
LJ_FUNC void lj_ir_knum_freeall(jit_State *J);
LJ_FUNC TRef lj_ir_knum_addr(jit_State *J, cTValue *tv);
LJ_FUNC TRef lj_ir_knum_nn(jit_State *J, uint64_t nn);
LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
LJ_FUNC TRef lj_ir_kptr(jit_State *J, void *ptr);
LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);

static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
{
  TValue tv;
  tv.n = n;
  return lj_ir_knum_nn(J, tv.u64);
}

#define lj_ir_kstr(J, str)   lj_ir_kgc(J, obj2gco((str)), IRT_STR)
#define lj_ir_ktab(J, tab)   lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
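
#if 0
/* Illustrative sketch, not part of the original header: constants are
** interned, so requesting the same value twice is expected to yield the same
** reference. The helper name example_intern_constants is hypothetical.
*/
static void example_intern_constants(jit_State *J, GCstr *s)
{
  TRef k1 = lj_ir_kint(J, 42);   /* Interned 32 bit integer constant. */
  TRef k2 = lj_ir_knum(J, 3.5);  /* Interned FP constant, via its bit pattern. */
  TRef k3 = lj_ir_kstr(J, s);    /* Interned GC object constant (string). */
  (void)k1; (void)k2; (void)k3;
}
#endif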

/* Special FP constants. */
#define lj_ir_knum_zero(J)   lj_ir_knum_nn(J, U64x(00000000,00000000))
#define lj_ir_knum_one(J)    lj_ir_knum_nn(J, U64x(3ff00000,00000000))
#define lj_ir_knum_tobit(J)  lj_ir_knum_nn(J, U64x(43380000,00000000))

/* Special 16 byte aligned SIMD constants. */
LJ_DATA LJ_ALIGN(16) cTValue lj_ir_knum_tv[4];
#define lj_ir_knum_abs(J)  lj_ir_knum_addr(J, &lj_ir_knum_tv[0])
#define lj_ir_knum_neg(J)  lj_ir_knum_addr(J, &lj_ir_knum_tv[2])
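
#if 0
/* Illustrative sketch, not part of the original header: the tobit constant
** U64x(43380000,00000000) is the double 2^52+2^51. Adding it to a number
** pushes the integer part into the low mantissa bits, so the low 32 bits of
** the sum hold the result modulo 2^32 (the classic number-to-bit trick).
** The helper name example_tobit is hypothetical.
*/
static int32_t example_tobit(lua_Number n)
{
  TValue o;
  o.n = n + 6755399441055744.0;  /* 2^52 + 2^51 */
  return (int32_t)o.u32.lo;      /* Low 32 bits of the biased double. */
}
#endif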

/* Access to constants. */
LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);

/* Convert IR operand types. */
LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
LJ_FUNC TRef LJ_FASTCALL lj_ir_tobit(jit_State *J, TRef tr);
LJ_FUNC TRef LJ_FASTCALL lj_ir_toint(jit_State *J, TRef tr);

/* Miscellaneous IR ops. */
LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);

/* Emit IR instructions with on-the-fly optimizations. */
LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);

/* Special return values for the fold functions. */
enum {
  NEXTFOLD,   /* Couldn't fold, pass on. */
  RETRYFOLD,  /* Retry fold with modified fins. */
  KINTFOLD,   /* Return ref for int constant in fins->i. */
  FAILFOLD,   /* Guard would always fail. */
  DROPFOLD,   /* Guard eliminated. */
  MAX_FOLD
};

#define INTFOLD(k)      ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
#define CONDFOLD(cond)  ((TRef)FAILFOLD + (TRef)(cond))
#define LEFTFOLD        (J->fold.ins.op1)
#define RIGHTFOLD       (J->fold.ins.op2)
#define CSEFOLD         (lj_opt_cse(J))
#define EMITFOLD        (lj_ir_emit(J))
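
#if 0
/* Illustrative sketch, not part of the original header: a fold rule inspects
** the staged instruction in J->fold.ins and its operand instructions and
** signals its result through the return values and macros above. The helper
** name example_fold_add is hypothetical; fleft/fright are assumed shorthands
** for the operand instruction copies in J->fold, as used by the fold engine.
*/
static TRef example_fold_add(jit_State *J)
{
  if (fright->o == IR_KINT) {
    if (fright->i == 0)
      return LEFTFOLD;  /* x + 0 ==> x */
    if (fleft->o == IR_KINT)
      return INTFOLD(fleft->i + fright->i);  /* Fold two int constants. */
  }
  return NEXTFOLD;  /* Couldn't fold, pass on to the next rule. */
}
#endif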

/* Load/store forwarding. */
LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tlen(jit_State *J);
LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);

/* Dead-store elimination. */
LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);

/* Narrowing. */
LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc);
LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc);
LJ_FUNC IRType lj_opt_narrow_forl(cTValue *forbase);

/* Optimization passes. */
LJ_FUNC void lj_opt_dce(jit_State *J);
LJ_FUNC int lj_opt_loop(jit_State *J);
#endif

#endif