author    Mike Pall <mike>  2016-05-23 00:27:51 +0200
committer Mike Pall <mike>  2016-05-23 00:27:51 +0200
commit    9e99ccc360bc9784ebe5ce29d5fa2c72acfc5777
tree      d61e424f74482c8079cd479393c36006da52f6e8
parent    7fb75ccc4cf17825c1c8fe9f44ebfb0668a1b033
Strip out old infrastructure for 64 bit constants.
Contributed by Peter Cawley.
 src/lj_ir.c    | 74 --------
 src/lj_iropt.h |  3 -
 src/lj_jit.h   |  1 -
 src/lj_trace.c |  2 -
 4 files changed, 0 insertions(+), 80 deletions(-)
diff --git a/src/lj_ir.c b/src/lj_ir.c
index 124d5791..9c0a2224 100644
--- a/src/lj_ir.c
+++ b/src/lj_ir.c
@@ -204,80 +204,6 @@ found:
   return TREF(ref, IRT_INT);
 }
 
-/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
-** 64 bit constant. The constants themselves are stored in a chained array
-** and shared across traces.
-**
-** Rationale for choosing this data structure:
-** - The address of the constants is embedded in the generated machine code
-**   and must never move. A resizable array or hash table wouldn't work.
-** - Most apps need very few non-32 bit integer constants (less than a dozen).
-** - Linear search is hard to beat in terms of speed and low complexity.
-*/
-typedef struct K64Array {
-  MRef next;              /* Pointer to next list. */
-  MSize numk;             /* Number of used elements in this array. */
-  TValue k[LJ_MIN_K64SZ]; /* Array of constants. */
-} K64Array;
-
-/* Free all chained arrays. */
-void lj_ir_k64_freeall(jit_State *J)
-{
-  K64Array *k;
-  for (k = mref(J->k64p, K64Array); k; ) {
-    K64Array *next = mref(k->next, K64Array);
-    lj_mem_free(J2G(J), k, sizeof(K64Array));
-    k = next;
-  }
-  setmref(J->k64p, NULL);
-}
-
-/* Get new 64 bit constant slot. */
-static TValue *ir_k64_add(jit_State *J, K64Array *kp, uint64_t u64)
-{
-  TValue *ntv;
-  if (!(kp && kp->numk < LJ_MIN_K64SZ)) {  /* Allocate a new array. */
-    K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
-    setmref(kn->next, NULL);
-    kn->numk = 0;
-    if (kp)
-      setmref(kp->next, kn);  /* Chain to the end of the list. */
-    else
-      setmref(J->k64p, kn);  /* Link first array. */
-    kp = kn;
-  }
-  ntv = &kp->k[kp->numk++];  /* Add to current array. */
-  ntv->u64 = u64;
-  return ntv;
-}
-
-/* Find 64 bit constant in chained array or add it. */
-cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
-{
-  K64Array *k, *kp = NULL;
-  MSize idx;
-  /* Search for the constant in the whole chain of arrays. */
-  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) {
-    kp = k;  /* Remember previous element in list. */
-    for (idx = 0; idx < k->numk; idx++) {  /* Search one array. */
-      TValue *tv = &k->k[idx];
-      if (tv->u64 == u64)  /* Needed for +-0/NaN/absmask. */
-        return tv;
-    }
-  }
-  /* Otherwise add a new constant. */
-  return ir_k64_add(J, kp, u64);
-}
-
-TValue *lj_ir_k64_reserve(jit_State *J)
-{
-  K64Array *k, *kp = NULL;
-  lj_ir_k64_find(J, 0);  /* Intern dummy 0 to protect the reserved slot. */
-  /* Find last K64Array, if any. */
-  for (k = mref(J->k64p, K64Array); k; k = mref(k->next, K64Array)) kp = k;
-  return ir_k64_add(J, kp, 0);  /* Set to 0. Final value is set later. */
-}
-
 /* Intern 64 bit constant, given by its 64 bit pattern. */
 TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64)
 {
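For readers skimming the removed code above: the deleted pool is a chain of fixed-size arrays that interns 64 bit patterns by linear search, so every interned constant keeps a stable address that generated machine code can reference. The following standalone sketch restates that technique with hypothetical names (K64Chunk, K64SZ, k64_intern and k64_freeall stand in for K64Array, LJ_MIN_K64SZ, lj_ir_k64_find and lj_ir_k64_freeall); it is an illustration of the idea, not LuaJIT code.

#include <stdint.h>
#include <stdlib.h>

#define K64SZ 8  /* Slots per chunk; hypothetical stand-in for LJ_MIN_K64SZ. */

/* One fixed-size chunk of interned constants. Chunks are allocated once and
** never resized or moved, so the address of any slot stays valid for the
** lifetime of the pool, which is what lets machine code embed that address. */
typedef struct K64Chunk {
  struct K64Chunk *next;  /* Next chunk in the chain. */
  size_t numk;            /* Number of used slots in this chunk. */
  uint64_t k[K64SZ];      /* Constant slots. */
} K64Chunk;

/* Return the interned slot for a 64 bit pattern, adding it if missing. */
static const uint64_t *k64_intern(K64Chunk **pool, uint64_t u64)
{
  K64Chunk *c, *last = NULL;
  for (c = *pool; c; c = c->next) {  /* Linear search over the whole chain. */
    size_t i;
    last = c;
    for (i = 0; i < c->numk; i++)
      if (c->k[i] == u64)  /* Bit-pattern compare keeps +0/-0 and NaNs distinct. */
        return &c->k[i];
  }
  if (!last || last->numk == K64SZ) {  /* Chain empty or last chunk full. */
    K64Chunk *n = (K64Chunk *)calloc(1, sizeof(K64Chunk));
    if (!n) return NULL;
    if (last) last->next = n; else *pool = n;
    last = n;
  }
  last->k[last->numk] = u64;
  return &last->k[last->numk++];
}

/* Free the whole chain, invalidating every interned address. */
static void k64_freeall(K64Chunk **pool)
{
  K64Chunk *c = *pool;
  while (c) {
    K64Chunk *n = c->next;
    free(c);
    c = n;
  }
  *pool = NULL;
}

With this layout, k64_intern(&pool, u64) returns the same address for a given pattern until k64_freeall runs. That mirrors why the lj_trace.c hunks below could only call lj_ir_k64_freeall after lj_mcode_free: the machine code pointing at the pool had to die first.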
diff --git a/src/lj_iropt.h b/src/lj_iropt.h
index 219d391a..8b7a43de 100644
--- a/src/lj_iropt.h
+++ b/src/lj_iropt.h
@@ -40,10 +40,7 @@ LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs);
 
 /* Interning of constants. */
 LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
-LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
 LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64);
-LJ_FUNC TValue *lj_ir_k64_reserve(jit_State *J);
-LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
 LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
 LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
 LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
diff --git a/src/lj_jit.h b/src/lj_jit.h
index e9ab319e..55fbea8b 100644
--- a/src/lj_jit.h
+++ b/src/lj_jit.h
@@ -392,7 +392,6 @@ typedef struct jit_State {
   int32_t framedepth;  /* Current frame depth. */
   int32_t retdepth;    /* Return frame depth (count of RETF). */
 
-  MRef k64p;  /* Pointer to chained array of 64 bit constants. */
   TValue ksimd[LJ_KSIMD__MAX*2+1];  /* 16 byte aligned SIMD constants. */
   TValue k64[LJ_K64__MAX];  /* Common 8 byte constants used by backends. */
   uint32_t k32[LJ_K32__MAX];  /* Ditto for 4 byte constants. */
diff --git a/src/lj_trace.c b/src/lj_trace.c
index eaf9365c..87146832 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -295,7 +295,6 @@ int lj_trace_flushall(lua_State *L)
   memset(J->penalty, 0, sizeof(J->penalty));
   /* Free the whole machine code and invalidate all exit stub groups. */
   lj_mcode_free(J);
-  lj_ir_k64_freeall(J);
   memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
@@ -351,7 +350,6 @@ void lj_trace_freestate(global_State *g)
   }
 #endif
   lj_mcode_free(J);
-  lj_ir_k64_freeall(J);
   lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
   lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
   lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
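Why this infrastructure can be deleted outright: the parent commit (7fb75ccc) appears to switch lj_ir_k64 to embedding 64 bit constants directly in the IR, so each constant already has a stable address inside its own trace and no shared, separately freed pool is required. Below is a minimal sketch of that embedding idea under stated assumptions: IRSlot, ir_k64_set and ir_k64_get are hypothetical names, and the real layout is LuaJIT's IRIns in lj_ir.h, not this union.

#include <stdint.h>

/* Hypothetical 64 bit IR slot; a stand-in for LuaJIT's IRIns union. */
typedef union IRSlot {
  struct { uint16_t op1, op2, ot, prev; } i;  /* Instruction encoding. */
  uint64_t u64;                               /* Raw constant payload. */
} IRSlot;

/* Assumed scheme: a 64 bit constant occupies two slots, the KNUM/KINT64
** instruction plus an adjacent slot holding the raw payload. The payload
** lives inside the trace's own IR buffer, so it is freed together with the
** trace and needs no lj_ir_k64_freeall-style cleanup pass. */
static void ir_k64_set(IRSlot *ir, uint32_t ref, uint64_t u64)
{
  ir[ref + 1].u64 = u64;  /* Payload stored next to the instruction slot. */
}

static uint64_t ir_k64_get(const IRSlot *ir, uint32_t ref)
{
  return ir[ref + 1].u64;
}

Under that scheme the chained pool, its reserve/find API, the k64p root in jit_State, and both lj_ir_k64_freeall calls all become dead code, which is exactly what this commit removes.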