author      Mike Pall <mike>  2009-12-08 19:46:35 +0100
committer   Mike Pall <mike>  2009-12-08 19:46:35 +0100
commit      55b16959717084884fd4a0cbae6d19e3786c20c7 (patch)
tree        c8a07a43c13679751ed25a9d06796e9e7b2134a6 /src/lj_trace.c
RELEASE LuaJIT-2.0.0-beta1 (tag: v2.0.0-beta1)
Diffstat (limited to '')
-rw-r--r--  src/lj_trace.c  591
1 file changed, 591 insertions, 0 deletions
diff --git a/src/lj_trace.c b/src/lj_trace.c
new file mode 100644
index 00000000..6ceb5633
--- /dev/null
+++ b/src/lj_trace.c
@@ -0,0 +1,591 @@
/*
** Trace management.
** Copyright (C) 2005-2009 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_trace_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_err.h"
#include "lj_str.h"
#include "lj_frame.h"
#include "lj_state.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_gdbjit.h"
#include "lj_record.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
#include "lj_target.h"

/* -- Error handling ------------------------------------------------------ */

/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* -- Trace management ---------------------------------------------------- */

/* The current trace is first assembled in J->cur. The variable length
** arrays point to shared, growable buffers (J->irbuf etc.). The trace is
** kept in this state until a new trace needs to be created. Then the current
** trace and its data structures are copied to a new (compact) Trace object.
*/
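/* Lifecycle sketch (see the functions below): trace_findfree() picks a slot
** in J->trace[], trace_start() points that slot at J->cur while recording,
** and the next call to trace_start() compacts the finished trace via
** trace_save().
*/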
57 | |||
58 | /* Find a free trace number. */ | ||
59 | static TraceNo trace_findfree(jit_State *J) | ||
60 | { | ||
61 | MSize osz, lim; | ||
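  /* J->freetrace caches the lowest trace number that may still be free.
  ** Trace numbers must fit into a 16-bit bytecode operand (see setbc_d()
  ** in trace_stop()), hence the clamp to 65535 below.
  */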
  if (J->freetrace == 0)
    J->freetrace = 1;
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (J->trace[J->freetrace] == NULL)
      return J->freetrace++;
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, Trace *);
  while (osz < J->sizetrace)
    J->trace[osz++] = NULL;
  return J->freetrace;
}

#define TRACE_COPYELEM(field, szfield, tp) \
  T2->field = (tp *)p; \
  memcpy(p, T->field, T->szfield*sizeof(tp)); \
  p += T->szfield*sizeof(tp);

/* Save a trace by copying and compacting it. */
static Trace *trace_save(jit_State *J, Trace *T)
{
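  /* Layout of the compacted trace (a single allocation):
  ** [ Trace header | IR instructions | snapshots | snapshot map ].
  */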
  size_t sztr = ((sizeof(Trace)+7)&~7);
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
  size_t sz = sztr + szins +
              T->nsnap*sizeof(SnapShot) +
              T->nsnapmap*sizeof(IRRef2);
  Trace *T2 = lj_mem_newt(J->L, (MSize)sz, Trace);
  char *p = (char *)T2 + sztr;
  memcpy(T2, T, sizeof(Trace));
  T2->ir = (IRIns *)p - T->nk;
  memcpy(p, T->ir+T->nk, szins);
  p += szins;
  TRACE_COPYELEM(snap, nsnap, SnapShot)
  TRACE_COPYELEM(snapmap, nsnapmap, IRRef2)
  lj_gc_barriertrace(J2G(J), T);
  return T2;
}

/* Free a trace. */
static void trace_free(jit_State *J, TraceNo traceno)
{
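  /* The trace currently being assembled lives in J->cur and shares the
  ** global buffers, so only its slot is cleared. Traces compacted by
  ** trace_save() own their memory and are actually freed.
  */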
  lua_assert(traceno != 0);
  if (traceno < J->freetrace)
    J->freetrace = traceno;
  lj_gdbjit_deltrace(J, J->trace[traceno]);
  if (traceno == J->curtrace) {
    lua_assert(J->trace[traceno] == &J->cur);
    J->trace[traceno] = NULL;
    J->curtrace = 0;
  } else {
    Trace *T = J->trace[traceno];
    lua_assert(T != NULL && T != &J->cur);
    J->trace[traceno] = NULL;
    lj_mem_free(J2G(J), T,
      ((sizeof(Trace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
      T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(IRRef2));
  }
}

/* Free all traces associated with a prototype. No unpatching needed. */
void lj_trace_freeproto(global_State *g, GCproto *pt)
{
  jit_State *J = G2J(g);
  TraceNo traceno;
  /* Free all root traces. */
  for (traceno = pt->trace; traceno != 0; ) {
    TraceNo side, nextroot = J->trace[traceno]->nextroot;
    /* Free all side traces. */
    for (side = J->trace[traceno]->nextside; side != 0; ) {
      TraceNo next = J->trace[side]->nextside;
      trace_free(J, side);
      side = next;
    }
    /* Now free the trace itself. */
    trace_free(J, traceno);
    traceno = nextroot;
  }
}

/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
  if ((pt->flags & PROTO_HAS_ILOOP)) {
    BCIns *bc = pt->bc;
    BCPos i, sizebc = pt->sizebc;
    pt->flags &= ~PROTO_HAS_ILOOP;
    for (i = 0; i < sizebc; i++) {
      BCOp op = bc_op(bc[i]);
      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
        setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
    }
  }
}

/* Unpatch the bytecode modified by a root trace. */
static void trace_unpatch(jit_State *J, Trace *T)
{
  BCOp op = bc_op(T->startins);
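  /* The map of snapshot #0 stores the PC right after its stack slots.
  ** Stepping back by one lands on the instruction that was patched by
  ** trace_stop(), or on a jump from which it can be found (BC_ITERL case).
  */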
  uint32_t pcofs = T->snap[0].mapofs + T->snap[0].nslots;
  BCIns *pc = ((BCIns *)(uintptr_t)T->snapmap[pcofs]) - 1;
  switch (op) {
  case BC_FORL:
    lua_assert(bc_op(*pc) == BC_JFORI);
    setbc_op(pc, BC_FORI);  /* Unpatch JFORI, too. */
    pc += bc_j(*pc);
    lua_assert(bc_op(*pc) == BC_JFORL && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_LOOP:
    lua_assert(bc_op(*pc) == BC_JLOOP && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_ITERL:
    lua_assert(bc_op(*pc) == BC_JMP);
    pc += bc_j(*pc)+2;
    lua_assert(bc_op(*pc) == BC_JITERL && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_CALL:
    lj_trace_err(J, LJ_TRERR_NYILNKF);
    break;
  case BC_JMP:  /* No need to unpatch branches in parent traces (yet). */
  default:
    lua_assert(0);
    break;
  }
}

/* Flush a root trace and any attached side traces. */
void lj_trace_flush(jit_State *J, TraceNo traceno)
{
  Trace *T = NULL;
  GCproto *pt;
  if (traceno > 0 && traceno <= J->sizetrace)
    T = J->trace[traceno];
  if (T == NULL)
    return;
  pt = &gcref(T->startpt)->pt;
  if (T->root == 0 && pt != NULL) {
    TraceNo side;
    /* First unpatch any modified bytecode. */
    trace_unpatch(J, T);
    /* Unlink root trace from chain anchored in prototype. */
    if (pt->trace == traceno) {  /* Trace is first in chain. Easy. */
      pt->trace = T->nextroot;
    } else {  /* Otherwise search in chain of root traces. */
      Trace *T2 = J->trace[pt->trace];
      while (T2->nextroot != traceno) {
        lua_assert(T2->nextroot != 0);
        T2 = J->trace[T2->nextroot];
      }
      T2->nextroot = T->nextroot;  /* Unlink from chain. */
    }
    /* Free all side traces. */
    for (side = T->nextside; side != 0; ) {
      TraceNo next = J->trace[side]->nextside;
      trace_free(J, side);
      side = next;
    }
    /* Now free the trace itself. */
    trace_free(J, traceno);
  }  /* Flush for non-root traces is currently ignored. */
}

/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
  while (pt->trace != 0)
    lj_trace_flush(G2J(g), pt->trace);
}

/* Flush all traces. */
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;
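  /* Trace numbers start at 1, so slot 0 of J->trace[] is never used. */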
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--)
    lj_trace_flush(J, (TraceNo)i);
#ifdef LUA_USE_ASSERT
  for (i = 0; i < (ptrdiff_t)J->sizetrace; i++)
    lua_assert(J->trace[i] == NULL);
#endif
  J->freetrace = 0;
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}

/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 0; i < (ptrdiff_t)J->sizetrace; i++)
      lua_assert(J->trace[i] == NULL);
  }
#endif
  lj_mcode_free(J);
  lj_ir_knum_freeall(J);
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, IRRef2);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, Trace *);
}

/* -- Trace compiler state machine ---------------------------------------- */

/* Penalize a bytecode instruction by bumping its hot counter. */
static void hotpenalty(jit_State *J, const BCIns *pc, TraceError e)
{
  uint32_t i, val = HOTCOUNT_MIN_PENALTY;
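  /* Reuse the penalty slot already tracking this PC and roughly double the
  ** penalty each time (exponential backoff), or claim the next slot in
  ** round-robin order.
  */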
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (J->penalty[i].pc == pc) {
      val = ((uint32_t)J->penalty[i].val << 1) + 1;
      if (val > HOTCOUNT_MAX_PENALTY) val = HOTCOUNT_MAX_PENALTY;
      goto setpenalty;
    }
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  J->penalty[i].pc = pc;
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;
  hotcount_set(J2GG(J), pc+1, val);
}

/* Start tracing. */
static void trace_start(jit_State *J)
{
  lua_State *L;

  if (J->curtrace != 0 && J->trace[J->curtrace] == &J->cur) {
    J->trace[J->curtrace] = trace_save(J, &J->cur);  /* Save current trace. */
    J->curtrace = 0;
  }

  if ((J->pt->flags & PROTO_NO_JIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0) {
      if (J->pc >= J->pt->bc) {
        /* Lazy bytecode patching to disable hotcount events. */
        setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
        J->pt->flags |= PROTO_HAS_ILOOP;
      } else {
        /* NYI: lazy closure patching to disable hotcall events. */
        lua_assert(0);
      }
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  J->curtrace = trace_findfree(J);
  if (LJ_UNLIKELY(J->curtrace == 0)) {  /* No free trace? */
    lua_assert((J2G(J)->hookmask & HOOK_GC) == 0);
    lj_trace_flushall(J->L);
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  J->trace[J->curtrace] = &J->cur;

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(Trace));
  J->cur.nins = J->cur.nk = REF_BASE;
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  /* J->cur.nsnapmap = 0; */
  J->mergesnap = 0;
  J->needsnap = 0;
  J->guardemit.irt = 0;

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, J->curtrace);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, J->pc - J->pt->bc + 1);
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    }
  );
  lj_record_setup(J);
}

/* Stop tracing. */
static void trace_stop(jit_State *J)
{
  BCIns *pc = (BCIns *)J->startpc;  /* Not const here. */
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  lua_State *L;

  switch (op) {
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
    /* Patch bytecode of starting instruction in root trace. */
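    /* The BC_J* variants take the trace number in their D operand. */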
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, J->curtrace);
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)J->curtrace;
    break;
  case BC_CALL:
    lj_trace_err(J, LJ_TRERR_NYILNKF);
    break;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lua_assert(J->parent != 0 && J->cur.root != 0);
    lj_asm_patchexit(J, J->trace[J->parent], J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    J->trace[J->parent]->snap[J->exitno].count = SNAPCOUNT_DONE;
    /* Add to side trace chain in root trace. */
    {
      Trace *root = J->trace[J->cur.root];
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)J->curtrace;
    }
    break;
  default:
    lua_assert(0);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  lj_gdbjit_addtrace(J, &J->cur, J->curtrace);

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, J->curtrace);
  );
}

/* Abort tracing. */
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;
  lj_mcode_abort(J);
  if (tvisnum(L->top-1))
    e = (TraceError)lj_num2int(numV(L->top-1));
  if (e == LJ_TRERR_MCODELM) {
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  if (J->parent == 0)
    hotpenalty(J, J->startpc, e);  /* Penalize starting instruction. */
  if (J->curtrace) {  /* Is there anything to abort? */
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    lj_vmevent_send(L, TRACE,
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, J->curtrace);
      setfuncV(L, L->top++, J->fn);
      setintV(L->top++, J->pc - J->pt->bc + 1);
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    J->trace[J->curtrace] = NULL;
    if (J->curtrace < J->freetrace)
      J->freetrace = J->curtrace;
    J->curtrace = 0;
  }
  L->top--;  /* Remove error object */
  if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);
  return 0;
}

/* State machine for the trace compiler. Protected callback. */
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
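  /* Cycle through the states below. Recording hands control back to the
  ** interpreter after each bytecode (state <= LJ_TRACE_RECORD); all other
  ** states are processed to completion within this loop.
  */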
  do {
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      break;

    case LJ_TRACE_RECORD:
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send(L, RECORD,
        setintV(L->top++, J->curtrace);
        setfuncV(L, L->top++, J->fn);
        setintV(L->top++, J->pc - J->pt->bc + 1);
        setintV(L->top++, J->framedepth);
        if (bcmode_mm(bc_op(*J->pc)) == MM_call) {
          cTValue *o = &L->base[bc_a(*J->pc)];
          if (bc_op(*J->pc) == BC_ITERC) o -= 3;
          copyTV(L, L->top++, o);
        }
      );
      lj_record_ins(J);
      break;

    case LJ_TRACE_END:
      J->loopref = 0;
      if ((J->flags & JIT_F_OPT_LOOP) && J->cur.link == J->curtrace) {
        setvmstate(J2G(J), OPT);
        lj_opt_dce(J);
        if (lj_opt_loop(J)) {  /* Loop optimization failed? */
          J->loopref = J->cur.nins;
          J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
          break;
        }
        J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
      /* fallthrough */
    case LJ_TRACE_ERR:
      if (trace_abort(J))
        break;  /* Retry. */
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}

/* -- Event handling ------------------------------------------------------ */

/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J)
{
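  /* trace_state() runs protected. A trace error thrown from within
  ** (e.g. by lj_trace_err()) lands here and re-enters the state machine
  ** in the LJ_TRACE_ERR state, which handles the abort.
  */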
  while (lj_vm_cpcall(J->L, trace_state, NULL, (void *)J) != 0)
    J->state = LJ_TRACE_ERR;
}

/* Start recording a new trace. */
static void trace_new(jit_State *J)
{
  /* Only start a new trace if not inside __gc call or vmevent. */
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    lua_assert(J->state == LJ_TRACE_IDLE);
    J->state = LJ_TRACE_START;
    J->fn = curr_func(J->L);
    J->pt = funcproto(J->fn);
    lj_trace_ins(J);
  }
}

/* A hotcount triggered. Start recording a root trace. */
void lj_trace_hot(jit_State *J, const BCIns *pc)
{
  lua_State *L = J->L;
  L->top = curr_topL(L);  /* Only called from Lua and NRESULTS is not used. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]+1);  /* Reset hotcount. */
  J->parent = 0;  /* Root trace. */
  J->exitno = 0;
  J->pc = pc-1;  /* The interpreter bytecode PC is offset by 1. */
  trace_new(J);
}

/* A trace exited. Restore interpreter state and check for hot exits. */
void *lj_trace_exit(jit_State *J, void *exptr)
{
  lua_State *L = J->L;
  void *cf;

  /* Restore interpreter state. */
  lj_snap_restore(J, exptr);
  cf = cframe_raw(L->cframe);
  cframe_pc(cf) = J->pc;

  lj_vmevent_send(L, TEXIT,
    ExitState *ex = (ExitState *)exptr;
    uint32_t i;
    lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
    setintV(L->top++, J->parent);
    setintV(L->top++, J->exitno);
    setintV(L->top++, RID_NUM_GPR);
    setintV(L->top++, RID_NUM_FPR);
    for (i = 0; i < RID_NUM_GPR; i++)
      setintV(L->top++, ex->gpr[i]);
    for (i = 0; i < RID_NUM_FPR; i++) {
      setnumV(L->top, ex->fpr[i]);
      if (LJ_UNLIKELY(tvisnan(L->top)))
        setnanV(L->top);
      L->top++;
    }
  );

  {  /* Check for a hot exit. */
    SnapShot *snap = &J->trace[J->parent]->snap[J->exitno];
    if (snap->count != SNAPCOUNT_DONE &&
        ++snap->count >= J->param[JIT_P_hotexit])
      trace_new(J);  /* Start recording a side trace. */
  }

  return cf;  /* Return the interpreter C frame. */
}

#endif