Diffstat:
 -rw-r--r--  src/lj_trace.c | 64
 1 file changed, 52 insertions(+), 12 deletions(-)
diff --git a/src/lj_trace.c b/src/lj_trace.c
index 5df84414..7970aba6 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -274,7 +274,7 @@ int lj_trace_flushall(lua_State *L)
       if (T->root == 0)
         trace_flushroot(J, T);
       lj_gdbjit_deltrace(J, T);
-      T->traceno = 0;
+      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
       setgcrefnull(J->trace[i]);
     }
   }
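
Note: with stitching, a trace's link field does double duty. A sketch of the link conventions suggested by this patch, pieced together (as an assumption) from the comment here and from the self-link comment in trace_abort further down:

/* link == 0       -> no continuation; cont_stitch treats it as blacklisted.
 * link == traceno -> self-link, set after a stitching abort (blacklisted).
 * anything else   -> continue execution in that trace. */
static int link_blacklisted(unsigned int traceno, unsigned int link)
{
  return link == 0 || link == traceno;
}
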
@@ -284,6 +284,7 @@ int lj_trace_flushall(lua_State *L)
   memset(J->penalty, 0, sizeof(J->penalty));
   /* Free the whole machine code and invalidate all exit stub groups. */
   lj_mcode_free(J);
+  lj_ir_k64_freeall(J);
   memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
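
Note: a full flush now also releases the 64-bit constant storage, presumably because those constants can hold trace pointers patched via J->ktracep (see below). A hypothetical sketch of such a free-all pass over a chained constant arena; the real K64Array layout lives in lj_ir.h and the names here are invented:

#include <stdlib.h>

typedef struct KChunk { struct KChunk *next; /* ... constants ... */ } KChunk;

/* Free every chunk and reset the anchor, mirroring a "free all" on flush. */
static void kchunk_freeall(KChunk **head)
{
  KChunk *k = *head;
  while (k) {
    KChunk *next = k->next;
    free(k);
    k = next;
  }
  *head = NULL;
}
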
@@ -367,7 +368,7 @@ static void trace_start(jit_State *J)
   TraceNo traceno;
 
   if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
-    if (J->parent == 0) {
+    if (J->parent == 0 && J->exitno == 0) {
       /* Lazy bytecode patching to disable hotcount events. */
       lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
                  bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF);
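
Note: stitched traces are recorded with J->parent == 0 but a non-zero J->exitno (see lj_trace_stitch below), so J->parent == 0 alone no longer identifies a plain hotcount-started root, and bytecode patching must be skipped for stitch starts. A minimal sketch of the distinction, assuming only those two fields:

typedef enum { START_ROOT, START_STITCH, START_SIDE } TraceStart;

/* Classify how a trace was started, using the two fields tested above
 * (assumption: exitno is 0 for hotcount-started roots). */
static TraceStart trace_start_kind(unsigned int parent, unsigned int exitno)
{
  if (parent != 0) return START_SIDE;         /* Side trace of a parent. */
  return exitno ? START_STITCH : START_ROOT;  /* Stitched vs. plain root. */
}
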
@@ -401,6 +402,8 @@ static void trace_start(jit_State *J)
   J->guardemit.irt = 0;
   J->postproc = LJ_POST_NONE;
   lj_resetsplit(J);
+  J->retryrec = 0;
+  J->ktracep = NULL;
   setgcref(J->cur.startpt, obj2gco(J->pt));
 
   L = J->L;
@@ -461,6 +464,12 @@ static void trace_stop(jit_State *J)
       root->nextside = (TraceNo1)traceno;
     }
     break;
+  case BC_CALLM:
+  case BC_CALL:
+  case BC_ITERC:
+    /* Trace stitching: patch link of previous trace. */
+    traceref(J, J->exitno)->link = traceno;
+    break;
   default:
     lua_assert(0);
     break;
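
Note: when a stitched trace ends at one of these call bytecodes, the new trace is spliced in by pointing the invoking trace's link at it, so execution flows from the old trace into the new one instead of falling back to the interpreter. A simplified sketch of that hand-off, with GCtrace reduced to its link field and traceref() reduced to an array lookup:

typedef struct MiniTrace { unsigned int link; } MiniTrace;

/* Mirror of: traceref(J, J->exitno)->link = traceno; */
static void stitch_handoff(MiniTrace *traces, unsigned int invoking,
                           unsigned int newtrace)
{
  traces[invoking].link = newtrace;  /* Invoking trace now jumps onward. */
}
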
@@ -470,11 +479,15 @@ static void trace_stop(jit_State *J)
   lj_mcode_commit(J, J->cur.mcode);
   J->postproc = LJ_POST_NONE;
   trace_save(J, T);
+  if (J->ktracep) {  /* Patch K64Array slot with the final GCtrace pointer. */
+    setgcV(J->L, J->ktracep, obj2gco(T), LJ_TTRACE);
+  }
 
   L = J->L;
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "stop"));
     setintV(L->top++, traceno);
+    setfuncV(L, L->top++, J->fn);
   );
 }
 
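Note: J->ktracep (cleared in trace_start above) presumably records a 64-bit constant slot emitted while recording, before the final GCtrace exists; once trace_save has produced the final object, the slot is back-patched. A hypothetical, self-contained rendering of the same patch-after-allocate pattern; all names here are invented for the sketch:

#include <stddef.h>

typedef struct FinalObj { int id; } FinalObj;

typedef struct Rec {
  FinalObj **pending_slot;  /* Analogue of J->ktracep; NULL if none. */
} Rec;

/* Fill the slot reserved during recording, now that the object exists. */
static void rec_finish(Rec *r, FinalObj *final_obj)
{
  if (r->pending_slot) {           /* Analogue of "if (J->ktracep)". */
    *r->pending_slot = final_obj;  /* Analogue of setgcV(..., obj2gco(T)). */
    r->pending_slot = NULL;
  }
}
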
@@ -510,8 +523,17 @@ static int trace_abort(jit_State *J)
     return 1;  /* Retry ASM with new MCode area. */
   }
   /* Penalize or blacklist starting bytecode instruction. */
-  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins)))
-    penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+    if (J->exitno == 0) {
+      BCIns *startpc = mref(J->cur.startpc, BCIns);
+      if (e == LJ_TRERR_RETRY)
+        hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
+      else
+        penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
+    } else {
+      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
+    }
+  }
 
   /* Is there anything to abort? */
   traceno = J->cur.traceno;
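
Note: the abort path now distinguishes three outcomes for a root-started trace. Condensed into a classifier (a sketch: the enum and helper are invented, the branch logic follows the hunk above; hotcount_set and penalty_pc are the real helpers it calls):

typedef enum {
  ABORT_RETRY_NOW,        /* LJ_TRERR_RETRY: hotcount_set(..., 1) re-arms the
                             trigger so the very next execution retries. */
  ABORT_PENALIZE,         /* penalty_pc(): back off, eventually blacklist. */
  ABORT_BLACKLIST_STITCH  /* Stitch start: self-link marks it blacklisted. */
} AbortAction;

static AbortAction abort_action(unsigned int exitno, int err_is_retry)
{
  if (exitno != 0) return ABORT_BLACKLIST_STITCH;
  return err_is_retry ? ABORT_RETRY_NOW : ABORT_PENALIZE;
}
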
@@ -680,6 +702,7 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
 {
   SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
   if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
+      isluafunc(curr_func(J->L)) &&
       snap->count != SNAPCOUNT_DONE &&
       ++snap->count >= J->param[JIT_P_hotexit]) {
     lua_assert(J->state == LJ_TRACE_IDLE);
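
Note: the added isluafunc(curr_func(J->L)) guard means a hot side exit only starts a side trace when the interpreter resumed inside a Lua function, presumably because with stitching an exit can now land while a non-Lua frame is current. The guard order, condensed into a predicate with boolean inputs standing in for the macro tests:

static int side_trace_ok(int hook_gc_or_vmevent, int in_lua_func,
                         unsigned int snapcount, unsigned int done,
                         unsigned int hotexit)
{
  return !hook_gc_or_vmevent && in_lua_func &&
         snapcount != done && snapcount + 1 >= hotexit;
}
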
@@ -689,6 +712,20 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
   }
 }
 
+/* Stitch a new trace to the previous trace. */
+void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
+{
+  /* Only start a new trace if not recording or inside __gc call or vmevent. */
+  if (J->state == LJ_TRACE_IDLE &&
+      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+    J->parent = 0;  /* Have to treat it like a root trace. */
+    /* J->exitno is set to the invoking trace. */
+    J->state = LJ_TRACE_START;
+    lj_trace_ins(J, pc);
+  }
+}
+
+
 /* Tiny struct to pass data to protected call. */
 typedef struct ExitDataCP {
   jit_State *J;
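
Note: lj_trace_stitch is the new entry point the VM uses to resume recording after an untraceable call. A hedged sketch of how a call site might look; the cont_stitch continuation itself lives in the VM assembler code, so this C rendering and all names except lj_trace_stitch are illustrative only:

#include "lj_jit.h"
#include "lj_trace.h"

/* Hypothetical call-site sketch: restart the recorder at the bytecode
 * following the call, with J->exitno carrying the invoking trace. */
static void cont_stitch_sketch(jit_State *J, lua_State *L,
                               const BCIns *next_pc, TraceNo invoking)
{
  J->L = L;
  J->exitno = invoking;         /* "J->exitno is set to the invoking trace." */
  lj_trace_stitch(J, next_pc);  /* Starts recording if the JIT is idle. */
}
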
@@ -775,17 +812,20 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
   if (errcode)
     return -errcode;  /* Return negated error code. */
 
-  lj_vmevent_send(L, TEXIT,
-    lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
-    setintV(L->top++, J->parent);
-    setintV(L->top++, J->exitno);
-    trace_exit_regs(L, ex);
-  );
+  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
+    lj_vmevent_send(L, TEXIT,
+      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
+      setintV(L->top++, J->parent);
+      setintV(L->top++, J->exitno);
+      trace_exit_regs(L, ex);
+    );
 
   pc = exd.pc;
   cf = cframe_raw(L->cframe);
   setcframe_pc(cf, pc);
-  if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
+  if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
+    /* Just exit to interpreter. */
+  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
     if (!(G(L)->hookmask & HOOK_GC))
       lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
   } else {
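
Note: a trace exit taken while the profiler hook is active now skips both the TEXIT vmevent and the GC/hot-side handling and simply returns to the interpreter. The new branch order, condensed (enum and helper invented for the sketch):

typedef enum { EXIT_TO_PROFILER, EXIT_TO_GC, EXIT_NORMAL } ExitClass;

/* Branch order mirrors the rewritten if/else chain in lj_trace_exit. */
static ExitClass classify_exit(int profile_hook, int gc_atomic_or_finalize)
{
  if (profile_hook) return EXIT_TO_PROFILER;     /* Just exit to interp. */
  if (gc_atomic_or_finalize) return EXIT_TO_GC;  /* Drive the GC forward. */
  return EXIT_NORMAL;                            /* Hot side exit checks. */
}
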
@@ -809,7 +849,7 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
   ERRNO_RESTORE
   switch (bc_op(*pc)) {
   case BC_CALLM: case BC_CALLMT:
-    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc));
+    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) + LJ_FR2);
   case BC_RETM:
     return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
   case BC_TSETM:
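
Note: the added + LJ_FR2 term presumably compensates for two-slot frames: when LJ_FR2 is 1, the frame link occupies an extra stack slot, so the multi-result count resumed after a CALLM/CALLMT must be adjusted by one. A worked example under that assumption:

/* With L->top - L->base == 10, bc_a == 4, bc_c == 3:
 *   LJ_FR2 == 0:  10 - 4 - 3     == 3 extra results
 *   LJ_FR2 == 1:  10 - 4 - 3 + 1 == 4, compensating for the frame slot. */
static int callm_extra_results(int slots_in_use, int ra, int rc, int fr2)
{
  return slots_in_use - ra - rc + fr2;
}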