Diffstat (limited to 'src/lj_trace.c')
-rw-r--r-- | src/lj_trace.c | 49 +++++++++++++++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/src/lj_trace.c b/src/lj_trace.c
index 9e5e400f..7bb6c8ae 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -360,7 +360,7 @@ static void trace_start(jit_State *J)
   TraceNo traceno;
 
   if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
-    if (J->parent == 0) {
+    if (J->parent == 0 && J->exitno == 0) {
       /* Lazy bytecode patching to disable hotcount events. */
       lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
                  bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF);
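Context for the change above: a stitched trace is started with J->parent == 0 but a non-zero J->exitno (the trace that ran into the un-traceable call), so parent alone no longer identifies a plain root trace. Only a plain root starts at a hot-count bytecode that may be lazily patched here. A one-line predicate, purely illustrative and not part of the patch:

/* Illustrative only: "started from a hot counter, not from a trace exit". */
#define TRACE_IS_PLAIN_ROOT(J)  ((J)->parent == 0 && (J)->exitno == 0)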
@@ -453,6 +453,12 @@ static void trace_stop(jit_State *J)
       root->nextside = (TraceNo1)traceno;
     }
     break;
+  case BC_CALLM:
+  case BC_CALL:
+  case BC_ITERC:
+    /* Trace stitching: patch link of previous trace. */
+    traceref(J, J->exitno)->link = traceno;
+    break;
   default:
     lua_assert(0);
     break;
@@ -467,6 +473,7 @@ static void trace_stop(jit_State *J)
   lj_vmevent_send(L, TRACE,
     setstrV(L, L->top++, lj_str_newlit(L, "stop"));
     setintV(L->top++, traceno);
+    setfuncV(L, L->top++, J->fn);
   );
 }
 
@@ -502,8 +509,12 @@ static int trace_abort(jit_State *J)
     return 1;  /* Retry ASM with new MCode area. */
   }
   /* Penalize or blacklist starting bytecode instruction. */
-  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins)))
-    penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+    if (J->exitno == 0)
+      penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+    else
+      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
+  }
 
   /* Is there anything to abort? */
   traceno = J->cur.traceno;
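In the stitching case there is no hot-count bytecode to penalize, so the patch records the blacklist decision in the invoking trace itself by pointing its link back at itself. A small predicate for reading that convention back; the helper name is not from the patch, it is a sketch only:

/* Sketch: "stitching at this exit trace has been blacklisted". */
static LJ_AINLINE int stitch_blacklisted(jit_State *J, TraceNo invoking)
{
  return traceref(J, invoking)->link == invoking;
}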
@@ -671,6 +682,7 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
 {
   SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
   if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
+      isluafunc(curr_func(J->L)) &&
       snap->count != SNAPCOUNT_DONE &&
       ++snap->count >= J->param[JIT_P_hotexit]) {
     lua_assert(J->state == LJ_TRACE_IDLE);
@@ -680,6 +692,20 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
   }
 }
 
+/* Stitch a new trace to the previous trace. */
+void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
+{
+  /* Only start a new trace if not recording or inside __gc call or vmevent. */
+  if (J->state == LJ_TRACE_IDLE &&
+      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+    J->parent = 0;  /* Have to treat it like a root trace. */
+    /* J->exitno is set to the invoking trace. */
+    J->state = LJ_TRACE_START;
+    lj_trace_ins(J, pc);
+  }
+}
+
+
 /* Tiny struct to pass data to protected call. */
 typedef struct ExitDataCP {
   jit_State *J;
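lj_trace_stitch() itself only flips the JIT into the start state; it relies on its caller (the dispatcher, outside this file) to have stored the invoking trace in J->exitno and to pass the pc of the call that could not be traced. A hedged sketch of how the pieces introduced by this patch fit together at such a call site; the function name and control flow here are illustrative, not the actual dispatcher code:

/* Sketch only: what can happen when trace 'prev' exits into a call the
** recorder could not inline and control returns to bytecode 'pc'. */
static void stitch_site_sketch(jit_State *J, TraceNo prev, const BCIns *pc)
{
  TraceNo link = traceref(J, prev)->link;
  if (link == prev) {
    /* Self-link set by trace_abort(): stitching here is blacklisted. */
  } else if (link == 0) {
    J->exitno = prev;        /* Remember the invoking trace... */
    lj_trace_stitch(J, pc);  /* ...and try to record a continuation trace. */
  } else {
    /* trace_stop() already patched prev->link: continue in that trace. */
  }
}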
@@ -766,17 +792,20 @@ int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
   if (errcode)
     return -errcode;  /* Return negated error code. */
 
-  lj_vmevent_send(L, TEXIT,
-    lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
-    setintV(L->top++, J->parent);
-    setintV(L->top++, J->exitno);
-    trace_exit_regs(L, ex);
-  );
+  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
+    lj_vmevent_send(L, TEXIT,
+      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
+      setintV(L->top++, J->parent);
+      setintV(L->top++, J->exitno);
+      trace_exit_regs(L, ex);
+    );
 
   pc = exd.pc;
   cf = cframe_raw(L->cframe);
   setcframe_pc(cf, pc);
-  if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
+  if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
+    /* Just exit to interpreter. */
+  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
     if (!(G(L)->hookmask & HOOK_GC))
       lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
   } else {
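The two HOOK_PROFILE checks above serve one purpose: an exit taken while the sampling profiler hook is armed is treated as uneventful, so it neither reports a TEXIT vmevent nor takes the GC branch; it just returns to the interpreter at the restored pc. A tiny helper that names that condition, illustrative only and not part of the patch:

/* Illustrative only: true when this exit was requested by the profiler. */
static LJ_AINLINE int exit_is_profiler_forced(lua_State *L)
{
  return LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE);
}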