path: root/src/lj_trace.c
author    Mike Pall <mike>  2013-12-25 02:55:25 +0100
committer Mike Pall <mike>  2013-12-25 02:55:25 +0100
commit    b5d741fa7e11a2a58df65f3c71489c58f8758f75 (patch)
tree      3c33ec24c8fd363ca2ce797c998b21a8a0e39a5b /src/lj_trace.c
parent    6e02c210c485791a5451cc74731acf319b2067bb (diff)
Add trace stitching.
Diffstat (limited to 'src/lj_trace.c')
-rw-r--r--  src/lj_trace.c | 30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/src/lj_trace.c b/src/lj_trace.c
index 2b8d931f..fa15e23d 100644
--- a/src/lj_trace.c
+++ b/src/lj_trace.c
@@ -360,7 +360,7 @@ static void trace_start(jit_State *J)
   TraceNo traceno;
 
   if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
-    if (J->parent == 0) {
+    if (J->parent == 0 && J->exitno == 0) {
       /* Lazy bytecode patching to disable hotcount events. */
       lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
                  bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF);
@@ -453,6 +453,12 @@ static void trace_stop(jit_State *J)
       root->nextside = (TraceNo1)traceno;
     }
     break;
+  case BC_CALLM:
+  case BC_CALL:
+  case BC_ITERC:
+    /* Trace stitching: patch link of previous trace. */
+    traceref(J, J->exitno)->link = traceno;
+    break;
   default:
     lua_assert(0);
     break;
@@ -502,8 +508,12 @@ static int trace_abort(jit_State *J)
     return 1;  /* Retry ASM with new MCode area. */
   }
   /* Penalize or blacklist starting bytecode instruction. */
-  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins)))
-    penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+    if (J->exitno == 0)
+      penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+    else
+      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
+  }
 
   /* Is there anything to abort? */
   traceno = J->cur.traceno;
@@ -680,6 +690,20 @@ static void trace_hotside(jit_State *J, const BCIns *pc)
   }
 }
 
+/* Stitch a new trace to the previous trace. */
+void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
+{
+  /* Only start a new trace if not recording or inside __gc call or vmevent. */
+  if (J->state == LJ_TRACE_IDLE &&
+      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+    J->parent = 0;  /* Have to treat it like a root trace. */
+    /* J->exitno is set to the invoking trace. */
+    J->state = LJ_TRACE_START;
+    lj_trace_ins(J, pc);
+  }
+}
+
+
 /* Tiny struct to pass data to protected call. */
 typedef struct ExitDataCP {
   jit_State *J;
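
For illustration only, here is a small standalone C sketch, not LuaJIT code, of the two link operations this patch introduces (the Trace struct, the traces table and the stitch_* helpers are all invented for the example): on a successful stitch, trace_stop() patches the link of the invoking trace (identified by J->exitno) so it continues into the freshly compiled trace, while an abort at a stitch point in trace_abort() writes the trace's own number into its link, so a self-link serves as the blacklist marker.

/* Toy model of trace stitching links -- NOT LuaJIT code (names are invented). */
#include <stdio.h>

typedef unsigned short TraceNo;   /* trace number; 0 means "no trace" */

typedef struct Trace {
  TraceNo link;                   /* trace to continue with after this one */
} Trace;

static Trace traces[16];          /* toy trace table, indexed by trace number */

/* Successful stitch: the invoking trace (exitno) now continues into traceno. */
static void stitch_link(TraceNo exitno, TraceNo traceno)
{
  traces[exitno].link = traceno;
}

/* Aborted stitch: a self-link marks this stitch point as blacklisted. */
static void stitch_blacklist(TraceNo exitno)
{
  traces[exitno].link = exitno;
}

static int stitch_is_blacklisted(TraceNo exitno)
{
  return traces[exitno].link == exitno;
}

int main(void)
{
  stitch_link(1, 2);        /* trace 1 exits into newly compiled trace 2 */
  stitch_blacklist(3);      /* compiling a continuation of trace 3 failed */
  printf("trace 1 links to %d\n", traces[1].link);
  printf("trace 3 blacklisted: %d\n", stitch_is_blacklisted(3));
  return 0;
}

In the real patch the same idea is spread across trace_stop() (link the previous trace to the new one for BC_CALLM/BC_CALL/BC_ITERC), trace_abort() (self-link instead of penalizing the start PC when J->exitno != 0), and the new lj_trace_stitch() entry point, which restarts recording as if at a root trace while J->exitno remembers the invoking trace.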