-rw-r--r-- | CHANGES              |   9
-rw-r--r-- | docs/comparison.html |  22
-rw-r--r-- | docs/index.html      |   4
-rw-r--r-- | src/keeper.c         | 158
-rw-r--r-- | src/keeper.h         |  22
-rw-r--r-- | src/lanes.c          | 547
-rw-r--r-- | src/tools.c          | 347
-rw-r--r-- | src/tools.h          |  85
-rw-r--r-- | tests/keeper.lua     |   2
-rw-r--r-- | tests/recursive.lua  |   2
10 files changed, 653 insertions, 545 deletions
@@ -1,5 +1,14 @@ | |||
1 | CHANGES: | 1 | CHANGES: |
2 | 2 | ||
3 | CHANGE 104: BGe 25-Feb-14 | ||
4 | * Internal rework: the whole Lanes engine now works "per universe" to allow concurrent Lanes execution in more than one embedded master state | ||
5 | * this universe is a full userdata created in the master state, selfdestruct_gc is the __gc for this userdata | ||
6 | * most of what was initialized only once is now per-universe | ||
7 | * Fixed potential crashes at deinit if problems occur during keeper state initialisation | ||
8 | * Fixed require() not always serialized properly | ||
9 | * Raise an error instead of crashing on deep userdata prelude memory allocation failure | ||
10 | * Added forgotten mutex deinitialisation at universe shutdown | ||
11 | |||
3 | CHANGE 103: BGe 24-Feb-14 | 12 | CHANGE 103: BGe 24-Feb-14 |
4 | * Fix lookup database table not being created when it should if Lanes is required in more than one Lua master state | 13 | * Fix lookup database table not being created when it should if Lanes is required in more than one Lua master state |
5 | 14 | ||
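Editor's note: the CHANGE 104 entry above describes the "per universe" rework, in which everything that used to live in C-level globals now hangs off a bookkeeping structure owned by each master state. As an orientation aid, here is a hedged sketch of that structure, reconstructed only from the U->... accesses visible in the hunks below; the actual definition lives in the Lanes sources (not shown in this diff) and may differ in names and layout.

    struct s_Universe
    {
        struct s_Keepers* keepers;                  // one keeper pool per universe (see src/keeper.h below)
        volatile DEEP_PRELUDE* timer_deep;          // deep linda used by the timer lane, released in selfdestruct_gc
    #if HAVE_LANE_TRACKING
        MUTEX_T tracking_cs;
        struct s_lane* volatile tracking_first;     // chain terminated by TRACKING_END
    #endif // HAVE_LANE_TRACKING
        MUTEX_T selfdestruct_cs;
        struct s_lane* volatile selfdestruct_first; // chain terminated by SELFDESTRUCT_END
        int volatile selfdestructing_count;
        MUTEX_T require_cs;                         // serializes require() calls issued from lanes
        MUTEX_T deep_lock;                          // locks for 'tools.c' inc/dec counters
        MUTEX_T mtid_lock;
        int debugspew_indent_depth;
    };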
diff --git a/docs/comparison.html b/docs/comparison.html index bebc68b..8e28f55 100644 --- a/docs/comparison.html +++ b/docs/comparison.html | |||
@@ -32,7 +32,7 @@ With the introduction of Lindas (Jun-2008), Lua Lanes simplifies its API while | |||
32 | simultaneously adding more power and speed. | 32 | simultaneously adding more power and speed. |
33 | 33 | ||
34 | Pros: | 34 | Pros: |
35 | - regular Lua 5.1 module | 35 | - regular Lua 5.1/5.2 module |
36 | - completely separate Lua states, one per OS thread | 36 | - completely separate Lua states, one per OS thread |
37 | - message passing, or shared data using Lindas | 37 | - message passing, or shared data using Lindas |
38 | - no application level locking, ever | 38 | - no application level locking, ever |
@@ -54,7 +54,7 @@ Cons: | |||
54 | 54 | ||
55 | Sample: | 55 | Sample: |
56 | << | 56 | << |
57 | require "lanes" | 57 | lanes = require "lanes".configure() |
58 | 58 | ||
59 | local function calculate(a,b,c) | 59 | local function calculate(a,b,c) |
60 | if not a then | 60 | if not a then |
@@ -63,9 +63,9 @@ Sample: | |||
63 | return a+b+c | 63 | return a+b+c |
64 | end | 64 | end |
65 | 65 | ||
66 | local h1= lanes.new(calculate)(1,2,3) | 66 | local h1= lanes.gen("base", calculate)(1,2,3) |
67 | local h2= lanes.new(calculate)(10,20,30) | 67 | local h2= lanes.gen("base", calculate)(10,20,30) |
68 | local h3= lanes.new(calculate)(100,200,300) | 68 | local h3= lanes.gen("base", calculate)(100,200,300) |
69 | 69 | ||
70 | print( h1[1], h2[1], h3[1] ) -- pends for the results, or propagates error | 70 | print( h1[1], h2[1], h3[1] ) -- pends for the results, or propagates error |
71 | << | 71 | << |
@@ -304,11 +304,13 @@ Cons: | |||
304 | <!-- footnotes +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> | 304 | <!-- footnotes +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ --> |
305 | <hr/> | 305 | <hr/> |
306 | 306 | ||
307 | <p>For feedback, questions and suggestions: | 307 | <p> |
308 | <UL> | 308 | For feedback, questions and suggestions: |
309 | <li><A HREF="http://luaforge.net/projects/lanes">Lanes @ LuaForge</A></li> | 309 | <ul> |
310 | <li><A HREF="mailto:akauppi@gmail.com">the author</A></li> | 310 | <li><A HREF="http://github.com/LuaLanes/lanes">Lanes @ GitHub</A></li> |
311 | </UL> | 311 | <li><A HREF="mailto:bnt.germain@gmail.com">the maintainer</A></li> |
312 | <li><A HREF="http://www.lua.org/lua-l.html">the lua mailing list</A></li> | ||
313 | </ul> | ||
312 | </p> | 314 | </p> |
313 | 315 | ||
314 | <!-- | 316 | <!-- |
diff --git a/docs/index.html b/docs/index.html index c6d612f..6337647 100644 --- a/docs/index.html +++ b/docs/index.html | |||
@@ -70,7 +70,7 @@ | |||
70 | </p> | 70 | </p> |
71 | 71 | ||
72 | <p> | 72 | <p> |
73 | This document was revised on 17-Feb-14, and applies to version <tt>3.9.1</tt>. | 73 | This document was revised on 26-Feb-14, and applies to version <tt>3.9.2</tt>. |
74 | </p> | 74 | </p> |
75 | </font> | 75 | </font> |
76 | </center> | 76 | </center> |
@@ -105,6 +105,7 @@ | |||
105 | <li>Threads can be given priorities.</li> | 105 | <li>Threads can be given priorities.</li> |
106 | <li>Lanes are cancellable, with proper cleanup.</li> | 106 | <li>Lanes are cancellable, with proper cleanup.</li> |
107 | <li>No Lua-side application level locking - ever!</li> | 107 | <li>No Lua-side application level locking - ever!</li> |
108 | <li>Several totally independent Lanes universes may coexist in an application, one per "master" Lua state.</li> ||
108 | </ul> | 109 | </ul> |
109 | 110 | ||
110 | 111 | ||
@@ -114,6 +115,7 @@ | |||
114 | <li>Coroutines are not passed between states.</li> | 115 | <li>Coroutines are not passed between states.</li> |
115 | <li>Sharing full userdata between states needs special C side preparations (-> <A HREF="#deep_userdata">deep userdata</A>).</li> | 116 | <li>Sharing full userdata between states needs special C side preparations (-> <A HREF="#deep_userdata">deep userdata</A>).</li> |
116 | <li>Network level parallelism not included.</li> | 117 | <li>Network level parallelism not included.</li> |
118 | <li>Just like independent Lua states, Lanes universes cannot communicate with each other.</li> ||
117 | </ul> | 119 | </ul> |
118 | </p> | 120 | </p> |
119 | 121 | ||
diff --git a/src/keeper.c b/src/keeper.c index 0f54e13..9e5317b 100644 --- a/src/keeper.c +++ b/src/keeper.c | |||
@@ -186,9 +186,9 @@ static void push_table( lua_State* L, int idx) | |||
186 | STACK_END( L, 1); | 186 | STACK_END( L, 1); |
187 | } | 187 | } |
188 | 188 | ||
189 | int keeper_push_linda_storage( lua_State* L, void* ptr, unsigned long magic_) | 189 | int keeper_push_linda_storage( struct s_Universe* U, lua_State* L, void* ptr, unsigned long magic_) |
190 | { | 190 | { |
191 | struct s_Keeper* K = keeper_acquire( magic_); | 191 | struct s_Keeper* K = keeper_acquire( U->keepers, magic_); |
192 | lua_State* KL = K ? K->L : NULL; | 192 | lua_State* KL = K ? K->L : NULL; |
193 | if( KL == NULL) return 0; | 193 | if( KL == NULL) return 0; |
194 | STACK_GROW( KL, 4); | 194 | STACK_GROW( KL, 4); |
@@ -213,10 +213,10 @@ int keeper_push_linda_storage( lua_State* L, void* ptr, unsigned long magic_) | |||
213 | { | 213 | { |
214 | keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo | 214 | keeper_fifo* fifo = prepare_fifo_access( KL, -1); // storage key fifo |
215 | lua_pushvalue( KL, -2); // storage key fifo key | 215 | lua_pushvalue( KL, -2); // storage key fifo key |
216 | luaG_inter_move( KL, L, 1, eLM_FromKeeper); // storage key fifo // out key | 216 | luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key fifo // out key |
217 | STACK_MID( L, 2); | 217 | STACK_MID( L, 2); |
218 | lua_newtable( L); // out key keyout | 218 | lua_newtable( L); // out key keyout |
219 | luaG_inter_move( KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo | 219 | luaG_inter_move( U, KL, L, 1, eLM_FromKeeper); // storage key // out key keyout fifo |
220 | lua_pushinteger( L, fifo->first); // out key keyout fifo first | 220 | lua_pushinteger( L, fifo->first); // out key keyout fifo first |
221 | STACK_MID( L, 5); | 221 | STACK_MID( L, 5); |
222 | lua_setfield( L, -3, "first"); // out key keyout fifo | 222 | lua_setfield( L, -3, "first"); // out key keyout fifo |
@@ -577,79 +577,107 @@ int keepercall_count( lua_State* L) | |||
577 | * bigger the pool, the less chances of unnecessary waits. Lindas map to the | 577 | * bigger the pool, the less chances of unnecessary waits. Lindas map to the |
578 | * keepers randomly, by a hash. | 578 | * keepers randomly, by a hash. |
579 | */ | 579 | */ |
580 | static struct s_Keeper *GKeepers = NULL; | ||
581 | static int GNbKeepers = 0; | ||
582 | 580 | ||
583 | void close_keepers( lua_State* L) | 581 | // called as __gc for the keepers array userdata |
582 | void close_keepers( struct s_Universe* U, lua_State* L) | ||
584 | { | 583 | { |
585 | int i; | 584 | if( U->keepers != NULL) |
586 | int const nbKeepers = GNbKeepers; | ||
587 | // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it | ||
588 | // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists | ||
589 | // in that case, the linda operation should do nothing. which means that these operations must check for keeper acquisition success | ||
590 | GNbKeepers = 0; | ||
591 | for( i = 0; i < nbKeepers; ++ i) | ||
592 | { | ||
593 | lua_State* L = GKeepers[i].L; | ||
594 | GKeepers[i].L = NULL; | ||
595 | lua_close( L); | ||
596 | } | ||
597 | for( i = 0; i < nbKeepers; ++ i) | ||
598 | { | 585 | { |
599 | MUTEX_FREE( &GKeepers[i].lock_); | 586 | int i; |
600 | } | 587 | int nbKeepers = U->keepers->nb_keepers; |
601 | if( GKeepers != NULL) | 588 | // NOTE: imagine some keeper state N+1 currently holds a linda that uses another keeper N, and a _gc that will make use of it |
602 | { | 589 | // when keeper N+1 is closed, object is GCed, linda operation is called, which attempts to acquire keeper N, whose Lua state no longer exists |
603 | void* allocUD; | 590 | // in that case, the linda operation should do nothing. which means that these operations must check for keeper acquisition success |
604 | lua_Alloc allocF = lua_getallocf( L, &allocUD); | 591 | // which is early-outed with a U->keepers->nbKeepers null-check |
605 | allocF( allocUD, GKeepers, nbKeepers * sizeof( struct s_Keeper), 0); | 592 | U->keepers->nb_keepers = 0; |
593 | for( i = 0; i < nbKeepers; ++ i) | ||
594 | { | ||
595 | lua_State* K = U->keepers->keeper_array[i].L; | ||
596 | U->keepers->keeper_array[i].L = NULL; | ||
597 | if( K != NULL) | ||
598 | { | ||
599 | lua_close( K); | ||
600 | } | ||
601 | else | ||
602 | { | ||
603 | // detected partial init: destroy only the mutexes that got initialized properly | ||
604 | nbKeepers = i; | ||
605 | } | ||
606 | } | ||
607 | for( i = 0; i < nbKeepers; ++ i) | ||
608 | { | ||
609 | MUTEX_FREE( &U->keepers->keeper_array[i].keeper_cs); | ||
610 | } | ||
611 | // free the keeper bookkeeping structure | ||
612 | { | ||
613 | void* allocUD; | ||
614 | lua_Alloc allocF = lua_getallocf( L, &allocUD); | ||
615 | allocF( L, U->keepers, sizeof( struct s_Keepers) + (nbKeepers - 1) * sizeof(struct s_Keeper), 0); | ||
616 | U->keepers = NULL; | ||
617 | } | ||
606 | } | 618 | } |
607 | GKeepers = NULL; | ||
608 | } | 619 | } |
609 | 620 | ||
610 | /* | 621 | /* |
611 | * Initialize keeper states | 622 | * Initialize keeper states |
612 | * | 623 | * |
613 | * If there is a problem, return an error message (NULL for okay). | 624 | * If there is a problem, raises an error in the master state |
625 | * else stores the keepers bookkeeping structure in the universe. ||
614 | * | 626 | * |
615 | * Note: Any problems would be design flaws; the created Lua state is left | 627 | * Note: Any problems would be design flaws; the created Lua state is left |
616 | * unclosed, because it does not really matter. In production code, this | 628 | * unclosed, because it does not really matter. In production code, this |
617 | * function never fails. | 629 | * function never fails. |
618 | * settings table is at position 1 on the stack | 630 | * settings table is at position 1 on the stack |
619 | * pushes an error string on the stack in case of problem | ||
620 | */ | 631 | */ |
621 | int init_keepers( lua_State* L) | 632 | void init_keepers( struct s_Universe* U, lua_State* L) |
622 | { | 633 | { |
623 | int i; | 634 | int i; |
635 | int nb_keepers; | ||
624 | void* allocUD; | 636 | void* allocUD; |
625 | lua_Alloc allocF = lua_getallocf( L, &allocUD); | 637 | lua_Alloc allocF = lua_getallocf( L, &allocUD); |
626 | 638 | ||
627 | STACK_CHECK( L); // L K | 639 | STACK_CHECK( L); // L K |
628 | lua_getfield( L, 1, "nb_keepers"); // nb_keepers | 640 | lua_getfield( L, 1, "nb_keepers"); // nb_keepers |
629 | GNbKeepers = (int) lua_tointeger( L, -1); | 641 | nb_keepers = (int) lua_tointeger( L, -1); |
630 | lua_pop( L, 1); // | 642 | lua_pop( L, 1); // |
631 | assert( GNbKeepers >= 1); | 643 | assert( nb_keepers >= 1); |
632 | 644 | ||
633 | GKeepers = (struct s_Keeper*) allocF( allocUD, NULL, 0, GNbKeepers * sizeof( struct s_Keeper)); | 645 | // struct s_Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states |
634 | if( GKeepers == NULL) | ||
635 | { | 646 | { |
636 | lua_pushliteral( L, "init_keepers() failed while creating keeper array; out of memory"); | 647 | size_t const bytes = sizeof( struct s_Keepers) + (nb_keepers - 1) * sizeof(struct s_Keeper); |
637 | STACK_MID( L, 1); | 648 | U->keepers = (struct s_Keepers*) allocF( allocUD, NULL, 0, bytes); |
638 | return 1; | 649 | if( U->keepers == NULL) |
650 | { | ||
651 | (void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory"); | ||
652 | return; | ||
653 | } | ||
654 | memset( U->keepers, 0, bytes); | ||
655 | U->keepers->nb_keepers = nb_keepers; | ||
639 | } | 656 | } |
640 | for( i = 0; i < GNbKeepers; ++ i) | 657 | for( i = 0; i < nb_keepers; ++ i) // keepersUD |
641 | { | 658 | { |
642 | lua_State* K = PROPAGATE_ALLOCF_ALLOC(); | 659 | lua_State* K = PROPAGATE_ALLOCF_ALLOC(); |
643 | if( K == NULL) | 660 | if( K == NULL) |
644 | { | 661 | { |
645 | lua_pushliteral( L, "init_keepers() failed while creating keeper states; out of memory"); | 662 | (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); |
646 | STACK_MID( L, 1); | 663 | return; |
647 | return 1; | ||
648 | } | 664 | } |
665 | |||
666 | U->keepers->keeper_array[i].L = K; | ||
667 | // we can trigger a GC from inside keeper_call(), where a keeper is acquired | ||
668 | // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. | ||
669 | // therefore, we need a recursive mutex. | ||
670 | MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs); | ||
649 | STACK_CHECK( K); | 671 | STACK_CHECK( K); |
650 | 672 | ||
673 | // copy the universe pointer in the keeper itself | ||
674 | lua_pushlightuserdata( K, UNIVERSE_REGKEY); | ||
675 | lua_pushlightuserdata( K, U); | ||
676 | lua_rawset( K, LUA_REGISTRYINDEX); | ||
677 | STACK_MID( K, 0); | ||
678 | |||
651 | // make sure 'package' is initialized in keeper states, so that we have require() | 679 | // make sure 'package' is initialized in keeper states, so that we have require() |
652 | // this because this is needed when transfering deep userdata object | 680 | // this is needed when transferring deep userdata objects |
653 | luaL_requiref( K, "package", luaopen_package, 1); // package | 681 | luaL_requiref( K, "package", luaopen_package, 1); // package |
654 | lua_pop( K, 1); // | 682 | lua_pop( K, 1); // |
655 | STACK_MID( K, 0); | 683 | STACK_MID( K, 0); |
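Editor's note: the universe pointer stored in each keeper's registry under UNIVERSE_REGKEY (just above) is what lets later code find its universe from any state. A hedged sketch of that lookup is shown here; the real helper is get_universe() in the sources, whose body is not part of this diff.

    // sketch only: how a state that carries UNIVERSE_REGKEY can recover its universe
    static struct s_Universe* sketch_get_universe( lua_State* L)
    {
        struct s_Universe* U;
        STACK_GROW( L, 1);
        lua_pushlightuserdata( L, UNIVERSE_REGKEY);
        lua_rawget( L, LUA_REGISTRYINDEX);
        U = (struct s_Universe*) lua_touserdata( L, -1); // works for both the full userdata (master) and the lightuserdata copy (keepers)
        lua_pop( L, 1);
        return U;
    }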
@@ -657,16 +685,16 @@ int init_keepers( lua_State* L) | |||
657 | STACK_MID( K, 0); | 685 | STACK_MID( K, 0); |
658 | 686 | ||
659 | // copy package.path and package.cpath from the source state | 687 | // copy package.path and package.cpath from the source state |
660 | lua_getglobal( L, "package"); // package | 688 | lua_getglobal( L, "package"); // "..." keepersUD package |
661 | if( !lua_isnil( L, -1)) | 689 | if( !lua_isnil( L, -1)) |
662 | { | 690 | { |
663 | // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately | 691 | // when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately |
664 | if( luaG_inter_copy_package( L, K, -1, eLM_ToKeeper)) | 692 | if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper)) |
665 | { | 693 | { |
666 | // if something went wrong, the error message is at the top of the stack | 694 | // if something went wrong, the error message is at the top of the stack |
667 | lua_remove( L, -2); // error_msg | 695 | lua_remove( L, -2); // error_msg |
668 | STACK_MID( L, 1); | 696 | (void) lua_error( L); |
669 | return 1; | 697 | return; |
670 | } | 698 | } |
671 | } | 699 | } |
672 | lua_pop( L, 1); // | 700 | lua_pop( L, 1); // |
@@ -674,12 +702,8 @@ int init_keepers( lua_State* L) | |||
674 | 702 | ||
675 | // attempt to call on_state_create(), if we have one and it is a C function | 703 | // attempt to call on_state_create(), if we have one and it is a C function |
676 | // (only support a C function because we can't transfer executable Lua code in keepers) | 704 | // (only support a C function because we can't transfer executable Lua code in keepers) |
677 | if( call_on_state_create( K, L, eLM_ToKeeper)) | 705 | // will raise an error in L in case of problem |
678 | { | 706 | call_on_state_create( U, K, L, eLM_ToKeeper); |
679 | // if something went wrong, the error message is at the top of the stack | ||
680 | STACK_MID( L, 1); // error_msg | ||
681 | return 1; | ||
682 | } | ||
683 | 707 | ||
684 | // to see VM name in Decoda debugger | 708 | // to see VM name in Decoda debugger |
685 | lua_pushliteral( K, "Keeper #"); // "Keeper #" | 709 | lua_pushliteral( K, "Keeper #"); // "Keeper #" |
@@ -693,19 +717,15 @@ int init_keepers( lua_State* L) | |||
693 | lua_rawset( K, LUA_REGISTRYINDEX); // | 717 | lua_rawset( K, LUA_REGISTRYINDEX); // |
694 | 718 | ||
695 | STACK_END( K, 0); | 719 | STACK_END( K, 0); |
696 | // we can trigger a GC from inside keeper_call(), where a keeper is acquired | ||
697 | // from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread. | ||
698 | MUTEX_RECURSIVE_INIT( &GKeepers[i].lock_); | ||
699 | GKeepers[i].L = K; | ||
700 | } | 720 | } |
701 | STACK_END( L, 0); | 721 | STACK_END( L, 0); |
702 | return 0; // success | ||
703 | } | 722 | } |
704 | 723 | ||
705 | struct s_Keeper* keeper_acquire( unsigned long magic_) | 724 | struct s_Keeper* keeper_acquire( struct s_Keepers* keepers_, unsigned long magic_) |
706 | { | 725 | { |
726 | int const nbKeepers = keepers_->nb_keepers; | ||
707 | // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) | 727 | // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers) |
708 | if( GNbKeepers == 0) | 728 | if( nbKeepers == 0) |
709 | { | 729 | { |
710 | return NULL; | 730 | return NULL; |
711 | } | 731 | } |
@@ -718,10 +738,10 @@ struct s_Keeper* keeper_acquire( unsigned long magic_) | |||
718 | * Pointers are often aligned by 8 or so - ignore the low order bits | 738 | * Pointers are often aligned by 8 or so - ignore the low order bits |
719 | * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer | 739 | * have to cast to unsigned long to avoid compilation warnings about loss of data when converting pointer-to-integer |
720 | */ | 740 | */ |
721 | unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % GNbKeepers); | 741 | unsigned int i = (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nbKeepers); |
722 | struct s_Keeper* K= &GKeepers[i]; | 742 | struct s_Keeper* K = &keepers_->keeper_array[i]; |
723 | 743 | ||
724 | MUTEX_LOCK( &K->lock_); | 744 | MUTEX_LOCK( &K->keeper_cs); |
725 | //++ K->count; | 745 | //++ K->count; |
726 | return K; | 746 | return K; |
727 | } | 747 | } |
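Editor's note: keeper_acquire() now receives the pool explicitly, but the mapping itself is unchanged: shift away the alignment bits of the linda's hash seed, then take it modulo the pool size. A standalone restatement of that mapping (KEEPER_MAGIC_SHIFT is 3, per src/keeper.h further below):

    static unsigned int keeper_index( unsigned long magic_, int nb_keepers)
    {
        // pointers are typically aligned by 8 or so, hence the low-order bits carry no information
        return (unsigned int)((magic_ >> KEEPER_MAGIC_SHIFT) % nb_keepers);
    }
    // lindas created with the same group value therefore always map to the same keeper,
    // while ungrouped lindas spread over the pool according to their addresses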
@@ -730,16 +750,16 @@ struct s_Keeper* keeper_acquire( unsigned long magic_) | |||
730 | void keeper_release( struct s_Keeper* K) | 750 | void keeper_release( struct s_Keeper* K) |
731 | { | 751 | { |
732 | //-- K->count; | 752 | //-- K->count; |
733 | if( K) MUTEX_UNLOCK( &K->lock_); | 753 | if( K) MUTEX_UNLOCK( &K->keeper_cs); |
734 | } | 754 | } |
735 | 755 | ||
736 | void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, enum eLookupMode mode_) | 756 | void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, enum eLookupMode mode_) |
737 | { | 757 | { |
738 | int i, n = lua_gettop( L); | 758 | int i, n = lua_gettop( L); |
739 | /* We could use an empty table in 'keeper.lua' as the sentinel, but maybe | 759 | /* We could use an empty table in 'keeper.lua' as the sentinel, but maybe |
740 | * checking for a lightuserdata is faster. (any unique value will do -> take the address of some global of ours) | 760 | * checking for a lightuserdata is faster. (any unique value will do -> take the address of some global symbol of ours) |
741 | */ | 761 | */ |
742 | void* nil_sentinel = &GNbKeepers; | 762 | void* nil_sentinel = (void*) keeper_toggle_nil_sentinels; |
743 | for( i = val_i_; i <= n; ++ i) | 763 | for( i = val_i_; i <= n; ++ i) |
744 | { | 764 | { |
745 | if( mode_ == eLM_ToKeeper) | 765 | if( mode_ == eLM_ToKeeper) |
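Editor's note: the sentinel change above only swaps which unique address is used (the function's own address instead of the now-removed GNbKeepers global). For reference, a hedged sketch of the eLM_ToKeeper half of the nil round-trip:

    // sketch of the to-keeper direction only: nils cannot be stored in a keeper-side fifo
    // table, so each nil argument is replaced by a unique lightuserdata before the transfer
    // (keeper_toggle_nil_sentinels undoes this with eLM_FromKeeper on the way back)
    static void sketch_nils_to_sentinels( lua_State* L, int val_i_, void* nil_sentinel)
    {
        int i, n = lua_gettop( L);
        for( i = val_i_; i <= n; ++ i)
        {
            if( lua_isnil( L, i))
            {
                lua_pushlightuserdata( L, nil_sentinel);
                lua_replace( L, i);
            }
        }
    }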
@@ -770,7 +790,7 @@ void keeper_toggle_nil_sentinels( lua_State* L, int val_i_, enum eLookupMode mod | |||
770 | * | 790 | * |
771 | * Returns: number of return values (pushed to 'L') or -1 in case of error | 791 | * Returns: number of return values (pushed to 'L') or -1 in case of error |
772 | */ | 792 | */ |
773 | int keeper_call( lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) | 793 | int keeper_call( struct s_Universe* U, lua_State* K, keeper_api_t func_, lua_State* L, void* linda, uint_t starting_index) |
774 | { | 794 | { |
775 | int const args = starting_index ? (lua_gettop( L) - starting_index + 1) : 0; | 795 | int const args = starting_index ? (lua_gettop( L) - starting_index + 1) : 0; |
776 | int const Ktos = lua_gettop( K); | 796 | int const Ktos = lua_gettop( K); |
@@ -782,7 +802,7 @@ int keeper_call( lua_State* K, keeper_api_t func_, lua_State* L, void* linda, ui | |||
782 | 802 | ||
783 | lua_pushlightuserdata( K, linda); | 803 | lua_pushlightuserdata( K, linda); |
784 | 804 | ||
785 | if( (args == 0) || luaG_inter_copy( L, K, args, eLM_ToKeeper) == 0) // L->K | 805 | if( (args == 0) || luaG_inter_copy( U, L, K, args, eLM_ToKeeper) == 0) // L->K |
786 | { | 806 | { |
787 | lua_call( K, 1 + args, LUA_MULTRET); | 807 | lua_call( K, 1 + args, LUA_MULTRET); |
788 | 808 | ||
@@ -791,7 +811,7 @@ int keeper_call( lua_State* K, keeper_api_t func_, lua_State* L, void* linda, ui | |||
791 | // this may interrupt a lane, causing the destruction of the underlying OS thread | 811 | // this may interrupt a lane, causing the destruction of the underlying OS thread |
792 | // after this, another lane making use of this keeper can get an error code from the mutex-locking function | 812 | // after this, another lane making use of this keeper can get an error code from the mutex-locking function |
793 | // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) | 813 | // when attempting to grab the mutex again (WINVER <= 0x400 does this, but locks just fine, I don't know about pthread) |
794 | if( (retvals > 0) && luaG_inter_move( K, L, retvals, eLM_FromKeeper) != 0) // K->L | 814 | if( (retvals > 0) && luaG_inter_move( U, K, L, retvals, eLM_FromKeeper) != 0) // K->L |
795 | { | 815 | { |
796 | retvals = -1; | 816 | retvals = -1; |
797 | } | 817 | } |
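Editor's note: every linda operation in src/lanes.c below drives the keepers through the same protocol, now with the universe threaded through. A condensed, hedged sketch of that calling pattern (error paths and cancellation checks elided):

    static int sketch_linda_op( lua_State* L, struct s_Linda* linda, keeper_api_t op_)
    {
        int pushed;
        // pick (and lock) the keeper this linda hashes to; NULL means the universe is shutting down
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        // run the keeper-side C function; it sees the linda plus the caller's arguments from slot 2 up (a simplification)
        pushed = keeper_call( linda->U, K->L, op_, L, linda, 2);
        keeper_release( K); // always unlock, even when keeper_call() reported -1
        return (pushed < 0) ? 0 : pushed;
    }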
diff --git a/src/keeper.h b/src/keeper.h index 5a52f3b..450f64d 100644 --- a/src/keeper.h +++ b/src/keeper.h | |||
@@ -3,19 +3,25 @@ | |||
3 | 3 | ||
4 | struct s_Keeper | 4 | struct s_Keeper |
5 | { | 5 | { |
6 | MUTEX_T lock_; | 6 | MUTEX_T keeper_cs; |
7 | lua_State *L; | 7 | lua_State* L; |
8 | //int count; | 8 | //int count; |
9 | }; | 9 | }; |
10 | 10 | ||
11 | int init_keepers( lua_State* L); | 11 | struct s_Keepers |
12 | void close_keepers( lua_State* L); | 12 | { |
13 | int nb_keepers; | ||
14 | struct s_Keeper keeper_array[1]; | ||
15 | }; | ||
16 | |||
17 | void init_keepers( struct s_Universe* U, lua_State* L); | ||
18 | void close_keepers( struct s_Universe* U, lua_State* L); | ||
13 | 19 | ||
14 | struct s_Keeper *keeper_acquire( unsigned long magic_); | 20 | struct s_Keeper* keeper_acquire( struct s_Keepers* keepers_, unsigned long magic_); |
15 | #define KEEPER_MAGIC_SHIFT 3 | 21 | #define KEEPER_MAGIC_SHIFT 3 |
16 | void keeper_release( struct s_Keeper* K); | 22 | void keeper_release( struct s_Keeper* K); |
17 | void keeper_toggle_nil_sentinels( lua_State *L, int _val_i, enum eLookupMode const mode_); | 23 | void keeper_toggle_nil_sentinels( lua_State* L, int _val_i, enum eLookupMode const mode_); |
18 | int keeper_push_linda_storage( lua_State* L, void* ptr, unsigned long magic_); | 24 | int keeper_push_linda_storage( struct s_Universe* U, lua_State* L, void* ptr, unsigned long magic_); |
19 | 25 | ||
20 | typedef lua_CFunction keeper_api_t; | 26 | typedef lua_CFunction keeper_api_t; |
21 | #define KEEPER_API( _op) keepercall_ ## _op | 27 | #define KEEPER_API( _op) keepercall_ ## _op |
@@ -30,6 +36,6 @@ int keepercall_get( lua_State* L); | |||
30 | int keepercall_set( lua_State* L); | 36 | int keepercall_set( lua_State* L); |
31 | int keepercall_count( lua_State* L); | 37 | int keepercall_count( lua_State* L); |
32 | 38 | ||
33 | int keeper_call( lua_State *K, keeper_api_t _func, lua_State *L, void *linda, uint_t starting_index); | 39 | int keeper_call( struct s_Universe* U, lua_State* K, keeper_api_t _func, lua_State* L, void* linda, uint_t starting_index); |
34 | 40 | ||
35 | #endif // __keeper_h__ \ No newline at end of file | 41 | #endif // __keeper_h__ \ No newline at end of file |
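Editor's note: struct s_Keepers ends with a one-element array because the whole pool is carved out of a single allocation that is over-sized to hold nb_keepers entries, which is exactly the byte count used by init_keepers()/close_keepers() in keeper.c above. A short sketch of that sizing, with allocF/allocUD as returned by lua_getallocf():

    size_t const bytes = sizeof( struct s_Keepers) + (nb_keepers - 1) * sizeof( struct s_Keeper);
    struct s_Keepers* keepers = (struct s_Keepers*) allocF( allocUD, NULL, 0, bytes);
    memset( keepers, 0, bytes);
    keepers->nb_keepers = nb_keepers;
    // keeper_array[0] .. keeper_array[nb_keepers - 1] are now addressable inside one block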
diff --git a/src/lanes.c b/src/lanes.c index 9f90ff7..fdd2ab9 100644 --- a/src/lanes.c +++ b/src/lanes.c | |||
@@ -52,7 +52,7 @@ | |||
52 | * ... | 52 | * ... |
53 | */ | 53 | */ |
54 | 54 | ||
55 | char const* VERSION = "3.9.1"; | 55 | char const* VERSION = "3.9.2"; |
56 | 56 | ||
57 | /* | 57 | /* |
58 | =============================================================================== | 58 | =============================================================================== |
@@ -104,11 +104,6 @@ THE SOFTWARE. | |||
104 | # include <sys/types.h> | 104 | # include <sys/types.h> |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | /* | ||
108 | * Do we want to activate full lane tracking feature? (EXPERIMENTAL) | ||
109 | */ | ||
110 | #define HAVE_LANE_TRACKING 1 | ||
111 | |||
112 | /* Do you want full call stacks, or just the line where the error happened? | 107 | /* Do you want full call stacks, or just the line where the error happened? |
113 | * | 108 | * |
114 | * TBD: The full stack feature does not seem to work (try 'make error'). | 109 | * TBD: The full stack feature does not seem to work (try 'make error'). |
@@ -138,6 +133,7 @@ struct s_lane | |||
138 | char const* debug_name; | 133 | char const* debug_name; |
139 | 134 | ||
140 | lua_State* L; | 135 | lua_State* L; |
136 | struct s_Universe* U; | ||
141 | // | 137 | // |
142 | // M: prepares the state, and reads results | 138 | // M: prepares the state, and reads results |
143 | // S: while S is running, M must keep out of modifying the state | 139 | // S: while S is running, M must keep out of modifying the state |
@@ -321,9 +317,6 @@ static bool_t push_registry_table( lua_State*L, void *key, bool_t create ) { | |||
321 | 317 | ||
322 | #if HAVE_LANE_TRACKING | 318 | #if HAVE_LANE_TRACKING |
323 | 319 | ||
324 | static MUTEX_T tracking_cs; | ||
325 | struct s_lane* volatile tracking_first = NULL; // will change to TRACKING_END if we want to activate tracking | ||
326 | |||
327 | // The chain is ended by '(struct s_lane*)(-1)', not NULL: | 320 | // The chain is ended by '(struct s_lane*)(-1)', not NULL: |
328 | // 'tracking_first -> ... -> ... -> (-1)' | 321 | // 'tracking_first -> ... -> ... -> (-1)' |
329 | #define TRACKING_END ((struct s_lane *)(-1)) | 322 | #define TRACKING_END ((struct s_lane *)(-1)) |
@@ -335,14 +328,14 @@ struct s_lane* volatile tracking_first = NULL; // will change to TRACKING_END if | |||
335 | static void tracking_add( struct s_lane* s) | 328 | static void tracking_add( struct s_lane* s) |
336 | { | 329 | { |
337 | 330 | ||
338 | MUTEX_LOCK( &tracking_cs); | 331 | MUTEX_LOCK( &s->U->tracking_cs); |
339 | { | 332 | { |
340 | assert( s->tracking_next == NULL); | 333 | assert( s->tracking_next == NULL); |
341 | 334 | ||
342 | s->tracking_next = tracking_first; | 335 | s->tracking_next = s->U->tracking_first; |
343 | tracking_first = s; | 336 | s->U->tracking_first = s; |
344 | } | 337 | } |
345 | MUTEX_UNLOCK( &tracking_cs); | 338 | MUTEX_UNLOCK( &s->U->tracking_cs); |
346 | } | 339 | } |
347 | 340 | ||
348 | /* | 341 | /* |
@@ -351,7 +344,7 @@ static void tracking_add( struct s_lane* s) | |||
351 | static bool_t tracking_remove( struct s_lane* s) | 344 | static bool_t tracking_remove( struct s_lane* s) |
352 | { | 345 | { |
353 | bool_t found = FALSE; | 346 | bool_t found = FALSE; |
354 | MUTEX_LOCK( &tracking_cs); | 347 | MUTEX_LOCK( &s->U->tracking_cs); |
355 | { | 348 | { |
356 | // Make sure (within the MUTEX) that we actually are in the chain | 349 | // Make sure (within the MUTEX) that we actually are in the chain |
357 | // still (at process exit they will remove us from chain and then | 350 | // still (at process exit they will remove us from chain and then |
@@ -359,7 +352,7 @@ static bool_t tracking_remove( struct s_lane* s) | |||
359 | // | 352 | // |
360 | if (s->tracking_next != NULL) | 353 | if (s->tracking_next != NULL) |
361 | { | 354 | { |
362 | struct s_lane** ref= (struct s_lane**) &tracking_first; | 355 | struct s_lane** ref = (struct s_lane**) &s->U->tracking_first; |
363 | 356 | ||
364 | while( *ref != TRACKING_END) | 357 | while( *ref != TRACKING_END) |
365 | { | 358 | { |
@@ -375,7 +368,7 @@ static bool_t tracking_remove( struct s_lane* s) | |||
375 | assert( found); | 368 | assert( found); |
376 | } | 369 | } |
377 | } | 370 | } |
378 | MUTEX_UNLOCK( &tracking_cs); | 371 | MUTEX_UNLOCK( &s->U->tracking_cs); |
379 | return found; | 372 | return found; |
380 | } | 373 | } |
381 | 374 | ||
@@ -394,7 +387,7 @@ static void lane_cleanup( struct s_lane* s) | |||
394 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR | 387 | #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR |
395 | 388 | ||
396 | #if HAVE_LANE_TRACKING | 389 | #if HAVE_LANE_TRACKING |
397 | if( tracking_first != NULL) | 390 | if( s->U->tracking_first != NULL) |
398 | { | 391 | { |
399 | // Lane was cleaned up, no need to handle at process termination | 392 | // Lane was cleaned up, no need to handle at process termination |
400 | tracking_remove( s); | 393 | tracking_remove( s); |
@@ -418,8 +411,9 @@ struct s_Linda | |||
418 | { | 411 | { |
419 | SIGNAL_T read_happened; | 412 | SIGNAL_T read_happened; |
420 | SIGNAL_T write_happened; | 413 | SIGNAL_T write_happened; |
414 | struct s_Universe* U; // the universe this linda belongs to | ||
421 | enum e_cancel_request simulate_cancel; | 415 | enum e_cancel_request simulate_cancel; |
422 | unsigned long group; | 416 | unsigned long group; // a group to control keeper allocation between lindas |
423 | char name[1]; | 417 | char name[1]; |
424 | }; | 418 | }; |
425 | #define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (unsigned long)linda) | 419 | #define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (unsigned long)linda) |
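Editor's note: struct s_Linda now carries a back-pointer to its universe plus an optional group that steers keeper allocation. The deep-userdata constructor hunk further below fills them in; condensed, the relevant lines amount to this sketch (field assignments and shift taken from that hunk):

    // inside linda_id()'s creation branch (condensed sketch, allocation elided):
    s->U = get_universe( L);                        // remember which universe owns this linda
    s->group = linda_group << KEEPER_MAGIC_SHIFT;   // optional: pin related lindas to one keeper
    // every operation then resolves its keeper with
    //     keeper_acquire( s->U->keepers, LINDA_KEEPER_HASHSEED( s));
    // so lindas sharing a group value also share a keeper state and its mutex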
@@ -491,7 +485,7 @@ LUAG_FUNC( linda_send) | |||
491 | { | 485 | { |
492 | bool_t try_again = TRUE; | 486 | bool_t try_again = TRUE; |
493 | struct s_lane* const s = get_lane_from_registry( L); | 487 | struct s_lane* const s = get_lane_from_registry( L); |
494 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 488 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
495 | lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' | 489 | lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK' |
496 | if( KL == NULL) return 0; | 490 | if( KL == NULL) return 0; |
497 | STACK_CHECK( KL); | 491 | STACK_CHECK( KL); |
@@ -510,7 +504,7 @@ LUAG_FUNC( linda_send) | |||
510 | } | 504 | } |
511 | 505 | ||
512 | STACK_MID( KL, 0); | 506 | STACK_MID( KL, 0); |
513 | pushed = keeper_call( KL, KEEPER_API( send), L, linda, key_i); | 507 | pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i); |
514 | if( pushed < 0) | 508 | if( pushed < 0) |
515 | { | 509 | { |
516 | ret = FALSE; | 510 | ret = FALSE; |
@@ -547,7 +541,7 @@ LUAG_FUNC( linda_send) | |||
547 | s->waiting_on = &linda->read_happened; | 541 | s->waiting_on = &linda->read_happened; |
548 | } | 542 | } |
549 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached | 543 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached |
550 | try_again = SIGNAL_WAIT( &linda->read_happened, &K->lock_, timeout); | 544 | try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout); |
551 | if( s != NULL) | 545 | if( s != NULL) |
552 | { | 546 | { |
553 | s->waiting_on = NULL; | 547 | s->waiting_on = NULL; |
@@ -654,7 +648,7 @@ LUAG_FUNC( linda_receive) | |||
654 | { | 648 | { |
655 | bool_t try_again = TRUE; | 649 | bool_t try_again = TRUE; |
656 | struct s_lane* const s = get_lane_from_registry( L); | 650 | struct s_lane* const s = get_lane_from_registry( L); |
657 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 651 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
658 | if( K == NULL) return 0; | 652 | if( K == NULL) return 0; |
659 | for( ;;) | 653 | for( ;;) |
660 | { | 654 | { |
@@ -671,7 +665,7 @@ LUAG_FUNC( linda_receive) | |||
671 | } | 665 | } |
672 | 666 | ||
673 | // all arguments of receive() but the first are passed to the keeper's receive function | 667 | // all arguments of receive() but the first are passed to the keeper's receive function |
674 | pushed = keeper_call( K->L, keeper_receive, L, linda, key_i); | 668 | pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i); |
675 | if( pushed < 0) | 669 | if( pushed < 0) |
676 | { | 670 | { |
677 | break; | 671 | break; |
@@ -705,7 +699,7 @@ LUAG_FUNC( linda_receive) | |||
705 | s->waiting_on = &linda->write_happened; | 699 | s->waiting_on = &linda->write_happened; |
706 | } | 700 | } |
707 | // not enough data to read: wakeup when data was sent, or when timeout is reached | 701 | // not enough data to read: wakeup when data was sent, or when timeout is reached |
708 | try_again = SIGNAL_WAIT( &linda->write_happened, &K->lock_, timeout); | 702 | try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout); |
709 | if( s != NULL) | 703 | if( s != NULL) |
710 | { | 704 | { |
711 | s->waiting_on = NULL; | 705 | s->waiting_on = NULL; |
@@ -757,7 +751,7 @@ LUAG_FUNC( linda_set) | |||
757 | check_key_types( L, 2, 2); | 751 | check_key_types( L, 2, 2); |
758 | 752 | ||
759 | { | 753 | { |
760 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 754 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
761 | if( K == NULL) return 0; | 755 | if( K == NULL) return 0; |
762 | 756 | ||
763 | if( linda->simulate_cancel == CANCEL_NONE) | 757 | if( linda->simulate_cancel == CANCEL_NONE) |
@@ -767,7 +761,7 @@ LUAG_FUNC( linda_set) | |||
767 | // convert nils to some special non-nil sentinel in sent values | 761 | // convert nils to some special non-nil sentinel in sent values |
768 | keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); | 762 | keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper); |
769 | } | 763 | } |
770 | pushed = keeper_call( K->L, KEEPER_API( set), L, linda, 2); | 764 | pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2); |
771 | if( pushed >= 0) // no error? | 765 | if( pushed >= 0) // no error? |
772 | { | 766 | { |
773 | ASSERT_L( pushed == 0 || pushed == 1); | 767 | ASSERT_L( pushed == 0 || pushed == 1); |
@@ -813,9 +807,9 @@ LUAG_FUNC( linda_count) | |||
813 | check_key_types( L, 2, lua_gettop( L)); | 807 | check_key_types( L, 2, lua_gettop( L)); |
814 | 808 | ||
815 | { | 809 | { |
816 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 810 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
817 | if( K == NULL) return 0; | 811 | if( K == NULL) return 0; |
818 | pushed = keeper_call( K->L, KEEPER_API( count), L, linda, 2); | 812 | pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2); |
819 | keeper_release( K); | 813 | keeper_release( K); |
820 | if( pushed < 0) | 814 | if( pushed < 0) |
821 | { | 815 | { |
@@ -842,12 +836,12 @@ LUAG_FUNC( linda_get) | |||
842 | // make sure the key is of a valid type (throws an error if not the case) | 836 | // make sure the key is of a valid type (throws an error if not the case) |
843 | check_key_types( L, 2, 2); | 837 | check_key_types( L, 2, 2); |
844 | { | 838 | { |
845 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 839 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
846 | if( K == NULL) return 0; | 840 | if( K == NULL) return 0; |
847 | 841 | ||
848 | if( linda->simulate_cancel == CANCEL_NONE) | 842 | if( linda->simulate_cancel == CANCEL_NONE) |
849 | { | 843 | { |
850 | pushed = keeper_call( K->L, KEEPER_API( get), L, linda, 2); | 844 | pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2); |
851 | if( pushed > 0) | 845 | if( pushed > 0) |
852 | { | 846 | { |
853 | keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); | 847 | keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper); |
@@ -892,12 +886,12 @@ LUAG_FUNC( linda_limit) | |||
892 | check_key_types( L, 2, 2); | 886 | check_key_types( L, 2, 2); |
893 | 887 | ||
894 | { | 888 | { |
895 | struct s_Keeper* K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 889 | struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
896 | if( K == NULL) return 0; | 890 | if( K == NULL) return 0; |
897 | 891 | ||
898 | if( linda->simulate_cancel == CANCEL_NONE) | 892 | if( linda->simulate_cancel == CANCEL_NONE) |
899 | { | 893 | { |
900 | pushed = keeper_call( K->L, KEEPER_API( limit), L, linda, 2); | 894 | pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2); |
901 | ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads | 895 | ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads |
902 | if( pushed == 1) | 896 | if( pushed == 1) |
903 | { | 897 | { |
@@ -933,7 +927,7 @@ LUAG_FUNC( linda_cancel) | |||
933 | luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); | 927 | luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments"); |
934 | 928 | ||
935 | // signalling must be done from inside the K locking area | 929 | // signalling must be done from inside the K locking area |
936 | K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 930 | K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
937 | if( K == NULL) return 0; | 931 | if( K == NULL) return 0; |
938 | 932 | ||
939 | linda->simulate_cancel = CANCEL_SOFT; | 933 | linda->simulate_cancel = CANCEL_SOFT; |
@@ -1059,7 +1053,8 @@ LUAG_FUNC( linda_concat) | |||
1059 | LUAG_FUNC( linda_dump) | 1053 | LUAG_FUNC( linda_dump) |
1060 | { | 1054 | { |
1061 | struct s_Linda* linda = lua_toLinda( L, 1); | 1055 | struct s_Linda* linda = lua_toLinda( L, 1); |
1062 | return keeper_push_linda_storage( L, linda, LINDA_KEEPER_HASHSEED( linda)); | 1056 | ASSERT_L( linda->U == get_universe( L)); |
1057 | return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda)); | ||
1063 | } | 1058 | } |
1064 | 1059 | ||
1065 | /* | 1060 | /* |
@@ -1129,6 +1124,7 @@ static void* linda_id( lua_State* L, enum eDeepOp op_) | |||
1129 | { | 1124 | { |
1130 | SIGNAL_INIT( &s->read_happened); | 1125 | SIGNAL_INIT( &s->read_happened); |
1131 | SIGNAL_INIT( &s->write_happened); | 1126 | SIGNAL_INIT( &s->write_happened); |
1127 | s->U = get_universe( L); | ||
1132 | s->simulate_cancel = CANCEL_NONE; | 1128 | s->simulate_cancel = CANCEL_NONE; |
1133 | s->group = linda_group << KEEPER_MAGIC_SHIFT; | 1129 | s->group = linda_group << KEEPER_MAGIC_SHIFT; |
1134 | s->name[0] = 0; | 1130 | s->name[0] = 0; |
@@ -1145,10 +1141,10 @@ static void* linda_id( lua_State* L, enum eDeepOp op_) | |||
1145 | 1141 | ||
1146 | /* Clean associated structures in the keeper state. | 1142 | /* Clean associated structures in the keeper state. |
1147 | */ | 1143 | */ |
1148 | K = keeper_acquire( LINDA_KEEPER_HASHSEED( linda)); | 1144 | K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); |
1149 | if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) | 1145 | if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) |
1150 | { | 1146 | { |
1151 | keeper_call( K->L, KEEPER_API( clear), L, linda, 0); | 1147 | keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0); |
1152 | } | 1148 | } |
1153 | keeper_release( K); | 1149 | keeper_release( K); |
1154 | 1150 | ||
@@ -1467,7 +1463,6 @@ static cancel_result thread_cancel( lua_State* L, struct s_lane* s, double secs, | |||
1467 | return result; | 1463 | return result; |
1468 | } | 1464 | } |
1469 | 1465 | ||
1470 | static MUTEX_T selfdestruct_cs; | ||
1471 | // | 1466 | // |
1472 | // Protects modifying the selfdestruct chain | 1467 | // Protects modifying the selfdestruct chain |
1473 | 1468 | ||
@@ -1476,26 +1471,18 @@ static MUTEX_T selfdestruct_cs; | |||
1476 | // The chain is ended by '(struct s_lane*)(-1)', not NULL: | 1471 | // The chain is ended by '(struct s_lane*)(-1)', not NULL: |
1477 | // 'selfdestruct_first -> ... -> ... -> (-1)' | 1472 | // 'selfdestruct_first -> ... -> ... -> (-1)' |
1478 | 1473 | ||
1479 | struct s_lane* volatile selfdestruct_first = SELFDESTRUCT_END; | ||
1480 | |||
1481 | // After a lane has removed itself from the chain, it still performs some processing. | ||
1482 | // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads | ||
1483 | int volatile selfdestructing_count = 0; | ||
1484 | |||
1485 | /* | 1474 | /* |
1486 | * Add the lane to selfdestruct chain; the ones still running at the end of the | 1475 | * Add the lane to selfdestruct chain; the ones still running at the end of the |
1487 | * whole process will be cancelled. | 1476 | * whole process will be cancelled. |
1488 | */ | 1477 | */ |
1489 | static void selfdestruct_add( struct s_lane* s) | 1478 | static void selfdestruct_add( struct s_lane* s) |
1490 | { | 1479 | { |
1491 | MUTEX_LOCK( &selfdestruct_cs ); | 1480 | MUTEX_LOCK( &s->U->selfdestruct_cs); |
1492 | { | 1481 | assert( s->selfdestruct_next == NULL); |
1493 | assert( s->selfdestruct_next == NULL ); | ||
1494 | 1482 | ||
1495 | s->selfdestruct_next= selfdestruct_first; | 1483 | s->selfdestruct_next = s->U->selfdestruct_first; |
1496 | selfdestruct_first= s; | 1484 | s->U->selfdestruct_first= s; |
1497 | } | 1485 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); |
1498 | MUTEX_UNLOCK( &selfdestruct_cs ); | ||
1499 | } | 1486 | } |
1500 | 1487 | ||
1501 | /* | 1488 | /* |
@@ -1503,38 +1490,36 @@ static void selfdestruct_add( struct s_lane* s) | |||
1503 | */ | 1490 | */ |
1504 | static bool_t selfdestruct_remove( struct s_lane* s) | 1491 | static bool_t selfdestruct_remove( struct s_lane* s) |
1505 | { | 1492 | { |
1506 | bool_t found = FALSE; | 1493 | bool_t found = FALSE; |
1507 | MUTEX_LOCK( &selfdestruct_cs ); | 1494 | MUTEX_LOCK( &s->U->selfdestruct_cs); |
1508 | { | 1495 | { |
1509 | // Make sure (within the MUTEX) that we actually are in the chain | 1496 | // Make sure (within the MUTEX) that we actually are in the chain |
1510 | // still (at process exit they will remove us from chain and then | 1497 | // still (at process exit they will remove us from chain and then |
1511 | // cancel/kill). | 1498 | // cancel/kill). |
1512 | // | 1499 | // |
1513 | if (s->selfdestruct_next != NULL) { | 1500 | if( s->selfdestruct_next != NULL) |
1514 | struct s_lane** ref= (struct s_lane**) &selfdestruct_first; | 1501 | { |
1515 | 1502 | struct s_lane** ref = (struct s_lane**) &s->U->selfdestruct_first; | |
1516 | while( *ref != SELFDESTRUCT_END ) { | ||
1517 | if (*ref == s) { | ||
1518 | *ref= s->selfdestruct_next; | ||
1519 | s->selfdestruct_next= NULL; | ||
1520 | // the terminal shutdown should wait until the lane is done with its lua_close() | ||
1521 | ++ selfdestructing_count; | ||
1522 | found= TRUE; | ||
1523 | break; | ||
1524 | } | ||
1525 | ref= (struct s_lane**) &((*ref)->selfdestruct_next); | ||
1526 | } | ||
1527 | assert( found ); | ||
1528 | } | ||
1529 | } | ||
1530 | MUTEX_UNLOCK( &selfdestruct_cs ); | ||
1531 | return found; | ||
1532 | } | ||
1533 | 1503 | ||
1534 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object | 1504 | while( *ref != SELFDESTRUCT_END ) |
1535 | // used for timers (each lane will get a proxy to this) | 1505 | { |
1536 | // | 1506 | if( *ref == s) |
1537 | volatile DEEP_PRELUDE* timer_deep; // = NULL | 1507 | { |
1508 | *ref = s->selfdestruct_next; | ||
1509 | s->selfdestruct_next = NULL; | ||
1510 | // the terminal shutdown should wait until the lane is done with its lua_close() | ||
1511 | ++ s->U->selfdestructing_count; | ||
1512 | found = TRUE; | ||
1513 | break; | ||
1514 | } | ||
1515 | ref = (struct s_lane**) &((*ref)->selfdestruct_next); | ||
1516 | } | ||
1517 | assert( found); | ||
1518 | } | ||
1519 | } | ||
1520 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); | ||
1521 | return found; | ||
1522 | } | ||
1538 | 1523 | ||
1539 | /* | 1524 | /* |
1540 | ** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT) | 1525 | ** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT) |
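Editor's note: the comment above introduces protected_lua_Alloc, a shim for Lua builds whose allocator is not thread-safe (LuaJIT is cited). Its body sits outside the hunks shown here, so the following is only a hedged sketch of such a wrapper; the structure and field names are assumptions.

    struct ProtectedAllocator_s
    {
        lua_Alloc definitive_lua_Alloc;  // the allocator originally returned by lua_getallocf()
        void* definitive_allocator_ud;
        MUTEX_T lock;
    };

    void* protected_lua_Alloc_sketch( void* ud, void* ptr, size_t osize, size_t nsize)
    {
        void* p;
        struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
        MUTEX_LOCK( &s->lock);
        p = s->definitive_lua_Alloc( s->definitive_allocator_ud, ptr, osize, nsize);
        MUTEX_UNLOCK( &s->lock);
        return p;
    }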
@@ -1560,13 +1545,15 @@ void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) | |||
1560 | */ | 1545 | */ |
1561 | static int selfdestruct_gc( lua_State* L) | 1546 | static int selfdestruct_gc( lua_State* L) |
1562 | { | 1547 | { |
1563 | while( selfdestruct_first != SELFDESTRUCT_END) // true at most once! | 1548 | struct s_Universe* U = (struct s_Universe*) lua_touserdata( L, 1); |
1549 | |||
1550 | while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! | ||
1564 | { | 1551 | { |
1565 | // Signal _all_ still running threads to exit (including the timer thread) | 1552 | // Signal _all_ still running threads to exit (including the timer thread) |
1566 | // | 1553 | // |
1567 | MUTEX_LOCK( &selfdestruct_cs); | 1554 | MUTEX_LOCK( &U->selfdestruct_cs); |
1568 | { | 1555 | { |
1569 | struct s_lane* s = selfdestruct_first; | 1556 | struct s_lane* s = U->selfdestruct_first; |
1570 | while( s != SELFDESTRUCT_END) | 1557 | while( s != SELFDESTRUCT_END) |
1571 | { | 1558 | { |
1572 | // attempt a regular unforced hard cancel with a small timeout | 1559 | // attempt a regular unforced hard cancel with a small timeout |
@@ -1583,7 +1570,7 @@ static int selfdestruct_gc( lua_State* L) | |||
1583 | s = s->selfdestruct_next; | 1570 | s = s->selfdestruct_next; |
1584 | } | 1571 | } |
1585 | } | 1572 | } |
1586 | MUTEX_UNLOCK( &selfdestruct_cs); | 1573 | MUTEX_UNLOCK( &U->selfdestruct_cs); |
1587 | 1574 | ||
1588 | // When noticing their cancel, the lanes will remove themselves from | 1575 | // When noticing their cancel, the lanes will remove themselves from |
1589 | // the selfdestruct chain. | 1576 | // the selfdestruct chain. |
@@ -1603,16 +1590,16 @@ static int selfdestruct_gc( lua_State* L) | |||
1603 | lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); | 1590 | lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1)); |
1604 | double const t_until = now_secs() + shutdown_timeout; | 1591 | double const t_until = now_secs() + shutdown_timeout; |
1605 | 1592 | ||
1606 | while( selfdestruct_first != SELFDESTRUCT_END) | 1593 | while( U->selfdestruct_first != SELFDESTRUCT_END) |
1607 | { | 1594 | { |
1608 | YIELD(); // give threads time to act on their cancel | 1595 | YIELD(); // give threads time to act on their cancel |
1609 | { | 1596 | { |
1610 | // count the number of cancelled thread that didn't have the time to act yet | 1597 | // count the number of cancelled thread that didn't have the time to act yet |
1611 | int n = 0; | 1598 | int n = 0; |
1612 | double t_now = 0.0; | 1599 | double t_now = 0.0; |
1613 | MUTEX_LOCK( &selfdestruct_cs); | 1600 | MUTEX_LOCK( &U->selfdestruct_cs); |
1614 | { | 1601 | { |
1615 | struct s_lane* s = selfdestruct_first; | 1602 | struct s_lane* s = U->selfdestruct_first; |
1616 | while( s != SELFDESTRUCT_END) | 1603 | while( s != SELFDESTRUCT_END) |
1617 | { | 1604 | { |
1618 | if( s->cancel_request == CANCEL_HARD) | 1605 | if( s->cancel_request == CANCEL_HARD) |
@@ -1620,7 +1607,7 @@ static int selfdestruct_gc( lua_State* L) | |||
1620 | s = s->selfdestruct_next; | 1607 | s = s->selfdestruct_next; |
1621 | } | 1608 | } |
1622 | } | 1609 | } |
1623 | MUTEX_UNLOCK( &selfdestruct_cs); | 1610 | MUTEX_UNLOCK( &U->selfdestruct_cs); |
1624 | // if timeout elapsed, or we know all threads have acted, stop waiting | 1611 | // if timeout elapsed, or we know all threads have acted, stop waiting |
1625 | t_now = now_secs(); | 1612 | t_now = now_secs(); |
1626 | if( n == 0 || (t_now >= t_until)) | 1613 | if( n == 0 || (t_now >= t_until)) |
@@ -1638,9 +1625,9 @@ static int selfdestruct_gc( lua_State* L) | |||
1638 | bool_t again = TRUE; | 1625 | bool_t again = TRUE; |
1639 | do | 1626 | do |
1640 | { | 1627 | { |
1641 | MUTEX_LOCK( &selfdestruct_cs); | 1628 | MUTEX_LOCK( &U->selfdestruct_cs); |
1642 | again = (selfdestructing_count > 0) ? TRUE : FALSE; | 1629 | again = (U->selfdestructing_count > 0) ? TRUE : FALSE; |
1643 | MUTEX_UNLOCK( &selfdestruct_cs); | 1630 | MUTEX_UNLOCK( &U->selfdestruct_cs); |
1644 | YIELD(); | 1631 | YIELD(); |
1645 | } while( again); | 1632 | } while( again); |
1646 | } | 1633 | } |
@@ -1648,15 +1635,15 @@ static int selfdestruct_gc( lua_State* L) | |||
1648 | //--- | 1635 | //--- |
1649 | // Kill the still free running threads | 1636 | // Kill the still free running threads |
1650 | // | 1637 | // |
1651 | if( selfdestruct_first != SELFDESTRUCT_END) | 1638 | if( U->selfdestruct_first != SELFDESTRUCT_END) |
1652 | { | 1639 | { |
1653 | unsigned int n = 0; | 1640 | unsigned int n = 0; |
1654 | // first thing we did was to raise the linda signals the threads were waiting on (if any) | 1641 | // first thing we did was to raise the linda signals the threads were waiting on (if any) |
1655 | // therefore, any well-behaved thread should be in CANCELLED state | 1642 | // therefore, any well-behaved thread should be in CANCELLED state |
1656 | // these are not running, and the state can be closed | 1643 | // these are not running, and the state can be closed |
1657 | MUTEX_LOCK( &selfdestruct_cs); | 1644 | MUTEX_LOCK( &U->selfdestruct_cs); |
1658 | { | 1645 | { |
1659 | struct s_lane* s = selfdestruct_first; | 1646 | struct s_lane* s = U->selfdestruct_first; |
1660 | while( s != SELFDESTRUCT_END) | 1647 | while( s != SELFDESTRUCT_END) |
1661 | { | 1648 | { |
1662 | struct s_lane* next_s = s->selfdestruct_next; | 1649 | struct s_lane* next_s = s->selfdestruct_next; |
@@ -1674,14 +1661,22 @@ static int selfdestruct_gc( lua_State* L) | |||
1674 | s = next_s; | 1661 | s = next_s; |
1675 | ++ n; | 1662 | ++ n; |
1676 | } | 1663 | } |
1677 | selfdestruct_first = SELFDESTRUCT_END; | 1664 | U->selfdestruct_first = SELFDESTRUCT_END; |
1678 | } | 1665 | } |
1679 | MUTEX_UNLOCK( &selfdestruct_cs); | 1666 | MUTEX_UNLOCK( &U->selfdestruct_cs); |
1680 | 1667 | ||
1681 | DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); | 1668 | DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n)); |
1682 | } | 1669 | } |
1683 | } | 1670 | } |
1684 | close_keepers( L); | 1671 | |
1672 | // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 | ||
1673 | lua_settop( L, 0); | ||
1674 | // no need to mutex-protect this as all threads in the universe are gone at that point | ||
1675 | -- U->timer_deep->refcount; // should be 0 now | ||
1676 | free_deep_prelude( L, (DEEP_PRELUDE*) U->timer_deep); | ||
1677 | U->timer_deep = NULL; | ||
1678 | |||
1679 | close_keepers( U, L); | ||
1685 | 1680 | ||
1686 | // remove the protected allocator, if any | 1681 | // remove the protected allocator, if any |
1687 | { | 1682 | { |
@@ -1697,6 +1692,16 @@ static int selfdestruct_gc( lua_State* L) | |||
1697 | } | 1692 | } |
1698 | } | 1693 | } |
1699 | 1694 | ||
1695 | #if HAVE_LANE_TRACKING | ||
1696 | MUTEX_FREE( &U->tracking_cs); | ||
1697 | #endif // HAVE_LANE_TRACKING | ||
1698 | // Linked chains handling | ||
1699 | MUTEX_FREE( &U->selfdestruct_cs); | ||
1700 | MUTEX_FREE( &U->require_cs); | ||
1701 | // Locks for 'tools.c' inc/dec counters | ||
1702 | MUTEX_FREE( &U->deep_lock); | ||
1703 | MUTEX_FREE( &U->mtid_lock); | ||
1704 | |||
1700 | return 0; | 1705 | return 0; |
1701 | } | 1706 | } |
1702 | 1707 | ||
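Editor's note: per the CHANGE 104 notes, the universe is a full userdata created in the master state and selfdestruct_gc (above) is its __gc metamethod, which is how the whole teardown sequence is triggered when the master state closes. The creation side is not part of this diff; the sketch below shows one plausible shape for it and is an assumption, apart from selfdestruct_gc's use of upvalue 1 (the shutdown timeout) and of UNIVERSE_REGKEY, both visible in the hunks.

    static struct s_Universe* sketch_create_universe( lua_State* L, lua_Number shutdown_timeout)
    {
        struct s_Universe* U = (struct s_Universe*) lua_newuserdata( L, sizeof( struct s_Universe)); // U
        memset( U, 0, sizeof( struct s_Universe));
        lua_newtable( L);                              // U mt
        lua_pushnumber( L, shutdown_timeout);          // U mt timeout
        lua_pushcclosure( L, selfdestruct_gc, 1);      // U mt selfdestruct_gc
        lua_setfield( L, -2, "__gc");                  // U mt
        lua_setmetatable( L, -2);                      // U
        // anchor it in the registry so lanes, lindas and keepers can find their universe
        lua_pushlightuserdata( L, UNIVERSE_REGKEY);    // U UNIVERSE_REGKEY
        lua_pushvalue( L, -2);                         // U UNIVERSE_REGKEY U
        lua_rawset( L, LUA_REGISTRYINDEX);             // U
        return U;
    }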
@@ -1961,8 +1966,9 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) | |||
1961 | struct s_lane* s = (struct s_lane*) vs; | 1966 | struct s_lane* s = (struct s_lane*) vs; |
1962 | int rc, rc2; | 1967 | int rc, rc2; |
1963 | lua_State* L = s->L; | 1968 | lua_State* L = s->L; |
1969 | DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L)); | ||
1964 | #if HAVE_LANE_TRACKING | 1970 | #if HAVE_LANE_TRACKING |
1965 | if( tracking_first) | 1971 | if( s->U->tracking_first) |
1966 | { | 1972 | { |
1967 | tracking_add( s); | 1973 | tracking_add( s); |
1968 | } | 1974 | } |
@@ -2072,10 +2078,10 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs) | |||
2072 | s->debug_name = "<closed>"; | 2078 | s->debug_name = "<closed>"; |
2073 | 2079 | ||
2074 | lane_cleanup( s); | 2080 | lane_cleanup( s); |
2075 | MUTEX_LOCK( &selfdestruct_cs); | 2081 | MUTEX_LOCK( &s->U->selfdestruct_cs); |
2076 | // done with lua_close(), terminal shutdown sequence may proceed | 2082 | // done with lua_close(), terminal shutdown sequence may proceed |
2077 | -- selfdestructing_count; | 2083 | -- s->U->selfdestructing_count; |
2078 | MUTEX_UNLOCK( &selfdestruct_cs); | 2084 | MUTEX_UNLOCK( &s->U->selfdestruct_cs); |
2079 | } | 2085 | } |
2080 | else | 2086 | else |
2081 | { | 2087 | { |
@@ -2112,15 +2118,16 @@ LUAG_FUNC( require) | |||
2112 | { | 2118 | { |
2113 | char const* name = lua_tostring( L, 1); | 2119 | char const* name = lua_tostring( L, 1); |
2114 | int const nargs = lua_gettop( L); | 2120 | int const nargs = lua_gettop( L); |
2121 | DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L)); | ||
2115 | STACK_CHECK( L); | 2122 | STACK_CHECK( L); |
2116 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); | 2123 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); |
2117 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2124 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2118 | lua_pushvalue( L, lua_upvalueindex(1)); // "name" require | 2125 | lua_pushvalue( L, lua_upvalueindex(1)); // "name" require |
2119 | lua_insert( L, 1); // require "name" | 2126 | lua_insert( L, 1); // require "name" |
2120 | lua_call( L, nargs, 1); // module | 2127 | lua_call( L, nargs, 1); // module |
2121 | populate_func_lookup_table( L, -1, name); | 2128 | populate_func_lookup_table( L, -1, name); |
2122 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); | 2129 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); |
2123 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2130 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2124 | STACK_END( L, 0); | 2131 | STACK_END( L, 0); |
2125 | return 1; | 2132 | return 1; |
2126 | } | 2133 | } |
@@ -2156,6 +2163,7 @@ LUAG_FUNC( thread_new) | |||
2156 | 2163 | ||
2157 | #define FIXED_ARGS 8 | 2164 | #define FIXED_ARGS 8 |
2158 | uint_t args = lua_gettop(L) - FIXED_ARGS; | 2165 | uint_t args = lua_gettop(L) - FIXED_ARGS; |
2166 | struct s_Universe* U = get_universe( L); | ||
2159 | 2167 | ||
2160 | // public Lanes API accepts a generic range -3/+3 | 2168 | // public Lanes API accepts a generic range -3/+3 |
2161 | // that will be remapped into the platform-specific scheduler priority scheme | 2169 | // that will be remapped into the platform-specific scheduler priority scheme |
@@ -2167,11 +2175,10 @@ LUAG_FUNC( thread_new) | |||
2167 | 2175 | ||
2168 | /* --- Create and prepare the sub state --- */ | 2176 | /* --- Create and prepare the sub state --- */ |
2169 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: setup\n" INDENT_END)); | 2177 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: setup\n" INDENT_END)); |
2170 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2178 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2171 | 2179 | ||
2172 | // populate with selected libraries at the same time | 2180 | // populate with selected libraries at the same time |
2173 | // | 2181 | L2 = luaG_newstate( U, L, libs); |
2174 | L2 = luaG_newstate( L, libs); | ||
2175 | 2182 | ||
2176 | STACK_GROW( L, 2); | 2183 | STACK_GROW( L, 2); |
2177 | STACK_GROW( L2, 3); | 2184 | STACK_GROW( L2, 3); |
@@ -2187,7 +2194,7 @@ LUAG_FUNC( thread_new) | |||
2187 | if( package != 0) | 2194 | if( package != 0) |
2188 | { | 2195 | { |
2189 | // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it on the stack | 2196 | // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it on the stack |
2190 | (void) luaG_inter_copy_package( L, L2, package, eLM_LaneBody); | 2197 | (void) luaG_inter_copy_package( U, L, L2, package, eLM_LaneBody); |
2191 | } | 2198 | } |
2192 | 2199 | ||
2193 | // modules to require in the target lane *before* the function is transferred! | 2200 | // modules to require in the target lane *before* the function is transferred! |
@@ -2198,7 +2205,7 @@ LUAG_FUNC( thread_new) | |||
2198 | { | 2205 | { |
2199 | int nbRequired = 1; | 2206 | int nbRequired = 1; |
2200 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: require 'required' list\n" INDENT_END)); | 2207 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: require 'required' list\n" INDENT_END)); |
2201 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2208 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2202 | // should not happen, was checked in lanes.lua before calling thread_new() | 2209 | // should not happen, was checked in lanes.lua before calling thread_new() |
2203 | if( lua_type( L, required) != LUA_TTABLE) | 2210 | if( lua_type( L, required) != LUA_TTABLE) |
2204 | { | 2211 | { |
@@ -2233,13 +2240,13 @@ LUAG_FUNC( thread_new) | |||
2233 | // which might not be the case if the libs list didn't include lanes.core or "*" | 2240 | // which might not be the case if the libs list didn't include lanes.core or "*" |
2234 | if( strncmp( name, "lanes.core", len) == 0) // this works for both "lanes" and "lanes.core" because of len | 2241 | if( strncmp( name, "lanes.core", len) == 0) // this works for both "lanes" and "lanes.core" because of len |
2235 | { | 2242 | { |
2236 | luaG_copy_one_time_settings( L, L2); | 2243 | luaG_copy_one_time_settings( U, L, L2); |
2237 | } | 2244 | } |
2238 | lua_pushlstring( L2, name, len); // require() name | 2245 | lua_pushlstring( L2, name, len); // require() name |
2239 | if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode | 2246 | if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode |
2240 | { | 2247 | { |
2241 | // propagate error to main state if any | 2248 | // propagate error to main state if any |
2242 | luaG_inter_move( L2, L, 1, eLM_LaneBody); // | 2249 | luaG_inter_move( U, L2, L, 1, eLM_LaneBody); // |
2243 | return lua_error( L); | 2250 | return lua_error( L); |
2244 | } | 2251 | } |
2245 | STACK_MID( L2, 1); | 2252 | STACK_MID( L2, 1); |
@@ -2253,7 +2260,7 @@ LUAG_FUNC( thread_new) | |||
2253 | lua_pop( L, 1); | 2260 | lua_pop( L, 1); |
2254 | ++ nbRequired; | 2261 | ++ nbRequired; |
2255 | } | 2262 | } |
2256 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2263 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2257 | } | 2264 | } |
2258 | STACK_END( L2, 0); | 2265 | STACK_END( L2, 0); |
2259 | STACK_END( L, 0); | 2266 | STACK_END( L, 0); |
@@ -2271,12 +2278,12 @@ LUAG_FUNC( thread_new) | |||
2271 | return luaL_error( L, "Expected table, got %s", luaL_typename( L, glob)); | 2278 | return luaL_error( L, "Expected table, got %s", luaL_typename( L, glob)); |
2272 | } | 2279 | } |
2273 | 2280 | ||
2274 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2281 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2275 | lua_pushnil( L); | 2282 | lua_pushnil( L); |
2276 | lua_pushglobaltable( L2); // Lua 5.2 wants us to push the globals table on the stack | 2283 | lua_pushglobaltable( L2); // Lua 5.2 wants us to push the globals table on the stack |
2277 | while( lua_next( L, glob)) | 2284 | while( lua_next( L, glob)) |
2278 | { | 2285 | { |
2279 | luaG_inter_copy( L, L2, 2, eLM_LaneBody); // moves the key/value pair to the L2 stack | 2286 | luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // moves the key/value pair to the L2 stack |
2280 | // assign it in L2's globals table | 2287 | // assign it in L2's globals table |
2281 | lua_rawset( L2, -3); | 2288 | lua_rawset( L2, -3); |
2282 | lua_pop( L, 1); | 2289 | lua_pop( L, 1); |
@@ -2285,7 +2292,7 @@ LUAG_FUNC( thread_new) | |||
2285 | 2292 | ||
2286 | STACK_END( L2, 0); | 2293 | STACK_END( L2, 0); |
2287 | STACK_END( L, 0); | 2294 | STACK_END( L, 0); |
2288 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2295 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2289 | } | 2296 | } |
2290 | 2297 | ||
2291 | ASSERT_L( lua_gettop( L2) == 0); | 2298 | ASSERT_L( lua_gettop( L2) == 0); |
@@ -2297,10 +2304,10 @@ LUAG_FUNC( thread_new) | |||
2297 | { | 2304 | { |
2298 | int res; | 2305 | int res; |
2299 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane body\n" INDENT_END)); | 2306 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane body\n" INDENT_END)); |
2300 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2307 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2301 | lua_pushvalue( L, 1); | 2308 | lua_pushvalue( L, 1); |
2302 | res = luaG_inter_move( L, L2, 1, eLM_LaneBody); // L->L2 | 2309 | res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // L->L2 |
2303 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2310 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2304 | if( res != 0) | 2311 | if( res != 0) |
2305 | { | 2312 | { |
2306 | return luaL_error( L, "tried to copy unsupported types"); | 2313 | return luaL_error( L, "tried to copy unsupported types"); |
@@ -2325,9 +2332,9 @@ LUAG_FUNC( thread_new) | |||
2325 | { | 2332 | { |
2326 | int res; | 2333 | int res; |
2327 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane arguments\n" INDENT_END)); | 2334 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane arguments\n" INDENT_END)); |
2328 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2335 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2329 | res = luaG_inter_copy( L, L2, args, eLM_LaneBody); // L->L2 | 2336 | res = luaG_inter_copy( U, L, L2, args, eLM_LaneBody); // L->L2 |
2330 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2337 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2331 | if( res != 0) | 2338 | if( res != 0) |
2332 | { | 2339 | { |
2333 | return luaL_error( L, "tried to copy unsupported types"); | 2340 | return luaL_error( L, "tried to copy unsupported types"); |
@@ -2348,8 +2355,8 @@ LUAG_FUNC( thread_new) | |||
2348 | return luaL_error( L, "could not create lane: out of memory"); | 2355 | return luaL_error( L, "could not create lane: out of memory"); |
2349 | } | 2356 | } |
2350 | 2357 | ||
2351 | //memset( s, 0, sizeof(struct s_lane) ); | ||
2352 | s->L = L2; | 2358 | s->L = L2; |
2359 | s->U = U; | ||
2353 | s->status = PENDING; | 2360 | s->status = PENDING; |
2354 | s->waiting_on = NULL; | 2361 | s->waiting_on = NULL; |
2355 | s->debug_name = "<unnamed>"; | 2362 | s->debug_name = "<unnamed>"; |
@@ -2399,7 +2406,7 @@ LUAG_FUNC( thread_new) | |||
2399 | THREAD_CREATE( &s->thread, lane_main, s, prio); | 2406 | THREAD_CREATE( &s->thread, lane_main, s, prio); |
2400 | STACK_END( L, 1); | 2407 | STACK_END( L, 1); |
2401 | 2408 | ||
2402 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2409 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2403 | 2410 | ||
2404 | return 1; | 2411 | return 1; |
2405 | } | 2412 | } |
@@ -2612,6 +2619,7 @@ LUAG_FUNC( thread_join) | |||
2612 | } | 2619 | } |
2613 | else | 2620 | else |
2614 | { | 2621 | { |
2622 | struct s_Universe* U = get_universe( L); | ||
2615 | // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed | 2623 | // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed |
2616 | // so store it in the userdata uservalue at a key that can't possibly collide | 2624 | // so store it in the userdata uservalue at a key that can't possibly collide |
2617 | securize_debug_threadname( L, s); | 2625 | securize_debug_threadname( L, s); |
@@ -2620,7 +2628,7 @@ LUAG_FUNC( thread_join) | |||
2620 | case DONE: | 2628 | case DONE: |
2621 | { | 2629 | { |
2622 | uint_t n = lua_gettop( L2); // whole L2 stack | 2630 | uint_t n = lua_gettop( L2); // whole L2 stack |
2623 | if( (n > 0) && (luaG_inter_move( L2, L, n, eLM_LaneBody) != 0)) | 2631 | if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0)) |
2624 | { | 2632 | { |
2625 | return luaL_error( L, "tried to copy unsupported types"); | 2633 | return luaL_error( L, "tried to copy unsupported types"); |
2626 | } | 2634 | } |
@@ -2629,8 +2637,9 @@ LUAG_FUNC( thread_join) | |||
2629 | break; | 2637 | break; |
2630 | 2638 | ||
2631 | case ERROR_ST: | 2639 | case ERROR_ST: |
2640 | STACK_GROW( L, 1); | ||
2632 | lua_pushnil( L); | 2641 | lua_pushnil( L); |
2633 | if( luaG_inter_move( L2, L, 1 + ERROR_FULL_STACK, eLM_LaneBody) != 0) // error message at [-2], stack trace at [-1] | 2642 | if( luaG_inter_move( U, L2, L, 1 + ERROR_FULL_STACK, eLM_LaneBody) != 0) // error message at [-2], stack trace at [-1] |
2634 | { | 2643 | { |
2635 | return luaL_error( L, "tried to copy unsupported types"); | 2644 | return luaL_error( L, "tried to copy unsupported types"); |
2636 | } | 2645 | } |
@@ -2638,12 +2647,13 @@ LUAG_FUNC( thread_join) | |||
2638 | break; | 2647 | break; |
2639 | 2648 | ||
2640 | case CANCELLED: | 2649 | case CANCELLED: |
2641 | ret= 0; | 2650 | ret = 0; |
2642 | break; | 2651 | break; |
2643 | 2652 | ||
2644 | default: | 2653 | default: |
2645 | DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); | 2654 | DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status)); |
2646 | ASSERT_L( FALSE); ret = 0; | 2655 | ASSERT_L( FALSE); |
2656 | ret = 0; | ||
2647 | } | 2657 | } |
2648 | lua_close( L2); | 2658 | lua_close( L2); |
2649 | } | 2659 | } |
@@ -2817,12 +2827,14 @@ LUAG_FUNC( thread_index) | |||
2817 | LUAG_FUNC( threads) | 2827 | LUAG_FUNC( threads) |
2818 | { | 2828 | { |
2819 | int const top = lua_gettop( L); | 2829 | int const top = lua_gettop( L); |
2830 | struct s_Universe* U = get_universe( L); | ||
2831 | |||
2820 | // List _all_ still running threads | 2832 | // List _all_ still running threads |
2821 | // | 2833 | // |
2822 | MUTEX_LOCK( &tracking_cs); | 2834 | MUTEX_LOCK( &U->tracking_cs); |
2823 | if( tracking_first && tracking_first != TRACKING_END) | 2835 | if( U->tracking_first && U->tracking_first != TRACKING_END) |
2824 | { | 2836 | { |
2825 | struct s_lane* s = tracking_first; | 2837 | struct s_lane* s = U->tracking_first; |
2826 | lua_newtable( L); // {} | 2838 | lua_newtable( L); // {} |
2827 | while( s != TRACKING_END) | 2839 | while( s != TRACKING_END) |
2828 | { | 2840 | { |
@@ -2832,7 +2844,7 @@ LUAG_FUNC( threads) | |||
2832 | s = s->tracking_next; | 2844 | s = s->tracking_next; |
2833 | } | 2845 | } |
2834 | } | 2846 | } |
2835 | MUTEX_UNLOCK( &tracking_cs); | 2847 | MUTEX_UNLOCK( &U->tracking_cs); |
2836 | return lua_gettop( L) - top; | 2848 | return lua_gettop( L) - top; |
2837 | } | 2849 | } |
2838 | #endif // HAVE_LANE_TRACKING | 2850 | #endif // HAVE_LANE_TRACKING |
@@ -2922,18 +2934,8 @@ static const struct luaL_Reg lanes_functions [] = { | |||
2922 | * settings table is at position 1 on the stack | 2934 | * settings table is at position 1 on the stack |
2923 | * pushes an error string on the stack in case of problem | 2935 | * pushes an error string on the stack in case of problem |
2924 | */ | 2936 | */ |
2925 | static int init_once_LOCKED( lua_State* L) | 2937 | static void init_once_LOCKED( void) |
2926 | { | 2938 | { |
2927 | initialize_on_state_create( L); | ||
2928 | |||
2929 | STACK_CHECK( L); | ||
2930 | |||
2931 | lua_getfield( L, 1, "verbose_errors"); | ||
2932 | GVerboseErrors = lua_toboolean( L, -1); | ||
2933 | lua_pop( L, 1); | ||
2934 | |||
2935 | STACK_MID( L, 0); | ||
2936 | |||
2937 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) | 2939 | #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC) |
2938 | now_secs(); // initialize 'now_secs()' internal offset | 2940 | now_secs(); // initialize 'now_secs()' internal offset |
2939 | #endif | 2941 | #endif |
@@ -2942,29 +2944,6 @@ static int init_once_LOCKED( lua_State* L) | |||
2942 | chudInitialize(); | 2944 | chudInitialize(); |
2943 | #endif | 2945 | #endif |
2944 | 2946 | ||
2945 | #if HAVE_LANE_TRACKING | ||
2946 | MUTEX_INIT( &tracking_cs); | ||
2947 | lua_getfield( L, 1, "track_lanes"); | ||
2948 | tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; | ||
2949 | lua_pop( L, 1); | ||
2950 | STACK_MID( L, 0); | ||
2951 | #endif // HAVE_LANE_TRACKING | ||
2952 | |||
2953 | // Locks for 'tools.c' inc/dec counters | ||
2954 | // | ||
2955 | MUTEX_INIT( &deep_lock); | ||
2956 | MUTEX_INIT( &mtid_lock); | ||
2957 | |||
2958 | // Serialize calls to 'require' from now on, also in the primary state | ||
2959 | // | ||
2960 | MUTEX_RECURSIVE_INIT( &require_cs); | ||
2961 | |||
2962 | serialize_require( L); | ||
2963 | |||
2964 | // Linked chains handling | ||
2965 | // | ||
2966 | MUTEX_INIT( &selfdestruct_cs); | ||
2967 | |||
2968 | //--- | 2947 | //--- |
2969 | // Linux needs SCHED_RR to change thread priorities, and that is only | 2948 | // Linux needs SCHED_RR to change thread priorities, and that is only |
2970 | // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. | 2949 | // allowed for sudo'ers. SCHED_OTHER (default) has no priorities. |
@@ -2990,52 +2969,6 @@ static int init_once_LOCKED( lua_State* L) | |||
2990 | } | 2969 | } |
2991 | #endif // LINUX_SCHED_RR | 2970 | #endif // LINUX_SCHED_RR |
2992 | #endif // PLATFORM_LINUX | 2971 | #endif // PLATFORM_LINUX |
2993 | { | ||
2994 | // returns non-0 if an error message was pushed on the stack | ||
2995 | int pushed_error = init_keepers( L); | ||
2996 | if( pushed_error) | ||
2997 | { | ||
2998 | return pushed_error; | ||
2999 | } | ||
3000 | } | ||
3001 | |||
3002 | // Initialize 'timer_deep'; a common Linda object shared by all states | ||
3003 | // | ||
3004 | ASSERT_L( timer_deep == NULL); | ||
3005 | |||
3006 | // proxy_ud= deep_userdata( idfunc ) | ||
3007 | // | ||
3008 | lua_pushliteral( L, "lanes-timer"); // push a name for debug purposes | ||
3009 | luaG_newdeepuserdata( L, linda_id); | ||
3010 | STACK_MID( L, 2); | ||
3011 | lua_remove( L, -2); // remove the name as we no longer need it | ||
3012 | |||
3013 | ASSERT_L( lua_isuserdata(L,-1)); | ||
3014 | |||
3015 | // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer | ||
3016 | // | ||
3017 | timer_deep = * (DEEP_PRELUDE**) lua_touserdata( L, -1); | ||
3018 | ASSERT_L( timer_deep && (timer_deep->refcount == 1) && timer_deep->deep && timer_deep->idfunc == linda_id); | ||
3019 | |||
3020 | // The host Lua state must always have a reference to this Linda object in order for the timer_deep pointer to be valid. | ||
3021 | // So store a reference that we will never actually use. | ||
3022 | // at the same time, use this object as a 'desinit' marker: | ||
3023 | // when the main lua State is closed, this object will be GC'ed | ||
3024 | { | ||
3025 | lua_newuserdata( L, 1); | ||
3026 | lua_newtable( L); | ||
3027 | lua_getfield( L, 1, "shutdown_timeout"); | ||
3028 | lua_pushcclosure( L, selfdestruct_gc, 1); | ||
3029 | lua_setfield( L, -2, "__gc"); | ||
3030 | lua_pushliteral( L, "AtExit"); | ||
3031 | lua_setfield( L, -2, "__metatable"); | ||
3032 | lua_setmetatable( L, -2); | ||
3033 | } | ||
3034 | lua_insert( L, -2); // Swap key with the Linda object | ||
3035 | lua_rawset( L, LUA_REGISTRYINDEX); | ||
3036 | |||
3037 | STACK_END( L, 0); | ||
3038 | return 0; | ||
3039 | } | 2972 | } |
3040 | 2973 | ||
3041 | static volatile long s_initCount = 0; | 2974 | static volatile long s_initCount = 0; |
@@ -3045,32 +2978,9 @@ static volatile long s_initCount = 0; | |||
3045 | // param 1: settings table | 2978 | // param 1: settings table |
3046 | LUAG_FUNC( configure) | 2979 | LUAG_FUNC( configure) |
3047 | { | 2980 | { |
3048 | // set to 1 if an error occured inside init_once_LOCKED(), and message is found at the top of the stack | 2981 | struct s_Universe* U = get_universe( L); |
3049 | int init_once_error = 0; | ||
3050 | char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); | 2982 | char const* name = luaL_checkstring( L, lua_upvalueindex( 1)); |
3051 | _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); | 2983 | _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE); |
3052 | STACK_CHECK( L); | ||
3053 | |||
3054 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); | ||
3055 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | ||
3056 | |||
3057 | // not in init_once_LOCKED because we can have several hosted "master" Lua states where Lanes is require()d. | ||
3058 | lua_getfield( L, 1, "protect_allocator"); // settings protect_allocator | ||
3059 | if( lua_toboolean( L, -1)) | ||
3060 | { | ||
3061 | void* ud; | ||
3062 | lua_Alloc allocf = lua_getallocf( L, &ud); | ||
3063 | if( allocf != protected_lua_Alloc) // just in case | ||
3064 | { | ||
3065 | struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocf( ud, NULL, 0, sizeof( struct ProtectedAllocator_s)); | ||
3066 | s->allocf = allocf; | ||
3067 | s->ud = ud; | ||
3068 | MUTEX_INIT( &s->lock); | ||
3069 | lua_setallocf( L, protected_lua_Alloc, s); | ||
3070 | } | ||
3071 | } | ||
3072 | lua_pop( L, 1); // settings | ||
3073 | STACK_MID( L, 0); | ||
3074 | 2984 | ||
3075 | /* | 2985 | /* |
3076 | ** Making one-time initializations. | 2986 | ** Making one-time initializations. |
@@ -3084,7 +2994,7 @@ LUAG_FUNC( configure) | |||
3084 | static volatile int /*bool*/ go_ahead; // = 0 | 2994 | static volatile int /*bool*/ go_ahead; // = 0 |
3085 | if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) | 2995 | if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0) |
3086 | { | 2996 | { |
3087 | init_once_error = init_once_LOCKED( L); | 2997 | init_once_LOCKED(); |
3088 | go_ahead = 1; // let others pass | 2998 | go_ahead = 1; // let others pass |
3089 | } | 2999 | } |
3090 | else | 3000 | else |
@@ -3102,7 +3012,7 @@ LUAG_FUNC( configure) | |||
3102 | // | 3012 | // |
3103 | if( s_initCount == 0) | 3013 | if( s_initCount == 0) |
3104 | { | 3014 | { |
3105 | init_once_error = init_once_LOCKED( L); | 3015 | init_once_LOCKED(); |
3106 | s_initCount = 1; | 3016 | s_initCount = 1; |
3107 | } | 3017 | } |
3108 | } | 3018 | } |
@@ -3110,85 +3020,148 @@ LUAG_FUNC( configure) | |||
3110 | } | 3020 | } |
3111 | #endif // THREADAPI == THREADAPI_PTHREAD | 3021 | #endif // THREADAPI == THREADAPI_PTHREAD |
3112 | 3022 | ||
3113 | // raise error outside the init-once mutex | 3023 | STACK_GROW( L, 4); |
3114 | if( init_once_error) | 3024 | STACK_CHECK( L); |
3025 | |||
3026 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); | ||
3027 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); | ||
3028 | |||
3029 | lua_getfield( L, 1, "protect_allocator"); // settings protect_allocator | ||
3030 | if( lua_toboolean( L, -1)) | ||
3115 | { | 3031 | { |
3116 | // will raise an error if the error is not a string (should not happen) | 3032 | void* ud; |
3117 | char const* error = luaL_checkstring( L, -1); | 3033 | lua_Alloc allocf = lua_getallocf( L, &ud); |
3118 | // raises an error with the message found at the top of the stack | 3034 | if( allocf != protected_lua_Alloc) // just in case |
3119 | lua_error( L); | 3035 | { |
3036 | struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocf( ud, NULL, 0, sizeof( struct ProtectedAllocator_s)); | ||
3037 | s->allocf = allocf; | ||
3038 | s->ud = ud; | ||
3039 | MUTEX_INIT( &s->lock); | ||
3040 | lua_setallocf( L, protected_lua_Alloc, s); | ||
3041 | } | ||
3042 | } | ||
3043 | lua_pop( L, 1); // settings | ||
3044 | STACK_MID( L, 0); | ||
3045 | |||
3046 | // grab or create the universe | ||
3047 | if( U == NULL) | ||
3048 | { | ||
3049 | lua_pushlightuserdata( L, UNIVERSE_REGKEY); // settings UNIVERSE_REGKEY | ||
3050 | U = (struct s_Universe*) lua_newuserdata( L, sizeof( struct s_Universe)); // settings UNIVERSE_REGKEY universe | ||
3051 | memset( U, 0, sizeof( struct s_Universe)); | ||
3052 | lua_newtable( L); // settings UNIVERSE_REGKEY universe mt | ||
3053 | lua_getfield( L, 1, "shutdown_timeout"); // settings UNIVERSE_REGKEY universe mt shutdown_timeout | ||
3054 | lua_pushcclosure( L, selfdestruct_gc, 1); // settings UNIVERSE_REGKEY universe mt selfdestruct_gc | ||
3055 | lua_setfield( L, -2, "__gc"); // settings UNIVERSE_REGKEY universe mt | ||
3056 | lua_setmetatable( L, -2); // settings UNIVERSE_REGKEY universe | ||
3057 | lua_rawset( L, LUA_REGISTRYINDEX); // settings | ||
3058 | lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors | ||
3059 | U->verboseErrors = lua_toboolean( L, -1); | ||
3060 | lua_pop( L, 1); // settings | ||
3061 | #if HAVE_LANE_TRACKING | ||
3062 | MUTEX_INIT( &U->tracking_cs); | ||
3063 | lua_getfield( L, 1, "track_lanes"); // settings track_lanes | ||
3064 | U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL; | ||
3065 | lua_pop( L, 1); // settings | ||
3066 | #endif // HAVE_LANE_TRACKING | ||
3067 | // Linked chains handling | ||
3068 | MUTEX_INIT( &U->selfdestruct_cs); | ||
3069 | MUTEX_RECURSIVE_INIT( &U->require_cs); | ||
3070 | // Locks for 'tools.c' inc/dec counters | ||
3071 | MUTEX_INIT( &U->deep_lock); | ||
3072 | MUTEX_INIT( &U->mtid_lock); | ||
3073 | U->selfdestruct_first = SELFDESTRUCT_END; | ||
3074 | initialize_on_state_create( U, L); | ||
3075 | init_keepers( U, L); | ||
3076 | STACK_MID( L, 0); | ||
3077 | |||
3078 | // Initialize 'timer_deep'; a common Linda object shared by all states | ||
3079 | lua_pushcfunction( L, LG_linda); // settings lanes.linda | ||
3080 | lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer" | ||
3081 | lua_call( L, 1, 1); // settings linda | ||
3082 | STACK_MID( L, 1); | ||
3083 | |||
3084 | // Proxy userdata contents is only a 'DEEP_PRELUDE*' pointer | ||
3085 | U->timer_deep = * (DEEP_PRELUDE**) lua_touserdata( L, -1); | ||
3086 | ASSERT_L( U->timer_deep && (U->timer_deep->refcount == 1) && U->timer_deep->deep && U->timer_deep->idfunc == linda_id); | ||
3087 | // increment refcount so that this linda remains alive as long as the universe is. | ||
3088 | ++ U->timer_deep->refcount; | ||
3089 | lua_pop( L, 1); // settings | ||
3120 | } | 3090 | } |
3091 | STACK_MID( L, 0); | ||
3092 | |||
3093 | // Serialize calls to 'require' from now on, also in the primary state | ||
3094 | serialize_require( L); | ||
3121 | 3095 | ||
3122 | // Retrieve main module interface table | 3096 | // Retrieve main module interface table |
3123 | lua_pushvalue( L, lua_upvalueindex( 2)); // settings M | 3097 | lua_pushvalue( L, lua_upvalueindex( 2)); // settings M |
3124 | // remove configure() (this function) from the module interface | 3098 | // remove configure() (this function) from the module interface |
3125 | lua_pushnil( L); // settings M nil | 3099 | lua_pushnil( L); // settings M nil |
3126 | lua_setfield( L, -2, "configure"); // settings M | 3100 | lua_setfield( L, -2, "configure"); // settings M |
3127 | // add functions to the module's table | 3101 | // add functions to the module's table |
3128 | luaG_registerlibfuncs( L, lanes_functions); | 3102 | luaG_registerlibfuncs( L, lanes_functions); |
3129 | #if HAVE_LANE_TRACKING | 3103 | #if HAVE_LANE_TRACKING |
3130 | // register core.threads() only if settings say it should be available | 3104 | // register core.threads() only if settings say it should be available |
3131 | if( tracking_first != NULL) | 3105 | if( U->tracking_first != NULL) |
3132 | { | 3106 | { |
3133 | lua_pushcfunction( L, LG_threads); // settings M LG_threads() | 3107 | lua_pushcfunction( L, LG_threads); // settings M LG_threads() |
3134 | lua_setfield( L, -2, "threads"); // settings M | 3108 | lua_setfield( L, -2, "threads"); // settings M |
3135 | } | 3109 | } |
3136 | #endif // HAVE_LANE_TRACKING | 3110 | #endif // HAVE_LANE_TRACKING |
3137 | STACK_MID( L, 1); | 3111 | STACK_MID( L, 1); |
3138 | 3112 | ||
3139 | { | 3113 | { |
3140 | char const* errmsg; | 3114 | char const* errmsg; |
3141 | ASSERT_L( timer_deep != NULL); // initialized by init_once_LOCKED | 3115 | errmsg = push_deep_proxy( U, L, (DEEP_PRELUDE*) U->timer_deep, eLM_LaneBody); // settings M timer_deep |
3142 | errmsg = push_deep_proxy( L, (DEEP_PRELUDE*) timer_deep, eLM_LaneBody); // settings M timer_deep | ||
3143 | if( errmsg != NULL) | 3116 | if( errmsg != NULL) |
3144 | { | 3117 | { |
3145 | luaL_error( L, errmsg); | 3118 | return luaL_error( L, errmsg); |
3146 | } | 3119 | } |
3147 | lua_setfield( L, -2, "timer_gateway"); // settings M | 3120 | lua_setfield( L, -2, "timer_gateway"); // settings M |
3148 | } | 3121 | } |
3149 | STACK_MID( L, 1); | 3122 | STACK_MID( L, 1); |
3150 | 3123 | ||
3151 | // prepare the metatable for threads | 3124 | // prepare the metatable for threads |
3152 | // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } | 3125 | // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname } |
3153 | // | 3126 | // |
3154 | if( luaL_newmetatable( L, "Lane")) // settings M mt | 3127 | if( luaL_newmetatable( L, "Lane")) // settings M mt |
3155 | { | 3128 | { |
3156 | lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc | 3129 | lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc |
3157 | lua_setfield( L, -2, "__gc"); // settings M mt | 3130 | lua_setfield( L, -2, "__gc"); // settings M mt |
3158 | lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index | 3131 | lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index |
3159 | lua_setfield( L, -2, "__index"); // settings M mt | 3132 | lua_setfield( L, -2, "__index"); // settings M mt |
3160 | lua_getglobal( L, "error"); // settings M mt error | 3133 | lua_getglobal( L, "error"); // settings M mt error |
3161 | ASSERT_L( lua_isfunction( L, -1)); | 3134 | ASSERT_L( lua_isfunction( L, -1)); |
3162 | lua_setfield( L, -2, "cached_error"); // settings M mt | 3135 | lua_setfield( L, -2, "cached_error"); // settings M mt |
3163 | lua_getglobal( L, "tostring"); // settings M mt tostring | 3136 | lua_getglobal( L, "tostring"); // settings M mt tostring |
3164 | ASSERT_L( lua_isfunction( L, -1)); | 3137 | ASSERT_L( lua_isfunction( L, -1)); |
3165 | lua_setfield( L, -2, "cached_tostring"); // settings M mt | 3138 | lua_setfield( L, -2, "cached_tostring"); // settings M mt |
3166 | lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join | 3139 | lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join |
3167 | lua_setfield( L, -2, "join"); // settings M mt | 3140 | lua_setfield( L, -2, "join"); // settings M mt |
3168 | lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname | 3141 | lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname |
3169 | lua_setfield( L, -2, "get_debug_threadname"); // settings M mt | 3142 | lua_setfield( L, -2, "get_debug_threadname"); // settings M mt |
3170 | lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel | 3143 | lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel |
3171 | lua_setfield( L, -2, "cancel"); // settings M mt | 3144 | lua_setfield( L, -2, "cancel"); // settings M mt |
3172 | lua_pushliteral( L, "Lane"); // settings M mt "Lane" | 3145 | lua_pushliteral( L, "Lane"); // settings M mt "Lane" |
3173 | lua_setfield( L, -2, "__metatable"); // settings M mt | 3146 | lua_setfield( L, -2, "__metatable"); // settings M mt |
3174 | } | 3147 | } |
3175 | 3148 | ||
3176 | lua_pushcclosure( L, LG_thread_new, 1); // settings M LG_thread_new | 3149 | lua_pushcclosure( L, LG_thread_new, 1); // settings M LG_thread_new |
3177 | lua_setfield( L, -2, "thread_new"); // settings M | 3150 | lua_setfield( L, -2, "thread_new"); // settings M |
3178 | 3151 | ||
3179 | // we can't register 'lanes.require' normally because we want to create an upvalued closure | 3152 | // we can't register 'lanes.require' normally because we want to create an upvalued closure |
3180 | lua_getglobal( L, "require"); // settings M require | 3153 | lua_getglobal( L, "require"); // settings M require |
3181 | lua_pushcclosure( L, LG_require, 1); // settings M lanes.require | 3154 | lua_pushcclosure( L, LG_require, 1); // settings M lanes.require |
3182 | lua_setfield( L, -2, "require"); // settings M | 3155 | lua_setfield( L, -2, "require"); // settings M |
3183 | 3156 | ||
3184 | lua_pushstring(L, VERSION); // settings M VERSION | 3157 | lua_pushstring(L, VERSION); // settings M VERSION |
3185 | lua_setfield( L, -2, "version"); // settings M | 3158 | lua_setfield( L, -2, "version"); // settings M |
3186 | 3159 | ||
3187 | lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX | 3160 | lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX |
3188 | lua_setfield( L, -2, "max_prio"); // settings M | 3161 | lua_setfield( L, -2, "max_prio"); // settings M |
3189 | 3162 | ||
3190 | lua_pushlightuserdata( L, CANCEL_ERROR); // settings M CANCEL_ERROR | 3163 | lua_pushlightuserdata( L, CANCEL_ERROR); // settings M CANCEL_ERROR |
3191 | lua_setfield( L, -2, "cancel_error"); // settings M | 3164 | lua_setfield( L, -2, "cancel_error"); // settings M |
3192 | 3165 | ||
3193 | // we'll need this every time we transfer some C function from/to this state | 3166 | // we'll need this every time we transfer some C function from/to this state |
3194 | lua_newtable( L); | 3167 | lua_newtable( L); |
@@ -3201,16 +3174,16 @@ LUAG_FUNC( configure) | |||
3201 | 3174 | ||
3202 | // record all existing C/JIT-fast functions | 3175 | // record all existing C/JIT-fast functions |
3203 | // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack | 3176 | // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack |
3204 | lua_pushglobaltable( L); // settings M _G | 3177 | lua_pushglobaltable( L); // settings M _G |
3205 | populate_func_lookup_table( L, -1, NULL); | 3178 | populate_func_lookup_table( L, -1, NULL); |
3206 | lua_pop( L, 1); // settings M | 3179 | lua_pop( L, 1); // settings M |
3207 | // set _R[CONFIG_REGKEY] = settings | 3180 | // set _R[CONFIG_REGKEY] = settings |
3208 | lua_pushvalue( L, -2); // settings M settings | 3181 | lua_pushvalue( L, -2); // settings M settings |
3209 | lua_setfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); // settings M | 3182 | lua_setfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); // settings M |
3210 | lua_pop( L, 1); // settings | 3183 | lua_pop( L, 1); // settings |
3211 | STACK_END( L, 0); | 3184 | STACK_END( L, 0); |
3212 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); | 3185 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); |
3213 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 3186 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
3214 | // Return the settings table | 3187 | // Return the settings table |
3215 | return 1; | 3188 | return 1; |
3216 | } | 3189 | } |
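
The net effect of the configure() rework above is that every master Lua state that requires Lanes now gets its own universe, so several embedded masters can run side by side. Below is a minimal host-side sketch of that scenario, assuming only the stock Lua C API and that lanes.lua is reachable on package.path; new_master() is a hypothetical helper, not part of this commit.

#include <stdio.h>
#include <lauxlib.h>
#include <lualib.h>

static lua_State* new_master( void)
{
	lua_State* L = luaL_newstate();
	luaL_openlibs( L);
	/* each configure() call creates (or reuses) the universe of this state only */
	if( luaL_dostring( L, "lanes = require 'lanes'.configure()") != 0)
	{
		fprintf( stderr, "%s\n", lua_tostring( L, -1)); /* a real host would propagate the error */
	}
	return L;
}

/* usage: two masters, two independent universes, both free to spawn lanes concurrently */
/* lua_State* A = new_master(); lua_State* B = new_master(); ... lua_close( A); lua_close( B); */
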
diff --git a/src/tools.c b/src/tools.c index 81ddf5c..65387e5 100644 --- a/src/tools.c +++ b/src/tools.c | |||
@@ -46,6 +46,8 @@ THE SOFTWARE. | |||
46 | #include <malloc.h> | 46 | #include <malloc.h> |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | void* const UNIVERSE_REGKEY = (void*) luaopen_lanes_core; | ||
50 | |||
49 | /* | 51 | /* |
50 | * ############################################################################################### | 52 | * ############################################################################################### |
51 | * ########################################### ASSERT ############################################ | 53 | * ########################################### ASSERT ############################################ |
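
UNIVERSE_REGKEY reuses the address of luaopen_lanes_core as a light-userdata key into LUA_REGISTRYINDEX: an address owned by the module can never collide with anyone else's registry key. A stripped-down sketch of the same idiom follows, with hypothetical names (key_anchor, MY_KEY, set_context, get_context) standing in for the Lanes ones.

#include <lua.h>

static int key_anchor;                      /* any module-private address will do */
#define MY_KEY ((void*) &key_anchor)

static void set_context( lua_State* L, void* p)
{
	lua_pushlightuserdata( L, MY_KEY);      /* key */
	lua_pushlightuserdata( L, p);           /* value */
	lua_rawset( L, LUA_REGISTRYINDEX);      /* registry[MY_KEY] = p */
}

static void* get_context( lua_State* L)
{
	void* p;
	lua_pushlightuserdata( L, MY_KEY);
	lua_rawget( L, LUA_REGISTRYINDEX);
	p = lua_touserdata( L, -1);             /* NULL if nothing was stored */
	lua_pop( L, 1);
	return p;
}

get_universe() and luaG_newstate() below follow this pattern, except that the master state stores a full userdata (so it owns the universe's lifetime) while lane states only keep a light-userdata copy of the pointer.
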
@@ -59,9 +61,6 @@ void ASSERT_IMPL( lua_State* L, bool_t cond_, char const* file_, int const line_ | |||
59 | } | 61 | } |
60 | } | 62 | } |
61 | 63 | ||
62 | // for verbose errors | ||
63 | bool_t GVerboseErrors = FALSE; | ||
64 | |||
65 | char const* const CONFIG_REGKEY = "ee932492-a654-4506-9da8-f16540bdb5d4"; | 64 | char const* const CONFIG_REGKEY = "ee932492-a654-4506-9da8-f16540bdb5d4"; |
66 | char const* const LOOKUP_REGKEY = "ddea37aa-50c7-4d3f-8e0b-fb7a9d62bac5"; | 65 | char const* const LOOKUP_REGKEY = "ddea37aa-50c7-4d3f-8e0b-fb7a9d62bac5"; |
67 | 66 | ||
@@ -109,11 +108,7 @@ void luaL_requiref (lua_State *L, const char *modname, lua_CFunction openf, int | |||
109 | #endif // LUA_VERSION_NUM | 108 | #endif // LUA_VERSION_NUM |
110 | 109 | ||
111 | DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); | 110 | DEBUGSPEW_CODE( char const* debugspew_indent = "----+----!----+----!----+----!----+----!----+----!----+----!----+----!----+"); |
112 | DEBUGSPEW_CODE( int debugspew_indent_depth = 0); | ||
113 | |||
114 | 111 | ||
115 | MUTEX_T deep_lock; | ||
116 | MUTEX_T mtid_lock; | ||
117 | 112 | ||
118 | /*---=== luaG_dump ===---*/ | 113 | /*---=== luaG_dump ===---*/ |
119 | 114 | ||
@@ -161,46 +156,58 @@ void luaG_dump( lua_State* L ) { | |||
161 | fprintf( stderr, "\n" ); | 156 | fprintf( stderr, "\n" ); |
162 | } | 157 | } |
163 | 158 | ||
164 | static lua_CFunction s_on_state_create_func = NULL; | 159 | void initialize_on_state_create( struct s_Universe* U, lua_State* L) |
165 | int initialize_on_state_create( lua_State* L) | ||
166 | { | 160 | { |
167 | STACK_CHECK( L); | 161 | STACK_CHECK( L); |
168 | lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil | 162 | lua_getfield( L, -1, "on_state_create"); // settings on_state_create|nil |
169 | if( !lua_isnil( L, -1)) | 163 | if( !lua_isnil( L, -1)) |
170 | { | 164 | { |
171 | // store C function pointer in an internal variable | 165 | // store C function pointer in an internal variable |
172 | s_on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create | 166 | U->on_state_create_func = lua_tocfunction( L, -1); // settings on_state_create |
173 | if( s_on_state_create_func != NULL) | 167 | if( U->on_state_create_func != NULL) |
174 | { | 168 | { |
175 | // make sure the function doesn't have upvalues | 169 | // make sure the function doesn't have upvalues |
176 | char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? | 170 | char const* upname = lua_getupvalue( L, -1, 1); // settings on_state_create upval? |
177 | if( upname != NULL) // should be "" for C functions with upvalues if any | 171 | if( upname != NULL) // should be "" for C functions with upvalues if any |
178 | { | 172 | { |
179 | luaL_error( L, "on_state_create shouldn't have upvalues"); | 173 | (void) luaL_error( L, "on_state_create shouldn't have upvalues"); |
180 | } | 174 | } |
181 | // remove this C function from the config table so that it doesn't cause problems | 175 | // remove this C function from the config table so that it doesn't cause problems |
182 | // when we transfer the config table in newly created Lua states | 176 | // when we transfer the config table in newly created Lua states |
183 | lua_pushnil( L); // settings on_state_create nil | 177 | lua_pushnil( L); // settings on_state_create nil |
184 | lua_setfield( L, -3, "on_state_create"); // settings on_state_create | 178 | lua_setfield( L, -3, "on_state_create"); // settings on_state_create |
185 | } | 179 | } |
186 | else | 180 | else |
187 | { | 181 | { |
188 | // optim: store marker saying we have such a function in the config table | 182 | // optim: store marker saying we have such a function in the config table |
189 | s_on_state_create_func = initialize_on_state_create; | 183 | U->on_state_create_func = (lua_CFunction) initialize_on_state_create; |
190 | } | 184 | } |
191 | } | 185 | } |
192 | lua_pop( L, 1); // settings | 186 | lua_pop( L, 1); // settings |
193 | STACK_END( L, 0); | 187 | STACK_END( L, 0); |
194 | return 0; | 188 | } |
189 | |||
190 | |||
191 | struct s_Universe* get_universe( lua_State* L) | ||
192 | { | ||
193 | struct s_Universe* universe; | ||
194 | STACK_GROW( L, 2); | ||
195 | STACK_CHECK( L); | ||
196 | lua_pushlightuserdata( L, UNIVERSE_REGKEY); | ||
197 | lua_rawget( L, LUA_REGISTRYINDEX); | ||
198 | universe = lua_touserdata( L, -1); // NULL if nil | ||
199 | lua_pop( L, 1); | ||
200 | STACK_END( L, 0); | ||
201 | return universe; | ||
195 | } | 202 | } |
196 | 203 | ||
197 | // just like lua_xmove, args are (from, to) | 204 | // just like lua_xmove, args are (from, to) |
198 | void luaG_copy_one_time_settings( lua_State* L, lua_State* L2) | 205 | void luaG_copy_one_time_settings( struct s_Universe* U, lua_State* L, lua_State* L2) |
199 | { | 206 | { |
200 | STACK_GROW( L, 1); | 207 | STACK_GROW( L, 1); |
201 | // copy settings from source to destination registry | 208 | // copy settings from source to destination registry |
202 | lua_getfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); | 209 | lua_getfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); |
203 | if( luaG_inter_move( L, L2, 1, eLM_LaneBody) < 0) // error? | 210 | if( luaG_inter_move( U, L, L2, 1, eLM_LaneBody) < 0) // error? |
204 | { | 211 | { |
205 | (void) luaL_error( L, "failed to copy settings when loading lanes.core"); | 212 | (void) luaL_error( L, "failed to copy settings when loading lanes.core"); |
206 | } | 213 | } |
@@ -241,7 +248,7 @@ static const luaL_Reg libs[] = | |||
241 | { NULL, NULL } | 248 | { NULL, NULL } |
242 | }; | 249 | }; |
243 | 250 | ||
244 | static void open1lib( lua_State* L, char const* name_, size_t len_, lua_State* from_) | 251 | static void open1lib( struct s_Universe* U, lua_State* L, char const* name_, size_t len_, lua_State* from_) |
245 | { | 252 | { |
246 | int i; | 253 | int i; |
247 | for( i = 0; libs[i].name; ++ i) | 254 | for( i = 0; libs[i].name; ++ i) |
@@ -258,7 +265,7 @@ static void open1lib( lua_State* L, char const* name_, size_t len_, lua_State* f | |||
258 | if( isLanesCore == TRUE) | 265 | if( isLanesCore == TRUE) |
259 | { | 266 | { |
260 | // copy settings from source to destination registry | 267 | // copy settings from source to destination registry |
261 | luaG_copy_one_time_settings( from_, L); | 268 | luaG_copy_one_time_settings( U, from_, L); |
262 | } | 269 | } |
263 | // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) | 270 | // open the library as if through require(), and create a global as well if necessary (the library table is left on the stack) |
264 | luaL_requiref( L, name_, libfunc, !isLanesCore); | 271 | luaL_requiref( L, name_, libfunc, !isLanesCore); |
@@ -377,6 +384,7 @@ static void populate_func_lookup_table_recur( lua_State* L, int _ctx_base, int _ | |||
377 | int const cache = _ctx_base + 2; | 384 | int const cache = _ctx_base + 2; |
378 | // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) | 385 | // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) |
379 | int const breadth_first_cache = lua_gettop( L) + 1; | 386 | int const breadth_first_cache = lua_gettop( L) + 1; |
387 | DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L)); | ||
380 | 388 | ||
381 | STACK_GROW( L, 6); | 389 | STACK_GROW( L, 6); |
382 | // slot _i contains a table where we search for functions (or a full userdata with a metatable) | 390 | // slot _i contains a table where we search for functions (or a full userdata with a metatable) |
@@ -503,7 +511,7 @@ static void populate_func_lookup_table_recur( lua_State* L, int _ctx_base, int _ | |||
503 | { | 511 | { |
504 | DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); | 512 | DEBUGSPEW_CODE( char const* key = (lua_type( L, -2) == LUA_TSTRING) ? lua_tostring( L, -2) : "not a string"); |
505 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); | 513 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "table '%s'\n" INDENT_END, key)); |
506 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 514 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
507 | // un-visit this table in case we do need to process it | 515 | // un-visit this table in case we do need to process it |
508 | lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} | 516 | lua_pushvalue( L, -1); // ... {_i} {bfc} k {} {} |
509 | lua_rawget( L, cache); // ... {_i} {bfc} k {} n | 517 | lua_rawget( L, cache); // ... {_i} {bfc} k {} n |
@@ -526,7 +534,7 @@ static void populate_func_lookup_table_recur( lua_State* L, int _ctx_base, int _ | |||
526 | populate_func_lookup_table_recur( L, _ctx_base, lua_gettop( L), _depth); // ... {_i} {bfc} k {} | 534 | populate_func_lookup_table_recur( L, _ctx_base, lua_gettop( L), _depth); // ... {_i} {bfc} k {} |
527 | lua_pop( L, 1); // ... {_i} {bfc} k | 535 | lua_pop( L, 1); // ... {_i} {bfc} k |
528 | STACK_MID( L, 2); | 536 | STACK_MID( L, 2); |
529 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 537 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
530 | } | 538 | } |
531 | // remove table name from fqn stack | 539 | // remove table name from fqn stack |
532 | lua_pushnil( L); // ... {_i} {bfc} nil | 540 | lua_pushnil( L); // ... {_i} {bfc} nil |
@@ -546,8 +554,9 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | |||
546 | int const ctx_base = lua_gettop( L) + 1; | 554 | int const ctx_base = lua_gettop( L) + 1; |
547 | int const in_base = lua_absindex( L, _i); | 555 | int const in_base = lua_absindex( L, _i); |
548 | int const start_depth = name_ ? 1 : 0; | 556 | int const start_depth = name_ ? 1 : 0; |
557 | DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L)); | ||
549 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); | 558 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END, L, name_ ? name_ : "NULL")); |
550 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 559 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
551 | STACK_GROW( L, 3); | 560 | STACK_GROW( L, 3); |
552 | STACK_CHECK( L); | 561 | STACK_CHECK( L); |
553 | lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} | 562 | lua_getfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} |
@@ -588,19 +597,19 @@ void populate_func_lookup_table( lua_State* L, int _i, char const* name_) | |||
588 | (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); | 597 | (void) luaL_error( L, "unsupported module type %s", lua_typename( L, lua_type( L, in_base))); |
589 | } | 598 | } |
590 | STACK_END( L, 0); | 599 | STACK_END( L, 0); |
591 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 600 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
592 | } | 601 | } |
593 | 602 | ||
594 | int call_on_state_create( lua_State* L, lua_State* from_, enum eLookupMode mode_) | 603 | void call_on_state_create( struct s_Universe* U, lua_State* L, lua_State* from_, enum eLookupMode mode_) |
595 | { | 604 | { |
596 | if( s_on_state_create_func != NULL) | 605 | if( U->on_state_create_func != NULL) |
597 | { | 606 | { |
598 | STACK_CHECK( L); | 607 | STACK_CHECK( L); |
599 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); | 608 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END)); |
600 | if( s_on_state_create_func != initialize_on_state_create) | 609 | if( U->on_state_create_func != (lua_CFunction) initialize_on_state_create) |
601 | { | 610 | { |
602 | // C function: recreate a closure in the new state, bypassing the lookup scheme | 611 | // C function: recreate a closure in the new state, bypassing the lookup scheme |
603 | lua_pushcfunction( L, s_on_state_create_func); | 612 | lua_pushcfunction( L, U->on_state_create_func); |
604 | } | 613 | } |
605 | else // Lua function located in the config table, copied when we opened "lanes.core" | 614 | else // Lua function located in the config table, copied when we opened "lanes.core" |
606 | { | 615 | { |
@@ -608,24 +617,22 @@ int call_on_state_create( lua_State* L, lua_State* from_, enum eLookupMode mode_ | |||
608 | { | 617 | { |
609 | // if attempting to call in a keeper state, do nothing because the function doesn't exist there | 618 | // if attempting to call in a keeper state, do nothing because the function doesn't exist there |
610 | // this doesn't count as an error though | 619 | // this doesn't count as an error though |
611 | return 0; | 620 | return; |
612 | } | 621 | } |
613 | lua_getfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); | 622 | lua_getfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); |
614 | lua_getfield( L, -1, "on_state_create"); | 623 | lua_getfield( L, -1, "on_state_create"); |
615 | lua_remove( L, -2); | 624 | lua_remove( L, -2); |
616 | } | 625 | } |
617 | // capture error and forward it to main state | 626 | // capture error and raise it in caller state |
618 | if( lua_pcall( L, 0, 0, 0) != LUA_OK) | 627 | if( lua_pcall( L, 0, 0, 0) != LUA_OK) |
619 | { | 628 | { |
620 | lua_pushfstring( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); | 629 | luaL_error( from_, "on_state_create failed: \"%s\"", lua_isstring( L, -1) ? lua_tostring( L, -1) : lua_typename( L, lua_type( L, -1))); |
621 | return 1; | ||
622 | } | 630 | } |
623 | STACK_END( L, 0); | 631 | STACK_END( L, 0); |
624 | } | 632 | } |
625 | return 0; | ||
626 | } | 633 | } |
627 | 634 | ||
628 | /* | 635 | /* |
629 | * Like 'luaL_openlibs()' but allows the set of libraries to be selected | 636 | * Like 'luaL_openlibs()' but allows the set of libraries to be selected |
630 | * | 637 | * |
631 | * NULL no libraries, not even base | 638 | * NULL no libraries, not even base |
@@ -638,7 +645,7 @@ int call_on_state_create( lua_State* L, lua_State* from_, enum eLookupMode mode_ | |||
638 | * *NOT* called for keeper states! | 645 | * *NOT* called for keeper states! |
639 | * | 646 | * |
640 | */ | 647 | */ |
641 | lua_State* luaG_newstate( lua_State* from_, char const* libs_) | 648 | lua_State* luaG_newstate( struct s_Universe* U, lua_State* from_, char const* libs_) |
642 | { | 649 | { |
643 | // re-use alloc function from the originating state | 650 | // re-use alloc function from the originating state |
644 | #if PROPAGATE_ALLOCF | 651 | #if PROPAGATE_ALLOCF |
@@ -651,22 +658,30 @@ lua_State* luaG_newstate( lua_State* from_, char const* libs_) | |||
651 | (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); | 658 | (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); |
652 | } | 659 | } |
653 | 660 | ||
661 | STACK_GROW( L, 2); | ||
662 | STACK_CHECK( L); | ||
663 | |||
664 | // copy the universe as a light userdata (only the master state holds the full userdata) | ||
665 | // that way, if Lanes is required in this new state, we'll know we are part of this universe | ||
666 | lua_pushlightuserdata( L, UNIVERSE_REGKEY); | ||
667 | lua_pushlightuserdata( L, U); | ||
668 | lua_rawset( L, LUA_REGISTRYINDEX); | ||
669 | STACK_MID( L, 0); | ||
670 | |||
654 | // we'll need this every time we transfer some C function from/to this state | 671 | // we'll need this every time we transfer some C function from/to this state |
655 | lua_newtable( L); | 672 | lua_newtable( L); |
656 | lua_setfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); | 673 | lua_setfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY); |
657 | 674 | ||
658 | // neither libs (not even 'base') nor special init func: we are done | 675 | // neither libs (not even 'base') nor special init func: we are done |
659 | if( libs_ == NULL && s_on_state_create_func == NULL) | 676 | if( libs_ == NULL && U->on_state_create_func == NULL) |
660 | { | 677 | { |
661 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); | 678 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate(NULL)\n" INDENT_END)); |
662 | return L; | 679 | return L; |
663 | } | 680 | } |
664 | 681 | ||
665 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); | 682 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_newstate()\n" INDENT_END)); |
666 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 683 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
667 | 684 | ||
668 | STACK_GROW( L, 2); | ||
669 | STACK_CHECK( L); | ||
670 | // 'lua.c' stops GC during initialization so perhaps it's a good idea. :) | 685 | // 'lua.c' stops GC during initialization so perhaps it's a good idea. :) |
671 | lua_gc( L, LUA_GCSTOP, 0); | 686 | lua_gc( L, LUA_GCSTOP, 0); |
672 | 687 | ||
@@ -681,7 +696,7 @@ lua_State* luaG_newstate( lua_State* from_, char const* libs_) | |||
681 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); | 696 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "opening ALL standard libraries\n" INDENT_END)); |
682 | luaL_openlibs( L); | 697 | luaL_openlibs( L); |
683 | // don't forget lanes.core for regular lane states | 698 | // don't forget lanes.core for regular lane states |
684 | open1lib( L, "lanes.core", 10, from_); | 699 | open1lib( U, L, "lanes.core", 10, from_); |
685 | libs_ = NULL; // done with libs | 700 | libs_ = NULL; // done with libs |
686 | } | 701 | } |
687 | else | 702 | else |
@@ -715,19 +730,16 @@ lua_State* luaG_newstate( lua_State* from_, char const* libs_) | |||
715 | while( isalnum( p[len]) || p[len] == '.') | 730 | while( isalnum( p[len]) || p[len] == '.') |
716 | ++ len; | 731 | ++ len; |
717 | // open library | 732 | // open library |
718 | open1lib( L, p, len, from_); | 733 | open1lib( U, L, p, len, from_); |
719 | } | 734 | } |
720 | serialize_require( L); | ||
721 | } | 735 | } |
722 | |||
723 | lua_gc( L, LUA_GCRESTART, 0); | 736 | lua_gc( L, LUA_GCRESTART, 0); |
724 | 737 | ||
738 | serialize_require( L); | ||
739 | |||
725 | // call this after the base libraries are loaded and GC is restarted | 740 | // call this after the base libraries are loaded and GC is restarted |
726 | if( call_on_state_create( L, from_, eLM_LaneBody)) | 741 | // will raise an error in from_ in case of problem |
727 | { | 742 | call_on_state_create( U, L, from_, eLM_LaneBody); |
728 | // if something went wrong, the error message is pushed on the stack | ||
729 | lua_error( from_); | ||
730 | } | ||
731 | 743 | ||
732 | STACK_CHECK( L); | 744 | STACK_CHECK( L); |
733 | // after all this, register everything we find in our name<->function database | 745 | // after all this, register everything we find in our name<->function database |
@@ -735,7 +747,7 @@ lua_State* luaG_newstate( lua_State* from_, char const* libs_) | |||
735 | populate_func_lookup_table( L, -1, NULL); | 747 | populate_func_lookup_table( L, -1, NULL); |
736 | lua_pop( L, 1); | 748 | lua_pop( L, 1); |
737 | STACK_END( L, 0); | 749 | STACK_END( L, 0); |
738 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 750 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
739 | return L; | 751 | return L; |
740 | } | 752 | } |
741 | 753 | ||
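
call_on_state_create() above runs the host-provided on_state_create callback in each newly built lane state via lua_pcall( L, 0, 0, 0), and a C callback must not carry upvalues (initialize_on_state_create checks for that). A hedged sketch of such a callback follows; luaopen_mylib is a hypothetical host binding, not part of Lanes, and luaL_requiref is the Lua 5.2 API for which tools.c carries a 5.1 fallback.

#include <lauxlib.h>

extern int luaopen_mylib( lua_State* L);    /* hypothetical binding provided by the host */

static int on_state_create( lua_State* L)
{
	/* make "mylib" require()-able (and a global) in the new state */
	luaL_requiref( L, "mylib", luaopen_mylib, 1);
	lua_pop( L, 1);                         /* leave the stack as we found it */
	return 0;
}

/* the host then hands this function to Lanes through the settings table passed to configure() */
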
@@ -845,6 +857,16 @@ static inline luaG_IdFunction get_idfunc( lua_State* L, int index, enum eLookupM | |||
845 | } | 857 | } |
846 | 858 | ||
847 | 859 | ||
860 | void free_deep_prelude( lua_State* L, DEEP_PRELUDE* prelude_) | ||
861 | { | ||
862 | // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup | ||
863 | lua_pushlightuserdata( L, prelude_->deep); | ||
864 | ASSERT_L( prelude_->idfunc); | ||
865 | prelude_->idfunc( L, eDO_delete); | ||
866 | DEEP_FREE( (void*) prelude_); | ||
867 | } | ||
868 | |||
869 | |||
848 | /* | 870 | /* |
849 | * void= mt.__gc( proxy_ud ) | 871 | * void= mt.__gc( proxy_ud ) |
850 | * | 872 | * |
@@ -855,23 +877,20 @@ static int deep_userdata_gc( lua_State* L) | |||
855 | { | 877 | { |
856 | DEEP_PRELUDE** proxy = (DEEP_PRELUDE**) lua_touserdata( L, 1); | 878 | DEEP_PRELUDE** proxy = (DEEP_PRELUDE**) lua_touserdata( L, 1); |
857 | DEEP_PRELUDE* p = *proxy; | 879 | DEEP_PRELUDE* p = *proxy; |
880 | struct s_Universe* U = get_universe( L); | ||
858 | int v; | 881 | int v; |
859 | 882 | ||
860 | *proxy = 0; // make sure we don't use it any more | 883 | *proxy = 0; // make sure we don't use it any more |
861 | 884 | ||
862 | MUTEX_LOCK( &deep_lock); | 885 | MUTEX_LOCK( &U->deep_lock); |
863 | v = -- (p->refcount); | 886 | v = -- (p->refcount); |
864 | MUTEX_UNLOCK( &deep_lock); | 887 | MUTEX_UNLOCK( &U->deep_lock); |
865 | 888 | ||
866 | if( v == 0) | 889 | if( v == 0) |
867 | { | 890 | { |
868 | // clean stack so we can call 'idfunc' directly | 891 | // 'idfunc' expects a clean stack to work on |
869 | lua_settop( L, 0); | 892 | lua_settop( L, 0); |
870 | // Call 'idfunc( "delete", deep_ptr )' to make deep cleanup | 893 | free_deep_prelude( L, p); |
871 | lua_pushlightuserdata( L, p->deep); | ||
872 | ASSERT_L( p->idfunc); | ||
873 | p->idfunc( L, eDO_delete); | ||
874 | DEEP_FREE( (void*) p); | ||
875 | 894 | ||
876 | // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! | 895 | // top was set to 0, then userdata was pushed. "delete" might want to pop the userdata (we don't care), but should not push anything! |
877 | if ( lua_gettop( L) > 1) | 896 | if ( lua_gettop( L) > 1) |
@@ -892,7 +911,7 @@ static int deep_userdata_gc( lua_State* L) | |||
892 | * used in this Lua state (metatable, registering it). Otherwise, increments the | 911 | * used in this Lua state (metatable, registering it). Otherwise, increments the |
893 | * reference count. | 912 | * reference count. |
894 | */ | 913 | */ |
895 | char const* push_deep_proxy( lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMode mode_) | 914 | char const* push_deep_proxy( struct s_Universe* U, lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMode mode_) |
896 | { | 915 | { |
897 | DEEP_PRELUDE** proxy; | 916 | DEEP_PRELUDE** proxy; |
898 | 917 | ||
@@ -910,9 +929,9 @@ char const* push_deep_proxy( lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMo | |||
910 | lua_pop( L, 1); // DPC | 929 | lua_pop( L, 1); // DPC |
911 | } | 930 | } |
912 | 931 | ||
913 | MUTEX_LOCK( &deep_lock); | 932 | MUTEX_LOCK( &U->deep_lock); |
914 | ++ (prelude->refcount); // one more proxy pointing to this deep data | 933 | ++ (prelude->refcount); // one more proxy pointing to this deep data |
915 | MUTEX_UNLOCK( &deep_lock); | 934 | MUTEX_UNLOCK( &U->deep_lock); |
916 | 935 | ||
917 | STACK_GROW( L, 7); | 936 | STACK_GROW( L, 7); |
918 | STACK_CHECK( L); | 937 | STACK_CHECK( L); |
@@ -1048,9 +1067,9 @@ char const* push_deep_proxy( lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMo | |||
1048 | * | 1067 | * |
1049 | * 'idfunc' must fulfill the following features: | 1068 | * 'idfunc' must fulfill the following features: |
1050 | * | 1069 | * |
1051 | * lightuserdata= idfunc( "new" [, ...] ) -- creates a new deep data instance | 1070 | * lightuserdata = idfunc( eDO_new [, ...] ) -- creates a new deep data instance |
1052 | * void= idfunc( "delete", lightuserdata ) -- releases a deep data instance | 1071 | * void = idfunc( eDO_delete, lightuserdata ) -- releases a deep data instance |
1053 | * tbl= idfunc( "metatable" ) -- gives metatable for userdata proxies | 1072 | * tbl = idfunc( eDO_metatable ) -- gives metatable for userdata proxies |
1054 | * | 1073 | * |
1055 | * Reference counting and true userdata proxying are taken care of for the | 1074 | * Reference counting and true userdata proxying are taken care of for the |
1056 | * actual data type. | 1075 | * actual data type. |
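For reference, an idfunc honouring this contract could look roughly like the sketch below. The MyDeep type is made up for the example, and the exact callback signature (written here as void* (*)( lua_State*, enum eDeepOp), with eDO_new returning the deep pointer through the C return value rather than the stack, as the "should not push anything" checks suggest) should be taken from tools.h rather than from this sketch:

    // Illustrative only: a hypothetical deep type and its idfunc.
    // Assumes <stdlib.h> plus the Lanes headers (lua.h, tools.h) are included.
    struct MyDeep { int value; };

    static void* my_idfunc( lua_State* L, enum eDeepOp op_)
    {
        switch( op_)
        {
            case eDO_new: // create an instance; construction arguments, if any, are on the stack
            {
                struct MyDeep* d = (struct MyDeep*) malloc( sizeof( struct MyDeep));
                if( d != NULL) d->value = 0;
                return d; // becomes prelude->deep; nothing is pushed
            }
            case eDO_delete: // the deep pointer to release is on the stack as a light userdata
                free( lua_touserdata( L, 1));
                return NULL;
            case eDO_metatable: // leave the metatable for the userdata proxies on the stack
                lua_newtable( L);
                // ... populate __index, __gc, etc. for the proxy here ...
                return NULL;
            default:
                return NULL;
        }
    }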
@@ -1064,7 +1083,10 @@ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc) | |||
1064 | { | 1083 | { |
1065 | char const* errmsg; | 1084 | char const* errmsg; |
1066 | DEEP_PRELUDE* prelude = DEEP_MALLOC( sizeof(DEEP_PRELUDE)); | 1085 | DEEP_PRELUDE* prelude = DEEP_MALLOC( sizeof(DEEP_PRELUDE)); |
1067 | ASSERT_L( prelude); | 1086 | if( prelude == NULL) |
1087 | { | ||
1088 | return luaL_error( L, "could not allocate deep prelude: out of memory"); | ||
1089 | } | ||
1068 | 1090 | ||
1069 | prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 | 1091 | prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1 |
1070 | prelude->idfunc = idfunc; | 1092 | prelude->idfunc = idfunc; |
@@ -1084,7 +1106,7 @@ int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc) | |||
1084 | luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); | 1106 | luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack"); |
1085 | } | 1107 | } |
1086 | } | 1108 | } |
1087 | errmsg = push_deep_proxy( L, prelude, eLM_LaneBody); // proxy | 1109 | errmsg = push_deep_proxy( get_universe( L), L, prelude, eLM_LaneBody); // proxy |
1088 | if( errmsg != NULL) | 1110 | if( errmsg != NULL) |
1089 | { | 1111 | { |
1090 | luaL_error( L, errmsg); | 1112 | luaL_error( L, errmsg); |
@@ -1125,7 +1147,7 @@ void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index) | |||
1125 | * the id function of the copied value, or NULL for non-deep userdata | 1147 | * the id function of the copied value, or NULL for non-deep userdata |
1126 | * (not copied) | 1148 | * (not copied) |
1127 | */ | 1149 | */ |
1128 | static luaG_IdFunction copydeep( lua_State* L, lua_State* L2, int index, enum eLookupMode mode_) | 1150 | static luaG_IdFunction copydeep( struct s_Universe* U, lua_State* L, lua_State* L2, int index, enum eLookupMode mode_) |
1129 | { | 1151 | { |
1130 | char const* errmsg; | 1152 | char const* errmsg; |
1131 | luaG_IdFunction idfunc = get_idfunc( L, index, mode_); | 1153 | luaG_IdFunction idfunc = get_idfunc( L, index, mode_); |
@@ -1134,7 +1156,7 @@ static luaG_IdFunction copydeep( lua_State* L, lua_State* L2, int index, enum eL | |||
1134 | return NULL; // not a deep userdata | 1156 | return NULL; // not a deep userdata |
1135 | } | 1157 | } |
1136 | 1158 | ||
1137 | errmsg = push_deep_proxy( L2, *(DEEP_PRELUDE**) lua_touserdata( L, index), mode_); | 1159 | errmsg = push_deep_proxy( U, L2, *(DEEP_PRELUDE**) lua_touserdata( L, index), mode_); |
1138 | if( errmsg != NULL) | 1160 | if( errmsg != NULL) |
1139 | { | 1161 | { |
1140 | // raise the error in the proper state (not the keeper) | 1162 | // raise the error in the proper state (not the keeper) |
@@ -1207,47 +1229,47 @@ static inline void push_registry_subtable( lua_State* L, void* key_) | |||
1207 | /* | 1229 | /* |
1208 | * Get a unique ID for metatable at [i]. | 1230 | * Get a unique ID for metatable at [i]. |
1209 | */ | 1231 | */ |
1210 | static | 1232 | static uint_t get_mt_id( struct s_Universe* U, lua_State* L, int i) |
1211 | uint_t get_mt_id( lua_State *L, int i ) { | 1233 | { |
1212 | static uint_t last_id= 0; | 1234 | uint_t id; |
1213 | uint_t id; | 1235 | |
1214 | 1236 | i = lua_absindex( L, i); | |
1215 | i = lua_absindex( L, i); | 1237 | |
1216 | 1238 | STACK_GROW( L, 3); | |
1217 | STACK_GROW(L,3); | 1239 | |
1218 | 1240 | STACK_CHECK( L); | |
1219 | STACK_CHECK( L); | 1241 | push_registry_subtable( L, REG_MTID); |
1220 | push_registry_subtable( L, REG_MTID ); | 1242 | lua_pushvalue( L, i); |
1221 | lua_pushvalue(L, i); | 1243 | lua_rawget( L, -2); |
1222 | lua_rawget( L, -2 ); | 1244 | // |
1223 | // | 1245 | // [-2]: reg[REG_MTID] |
1224 | // [-2]: reg[REG_MTID] | 1246 | // [-1]: nil/uint |
1225 | // [-1]: nil/uint | 1247 | |
1226 | 1248 | id = (uint_t) lua_tointeger( L, -1); // 0 for nil | |
1227 | id= (uint_t)lua_tointeger(L,-1); // 0 for nil | 1249 | lua_pop( L, 1); |
1228 | lua_pop(L,1); | 1250 | STACK_MID( L, 1); |
1229 | STACK_MID( L, 1); | 1251 | |
1230 | 1252 | if( id == 0) | |
1231 | if (id==0) { | 1253 | { |
1232 | MUTEX_LOCK( &mtid_lock ); | 1254 | MUTEX_LOCK( &U->mtid_lock); |
1233 | id= ++last_id; | 1255 | id = ++ U->last_mt_id; |
1234 | MUTEX_UNLOCK( &mtid_lock ); | 1256 | MUTEX_UNLOCK( &U->mtid_lock); |
1235 | 1257 | ||
1236 | /* Create two-way references: id_uint <-> table | 1258 | /* Create two-way references: id_uint <-> table |
1237 | */ | 1259 | */ |
1238 | lua_pushvalue(L,i); | 1260 | lua_pushvalue( L, i); |
1239 | lua_pushinteger(L,id); | 1261 | lua_pushinteger( L, id); |
1240 | lua_rawset( L, -3 ); | 1262 | lua_rawset( L, -3); |
1241 | 1263 | ||
1242 | lua_pushinteger(L,id); | 1264 | lua_pushinteger( L, id); |
1243 | lua_pushvalue(L,i); | 1265 | lua_pushvalue( L, i); |
1244 | lua_rawset( L, -3 ); | 1266 | lua_rawset( L, -3); |
1245 | } | 1267 | } |
1246 | lua_pop(L,1); // remove 'reg[REG_MTID]' reference | 1268 | lua_pop( L, 1); // remove 'reg[REG_MTID]' reference |
1247 | 1269 | ||
1248 | STACK_END( L, 0); | 1270 | STACK_END( L, 0); |
1249 | 1271 | ||
1250 | return id; | 1272 | return id; |
1251 | } | 1273 | } |
1252 | 1274 | ||
1253 | 1275 | ||
@@ -1518,7 +1540,7 @@ static int sentinelfunc( lua_State* L) | |||
1518 | /* | 1540 | /* |
1519 | * Push a looked-up native/LuaJIT function. | 1541 | * Push a looked-up native/LuaJIT function. |
1520 | */ | 1542 | */ |
1521 | static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) | 1543 | static void lookup_native_func( struct s_Universe* U, lua_State* L2, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) |
1522 | { | 1544 | { |
1523 | char const* fqn; // L // L2 | 1545 | char const* fqn; // L // L2 |
1524 | size_t len; | 1546 | size_t len; |
@@ -1576,14 +1598,20 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, enum eLoo | |||
1576 | // push the equivalent function in the destination's stack, retrieved from the lookup table | 1598 | // push the equivalent function in the destination's stack, retrieved from the lookup table |
1577 | STACK_CHECK( L2); | 1599 | STACK_CHECK( L2); |
1578 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error | 1600 | STACK_GROW( L2, 3); // up to 3 slots are necessary on error |
1579 | if( mode_ == eLM_ToKeeper) | 1601 | switch( mode_) |
1580 | { | 1602 | { |
1603 | default: // shouldn't happen, in theory... | ||
1604 | (void) luaL_error( L, "internal error: unknown lookup mode"); | ||
1605 | return; | ||
1606 | |||
1607 | case eLM_ToKeeper: | ||
1581 | // push a sentinel closure that holds the lookup name as upvalue | 1608 | // push a sentinel closure that holds the lookup name as upvalue |
1582 | lua_pushlstring( L2, fqn, len); // "f.q.n" | 1609 | lua_pushlstring( L2, fqn, len); // "f.q.n" |
1583 | lua_pushcclosure( L2, sentinelfunc, 1); // f | 1610 | lua_pushcclosure( L2, sentinelfunc, 1); // f |
1584 | } | 1611 | break; |
1585 | else | 1612 | |
1586 | { | 1613 | case eLM_LaneBody: |
1614 | case eLM_FromKeeper: | ||
1587 | lua_getfield( L2, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} | 1615 | lua_getfield( L2, LUA_REGISTRYINDEX, LOOKUP_REGKEY); // {} |
1588 | ASSERT_L( lua_istable( L2, -1)); | 1616 | ASSERT_L( lua_istable( L2, -1)); |
1589 | lua_pushlstring( L2, fqn, len); // {} "f.q.n" | 1617 | lua_pushlstring( L2, fqn, len); // {} "f.q.n" |
@@ -1600,6 +1628,23 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, enum eLoo | |||
1600 | return; | 1628 | return; |
1601 | } | 1629 | } |
1602 | lua_remove( L2, -2); // f | 1630 | lua_remove( L2, -2); // f |
1631 | break; | ||
1632 | |||
1633 | /* keep it in case I need it someday, who knows... | ||
1634 | case eLM_RawFunctions: | ||
1635 | { | ||
1636 | int n; | ||
1637 | char const* upname; | ||
1638 | lua_CFunction f = lua_tocfunction( L, i); | ||
1639 | // copy upvalues | ||
1640 | for( n = 0; (upname = lua_getupvalue( L, i, 1 + n)) != NULL; ++ n) | ||
1641 | { | ||
1642 | luaG_inter_move( U, L, L2, 1, mode_); // [up[,up ...]] | ||
1643 | } | ||
1644 | lua_pushcclosure( L2, f, n); // | ||
1645 | } | ||
1646 | break; | ||
1647 | */ | ||
1603 | } | 1648 | } |
1604 | STACK_END( L2, 1); | 1649 | STACK_END( L2, 1); |
1605 | } | 1650 | } |
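In the eLM_ToKeeper branch, the sentinel closure only has to carry the fully qualified name ("f.q.n") as its upvalue so the real function can be looked up again when the value leaves the keeper; it is never meant to be invoked. A plausible minimal shape for such a sentinel, for illustration only (the actual sentinelfunc in tools.c may report things differently):

    // Illustration only: a keeper-side placeholder carrying the lookup name
    // as its first upvalue. If it ever runs, something went wrong.
    static int sentinelfunc_sketch( lua_State* L)
    {
        return luaL_error( L, "sentinel for '%s' was called; it only exists to carry a lookup name through a keeper",
                           lua_tostring( L, lua_upvalueindex( 1)));
    }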
@@ -1608,12 +1653,15 @@ static void lookup_native_func( lua_State* L2, lua_State* L, uint_t i, enum eLoo | |||
1608 | * Copy a function over, which has not been found in the cache. | 1653 | * Copy a function over, which has not been found in the cache. |
1609 | * L2 has the cache key for this function at the top of the stack | 1654 | * L2 has the cache key for this function at the top of the stack |
1610 | */ | 1655 | */ |
1611 | enum e_vt { | 1656 | enum e_vt |
1612 | VT_NORMAL, VT_KEY, VT_METATABLE | 1657 | { |
1658 | VT_NORMAL, | ||
1659 | VT_KEY, | ||
1660 | VT_METATABLE | ||
1613 | }; | 1661 | }; |
1614 | static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt value_type, enum eLookupMode mode_, char const* upName_); | 1662 | static bool_t inter_copy_one_( struct s_Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt value_type, enum eLookupMode mode_, char const* upName_); |
1615 | 1663 | ||
1616 | static void inter_copy_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) | 1664 | static void inter_copy_func( struct s_Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) |
1617 | { | 1665 | { |
1618 | int n, needToPush; | 1666 | int n, needToPush; |
1619 | luaL_Buffer b; | 1667 | luaL_Buffer b; |
@@ -1722,7 +1770,7 @@ static void inter_copy_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uin | |||
1722 | #endif // LUA_VERSION_NUM | 1770 | #endif // LUA_VERSION_NUM |
1723 | { | 1771 | { |
1724 | DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); | 1772 | DEBUGSPEW_CODE( fprintf( stderr, "copying value\n")); |
1725 | if( !inter_copy_one_( L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function <upvalues> | 1773 | if( !inter_copy_one_( U, L2, L2_cache_i, L, lua_gettop( L), VT_NORMAL, mode_, upname)) // ... {cache} ... function <upvalues> |
1726 | { | 1774 | { |
1727 | luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); | 1775 | luaL_error( L, "Cannot copy upvalue type '%s'", luaL_typename( L, -1)); |
1728 | } | 1776 | } |
@@ -1742,7 +1790,7 @@ static void inter_copy_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uin | |||
1742 | int func_index = lua_gettop( L2) - n; | 1790 | int func_index = lua_gettop( L2) - n; |
1743 | for( ; n > 0; -- n) | 1791 | for( ; n > 0; -- n) |
1744 | { | 1792 | { |
1745 | char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function | 1793 | char const* rc = lua_setupvalue( L2, func_index, n); // ... {cache} ... function |
1746 | // | 1794 | // |
1747 | // "assigns the value at the top of the stack to the upvalue and returns its name. | 1795 | // "assigns the value at the top of the stack to the upvalue and returns its name. |
1748 | // It also pops the value from the stack." | 1796 | // It also pops the value from the stack." |
@@ -1750,7 +1798,7 @@ static void inter_copy_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uin | |||
1750 | ASSERT_L( rc); // not having enough slots? | 1798 | ASSERT_L( rc); // not having enough slots? |
1751 | } | 1799 | } |
1752 | // once all upvalues have been set we are left | 1800 | // once all upvalues have been set we are left |
1753 | // with the function at the top of the stack // ... {cache} ... function | 1801 | // with the function at the top of the stack // ... {cache} ... function |
1754 | } | 1802 | } |
1755 | } | 1803 | } |
1756 | STACK_END( L, 0); | 1804 | STACK_END( L, 0); |
@@ -1762,7 +1810,7 @@ static void inter_copy_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uin | |||
1762 | * | 1810 | * |
1763 | * Always pushes a function to 'L2'. | 1811 | * Always pushes a function to 'L2'. |
1764 | */ | 1812 | */ |
1765 | static void push_cached_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) | 1813 | static void push_cached_func( struct s_Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum eLookupMode mode_, char const* upName_) |
1766 | { | 1814 | { |
1767 | FuncSubType funcSubType; | 1815 | FuncSubType funcSubType; |
1768 | /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions | 1816 | /*lua_CFunction cfunc =*/ luaG_tocfunction( L, i, &funcSubType); // NULL for LuaJIT-fast && bytecode functions |
@@ -1798,7 +1846,7 @@ static void push_cached_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, ui | |||
1798 | // via upvalues | 1846 | // via upvalues |
1799 | // | 1847 | // |
1800 | // pushes a copy of the func, stores a reference in the cache | 1848 | // pushes a copy of the func, stores a reference in the cache |
1801 | inter_copy_func( L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... function | 1849 | inter_copy_func( U, L2, L2_cache_i, L, i, mode_, upName_); // ... {cache} ... function |
1802 | } | 1850 | } |
1803 | else // found function in the cache | 1851 | else // found function in the cache |
1804 | { | 1852 | { |
@@ -1808,7 +1856,7 @@ static void push_cached_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, ui | |||
1808 | } | 1856 | } |
1809 | else // function is native/LuaJIT: no need to cache | 1857 | else // function is native/LuaJIT: no need to cache |
1810 | { | 1858 | { |
1811 | lookup_native_func( L2, L, i, mode_, upName_); // ... {cache} ... function | 1859 | lookup_native_func( U, L2, L, i, mode_, upName_); // ... {cache} ... function |
1812 | } | 1860 | } |
1813 | 1861 | ||
1814 | // | 1862 | // |
@@ -1826,7 +1874,7 @@ static void push_cached_func( lua_State* L2, uint_t L2_cache_i, lua_State* L, ui | |||
1826 | * | 1874 | * |
1827 | * Returns TRUE if value was pushed, FALSE if its type is non-supported. | 1875 | * Returns TRUE if value was pushed, FALSE if its type is non-supported. |
1828 | */ | 1876 | */ |
1829 | static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, enum eLookupMode mode_, char const* upName_) | 1877 | static bool_t inter_copy_one_( struct s_Universe* U, lua_State* L2, uint_t L2_cache_i, lua_State* L, uint_t i, enum e_vt vt, enum eLookupMode mode_, char const* upName_) |
1830 | { | 1878 | { |
1831 | bool_t ret = TRUE; | 1879 | bool_t ret = TRUE; |
1832 | STACK_GROW( L2, 1); | 1880 | STACK_GROW( L2, 1); |
@@ -1884,7 +1932,7 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
1884 | /* Allow only deep userdata entities to be copied across | 1932 | /* Allow only deep userdata entities to be copied across |
1885 | */ | 1933 | */ |
1886 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "USERDATA\n" INDENT_END)); | 1934 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "USERDATA\n" INDENT_END)); |
1887 | if( !copydeep( L, L2, i, mode_)) | 1935 | if( !copydeep( U, L, L2, i, mode_)) |
1888 | { | 1936 | { |
1889 | // Not a deep full userdata | 1937 | // Not a deep full userdata |
1890 | bool_t demote = FALSE; | 1938 | bool_t demote = FALSE; |
@@ -1928,11 +1976,11 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
1928 | } | 1976 | } |
1929 | { | 1977 | { |
1930 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "FUNCTION %s\n" INDENT_END, upName_)); | 1978 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "FUNCTION %s\n" INDENT_END, upName_)); |
1931 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 1979 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
1932 | STACK_CHECK( L2); | 1980 | STACK_CHECK( L2); |
1933 | push_cached_func( L2, L2_cache_i, L, i, mode_, upName_); | 1981 | push_cached_func( U, L2, L2_cache_i, L, i, mode_, upName_); |
1934 | STACK_END( L2, 1); | 1982 | STACK_END( L2, 1); |
1935 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 1983 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
1936 | } | 1984 | } |
1937 | break; | 1985 | break; |
1938 | 1986 | ||
@@ -1973,10 +2021,10 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
1973 | 2021 | ||
1974 | /* Only basic key types are copied over; others ignored | 2022 | /* Only basic key types are copied over; others ignored |
1975 | */ | 2023 | */ |
1976 | if( inter_copy_one_( L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) | 2024 | if( inter_copy_one_( U, L2, 0 /*key*/, L, key_i, VT_KEY, mode_, upName_)) |
1977 | { | 2025 | { |
1978 | char* valPath = (char*) upName_; | 2026 | char* valPath = (char*) upName_; |
1979 | if( GVerboseErrors) | 2027 | if( U->verboseErrors) |
1980 | { | 2028 | { |
1981 | // for debug purposes, let's try to build a useful name | 2029 | // for debug purposes, let's try to build a useful name |
1982 | if( lua_type( L, key_i) == LUA_TSTRING) | 2030 | if( lua_type( L, key_i) == LUA_TSTRING) |
@@ -1994,7 +2042,7 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
1994 | * Contents of metatables are copied with cache checking; | 2042 | * Contents of metatables are copied with cache checking; |
1995 | * important to detect loops. | 2043 | * important to detect loops. |
1996 | */ | 2044 | */ |
1997 | if( inter_copy_one_( L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) | 2045 | if( inter_copy_one_( U, L2, L2_cache_i, L, val_i, VT_NORMAL, mode_, valPath)) |
1998 | { | 2046 | { |
1999 | ASSERT_L( lua_istable( L2, -3)); | 2047 | ASSERT_L( lua_istable( L2, -3)); |
2000 | lua_rawset( L2, -3); // add to table (pops key & val) | 2048 | lua_rawset( L2, -3); // add to table (pops key & val) |
@@ -2016,7 +2064,7 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
2016 | // | 2064 | // |
2017 | // L [-1]: metatable | 2065 | // L [-1]: metatable |
2018 | 2066 | ||
2019 | uint_t mt_id = get_mt_id( L, -1); // Unique id for the metatable | 2067 | uint_t mt_id = get_mt_id( U, L, -1); // Unique id for the metatable |
2020 | 2068 | ||
2021 | STACK_GROW( L2, 4); | 2069 | STACK_GROW( L2, 4); |
2022 | 2070 | ||
@@ -2036,7 +2084,7 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
2036 | lua_pop( L2, 1); | 2084 | lua_pop( L2, 1); |
2037 | STACK_MID( L2, 2); | 2085 | STACK_MID( L2, 2); |
2038 | ASSERT_L( lua_istable(L,-1)); | 2086 | ASSERT_L( lua_istable(L,-1)); |
2039 | if( inter_copy_one_( L2, L2_cache_i /*for function cacheing*/, L, lua_gettop(L) /*[-1]*/, VT_METATABLE, mode_, upName_)) | 2087 | if( inter_copy_one_( U, L2, L2_cache_i /*for function cacheing*/, L, lua_gettop(L) /*[-1]*/, VT_METATABLE, mode_, upName_)) |
2040 | { | 2088 | { |
2041 | // | 2089 | // |
2042 | // L2 ([-3]: copied table) | 2090 | // L2 ([-3]: copied table) |
@@ -2103,13 +2151,13 @@ static bool_t inter_copy_one_( lua_State* L2, uint_t L2_cache_i, lua_State* L, u | |||
2103 | * | 2151 | * |
2104 | * Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. | 2152 | * Note: Parameters are in this order ('L' = from first) to be same as 'lua_xmove'. |
2105 | */ | 2153 | */ |
2106 | int luaG_inter_copy( lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_) | 2154 | int luaG_inter_copy( struct s_Universe* U, lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_) |
2107 | { | 2155 | { |
2108 | uint_t top_L = lua_gettop( L); | 2156 | uint_t top_L = lua_gettop( L); |
2109 | uint_t top_L2 = lua_gettop( L2); | 2157 | uint_t top_L2 = lua_gettop( L2); |
2110 | uint_t i, j; | 2158 | uint_t i, j; |
2111 | char tmpBuf[16]; | 2159 | char tmpBuf[16]; |
2112 | char* pBuf = GVerboseErrors ? tmpBuf : "?"; | 2160 | char* pBuf = U->verboseErrors ? tmpBuf : "?"; |
2113 | bool_t copyok = TRUE; | 2161 | bool_t copyok = TRUE; |
2114 | 2162 | ||
2115 | if( n > top_L) | 2163 | if( n > top_L) |
@@ -2129,11 +2177,11 @@ int luaG_inter_copy( lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mod | |||
2129 | 2177 | ||
2130 | for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) | 2178 | for( i = top_L - n + 1, j = 1; i <= top_L; ++ i, ++ j) |
2131 | { | 2179 | { |
2132 | if( GVerboseErrors) | 2180 | if( U->verboseErrors) |
2133 | { | 2181 | { |
2134 | sprintf( tmpBuf, "arg_%d", j); | 2182 | sprintf( tmpBuf, "arg_%d", j); |
2135 | } | 2183 | } |
2136 | copyok = inter_copy_one_( L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); | 2184 | copyok = inter_copy_one_( U, L2, top_L2 + 1, L, i, VT_NORMAL, mode_, pBuf); |
2137 | if( !copyok) | 2185 | if( !copyok) |
2138 | { | 2186 | { |
2139 | break; | 2187 | break; |
@@ -2161,17 +2209,17 @@ int luaG_inter_copy( lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mod | |||
2161 | } | 2209 | } |
2162 | 2210 | ||
2163 | 2211 | ||
2164 | int luaG_inter_move( lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_) | 2212 | int luaG_inter_move( struct s_Universe* U, lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_) |
2165 | { | 2213 | { |
2166 | int ret = luaG_inter_copy( L, L2, n, mode_); | 2214 | int ret = luaG_inter_copy( U, L, L2, n, mode_); |
2167 | lua_pop( L, (int) n); | 2215 | lua_pop( L, (int) n); |
2168 | return ret; | 2216 | return ret; |
2169 | } | 2217 | } |
2170 | 2218 | ||
2171 | int luaG_inter_copy_package( lua_State* L, lua_State* L2, int package_idx_, enum eLookupMode mode_) | 2219 | int luaG_inter_copy_package( struct s_Universe* U, lua_State* L, lua_State* L2, int package_idx_, enum eLookupMode mode_) |
2172 | { | 2220 | { |
2173 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); | 2221 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "luaG_inter_copy_package()\n" INDENT_END)); |
2174 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2222 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2175 | // package | 2223 | // package |
2176 | STACK_CHECK( L); | 2224 | STACK_CHECK( L); |
2177 | STACK_CHECK( L2); | 2225 | STACK_CHECK( L2); |
@@ -2202,9 +2250,9 @@ int luaG_inter_copy_package( lua_State* L, lua_State* L2, int package_idx_, enum | |||
2202 | } | 2250 | } |
2203 | else | 2251 | else |
2204 | { | 2252 | { |
2205 | DEBUGSPEW_CODE( ++ debugspew_indent_depth); | 2253 | DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); |
2206 | luaG_inter_move( L, L2, 1, mode_); // moves the entry to L2 | 2254 | luaG_inter_move( U, L, L2, 1, mode_); // moves the entry to L2 |
2207 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2255 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2208 | lua_setfield( L2, -2, entries[i]); // set package[entries[i]] | 2256 | lua_setfield( L2, -2, entries[i]); // set package[entries[i]] |
2209 | } | 2257 | } |
2210 | } | 2258 | } |
@@ -2216,7 +2264,7 @@ int luaG_inter_copy_package( lua_State* L, lua_State* L2, int package_idx_, enum | |||
2216 | lua_pop( L2, 1); | 2264 | lua_pop( L2, 1); |
2217 | STACK_END( L2, 0); | 2265 | STACK_END( L2, 0); |
2218 | STACK_END( L, 0); | 2266 | STACK_END( L, 0); |
2219 | DEBUGSPEW_CODE( -- debugspew_indent_depth); | 2267 | DEBUGSPEW_CODE( -- U->debugspew_indent_depth); |
2220 | return 0; | 2268 | return 0; |
2221 | } | 2269 | } |
2222 | 2270 | ||
@@ -2224,8 +2272,6 @@ int luaG_inter_copy_package( lua_State* L, lua_State* L2, int package_idx_, enum | |||
2224 | /*---=== Serialize require ===--- | 2272 | /*---=== Serialize require ===--- |
2225 | */ | 2273 | */ |
2226 | 2274 | ||
2227 | MUTEX_T require_cs; | ||
2228 | |||
2229 | //--- | 2275 | //--- |
2230 | // [val]= new_require( ... ) | 2276 | // [val]= new_require( ... ) |
2231 | // | 2277 | // |
@@ -2237,12 +2283,13 @@ int luaG_new_require( lua_State* L) | |||
2237 | { | 2283 | { |
2238 | int rc, i; | 2284 | int rc, i; |
2239 | int args = lua_gettop( L); | 2285 | int args = lua_gettop( L); |
2286 | struct s_Universe* U = get_universe( L); | ||
2240 | //char const* modname = luaL_checkstring( L, 1); | 2287 | //char const* modname = luaL_checkstring( L, 1); |
2241 | 2288 | ||
2242 | STACK_GROW( L, args + 1); | 2289 | STACK_GROW( L, args + 1); |
2243 | STACK_CHECK( L); | 2290 | STACK_CHECK( L); |
2244 | 2291 | ||
2245 | lua_pushvalue( L, lua_upvalueindex(1)); | 2292 | lua_pushvalue( L, lua_upvalueindex( 1)); |
2246 | for( i = 1; i <= args; ++ i) | 2293 | for( i = 1; i <= args; ++ i) |
2247 | { | 2294 | { |
2248 | lua_pushvalue( L, i); | 2295 | lua_pushvalue( L, i); |
@@ -2251,9 +2298,9 @@ int luaG_new_require( lua_State* L) | |||
2251 | // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would | 2298 | // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would |
2252 | // leave us locked, blocking any future 'require' calls from other lanes. | 2299 | // leave us locked, blocking any future 'require' calls from other lanes. |
2253 | // | 2300 | // |
2254 | MUTEX_LOCK( &require_cs); | 2301 | MUTEX_LOCK( &U->require_cs); |
2255 | rc = lua_pcall( L, args, 1 /*retvals*/, 0 /*errfunc*/ ); | 2302 | rc = lua_pcall( L, args, 1 /*retvals*/, 0 /*errfunc*/ ); |
2256 | MUTEX_UNLOCK( &require_cs); | 2303 | MUTEX_UNLOCK( &U->require_cs); |
2257 | 2304 | ||
2258 | // the required module (or an error message) is left on the stack as returned value by original require function | 2305 | // the required module (or an error message) is left on the stack as returned value by original require function |
2259 | STACK_END( L, 1); | 2306 | STACK_END( L, 1); |
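luaG_new_require() expects the original require function as its first upvalue (hence the lua_upvalueindex( 1) above) and now serializes the call through U->require_cs instead of the removed global require_cs. The companion serialize_require() declared in tools.h presumably just swaps the global require for such a closure; a hedged sketch of that wrapper (the body below is an assumption, not taken from the sources):

    // Sketch only: wrap the global require() so calls go through luaG_new_require(),
    // which serializes them on the universe's require_cs mutex.
    void serialize_require( lua_State* L)
    {
        STACK_GROW( L, 1);
        STACK_CHECK( L);
        lua_getglobal( L, "require");                  // original require() (or nil)
        if( lua_isfunction( L, -1))
        {
            lua_pushcclosure( L, luaG_new_require, 1); // new require(), original as upvalue 1
            lua_setglobal( L, "require");
        }
        else
        {
            lua_pop( L, 1);                            // no require() in this state: nothing to wrap
        }
        STACK_END( L, 0);
    }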
diff --git a/src/tools.h b/src/tools.h index 5bd4b69..1d80309 100644 --- a/src/tools.h +++ b/src/tools.h | |||
@@ -55,9 +55,8 @@ void luaL_requiref (lua_State* L, const char* modname, lua_CFunction openf, int | |||
55 | #define USE_DEBUG_SPEW 0 | 55 | #define USE_DEBUG_SPEW 0 |
56 | #if USE_DEBUG_SPEW | 56 | #if USE_DEBUG_SPEW |
57 | extern char const* debugspew_indent; | 57 | extern char const* debugspew_indent; |
58 | extern int debugspew_indent_depth; | ||
59 | #define INDENT_BEGIN "%.*s " | 58 | #define INDENT_BEGIN "%.*s " |
60 | #define INDENT_END , debugspew_indent_depth, debugspew_indent | 59 | #define INDENT_END , (U ? U->debugspew_indent_depth : 0), debugspew_indent |
61 | #define DEBUGSPEW_CODE(_code) _code | 60 | #define DEBUGSPEW_CODE(_code) _code |
62 | #else // USE_DEBUG_SPEW | 61 | #else // USE_DEBUG_SPEW |
63 | #define DEBUGSPEW_CODE(_code) | 62 | #define DEBUGSPEW_CODE(_code) |
@@ -98,8 +97,10 @@ extern int debugspew_indent_depth; | |||
98 | 97 | ||
99 | void luaG_dump( lua_State* L ); | 98 | void luaG_dump( lua_State* L ); |
100 | 99 | ||
101 | lua_State* luaG_newstate( lua_State* _from, char const* libs); | 100 | lua_State* luaG_newstate( struct s_Universe* U, lua_State* _from, char const* libs); |
102 | void luaG_copy_one_time_settings( lua_State* L, lua_State* L2); | 101 | void luaG_copy_one_time_settings( struct s_Universe* U, lua_State* L, lua_State* L2); |
102 | |||
103 | // ################################################################################################ | ||
103 | 104 | ||
104 | // this is pointed to by full userdata proxies, and allocated with malloc() to survive any lua_State lifetime | 105 | // this is pointed to by full userdata proxies, and allocated with malloc() to survive any lua_State lifetime |
105 | typedef struct | 106 | typedef struct |
@@ -117,29 +118,77 @@ enum eLookupMode | |||
117 | eLM_FromKeeper // send a function from a keeper state to a lane | 118 | eLM_FromKeeper // send a function from a keeper state to a lane |
118 | }; | 119 | }; |
119 | 120 | ||
120 | char const* push_deep_proxy( lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMode mode_); | 121 | char const* push_deep_proxy( struct s_Universe* U, lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMode mode_); |
121 | int luaG_inter_copy_package( lua_State* L, lua_State* L2, int package_idx_, enum eLookupMode mode_); | 122 | void free_deep_prelude( lua_State* L, DEEP_PRELUDE* prelude_); |
123 | |||
124 | int luaG_inter_copy_package( struct s_Universe* U, lua_State* L, lua_State* L2, int package_idx_, enum eLookupMode mode_); | ||
122 | 125 | ||
123 | int luaG_inter_copy( lua_State *L, lua_State *L2, uint_t n, enum eLookupMode mode_); | 126 | int luaG_inter_copy( struct s_Universe* U, lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_); |
124 | int luaG_inter_move( lua_State *L, lua_State *L2, uint_t n, enum eLookupMode mode_); | 127 | int luaG_inter_move( struct s_Universe* U, lua_State* L, lua_State* L2, uint_t n, enum eLookupMode mode_); |
125 | 128 | ||
126 | int luaG_nameof( lua_State* L); | 129 | int luaG_nameof( lua_State* L); |
127 | int luaG_new_require( lua_State* L); | 130 | int luaG_new_require( lua_State* L); |
128 | 131 | ||
129 | // Lock for reference counter inc/dec locks (to be initialized by outside code) | ||
130 | // | ||
131 | extern MUTEX_T deep_lock; | ||
132 | extern MUTEX_T mtid_lock; | ||
133 | |||
134 | void populate_func_lookup_table( lua_State* L, int _i, char const* _name); | 132 | void populate_func_lookup_table( lua_State* L, int _i, char const* _name); |
135 | void serialize_require( lua_State *L); | 133 | void serialize_require( lua_State *L); |
136 | int initialize_on_state_create( lua_State *L); | 134 | void initialize_on_state_create( struct s_Universe* U, lua_State* L); |
137 | int call_on_state_create( lua_State* L, lua_State* from_, enum eLookupMode mode_); | 135 | void call_on_state_create( struct s_Universe* U, lua_State* L, lua_State* from_, enum eLookupMode mode_); |
136 | |||
137 | // ################################################################################################ | ||
138 | |||
139 | /* | ||
140 | * Do we want to activate full lane tracking feature? (EXPERIMENTAL) | ||
141 | */ | ||
142 | #define HAVE_LANE_TRACKING 1 | ||
143 | |||
144 | // ################################################################################################ | ||
145 | |||
146 | // everything regarding a Lanes universe is stored in this global structure | ||
147 | // held as a full userdata in the master Lua state that required it for the first time | ||
148 | // don't forget to initialize all members in LG_configure() | ||
149 | struct s_Universe | ||
150 | { | ||
151 | // for verbose errors | ||
152 | bool_t verboseErrors; | ||
153 | |||
154 | lua_CFunction on_state_create_func; | ||
155 | |||
156 | struct s_Keepers* keepers; | ||
157 | |||
158 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object | ||
159 | // used for timers (each lane will get a proxy to this) | ||
160 | volatile DEEP_PRELUDE* timer_deep; // = NULL | ||
161 | |||
162 | #if HAVE_LANE_TRACKING | ||
163 | MUTEX_T tracking_cs; | ||
164 | struct s_lane* volatile tracking_first; // will change to TRACKING_END if we want to activate tracking | ||
165 | #endif // HAVE_LANE_TRACKING | ||
166 | |||
167 | MUTEX_T selfdestruct_cs; | ||
168 | |||
169 | // require() serialization | ||
170 | MUTEX_T require_cs; | ||
171 | |||
172 | // Lock for reference counter inc/dec locks (to be initialized by outside code) TODO: get rid of this and use atomics instead! | ||
173 | MUTEX_T deep_lock; | ||
174 | MUTEX_T mtid_lock; | ||
175 | |||
176 | int last_mt_id; | ||
177 | |||
178 | #if USE_DEBUG_SPEW | ||
179 | int debugspew_indent_depth; | ||
180 | #endif // USE_DEBUG_SPEW | ||
181 | |||
182 | struct s_lane* volatile selfdestruct_first; | ||
183 | // After a lane has removed itself from the chain, it still performs some processing. | ||
184 | // The terminal deinit sequence should wait for all such processing to terminate before force-killing threads | ||
185 | int volatile selfdestructing_count; | ||
186 | }; | ||
138 | 187 | ||
139 | extern MUTEX_T require_cs; | 188 | struct s_Universe* get_universe( lua_State* L); |
189 | extern void* const UNIVERSE_REGKEY; | ||
140 | 190 | ||
141 | // for verbose errors | 191 | // ################################################################################################ |
142 | extern bool_t GVerboseErrors; | ||
143 | 192 | ||
144 | extern char const* const CONFIG_REGKEY; | 193 | extern char const* const CONFIG_REGKEY; |
145 | extern char const* const LOOKUP_REGKEY; | 194 | extern char const* const LOOKUP_REGKEY; |
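get_universe() is how the tools.c functions above recover the per-universe structure from whichever lua_State they are handed. Assuming the universe userdata is keyed in the state's registry by the UNIVERSE_REGKEY light userdata (the real accessor may guard more corner cases, e.g. states where Lanes was never configured), a minimal sketch looks like this:

    // Sketch of the accessor declared above; assumes the universe is stored
    // in LUA_REGISTRYINDEX under the UNIVERSE_REGKEY light userdata key.
    struct s_Universe* get_universe( lua_State* L)
    {
        struct s_Universe* U;
        STACK_GROW( L, 1);
        STACK_CHECK( L);
        lua_pushlightuserdata( L, UNIVERSE_REGKEY);
        lua_rawget( L, LUA_REGISTRYINDEX);               // universe userdata, or nil
        U = (struct s_Universe*) lua_touserdata( L, -1); // NULL when not found
        lua_pop( L, 1);
        STACK_END( L, 0);
        return U;
    }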
diff --git a/tests/keeper.lua b/tests/keeper.lua index 4aff51c..f8c915d 100644 --- a/tests/keeper.lua +++ b/tests/keeper.lua | |||
@@ -4,7 +4,7 @@ | |||
4 | -- Test program for Lua Lanes | 4 | -- Test program for Lua Lanes |
5 | -- | 5 | -- |
6 | 6 | ||
7 | local lanes = require "lanes".configure{ with_timers = false} | 7 | local lanes = require "lanes".configure{ with_timers = false, nb_keepers = 200} |
8 | 8 | ||
9 | local function keeper(linda) | 9 | local function keeper(linda) |
10 | local mt= { | 10 | local mt= { |
diff --git a/tests/recursive.lua b/tests/recursive.lua index 82a43b9..139f4c8 100644 --- a/tests/recursive.lua +++ b/tests/recursive.lua | |||
@@ -14,7 +14,7 @@ local function func( depth ) | |||
14 | local lanes = require "lanes" | 14 | local lanes = require "lanes" |
15 | -- lanes.configure() is available only at the first require() | 15 | -- lanes.configure() is available only at the first require() |
16 | if lanes.configure then | 16 | if lanes.configure then |
17 | lanes = lanes.configure() | 17 | lanes = lanes.configure{with_timers = false} |
18 | end | 18 | end |
19 | local lane= lanes.gen("*", func)( depth+1 ) | 19 | local lane= lanes.gen("*", func)( depth+1 ) |
20 | return lane[1] | 20 | return lane[1] |