Diffstat (limited to 'src')
 -rw-r--r--  src/keeper.c             |   2
 -rw-r--r--  src/lanes.c              |  68
 -rw-r--r--  src/lanes.lua            |   8
 -rw-r--r--  src/macros_and_utils.h   |  20
 -rw-r--r--  src/tools.c              | 109
 -rw-r--r--  src/tools.h              |   4
 -rw-r--r--  src/universe.h           |  26

 7 files changed, 147 insertions, 90 deletions
diff --git a/src/keeper.c b/src/keeper.c
index ae3e2a8..7eda598 100644
--- a/src/keeper.c
+++ b/src/keeper.c
@@ -657,7 +657,7 @@ void init_keepers( Universe* U, lua_State* L)
     for( i = 0; i < nb_keepers; ++ i)                                   // keepersUD
     {
         // note that we will leak K if we raise an error later
-        lua_State* K = PROPAGATE_ALLOCF_ALLOC();
+        lua_State* K = create_state( U, L);
         if( K == NULL)
         {
             (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory");
diff --git a/src/lanes.c b/src/lanes.c
index c8e012c..ccb32c0 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -580,25 +580,6 @@ static bool_t selfdestruct_remove( Lane* s)
 }
 
 /*
-** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT)
-*/
-struct ProtectedAllocator_s
-{
-    lua_Alloc allocF;
-    void* allocUD;
-    MUTEX_T lock;
-};
-void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
-{
-    void* p;
-    struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
-    MUTEX_LOCK( &s->lock);
-    p = s->allocF( s->allocUD, ptr, osize, nsize);
-    MUTEX_UNLOCK( &s->lock);
-    return p;
-}
-
-/*
 * Process end; cancel any still free-running threads
 */
 static int selfdestruct_gc( lua_State* L)
@@ -679,15 +660,9 @@ static int selfdestruct_gc( lua_State* L)
 
         // If some lanes are currently cleaning after themselves, wait until they are done.
         // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
+        while( U->selfdestructing_count > 0)
         {
-            bool_t again = TRUE;
-            do
-            {
-                MUTEX_LOCK( &U->selfdestruct_cs);
-                again = (U->selfdestructing_count > 0) ? TRUE : FALSE;
-                MUTEX_UNLOCK( &U->selfdestruct_cs);
-                YIELD();
-            } while( again);
+            YIELD();
         }
 
         //---
@@ -727,6 +702,13 @@ static int selfdestruct_gc( lua_State* L)
         }
     }
 
+    // If some lanes are currently cleaning after themselves, wait until they are done.
+    // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
+    while( U->selfdestructing_count > 0)
+    {
+        YIELD();
+    }
+
     // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
     lua_settop( L, 0);
     // no need to mutex-protect this as all threads in the universe are gone at that point
@@ -740,18 +722,7 @@ static int selfdestruct_gc( lua_State* L)
     close_keepers( U, L);
 
     // remove the protected allocator, if any
-    {
-        void* ud;
-        lua_Alloc allocF = lua_getallocf( L, &ud);
-
-        if( allocF == protected_lua_Alloc)
-        {
-            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
-            lua_setallocf( L, s->allocF, s->allocUD);
-            MUTEX_FREE( &s->lock);
-            s->allocF( s->allocUD, s, sizeof( struct ProtectedAllocator_s), 0);
-        }
-    }
+    cleanup_allocator_function( U, L);
 
 #if HAVE_LANE_TRACKING
     MUTEX_FREE( &U->tracking_cs);
@@ -2097,24 +2068,6 @@ LUAG_FUNC( configure)
     DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
     DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth);
 
-    lua_getfield( L, 1, "protect_allocator");                           // settings protect_allocator
-    if( lua_toboolean( L, -1))
-    {
-        void* allocUD;
-        lua_Alloc allocF = lua_getallocf( L, &allocUD);
-        if( allocF != protected_lua_Alloc) // just in case
-        {
-            struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocF( allocUD, NULL, 0, sizeof( struct ProtectedAllocator_s));
-            s->allocF = allocF;
-            s->allocUD = allocUD;
-            MUTEX_INIT( &s->lock);
-            lua_setallocf( L, protected_lua_Alloc, s);
-        }
-    }
-    lua_pop( L, 1);                                                     // settings
-    STACK_MID( L, 1);
-
-    // grab or create the universe
     if( U == NULL)
     {
         U = universe_create( L);                                        // settings universe
@@ -2144,6 +2097,7 @@ LUAG_FUNC( configure)
         MUTEX_INIT( &U->deep_lock);
         MUTEX_INIT( &U->mtid_lock);
         U->selfdestruct_first = SELFDESTRUCT_END;
+        initialize_allocator_function( U, L);
         initialize_on_state_create( U, L);
         init_keepers( U, L);
         STACK_MID( L, 1);
diff --git a/src/lanes.lua b/src/lanes.lua
index 6779095..15908fa 100644
--- a/src/lanes.lua
+++ b/src/lanes.lua
@@ -76,8 +76,7 @@ lanes.configure = function( settings_)
         track_lanes = false,
         demote_full_userdata = nil,
         verbose_errors = false,
-        -- LuaJIT provides a thread-unsafe allocator by default, so we need to protect it when used in parallel lanes
-        protect_allocator = (package.loaded.jit and jit.version and package.loaded.ffi and (package.loaded.ffi.abi( "32bit") or package.loaded.ffi.abi( "gc64"))) and true or false
+        allocator = nil
     }
     local boolean_param_checker = function( val_)
         -- non-'boolean-false' should be 'boolean-true' or nil
@@ -90,7 +89,10 @@ lanes.configure = function( settings_)
             return type( val_) == "number" and val_ > 0
         end,
         with_timers = boolean_param_checker,
-        protect_allocator = boolean_param_checker,
+        allocator = function( val_)
+            -- can be nil, "protected", or a function
+            return val_ and (type( val_) == "function" or val_ == "protected") or true
+        end,
         on_state_create = function( val_)
             -- on_state_create may be nil or a function
             return val_ and type( val_) == "function" or true
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index acbe690..e40a615 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -12,25 +12,7 @@
 #define inline __inline
 #endif
 
- // For some reason, LuaJIT 64bits doesn't support lua_newstate()
-#ifndef PROPAGATE_ALLOCF //you should #define PROPAGATE_ALLOCF 1 for LuaJIT in GC64 mode
-#if defined(LUA_JITLIBNAME) && (defined(__x86_64__) || defined(_M_X64))
- //#pragma message( "LuaJIT 64 bits detected: don't propagate allocf")
-#define PROPAGATE_ALLOCF 0
-#else // LuaJIT x64
- //#pragma message( "PUC-Lua detected: propagate allocf")
-#define PROPAGATE_ALLOCF 1
-#endif // LuaJIT x64
-#endif // PROPAGATE_ALLOCF defined
-#if PROPAGATE_ALLOCF
-#define PROPAGATE_ALLOCF_PREP( L) void* allocUD; lua_Alloc allocF = lua_getallocf( L, &allocUD)
-#define PROPAGATE_ALLOCF_ALLOC() lua_newstate( allocF, allocUD)
-#else // PROPAGATE_ALLOCF
-#define PROPAGATE_ALLOCF_PREP( L)
-#define PROPAGATE_ALLOCF_ALLOC() luaL_newstate()
-#endif // PROPAGATE_ALLOCF
-
-#define USE_DEBUG_SPEW 0
+ #define USE_DEBUG_SPEW 0
 #if USE_DEBUG_SPEW
 extern char const* debugspew_indent;
 #define INDENT_BEGIN "%.*s "
diff --git a/src/tools.c b/src/tools.c
index f8fc342..8885dea 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -105,6 +105,79 @@ void luaG_dump( lua_State* L)
 }
 #endif // _DEBUG
 
+// ################################################################################################
+
+static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
+{
+    void* p;
+    ProtectedAllocator* s = (ProtectedAllocator*) ud;
+    MUTEX_LOCK( &s->lock);
+    p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize);
+    MUTEX_UNLOCK( &s->lock);
+    return p;
+}
+
+static int luaG_provide_protected_allocator( lua_State* L)
+{
+    Universe* U = universe_get( L);
+    AllocatorDefinition* def = lua_newuserdata( L, sizeof(AllocatorDefinition));
+    def->allocF = protected_lua_Alloc;
+    def->allocUD = &U->protected_allocator;
+    return 1;
+}
+
+// Do I need to disable this when compiling for LuaJIT to prevent issues?
+void initialize_allocator_function( Universe* U, lua_State* L)
+{
+    STACK_CHECK( L, 0);
+    lua_getfield( L, -1, "allocator");                                  // settings allocator|nil|"protected"
+    if( !lua_isnil( L, -1))
+    {
+        // store C function pointer in an internal variable
+        U->provide_allocator = lua_tocfunction( L, -1);                 // settings allocator
+        if( U->provide_allocator != NULL)
+        {
+            // make sure the function doesn't have upvalues
+            char const* upname = lua_getupvalue( L, -1, 1);             // settings allocator upval?
+            if( upname != NULL)                                         // should be "" for C functions with upvalues if any
+            {
+                (void) luaL_error( L, "config.allocator() shouldn't have upvalues");
+            }
+            // remove this C function from the config table so that it doesn't cause problems
+            // when we transfer the config table in newly created Lua states
+            lua_pushnil( L);                                            // settings allocator nil
+            lua_setfield( L, -3, "allocator");                          // settings allocator
+        }
+        else if( lua_type( L, -1) == LUA_TSTRING)
+        {
+            // initialize all we need for the protected allocator
+            MUTEX_INIT( &U->protected_allocator.lock);                  // the mutex
+            // and the original allocator to call from inside protection by the mutex
+            U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD);
+            // before a state is created, this function will be called to obtain the allocator
+            U->provide_allocator = luaG_provide_protected_allocator;
+
+            lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator);
+        }
+    }
+    lua_pop( L, 1);                                                     // settings
+    STACK_END( L, 0);
+}
+
+void cleanup_allocator_function( Universe* U, lua_State* L)
+{
+    // remove the protected allocator, if any
+    if( U->protected_allocator.definition.allocF != NULL)
+    {
+        // install the non-protected allocator
+        lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD);
+        // release the mutex
+        MUTEX_FREE( &U->protected_allocator.lock);
+    }
+}
+
+// ################################################################################################
+
 void initialize_on_state_create( Universe* U, lua_State* L)
 {
     STACK_CHECK( L, 0);
@@ -629,6 +702,31 @@ void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMo
     }
 }
 
+lua_State* create_state( Universe* U, lua_State* from_)
+{
+    lua_State* L;
+    if( U->provide_allocator != NULL)
+    {
+        lua_pushcclosure( from_, U->provide_allocator, 0);
+        lua_call( from_, 0, 1);
+        {
+            AllocatorDefinition* def = lua_touserdata( from_, -1);
+            L = lua_newstate( def->allocF, def->allocUD);
+        }
+        lua_pop( from_, 1);
+    }
+    else
+    {
+        L = luaL_newstate();
+    }
+
+    if( L == NULL)
+    {
+        (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory");
+    }
+    return L;
+}
+
 /*
  * Like 'luaL_openlibs()' but allows the set of libraries be selected
  *
@@ -644,16 +742,7 @@ void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMo
  */
 lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_)
 {
-    // re-use alloc function from the originating state
-#if PROPAGATE_ALLOCF
-    PROPAGATE_ALLOCF_PREP( from_);
-#endif // PROPAGATE_ALLOCF
-    lua_State* L = PROPAGATE_ALLOCF_ALLOC();
-
-    if( L == NULL)
-    {
-        (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory");
-    }
+    lua_State* L = create_state( U, from_);
 
     STACK_GROW( L, 2);
     STACK_CHECK_ABS( L, 0);
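
For reference, this is roughly what a host-supplied config.allocator function could look like under the new scheme: initialize_allocator_function() above fetches it with lua_tocfunction() (so it must be a C function without upvalues), and create_state() calls it expecting a single full userdata that carries an AllocatorDefinition. A minimal sketch, not part of this commit; my_alloc and provide_my_allocator are hypothetical names, and it assumes universe.h can be included for the AllocatorDefinition type.

// sketch only (not in this commit): a host-provided allocator provider for config.allocator
#include <stdlib.h>
#include "lua.h"
#include "universe.h"   // assumption: brings in AllocatorDefinition

// a plain lua_Alloc implementation the host wants every new lane state to use
static void* my_alloc( void* ud, void* ptr, size_t osize, size_t nsize)
{
    (void) ud; (void) osize;
    if( nsize == 0)
    {
        free( ptr);
        return NULL;
    }
    return realloc( ptr, nsize);
}

// must be a C function without upvalues; like luaG_provide_protected_allocator(),
// it returns a full userdata holding the AllocatorDefinition to pass to lua_newstate()
static int provide_my_allocator( lua_State* L)
{
    AllocatorDefinition* def = lua_newuserdata( L, sizeof( AllocatorDefinition));
    def->allocF = my_alloc;
    def->allocUD = NULL;
    return 1;
}

From Lua, the same setting is simply allocator = "protected" (or a function) in the table passed to lanes.configure(); note that initialize_allocator_function() removes a C function value from the settings table afterwards so it is never transferred into newly created states.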
diff --git a/src/tools.h b/src/tools.h
index ca0b9fc..71460c3 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -22,6 +22,7 @@ typedef struct s_Universe Universe;
 void luaG_dump( lua_State* L);
 #endif // _DEBUG
 
+lua_State* create_state( Universe* U, lua_State* from_);
 lua_State* luaG_newstate( Universe* U, lua_State* _from, char const* libs);
 
 // ################################################################################################
@@ -36,6 +37,9 @@ int luaG_new_require( lua_State* L);
 
 void populate_func_lookup_table( lua_State* L, int _i, char const* _name);
 void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State *L);
+void initialize_allocator_function( Universe* U, lua_State* L);
+void cleanup_allocator_function( Universe* U, lua_State* L);
+
 void initialize_on_state_create( Universe* U, lua_State* L);
 void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_);
 
diff --git a/src/universe.h b/src/universe.h
index 359dc90..8727bf7 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -24,6 +24,24 @@ typedef struct s_Lane Lane;
 
 // ################################################################################################
 
+// everything we need to provide to lua_newstate()
+struct AllocatorDefinition_s
+{
+    lua_Alloc allocF;
+    void* allocUD;
+};
+typedef struct AllocatorDefinition_s AllocatorDefinition;
+
+// mutex-protected allocator for use with Lua states that share a non-threadsafe allocator
+struct ProtectedAllocator_s
+{
+    AllocatorDefinition definition;
+    MUTEX_T lock;
+};
+typedef struct ProtectedAllocator_s ProtectedAllocator;
+
+// ################################################################################################
+
 // everything regarding the a Lanes universe is stored in that global structure
 // held as a full userdata in the master Lua state that required it for the first time
 // don't forget to initialize all members in LG_configure()
@@ -34,8 +52,16 @@ struct s_Universe
 
     bool_t demoteFullUserdata;
 
+    // before a state is created, this function will be called to obtain the allocator
+    lua_CFunction provide_allocator;
+
+    // after a state is created, this function will be called right after the bases libraries are loaded
     lua_CFunction on_state_create_func;
 
+    // Initialized and used only if allocator="protected" is found in the configuration settings
+    // contains a mutex and the original allocator definition
+    ProtectedAllocator protected_allocator;
+
     Keepers* keepers;
 
     // Initialized by 'init_once_LOCKED()': the deep userdata Linda object
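
To close the loop on the new Universe fields above: provide_allocator ends up holding the C function taken from config.allocator, while protected_allocator is only initialized when allocator = "protected" is selected. The following sketch shows how an embedder might register a custom provider from the C side; it is an assumption-laden illustration, not part of the commit, and it presumes the hypothetical provide_my_allocator from the earlier sketch is visible here, that lanes is loadable via require, and that lanes.configure() returns the module as its single result.

// sketch only (not in this commit): registering a custom provider as config.allocator
#include "lua.h"

static int provide_my_allocator( lua_State* L);     // from the earlier sketch

static void configure_lanes_with_custom_allocator( lua_State* L)
{
    lua_getglobal( L, "require");                   // require
    lua_pushliteral( L, "lanes");                   // require "lanes"
    lua_call( L, 1, 1);                             // lanes
    lua_getfield( L, -1, "configure");              // lanes configure
    lua_newtable( L);                               // lanes configure settings
    lua_pushcfunction( L, provide_my_allocator);    // lanes configure settings provider
    lua_setfield( L, -2, "allocator");              // lanes configure settings
    lua_call( L, 1, 1);                             // lanes module
    lua_pop( L, 2);                                 //
}

The allocator param checker added to lanes.lua accepts exactly these shapes: nil, the string "protected", or a function value such as the one pushed here.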