author | Benoit Germain <b n t DOT g e r m a i n AT g m a i l DOT c o m> | 2018-11-25 12:45:11 +0100
---|---|---
committer | Benoit Germain <b n t DOT g e r m a i n AT g m a i l DOT c o m> | 2018-11-25 12:45:11 +0100
commit | 8d6500fe389624be422ee546f71a1efd4456eabe (patch) |
tree | b9462a142180b8edb01e1eb284a42fea9f0156bc |
parent | 60e5d94af372471c2a3bab91963588ca650bff7b (diff) |
download | lanes-8d6500fe389624be422ee546f71a1efd4456eabe.tar.gz lanes-8d6500fe389624be422ee546f71a1efd4456eabe.tar.bz2 lanes-8d6500fe389624be422ee546f71a1efd4456eabe.zip |
protect_allocator configure option is gone, long live allocator (more embedders-friendly)
-rw-r--r-- | CHANGES | 3
-rw-r--r-- | README | 10
-rw-r--r-- | docs/index.html | 25
-rw-r--r-- | src/keeper.c | 2
-rw-r--r-- | src/lanes.c | 68
-rw-r--r-- | src/lanes.lua | 8
-rw-r--r-- | src/macros_and_utils.h | 20
-rw-r--r-- | src/tools.c | 109
-rw-r--r-- | src/tools.h | 4
-rw-r--r-- | src/universe.h | 26
10 files changed, 181 insertions, 94 deletions
diff --git a/CHANGES b/CHANGES
@@ -1,5 +1,8 @@
1 | CHANGES: | 1 | CHANGES: |
2 | 2 | ||
3 | CHANGE 141: BGe 25-Nov-18 | ||
4 | * protect_allocator configure option is gone, long live allocator (more embedders-friendly) | ||
5 | |||
3 | CHANGE 140: BGe 22-Nov-18 | 6 | CHANGE 140: BGe 22-Nov-18 |
4 | * Raise an error instead of crashing when attempting to transfer a non-deep full userdata | 7 | * Raise an error instead of crashing when attempting to transfer a non-deep full userdata |
5 | 8 | ||
diff --git a/README b/README
@@ -123,9 +123,11 @@ LUA_CPATH. If you are not sure how this works, try creating
123 | ======================= | 123 | ======================= |
124 | Note about LuaJIT | 124 | Note about LuaJIT |
125 | ======================= | 125 | ======================= |
126 | By default LuaJIT2 provides an non-thread-safe memory allocator to improve performance. | 126 | It looks like LuaJIT makes some assumptions about the usage of its allocator. |
127 | Of course this will break when running several Lua states concurrently. | 127 | Namely, when a Lua state closes, memory allocated from its alloc function might be freed, even if said memory |
128 | Don't forget to initialize Lanes with the 'protect_allocator' option (see documentation) | 128 | isn't actually owned by the state (for example if the allocator was used explicitly after retrieving it with lua_getallocf). |
129 | if you experience random crash issues. | 129 | Therefore it seems to be a bad idea, when creating a new lua_State, to propagate the allocator |
130 | from another state, as closing the first state would invalidate all the memory used by the second one... | ||
131 | In short, it is best to leave the 'allocator' configuration option unset when running LuaJIT. | ||
130 | 132 | ||
131 | (end) | 133 | (end) |
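The hazard described in the new README note can be made concrete with a short C sketch (illustrative only, not Lanes code; it assumes a Lua build where lua_newstate() is usable): the allocator of one state is propagated to a second state, so closing the first state can invalidate memory the second one still uses.

```c
#include <lua.h>
#include <lauxlib.h>

/* The propagation pattern the README note warns against (illustration only). */
void allocator_propagation_hazard( void)
{
    lua_State* first = luaL_newstate();
    void* allocUD;
    lua_Alloc allocF = lua_getallocf( first, &allocUD);

    /* 'second' borrows the allocator that belongs to 'first' */
    lua_State* second = lua_newstate( allocF, allocUD);

    /* Under LuaJIT, closing 'first' may free memory still referenced by 'second',
       because the allocator assumes everything it handed out belongs to 'first'. */
    lua_close( first);

    /* ... any use of 'second' from here on is unsafe ... */
    lua_close( second);
}
```

This is why the commit stops propagating the creator's allocator by default and leaves the choice to the new 'allocator' setting.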
diff --git a/docs/index.html b/docs/index.html
index 723766d..460a786 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -324,12 +324,37 @@ | |||
324 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> | 324 | <tt>nil</tt>/<tt>false</tt>/<tt>true</tt> |
325 | </td> | 325 | </td> |
326 | <td> | 326 | <td> |
327 | REPLACED BY <tt>allocator="protected"</tt> AS OF VERSION v3.13.0. | ||
327 | (Since v3.5.2) If equal to <tt>true</tt>, Lanes wraps all calls to the state's allocator function inside a mutex. Since v3.6.3, when left unset, Lanes attempts to autodetect this value for LuaJIT (the guess might be wrong if <tt>"ffi"</tt> isn't loaded though). | 328 | (Since v3.5.2) If equal to <tt>true</tt>, Lanes wraps all calls to the state's allocator function inside a mutex. Since v3.6.3, when left unset, Lanes attempts to autodetect this value for LuaJIT (the guess might be wrong if <tt>"ffi"</tt> isn't loaded though). |
328 | Default is <tt>true</tt> when Lanes detects it is run by LuaJIT, else <tt>nil</tt>. | 329 | Default is <tt>true</tt> when Lanes detects it is run by LuaJIT, else <tt>nil</tt>. |
329 | </td> | 330 | </td> |
330 | </tr> | 331 | </tr> |
331 | 332 | ||
332 | <tr valign=top> | 333 | <tr valign=top> |
334 | <td id="allocator"> | ||
335 | <code>.allocator</code> | ||
336 | </td> | ||
337 | <td> | ||
338 | <tt>nil</tt>/<tt>"protected"</tt>/function | ||
339 | </td> | ||
340 | <td> | ||
341 | (Since v3.13.0)<br/> | ||
342 | If <tt>nil</tt>, Lua states are created with <tt>luaL_newstate()</tt> and use the default allocator.<br/> | ||
343 | If <tt>"protected"</tt>, the default allocator obtained from <tt>lua_getallocf()</tt> in the state that initializes Lanes is wrapped inside a critical section and used in all newly created states.<br/> | ||
344 | If a <tt>function</tt>, this function is called prior to creating the state. It should return a full userdata containing the following structure: | ||
345 | <table border="1" bgcolor="#E0E0FF" cellpadding="10" style="width:50%"> | ||
346 | <tr> | ||
347 | <td> | ||
348 | <pre> struct { lua_Alloc allocF; void* allocUD;}</pre> | ||
349 | </td> | ||
350 | </tr> | ||
351 | </table> | ||
352 | The contents will be used to create the state with <tt>lua_newstate( allocF, allocUD)</tt>. | ||
353 | This option is mostly useful for embedders that want to provide a different allocator to each lane, for example to have each one work in a separate memory pool, removing the need for the allocator itself to be thread-safe. | ||
354 | </td> | ||
355 | </tr> | ||
356 | |||
357 | <tr valign=top> | ||
333 | <td id="demote_full_userdata"> | 358 | <td id="demote_full_userdata"> |
334 | <code>.demote_full_userdata</code> | 359 | <code>.demote_full_userdata</code> |
335 | </td> | 360 | </td> |
diff --git a/src/keeper.c b/src/keeper.c
index ae3e2a8..7eda598 100644
--- a/src/keeper.c
+++ b/src/keeper.c
@@ -657,7 +657,7 @@ void init_keepers( Universe* U, lua_State* L) | |||
657 | for( i = 0; i < nb_keepers; ++ i) // keepersUD | 657 | for( i = 0; i < nb_keepers; ++ i) // keepersUD |
658 | { | 658 | { |
659 | // note that we will leak K if we raise an error later | 659 | // note that we will leak K if we raise an error later |
660 | lua_State* K = PROPAGATE_ALLOCF_ALLOC(); | 660 | lua_State* K = create_state( U, L); |
661 | if( K == NULL) | 661 | if( K == NULL) |
662 | { | 662 | { |
663 | (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); | 663 | (void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory"); |
diff --git a/src/lanes.c b/src/lanes.c
index c8e012c..ccb32c0 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -580,25 +580,6 @@ static bool_t selfdestruct_remove( Lane* s) | |||
580 | } | 580 | } |
581 | 581 | ||
582 | /* | 582 | /* |
583 | ** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT) | ||
584 | */ | ||
585 | struct ProtectedAllocator_s | ||
586 | { | ||
587 | lua_Alloc allocF; | ||
588 | void* allocUD; | ||
589 | MUTEX_T lock; | ||
590 | }; | ||
591 | void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) | ||
592 | { | ||
593 | void* p; | ||
594 | struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud; | ||
595 | MUTEX_LOCK( &s->lock); | ||
596 | p = s->allocF( s->allocUD, ptr, osize, nsize); | ||
597 | MUTEX_UNLOCK( &s->lock); | ||
598 | return p; | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * Process end; cancel any still free-running threads | 583 | * Process end; cancel any still free-running threads |
603 | */ | 584 | */ |
604 | static int selfdestruct_gc( lua_State* L) | 585 | static int selfdestruct_gc( lua_State* L) |
@@ -679,15 +660,9 @@ static int selfdestruct_gc( lua_State* L) | |||
679 | 660 | ||
680 | // If some lanes are currently cleaning after themselves, wait until they are done. | 661 | // If some lanes are currently cleaning after themselves, wait until they are done. |
681 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). | 662 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). |
663 | while( U->selfdestructing_count > 0) | ||
682 | { | 664 | { |
683 | bool_t again = TRUE; | 665 | YIELD(); |
684 | do | ||
685 | { | ||
686 | MUTEX_LOCK( &U->selfdestruct_cs); | ||
687 | again = (U->selfdestructing_count > 0) ? TRUE : FALSE; | ||
688 | MUTEX_UNLOCK( &U->selfdestruct_cs); | ||
689 | YIELD(); | ||
690 | } while( again); | ||
691 | } | 666 | } |
692 | 667 | ||
693 | //--- | 668 | //--- |
@@ -727,6 +702,13 @@ static int selfdestruct_gc( lua_State* L) | |||
727 | } | 702 | } |
728 | } | 703 | } |
729 | 704 | ||
705 | // If some lanes are currently cleaning after themselves, wait until they are done. | ||
706 | // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). | ||
707 | while( U->selfdestructing_count > 0) | ||
708 | { | ||
709 | YIELD(); | ||
710 | } | ||
711 | |||
730 | // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 | 712 | // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 |
731 | lua_settop( L, 0); | 713 | lua_settop( L, 0); |
732 | // no need to mutex-protect this as all threads in the universe are gone at that point | 714 | // no need to mutex-protect this as all threads in the universe are gone at that point |
@@ -740,18 +722,7 @@ static int selfdestruct_gc( lua_State* L) | |||
740 | close_keepers( U, L); | 722 | close_keepers( U, L); |
741 | 723 | ||
742 | // remove the protected allocator, if any | 724 | // remove the protected allocator, if any |
743 | { | 725 | cleanup_allocator_function( U, L); |
744 | void* ud; | ||
745 | lua_Alloc allocF = lua_getallocf( L, &ud); | ||
746 | |||
747 | if( allocF == protected_lua_Alloc) | ||
748 | { | ||
749 | struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud; | ||
750 | lua_setallocf( L, s->allocF, s->allocUD); | ||
751 | MUTEX_FREE( &s->lock); | ||
752 | s->allocF( s->allocUD, s, sizeof( struct ProtectedAllocator_s), 0); | ||
753 | } | ||
754 | } | ||
755 | 726 | ||
756 | #if HAVE_LANE_TRACKING | 727 | #if HAVE_LANE_TRACKING |
757 | MUTEX_FREE( &U->tracking_cs); | 728 | MUTEX_FREE( &U->tracking_cs); |
@@ -2097,24 +2068,6 @@ LUAG_FUNC( configure) | |||
2097 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); | 2068 | DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); |
2098 | DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); | 2069 | DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth); |
2099 | 2070 | ||
2100 | lua_getfield( L, 1, "protect_allocator"); // settings protect_allocator | ||
2101 | if( lua_toboolean( L, -1)) | ||
2102 | { | ||
2103 | void* allocUD; | ||
2104 | lua_Alloc allocF = lua_getallocf( L, &allocUD); | ||
2105 | if( allocF != protected_lua_Alloc) // just in case | ||
2106 | { | ||
2107 | struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocF( allocUD, NULL, 0, sizeof( struct ProtectedAllocator_s)); | ||
2108 | s->allocF = allocF; | ||
2109 | s->allocUD = allocUD; | ||
2110 | MUTEX_INIT( &s->lock); | ||
2111 | lua_setallocf( L, protected_lua_Alloc, s); | ||
2112 | } | ||
2113 | } | ||
2114 | lua_pop( L, 1); // settings | ||
2115 | STACK_MID( L, 1); | ||
2116 | |||
2117 | // grab or create the universe | ||
2118 | if( U == NULL) | 2071 | if( U == NULL) |
2119 | { | 2072 | { |
2120 | U = universe_create( L); // settings universe | 2073 | U = universe_create( L); // settings universe |
@@ -2144,6 +2097,7 @@ LUAG_FUNC( configure) | |||
2144 | MUTEX_INIT( &U->deep_lock); | 2097 | MUTEX_INIT( &U->deep_lock); |
2145 | MUTEX_INIT( &U->mtid_lock); | 2098 | MUTEX_INIT( &U->mtid_lock); |
2146 | U->selfdestruct_first = SELFDESTRUCT_END; | 2099 | U->selfdestruct_first = SELFDESTRUCT_END; |
2100 | initialize_allocator_function( U, L); | ||
2147 | initialize_on_state_create( U, L); | 2101 | initialize_on_state_create( U, L); |
2148 | init_keepers( U, L); | 2102 | init_keepers( U, L); |
2149 | STACK_MID( L, 1); | 2103 | STACK_MID( L, 1); |
diff --git a/src/lanes.lua b/src/lanes.lua
index 6779095..15908fa 100644
--- a/src/lanes.lua
+++ b/src/lanes.lua
@@ -76,8 +76,7 @@ lanes.configure = function( settings_) | |||
76 | track_lanes = false, | 76 | track_lanes = false, |
77 | demote_full_userdata = nil, | 77 | demote_full_userdata = nil, |
78 | verbose_errors = false, | 78 | verbose_errors = false, |
79 | -- LuaJIT provides a thread-unsafe allocator by default, so we need to protect it when used in parallel lanes | 79 | allocator = nil |
80 | protect_allocator = (package.loaded.jit and jit.version and package.loaded.ffi and (package.loaded.ffi.abi( "32bit") or package.loaded.ffi.abi( "gc64"))) and true or false | ||
81 | } | 80 | } |
82 | local boolean_param_checker = function( val_) | 81 | local boolean_param_checker = function( val_) |
83 | -- non-'boolean-false' should be 'boolean-true' or nil | 82 | -- non-'boolean-false' should be 'boolean-true' or nil |
@@ -90,7 +89,10 @@ lanes.configure = function( settings_) | |||
90 | return type( val_) == "number" and val_ > 0 | 89 | return type( val_) == "number" and val_ > 0 |
91 | end, | 90 | end, |
92 | with_timers = boolean_param_checker, | 91 | with_timers = boolean_param_checker, |
93 | protect_allocator = boolean_param_checker, | 92 | allocator = function( val_) |
93 | -- can be nil, "protected", or a function | ||
94 | return val_ and (type( val_) == "function" or val_ == "protected") or true | ||
95 | end, | ||
94 | on_state_create = function( val_) | 96 | on_state_create = function( val_) |
95 | -- on_state_create may be nil or a function | 97 | -- on_state_create may be nil or a function |
96 | return val_ and type( val_) == "function" or true | 98 | return val_ and type( val_) == "function" or true |
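For completeness, a sketch of how a host application could feed such a C function through the settings table whose checker accepts nil, "protected", or a function (it reuses the hypothetical provide_lane_allocator from the earlier sketch; the call sequence is illustrative, not the only way to configure Lanes):

```c
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int provide_lane_allocator( lua_State* L); /* the provider from the earlier sketch */

void configure_lanes_with_custom_allocator( lua_State* L)
{
    luaL_openlibs( L);

    lua_getglobal( L, "require");                      // require
    lua_pushliteral( L, "lanes");                      // require "lanes"
    lua_call( L, 1, 1);                                // lanes
    lua_getfield( L, -1, "configure");                 // lanes configure

    lua_newtable( L);                                  // lanes configure settings
    lua_pushcfunction( L, provide_lane_allocator);     // lanes configure settings provider
    lua_setfield( L, -2, "allocator");                 // lanes configure settings

    lua_call( L, 1, 0);                                // lanes
    lua_pop( L, 1);                                    //
}
```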
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index acbe690..e40a615 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -12,25 +12,7 @@ | |||
12 | #define inline __inline | 12 | #define inline __inline |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | // For some reason, LuaJIT 64bits doesn't support lua_newstate() | 15 | #define USE_DEBUG_SPEW 0 |
16 | #ifndef PROPAGATE_ALLOCF //you should #define PROPAGATE_ALLOCF 1 for LuaJIT in GC64 mode | ||
17 | #if defined(LUA_JITLIBNAME) && (defined(__x86_64__) || defined(_M_X64)) | ||
18 | //#pragma message( "LuaJIT 64 bits detected: don't propagate allocf") | ||
19 | #define PROPAGATE_ALLOCF 0 | ||
20 | #else // LuaJIT x64 | ||
21 | //#pragma message( "PUC-Lua detected: propagate allocf") | ||
22 | #define PROPAGATE_ALLOCF 1 | ||
23 | #endif // LuaJIT x64 | ||
24 | #endif // PROPAGATE_ALLOCF defined | ||
25 | #if PROPAGATE_ALLOCF | ||
26 | #define PROPAGATE_ALLOCF_PREP( L) void* allocUD; lua_Alloc allocF = lua_getallocf( L, &allocUD) | ||
27 | #define PROPAGATE_ALLOCF_ALLOC() lua_newstate( allocF, allocUD) | ||
28 | #else // PROPAGATE_ALLOCF | ||
29 | #define PROPAGATE_ALLOCF_PREP( L) | ||
30 | #define PROPAGATE_ALLOCF_ALLOC() luaL_newstate() | ||
31 | #endif // PROPAGATE_ALLOCF | ||
32 | |||
33 | #define USE_DEBUG_SPEW 0 | ||
34 | #if USE_DEBUG_SPEW | 16 | #if USE_DEBUG_SPEW |
35 | extern char const* debugspew_indent; | 17 | extern char const* debugspew_indent; |
36 | #define INDENT_BEGIN "%.*s " | 18 | #define INDENT_BEGIN "%.*s " |
diff --git a/src/tools.c b/src/tools.c
index f8fc342..8885dea 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -105,6 +105,79 @@ void luaG_dump( lua_State* L) | |||
105 | } | 105 | } |
106 | #endif // _DEBUG | 106 | #endif // _DEBUG |
107 | 107 | ||
108 | // ################################################################################################ | ||
109 | |||
110 | static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) | ||
111 | { | ||
112 | void* p; | ||
113 | ProtectedAllocator* s = (ProtectedAllocator*) ud; | ||
114 | MUTEX_LOCK( &s->lock); | ||
115 | p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); | ||
116 | MUTEX_UNLOCK( &s->lock); | ||
117 | return p; | ||
118 | } | ||
119 | |||
120 | static int luaG_provide_protected_allocator( lua_State* L) | ||
121 | { | ||
122 | Universe* U = universe_get( L); | ||
123 | AllocatorDefinition* def = lua_newuserdata( L, sizeof(AllocatorDefinition)); | ||
124 | def->allocF = protected_lua_Alloc; | ||
125 | def->allocUD = &U->protected_allocator; | ||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | // Do I need to disable this when compiling for LuaJIT to prevent issues? | ||
130 | void initialize_allocator_function( Universe* U, lua_State* L) | ||
131 | { | ||
132 | STACK_CHECK( L, 0); | ||
133 | lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" | ||
134 | if( !lua_isnil( L, -1)) | ||
135 | { | ||
136 | // store C function pointer in an internal variable | ||
137 | U->provide_allocator = lua_tocfunction( L, -1); // settings allocator | ||
138 | if( U->provide_allocator != NULL) | ||
139 | { | ||
140 | // make sure the function doesn't have upvalues | ||
141 | char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? | ||
142 | if( upname != NULL) // should be "" for C functions with upvalues if any | ||
143 | { | ||
144 | (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); | ||
145 | } | ||
146 | // remove this C function from the config table so that it doesn't cause problems | ||
147 | // when we transfer the config table in newly created Lua states | ||
148 | lua_pushnil( L); // settings allocator nil | ||
149 | lua_setfield( L, -3, "allocator"); // settings allocator | ||
150 | } | ||
151 | else if( lua_type( L, -1) == LUA_TSTRING) | ||
152 | { | ||
153 | // initialize all we need for the protected allocator | ||
154 | MUTEX_INIT( &U->protected_allocator.lock); // the mutex | ||
155 | // and the original allocator to call from inside protection by the mutex | ||
156 | U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); | ||
157 | // before a state is created, this function will be called to obtain the allocator | ||
158 | U->provide_allocator = luaG_provide_protected_allocator; | ||
159 | |||
160 | lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); | ||
161 | } | ||
162 | } | ||
163 | lua_pop( L, 1); // settings | ||
164 | STACK_END( L, 0); | ||
165 | } | ||
166 | |||
167 | void cleanup_allocator_function( Universe* U, lua_State* L) | ||
168 | { | ||
169 | // remove the protected allocator, if any | ||
170 | if( U->protected_allocator.definition.allocF != NULL) | ||
171 | { | ||
172 | // install the non-protected allocator | ||
173 | lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); | ||
174 | // release the mutex | ||
175 | MUTEX_FREE( &U->protected_allocator.lock); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | // ################################################################################################ | ||
180 | |||
108 | void initialize_on_state_create( Universe* U, lua_State* L) | 181 | void initialize_on_state_create( Universe* U, lua_State* L) |
109 | { | 182 | { |
110 | STACK_CHECK( L, 0); | 183 | STACK_CHECK( L, 0); |
@@ -629,6 +702,31 @@ void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMo | |||
629 | } | 702 | } |
630 | } | 703 | } |
631 | 704 | ||
705 | lua_State* create_state( Universe* U, lua_State* from_) | ||
706 | { | ||
707 | lua_State* L; | ||
708 | if( U->provide_allocator != NULL) | ||
709 | { | ||
710 | lua_pushcclosure( from_, U->provide_allocator, 0); | ||
711 | lua_call( from_, 0, 1); | ||
712 | { | ||
713 | AllocatorDefinition* def = lua_touserdata( from_, -1); | ||
714 | L = lua_newstate( def->allocF, def->allocUD); | ||
715 | } | ||
716 | lua_pop( from_, 1); | ||
717 | } | ||
718 | else | ||
719 | { | ||
720 | L = luaL_newstate(); | ||
721 | } | ||
722 | |||
723 | if( L == NULL) | ||
724 | { | ||
725 | (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); | ||
726 | } | ||
727 | return L; | ||
728 | } | ||
729 | |||
632 | /* | 730 | /* |
633 | * Like 'luaL_openlibs()' but allows the set of libraries be selected | 731 | * Like 'luaL_openlibs()' but allows the set of libraries be selected |
634 | * | 732 | * |
@@ -644,16 +742,7 @@ void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMo | |||
644 | */ | 742 | */ |
645 | lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) | 743 | lua_State* luaG_newstate( Universe* U, lua_State* from_, char const* libs_) |
646 | { | 744 | { |
647 | // re-use alloc function from the originating state | 745 | lua_State* L = create_state( U, from_); |
648 | #if PROPAGATE_ALLOCF | ||
649 | PROPAGATE_ALLOCF_PREP( from_); | ||
650 | #endif // PROPAGATE_ALLOCF | ||
651 | lua_State* L = PROPAGATE_ALLOCF_ALLOC(); | ||
652 | |||
653 | if( L == NULL) | ||
654 | { | ||
655 | (void) luaL_error( from_, "luaG_newstate() failed while creating state; out of memory"); | ||
656 | } | ||
657 | 746 | ||
658 | STACK_GROW( L, 2); | 747 | STACK_GROW( L, 2); |
659 | STACK_CHECK_ABS( L, 0); | 748 | STACK_CHECK_ABS( L, 0); |
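The per-lane memory pool idea mentioned in the documentation (and served by provide_allocator above) can be sketched with a deliberately simple, non-thread-safe arena allocator; LaneArena and arena_lua_Alloc are illustrative names, not part of Lanes. Because each lane state would own its own arena, this allocator needs no locking, unlike the shared "protected" allocator:

```c
#include <stddef.h>
#include <string.h>

/* one arena per lane: a bump allocator implementing the lua_Alloc contract;
   "free" is a no-op (the arena is released wholesale when the lane dies) and
   growing reallocations copy, which is wasteful but keeps the sketch short */
typedef struct
{
    char* base;   /* backing storage supplied by the embedder, 16-byte aligned */
    size_t size;  /* capacity in bytes */
    size_t used;  /* bump offset */
} LaneArena;

static void* arena_lua_Alloc( void* ud, void* ptr, size_t osize, size_t nsize)
{
    LaneArena* a = (LaneArena*) ud;
    if( nsize == 0)
    {
        return NULL;                        /* individual frees are ignored */
    }
    if( ptr != NULL && nsize <= osize)
    {
        return ptr;                         /* shrink in place */
    }
    {
        size_t const rounded = (nsize + 15) & ~(size_t) 15; /* keep blocks 16-byte aligned */
        char* p;
        if( a->used + rounded > a->size || a->used + rounded < a->used)
        {
            return NULL;                    /* arena exhausted: Lua raises an out-of-memory error */
        }
        p = a->base + a->used;
        a->used += rounded;
        if( ptr != NULL)
        {
            memcpy( p, ptr, osize);         /* growing: osize < nsize, copy the whole old block */
        }
        return p;
    }
}
```

An allocator provider like the one sketched earlier would then pick a fresh LaneArena for the lane about to be created and return it as allocUD alongside arena_lua_Alloc in the AllocatorDefinition userdata.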
diff --git a/src/tools.h b/src/tools.h
index ca0b9fc..71460c3 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -22,6 +22,7 @@ typedef struct s_Universe Universe; | |||
22 | void luaG_dump( lua_State* L); | 22 | void luaG_dump( lua_State* L); |
23 | #endif // _DEBUG | 23 | #endif // _DEBUG |
24 | 24 | ||
25 | lua_State* create_state( Universe* U, lua_State* from_); | ||
25 | lua_State* luaG_newstate( Universe* U, lua_State* _from, char const* libs); | 26 | lua_State* luaG_newstate( Universe* U, lua_State* _from, char const* libs); |
26 | 27 | ||
27 | // ################################################################################################ | 28 | // ################################################################################################ |
@@ -36,6 +37,9 @@ int luaG_new_require( lua_State* L); | |||
36 | 37 | ||
37 | void populate_func_lookup_table( lua_State* L, int _i, char const* _name); | 38 | void populate_func_lookup_table( lua_State* L, int _i, char const* _name); |
38 | void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State *L); | 39 | void serialize_require( DEBUGSPEW_PARAM_COMMA( Universe* U) lua_State *L); |
40 | void initialize_allocator_function( Universe* U, lua_State* L); | ||
41 | void cleanup_allocator_function( Universe* U, lua_State* L); | ||
42 | |||
39 | void initialize_on_state_create( Universe* U, lua_State* L); | 43 | void initialize_on_state_create( Universe* U, lua_State* L); |
40 | void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_); | 44 | void call_on_state_create( Universe* U, lua_State* L, lua_State* from_, LookupMode mode_); |
41 | 45 | ||
diff --git a/src/universe.h b/src/universe.h
index 359dc90..8727bf7 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -24,6 +24,24 @@ typedef struct s_Lane Lane; | |||
24 | 24 | ||
25 | // ################################################################################################ | 25 | // ################################################################################################ |
26 | 26 | ||
27 | // everything we need to provide to lua_newstate() | ||
28 | struct AllocatorDefinition_s | ||
29 | { | ||
30 | lua_Alloc allocF; | ||
31 | void* allocUD; | ||
32 | }; | ||
33 | typedef struct AllocatorDefinition_s AllocatorDefinition; | ||
34 | |||
35 | // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator | ||
36 | struct ProtectedAllocator_s | ||
37 | { | ||
38 | AllocatorDefinition definition; | ||
39 | MUTEX_T lock; | ||
40 | }; | ||
41 | typedef struct ProtectedAllocator_s ProtectedAllocator; | ||
42 | |||
43 | // ################################################################################################ | ||
44 | |||
27 | // everything regarding the a Lanes universe is stored in that global structure | 45 | // everything regarding the a Lanes universe is stored in that global structure |
28 | // held as a full userdata in the master Lua state that required it for the first time | 46 | // held as a full userdata in the master Lua state that required it for the first time |
29 | // don't forget to initialize all members in LG_configure() | 47 | // don't forget to initialize all members in LG_configure() |
@@ -34,8 +52,16 @@ struct s_Universe | |||
34 | 52 | ||
35 | bool_t demoteFullUserdata; | 53 | bool_t demoteFullUserdata; |
36 | 54 | ||
55 | // before a state is created, this function will be called to obtain the allocator | ||
56 | lua_CFunction provide_allocator; | ||
57 | |||
58 | // after a state is created, this function will be called right after the bases libraries are loaded | ||
37 | lua_CFunction on_state_create_func; | 59 | lua_CFunction on_state_create_func; |
38 | 60 | ||
61 | // Initialized and used only if allocator="protected" is found in the configuration settings | ||
62 | // contains a mutex and the original allocator definition | ||
63 | ProtectedAllocator protected_allocator; | ||
64 | |||
39 | Keepers* keepers; | 65 | Keepers* keepers; |
40 | 66 | ||
41 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object | 67 | // Initialized by 'init_once_LOCKED()': the deep userdata Linda object |