author     Benoit Germain <bnt.germain@gmail.com>    2022-02-07 14:51:54 +0100
committer  Benoit Germain <bnt.germain@gmail.com>    2022-02-07 14:51:54 +0100
commit     c913a6747c420ff12fc1f0c39df791215ad2fcfd
tree       8175f6fa39e1ca22881c178a0c3cfde5f95fdeb5
parent     d5c81657982e96f768f76ea6c01db536f8649284
removed explicit calls to malloc/free
Lane and linda userdata were allocated with malloc/free, preventing embedders from fully controlling memory operations. Now all internal Lanes allocations go through the allocator function of the master Lua state.
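The replacement calls follow the standard lua_Alloc contract: a NULL pointer with a non-zero new size allocates, a live pointer with a new size of 0 frees. Below is a minimal sketch of that pattern, assuming an AllocatorDefinition that simply stores the allocF/allocUD pair captured from the master state; the helper names lanes_alloc/lanes_free are illustrative only, the commit itself inlines the calls.

#include <stddef.h>
#include "lua.h"

// assumed shape, mirroring how the fields are used in the diff below
typedef struct AllocatorDefinition_s
{
    lua_Alloc allocF;   // allocator function captured from the master Lua state
    void* allocUD;      // its opaque userdata
} AllocatorDefinition;

// malloc replacement: NULL old pointer, old size 0, requested size as new size
static void* lanes_alloc( AllocatorDefinition* allocD, size_t size)
{
    return allocD->allocF( allocD->allocUD, NULL, 0, size);
}

// free replacement: pass the pointer with its original size and a new size of 0
static void lanes_free( AllocatorDefinition* allocD, void* p, size_t size)
{
    (void) allocD->allocF( allocD->allocUD, p, size, 0);
}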
Diffstat
-rw-r--r--  CHANGES         |  3
-rw-r--r--  src/lanes.c     | 42
-rw-r--r--  src/linda.c     |  7
-rw-r--r--  src/tools.c     | 20
-rw-r--r--  src/universe.h  |  4

5 files changed, 46 insertions, 30 deletions
diff --git a/CHANGES b/CHANGES
index 0aca28a..2aca378 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,8 @@
 CHANGES:
 
+CHANGE 151: BGe 7-Feb-22
+ * Lanes no longer relies on malloc/free for internal allocations, but uses the primary alloc function from the master Lua state
+
 CHANGE 150: BGe 22-Sep-21
  * fix require() wrapper to return all values returned by original require()
 
diff --git a/src/lanes.c b/src/lanes.c
index c5b6c4f..8817071 100644
--- a/src/lanes.c
+++ b/src/lanes.c
@@ -238,6 +238,7 @@ static bool_t tracking_remove( Lane* s)
 
 static void lane_cleanup( Lane* s)
 {
+    AllocatorDefinition* const allocD = &s->U->protected_allocator.definition;
     // Clean up after a (finished) thread
     //
 #if THREADWAIT_METHOD == THREADWAIT_CONDVAR
@@ -253,7 +254,7 @@ static void lane_cleanup( Lane* s)
     }
 #endif // HAVE_LANE_TRACKING
 
-    free( s);
+    allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0);
 }
 
 /*
@@ -457,9 +458,9 @@ static int selfdestruct_gc( lua_State* L)
                 // if we failed, and we know the thread is waiting on a linda
                 if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
                 {
-                    // signal the linda the wake up the thread so that it can react to the cancel query
+                    // signal the linda to wake up the thread so that it can react to the cancel query
                     // let us hope we never land here with a pointer on a linda that has been destroyed...
-                    SIGNAL_T *waiting_on = s->waiting_on;
+                    SIGNAL_T* waiting_on = s->waiting_on;
                     //s->waiting_on = NULL; // useful, or not?
                     SIGNAL_ALL( waiting_on);
                 }
@@ -1053,6 +1054,7 @@ LUAG_FUNC( lane_new)
 #define FIXED_ARGS 7
     int const nargs = lua_gettop(L) - FIXED_ARGS;
     Universe* U = universe_get( L);
+    AllocatorDefinition* const allocD = &U->protected_allocator.definition;
     ASSERT_L( nargs >= 0);
 
     // public Lanes API accepts a generic range -3/+3
@@ -1222,7 +1224,7 @@ LUAG_FUNC( lane_new)
     //
     // a Lane full userdata needs a single uservalue
     ud = lua_newuserdatauv( L, sizeof( Lane*), 1);                    // func libs priority globals package required gc_cb lane
-    s = *ud = (Lane*) malloc( sizeof( Lane));
+    s = *ud = (Lane*) allocD->allocF( allocD->allocUD, NULL, 0, sizeof(Lane));
     if( s == NULL)
     {
         return luaL_error( L, "could not create lane: out of memory");
@@ -1856,7 +1858,7 @@ LUAG_FUNC( configure)
 #endif // THREADAPI == THREADAPI_PTHREAD
 
     STACK_GROW( L, 4);
     STACK_CHECK_ABS( L, 1);                                           // settings
 
     DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
     DEBUGSPEW_CODE( if( U) ++ U->debugspew_indent_depth);
@@ -1913,10 +1915,10 @@ LUAG_FUNC( configure)
     serialize_require( DEBUGSPEW_PARAM_COMMA( U) L);
 
     // Retrieve main module interface table
     lua_pushvalue( L, lua_upvalueindex( 2));                          // settings M
     // remove configure() (this function) from the module interface
     lua_pushnil( L);                                                  // settings M nil
     lua_setfield( L, -2, "configure");                                // settings M
     // add functions to the module's table
     luaG_registerlibfuncs( L, lanes_functions);
 #if HAVE_LANE_TRACKING
@@ -1943,7 +1945,7 @@ LUAG_FUNC( configure)
     // prepare the metatable for threads
     // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname }
     //
     if( luaL_newmetatable( L, "Lane"))                                // settings M mt
     {
         lua_pushcfunction( L, LG_thread_gc);                          // settings M mt LG_thread_gc
         lua_setfield( L, -2, "__gc");                                 // settings M mt
@@ -1965,25 +1967,25 @@ LUAG_FUNC( configure)
         lua_setfield( L, -2, "__metatable");                          // settings M mt
     }
 
     lua_pushcclosure( L, LG_lane_new, 1);                             // settings M lane_new
     lua_setfield( L, -2, "lane_new");                                 // settings M
 
     // we can't register 'lanes.require' normally because we want to create an upvalued closure
     lua_getglobal( L, "require");                                     // settings M require
     lua_pushcclosure( L, LG_require, 1);                              // settings M lanes.require
     lua_setfield( L, -2, "require");                                  // settings M
 
     lua_pushfstring(
         L, "%d.%d.%d"
         , LANES_VERSION_MAJOR, LANES_VERSION_MINOR, LANES_VERSION_PATCH
     );                                                                // settings M VERSION
     lua_setfield( L, -2, "version");                                  // settings M
 
     lua_pushinteger(L, THREAD_PRIO_MAX);                              // settings M THREAD_PRIO_MAX
     lua_setfield( L, -2, "max_prio");                                 // settings M
 
     push_unique_key( L, CANCEL_ERROR);                                // settings M CANCEL_ERROR
     lua_setfield( L, -2, "cancel_error");                             // settings M
 
     STACK_MID( L, 2);                                                 // reference stack contains only the function argument 'settings'
     // we'll need this every time we transfer some C function from/to this state
diff --git a/src/linda.c b/src/linda.c
index a9c9710..21b38fe 100644
--- a/src/linda.c
+++ b/src/linda.c
@@ -758,6 +758,9 @@ LUAG_FUNC( linda_towatch)
 */
 static void* linda_id( lua_State* L, DeepOp op_)
 {
+    Universe* const U = universe_get(L);
+    AllocatorDefinition* const allocD = &U->protected_allocator.definition;
+
     switch( op_)
     {
         case eDO_new:
@@ -794,7 +797,7 @@ static void* linda_id( lua_State* L, DeepOp op_)
             * One can use any memory allocation scheme.
             * just don't use L's allocF because we don't know which state will get the honor of GCing the linda
             */
-            s = (struct s_Linda*) malloc( sizeof(struct s_Linda) + name_len); // terminating 0 is already included
+            s = (struct s_Linda*) allocD->allocF( allocD->allocUD, NULL, 0, sizeof(struct s_Linda) + name_len); // terminating 0 is already included
             if( s)
             {
                 s->prelude.magic.value = DEEP_VERSION.value;
@@ -827,7 +830,7 @@ static void* linda_id( lua_State* L, DeepOp op_)
             // There aren't any lanes waiting on these lindas, since all proxies have been gc'ed. Right?
             SIGNAL_FREE( &linda->read_happened);
             SIGNAL_FREE( &linda->write_happened);
-            free( linda);
+            allocD->allocF( allocD->allocUD, linda, sizeof(struct s_Linda) + strlen(linda->name), 0);
             return NULL;
         }
 
diff --git a/src/tools.c b/src/tools.c
index acb78e6..e72d441 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -174,11 +174,12 @@ static int luaG_provide_protected_allocator( lua_State* L)
     return 1;
 }
 
+// called once at the creation of the universe (therefore L is the master Lua state everything originates from)
 // Do I need to disable this when compiling for LuaJIT to prevent issues?
 void initialize_allocator_function( Universe* U, lua_State* L)
 {
     STACK_CHECK( L, 0);
     lua_getfield( L, -1, "allocator");                                // settings allocator|nil|"protected"
     if( !lua_isnil( L, -1))
     {
         // store C function pointer in an internal variable
@@ -186,17 +187,17 @@ void initialize_allocator_function( Universe* U, lua_State* L)
         if( U->provide_allocator != NULL)
         {
             // make sure the function doesn't have upvalues
             char const* upname = lua_getupvalue( L, -1, 1);           // settings allocator upval?
             if( upname != NULL) // should be "" for C functions with upvalues if any
             {
                 (void) luaL_error( L, "config.allocator() shouldn't have upvalues");
             }
             // remove this C function from the config table so that it doesn't cause problems
             // when we transfer the config table in newly created Lua states
             lua_pushnil( L);                                          // settings allocator nil
             lua_setfield( L, -3, "allocator");                        // settings allocator
         }
-        else if( lua_type( L, -1) == LUA_TSTRING)
+        else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected"
         {
             // initialize all we need for the protected allocator
             MUTEX_INIT( &U->protected_allocator.lock);                // the mutex
@@ -208,7 +209,14 @@ void initialize_allocator_function( Universe* U, lua_State* L)
             lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator);
         }
     }
-    lua_pop( L, 1);                                                   // settings
+    else
+    {
+        // initialize the mutex even if we are not going to use it, because cleanup_allocator_function will deinitialize it
+        MUTEX_INIT( &U->protected_allocator.lock);
+        // just grab whatever allocator was provided to lua_newstate
+        U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD);
+    }
+    lua_pop( L, 1);                                                   // settings
     STACK_END( L, 0);
 }
 
diff --git a/src/universe.h b/src/universe.h
index 248a117..e4c1191 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -42,7 +42,7 @@ typedef struct ProtectedAllocator_s ProtectedAllocator;
 
 // ################################################################################################
 
-// everything regarding the a Lanes universe is stored in that global structure
+// everything regarding the Lanes universe is stored in that global structure
 // held as a full userdata in the master Lua state that required it for the first time
 // don't forget to initialize all members in LG_configure()
 struct s_Universe
@@ -58,7 +58,7 @@ struct s_Universe
     // after a state is created, this function will be called right after the bases libraries are loaded
     lua_CFunction on_state_create_func;
 
-    // Initialized and used only if allocator="protected" is found in the configuration settings
+    // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex
     // contains a mutex and the original allocator definition
     ProtectedAllocator protected_allocator;
 
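Seen from the embedder side (an illustration, not part of the commit): with the default settings, the allocator handed to lua_newstate for the master state is the one Lanes now reuses for its internal Lane and linda blocks, so a custom lua_Alloc finally sees those allocations as well. The names l_alloc and AllocStats below are hypothetical.

#include <stdlib.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

typedef struct { size_t in_use; } AllocStats;

// standard lua_Alloc wrapper around realloc/free that tracks live bytes
static void* l_alloc( void* ud, void* ptr, size_t osize, size_t nsize)
{
    AllocStats* stats = (AllocStats*) ud;
    if( nsize == 0)
    {
        if( ptr != NULL)            // when ptr is NULL, osize is a Lua type tag, not a size
        {
            stats->in_use -= osize;
        }
        free( ptr);
        return NULL;
    }
    else
    {
        void* p = realloc( ptr, nsize);
        if( p != NULL)
        {
            stats->in_use += nsize - ((ptr != NULL) ? osize : 0);
        }
        return p;
    }
}

int main( void)
{
    AllocStats stats = { 0 };
    lua_State* L = lua_newstate( l_alloc, &stats);   // master state Lanes will be configured from
    luaL_openlibs( L);
    // ... require and configure Lanes here: Lane/linda userdata allocations now go through l_alloc ...
    lua_close( L);
    return 0;
}

If allocator = "protected" is set in the configuration, the same allocator definition is additionally wrapped with a mutex, as described by the updated comment in universe.h above.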