From be2f2b4314799d38901c9e80dd35dee63f541c90 Mon Sep 17 00:00:00 2001 From: Benoit Germain Date: Tue, 26 Mar 2024 11:36:30 +0100 Subject: C++ migration: refactor AllocatorDefinition and ProtectedAllocator into classes --- docs/index.html | 7 ++-- src/keeper.cpp | 12 ++----- src/lanes.cpp | 20 +++-------- src/linda.cpp | 18 +++------- src/state.cpp | 4 +-- src/tools.cpp | 87 +++++++++++++++++++-------------------------- src/tools.h | 1 - src/universe.h | 86 ++++++++++++++++++++++++++++++++++++++++---- tests/protect_allocator.lua | 2 ++ 9 files changed, 137 insertions(+), 100 deletions(-) diff --git a/docs/index.html b/docs/index.html index e9c3239..b48c527 100644 --- a/docs/index.html +++ b/docs/index.html @@ -345,13 +345,12 @@
-
	struct { lua_Alloc allocF; void* allocUD;}
+
	struct { lua_Alloc allocF; void* allocUD; }
- The contents will be used to create the state with lua_newstate( allocF, allocUD). + The contents will be used to create the state with lua_newstate(allocF, allocUD). This option is mostly useful for embedders that want to provide different allocators to each lane, for example to have each one work in a different memory pool thus preventing the need for the allocator itself to be threadsafe. - Note however that linda deep proxy are allocated with the allocator from the master state, because they are not tied to a particular state. @@ -364,7 +363,7 @@ (Since v3.16.1)
- Controls which allocator is used for Lanest internal allocations (for keeper and deep userdata management). + Controls which allocator is used for Lanes internal allocations (for keeper, linda and lane management). If "libc", Lanes uses realloc and free.
If "allocator", Lanes uses whatever was obtained from the "allocator" setting.
This option is mostly useful for embedders that want control all memory allocations, but have issues when Lanes tries to use the Lua State allocator for internal purposes (especially with LuaJIT). diff --git a/src/keeper.cpp b/src/keeper.cpp index 19f6ae1..88bd4ff 100644 --- a/src/keeper.cpp +++ b/src/keeper.cpp @@ -614,11 +614,8 @@ void close_keepers(Universe* U) MUTEX_FREE(&U->keepers->keeper_array[i].keeper_cs); } // free the keeper bookkeeping structure - { - AllocatorDefinition* const allocD = &U->internal_allocator; - std::ignore = allocD->allocF(allocD->allocUD, U->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper), 0); - U->keepers = nullptr; - } + U->internal_allocator.free(U->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper)); + U->keepers = nullptr; } } @@ -649,10 +646,7 @@ void init_keepers(Universe* U, lua_State* L) // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states { size_t const bytes = sizeof(Keepers) + (nb_keepers - 1) * sizeof(Keeper); - { - AllocatorDefinition* const allocD = &U->internal_allocator; - U->keepers = (Keepers*) allocD->allocF(allocD->allocUD, nullptr, 0, bytes); - } + U->keepers = static_cast(U->internal_allocator.alloc(bytes)); if (U->keepers == nullptr) { (void) luaL_error(L, "init_keepers() failed while creating keeper array; out of memory"); diff --git a/src/lanes.cpp b/src/lanes.cpp index 99c5812..5945a1a 100644 --- a/src/lanes.cpp +++ b/src/lanes.cpp @@ -254,10 +254,7 @@ static void lane_cleanup( Lane* s) } #endif // HAVE_LANE_TRACKING() - { - AllocatorDefinition* const allocD = &s->U->internal_allocator; - (void) allocD->allocF(allocD->allocUD, s, sizeof(Lane), 0); - } + s->U->internal_allocator.free(s, sizeof(Lane)); } /* @@ -584,7 +581,7 @@ static int selfdestruct_gc( lua_State* L) close_keepers( U); // remove the protected allocator, if any - cleanup_allocator_function( U, L); + U->protected_allocator.removeFrom(L); U->Universe::~Universe(); @@ -1036,10 +1033,6 @@ 
static constexpr UniqueKey GCCB_KEY{ 0xcfb1f046ef074e88ull }; // LUAG_FUNC( lane_new) { - lua_State* L2; - Lane* s; - Lane** ud; - char const* libs_str = lua_tostring( L, 2); bool const have_priority{ !lua_isnoneornil(L, 3) }; int const priority = have_priority ? (int) lua_tointeger( L, 3) : THREAD_PRIO_DEFAULT; @@ -1066,7 +1059,7 @@ LUAG_FUNC( lane_new) DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); // populate with selected libraries at the same time - L2 = luaG_newstate( U, L, libs_str); // L // L2 + lua_State* const L2{ luaG_newstate(U, L, libs_str) }; // L // L2 STACK_GROW( L2, nargs + 3); // STACK_CHECK_START_REL(L2, 0); @@ -1219,11 +1212,8 @@ LUAG_FUNC( lane_new) // 's' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) // // a Lane full userdata needs a single uservalue - ud = (Lane**) lua_newuserdatauv( L, sizeof( Lane*), 1); // func libs priority globals package required gc_cb lane - { - AllocatorDefinition* const allocD = &U->internal_allocator; - s = *ud = (Lane*) allocD->allocF(allocD->allocUD, nullptr, 0, sizeof(Lane)); - } + Lane** const ud{ static_cast(lua_newuserdatauv(L, sizeof(Lane*), 1)) }; // func libs priority globals package required gc_cb lane + Lane* const s{ *ud = static_cast(U->internal_allocator.alloc(sizeof(Lane))) }; // don't forget to store the pointer in the userdata! if( s == nullptr) { return luaL_error( L, "could not create lane: out of memory"); diff --git a/src/linda.cpp b/src/linda.cpp index f2e39a8..eb2349e 100644 --- a/src/linda.cpp +++ b/src/linda.cpp @@ -759,7 +759,6 @@ static void* linda_id( lua_State* L, DeepOp op_) { case eDO_new: { - struct s_Linda* s; size_t name_len = 0; char const* linda_name = nullptr; unsigned long linda_group = 0; @@ -791,12 +790,9 @@ static void* linda_id( lua_State* L, DeepOp op_) * One can use any memory allocation scheme. 
* just don't use L's allocF because we don't know which state will get the honor of GCing the linda */ - Universe* const U = universe_get(L); - { - AllocatorDefinition* const allocD = &U->internal_allocator; - s = (struct s_Linda*) allocD->allocF(allocD->allocUD, nullptr, 0, sizeof(struct s_Linda) + name_len); // terminating 0 is already included - } - if( s) + Universe* const U{ universe_get(L) }; + struct s_Linda* s{ static_cast(U->internal_allocator.alloc(sizeof(struct s_Linda) + name_len)) }; // terminating 0 is already included + if (s) { s->prelude.DeepPrelude::DeepPrelude(); SIGNAL_INIT( &s->read_happened); @@ -812,12 +808,11 @@ static void* linda_id( lua_State* L, DeepOp op_) case eDO_delete: { - Keeper* K; struct s_Linda* linda = (struct s_Linda*) lua_touserdata( L, 1); ASSERT_L( linda); // Clean associated structures in the keeper state. - K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda)); + Keeper* const K{ keeper_acquire(linda->U->keepers, LINDA_KEEPER_HASHSEED(linda)) }; if( K && K->L) // can be nullptr if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup) { // hopefully this won't ever raise an error as we would jump to the closest pcall site while forgetting to release the keeper mutex... 
@@ -829,10 +824,7 @@ static void* linda_id( lua_State* L, DeepOp op_) SIGNAL_FREE( &linda->read_happened); SIGNAL_FREE( &linda->write_happened); linda->prelude.DeepPrelude::~DeepPrelude(); - { - AllocatorDefinition* const allocD = &linda->U->internal_allocator; - (void) allocD->allocF(allocD->allocUD, linda, sizeof(struct s_Linda) + strlen(linda->name), 0); - } + linda->U->internal_allocator.free(linda, sizeof(struct s_Linda) + strlen(linda->name)); return nullptr; } diff --git a/src/state.cpp b/src/state.cpp index c66242c..7bdaec9 100644 --- a/src/state.cpp +++ b/src/state.cpp @@ -260,14 +260,14 @@ lua_State* create_state( Universe* U, lua_State* from_) lua_call( from_, 0, 1); { AllocatorDefinition* const def = (AllocatorDefinition*) lua_touserdata( from_, -1); - L = lua_newstate( def->allocF, def->allocUD); + L = lua_newstate( def->m_allocF, def->m_allocUD); } lua_pop( from_, 1); } else { // reuse the allocator provided when the master state was created - L = lua_newstate( U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD); + L = lua_newstate(U->protected_allocator.m_allocF, U->protected_allocator.m_allocUD); } #endif // LUAJIT_FLAVOR() == 64 diff --git a/src/tools.cpp b/src/tools.cpp index 68846ba..89e5499 100644 --- a/src/tools.cpp +++ b/src/tools.cpp @@ -156,105 +156,92 @@ void luaG_dump( lua_State* L) // ################################################################################################ // same as PUC-Lua l_alloc -static void* libc_lua_Alloc(void* ud, void* ptr, size_t osize, size_t nsize) +extern "C" static void* libc_lua_Alloc([[maybe_unused]] void* ud, [[maybe_unused]] void* ptr_, size_t osize_, size_t nsize_) { - (void)ud; (void)osize; /* not used */ - if (nsize == 0) + if (nsize_ == 0) { - free(ptr); + free(ptr_); return nullptr; } else { - return realloc(ptr, nsize); + return realloc(ptr_, nsize_); } } -static void* protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize) -{ - void* p; - 
ProtectedAllocator* s = (ProtectedAllocator*) ud; - s->lock.lock(); - p = s->definition.allocF( s->definition.allocUD, ptr, osize, nsize); - s->lock.unlock(); - return p; -} +// ################################################################################################# -static int luaG_provide_protected_allocator( lua_State* L) +static int luaG_provide_protected_allocator(lua_State* L) { - Universe* U = universe_get( L); - AllocatorDefinition* const def = (AllocatorDefinition*) lua_newuserdatauv( L, sizeof(AllocatorDefinition), 0); - def->allocF = protected_lua_Alloc; - def->allocUD = &U->protected_allocator; + Universe* const U{ universe_get(L) }; + // push a new full userdata on the stack, giving access to the universe's protected allocator + AllocatorDefinition* const def{ new (L) AllocatorDefinition{ U->protected_allocator.makeDefinition() } }; return 1; } +// ################################################################################################# + // called once at the creation of the universe (therefore L is the master Lua state everything originates from) // Do I need to disable this when compiling for LuaJIT to prevent issues? -void initialize_allocator_function( Universe* U, lua_State* L) +void initialize_allocator_function(Universe* U, lua_State* L) { STACK_CHECK_START_REL(L, 1); // settings - lua_getfield( L, -1, "allocator"); // settings allocator|nil|"protected" - if( !lua_isnil( L, -1)) + lua_getfield(L, -1, "allocator"); // settings allocator|nil|"protected" + if (!lua_isnil(L, -1)) { // store C function pointer in an internal variable - U->provide_allocator = lua_tocfunction( L, -1); // settings allocator + U->provide_allocator = lua_tocfunction(L, -1); // settings allocator if (U->provide_allocator != nullptr) { // make sure the function doesn't have upvalues - char const* upname = lua_getupvalue( L, -1, 1); // settings allocator upval? + char const* upname = lua_getupvalue(L, -1, 1); // settings allocator upval? 
if (upname != nullptr) // should be "" for C functions with upvalues if any { - (void) luaL_error( L, "config.allocator() shouldn't have upvalues"); + (void) luaL_error(L, "config.allocator() shouldn't have upvalues"); } // remove this C function from the config table so that it doesn't cause problems // when we transfer the config table in newly created Lua states - lua_pushnil( L); // settings allocator nil - lua_setfield( L, -3, "allocator"); // settings allocator + lua_pushnil(L); // settings allocator nil + lua_setfield(L, -3, "allocator"); // settings allocator } - else if( lua_type( L, -1) == LUA_TSTRING) // should be "protected" + else if (lua_type(L, -1) == LUA_TSTRING) // should be "protected" { + ASSERT_L(strcmp(lua_tostring(L, -1), "protected") == 0); // set the original allocator to call from inside protection by the mutex - U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); + U->protected_allocator.initFrom(L); + U->protected_allocator.installIn(L); // before a state is created, this function will be called to obtain the allocator U->provide_allocator = luaG_provide_protected_allocator; - - lua_setallocf( L, protected_lua_Alloc, &U->protected_allocator); } } else { // just grab whatever allocator was provided to lua_newstate - U->protected_allocator.definition.allocF = lua_getallocf( L, &U->protected_allocator.definition.allocUD); + U->protected_allocator.initFrom(L); } - lua_pop( L, 1); // settings + lua_pop(L, 1); // settings STACK_CHECK(L, 1); - lua_getfield( L, -1, "internal_allocator"); // settings "libc"|"allocator" + lua_getfield(L, -1, "internal_allocator"); // settings "libc"|"allocator" { - char const* allocator = lua_tostring( L, -1); + char const* allocator = lua_tostring(L, -1); if (strcmp(allocator, "libc") == 0) { - U->internal_allocator.allocF = libc_lua_Alloc; - U->internal_allocator.allocUD = nullptr; + U->internal_allocator = AllocatorDefinition{ libc_lua_Alloc, nullptr }; + } + 
else if (U->provide_allocator == luaG_provide_protected_allocator)
+        {
+            // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case.
+            U->internal_allocator = U->protected_allocator.makeDefinition();
+        }
         else
         {
-            U->internal_allocator = U->protected_allocator.definition;
+            // no protection required, just use whatever we have as-is.
+            U->internal_allocator = U->protected_allocator;
         }
     }
-    lua_pop( L, 1);                                                 // settings
-    STACK_CHECK( L, 1);
-}
-
-void cleanup_allocator_function( Universe* U, lua_State* L)
-{
-    // remove the protected allocator, if any
-    if (U->protected_allocator.definition.allocF != nullptr)
-    {
-        // install the non-protected allocator
-        lua_setallocf( L, U->protected_allocator.definition.allocF, U->protected_allocator.definition.allocUD);
-    }
+    lua_pop(L, 1);                                                 // settings
+    STACK_CHECK(L, 1);
 }
 
 // ################################################################################################
diff --git a/src/tools.h b/src/tools.h
index 95324ee..5e6ce78 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -38,7 +38,6 @@ int luaG_nameof( lua_State* L);
 void populate_func_lookup_table( lua_State* L, int _i, char const* _name);
 
 void initialize_allocator_function( Universe* U, lua_State* L);
-void cleanup_allocator_function( Universe* U, lua_State* L);
 
 // ################################################################################################
 
diff --git a/src/universe.h b/src/universe.h
index a6beb68..3aac20d 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -27,17 +27,91 @@ struct Lane;
 
 // ################################################################################################
 
 // everything we need to provide to lua_newstate()
-struct AllocatorDefinition
+class AllocatorDefinition
 {
-    lua_Alloc allocF{ nullptr };
-    void* allocUD{ nullptr };
+    public:
+
+    lua_Alloc m_allocF{ nullptr };
+    void* m_allocUD{ nullptr };
+
+    static void* operator new(size_t size_, lua_State* L) noexcept { return
lua_newuserdatauv(L, size_, 0); }
+    // always embedded somewhere else or "in-place constructed" as a full userdata
+    // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
+    static void operator delete(void* p_, lua_State* L) { ASSERT_L(!"should never be called") };
+
+    AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept
+    : m_allocF{ allocF_ }
+    , m_allocUD{ allocUD_ }
+    {
+    }
+    AllocatorDefinition() = default;
+    AllocatorDefinition(AllocatorDefinition const& rhs_) = default;
+    AllocatorDefinition(AllocatorDefinition&& rhs_) = default;
+    AllocatorDefinition& operator=(AllocatorDefinition const& rhs_) = default;
+    AllocatorDefinition& operator=(AllocatorDefinition&& rhs_) = default;
+
+    void initFrom(lua_State* L)
+    {
+        m_allocF = lua_getallocf(L, &m_allocUD);
+    }
+
+    void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_)
+    {
+        return m_allocF(m_allocUD, ptr_, osize_, nsize_);
+    }
+
+    void* alloc(size_t nsize_)
+    {
+        return m_allocF(m_allocUD, nullptr, 0, nsize_);
+    }
+
+    void free(void* ptr_, size_t osize_)
+    {
+        std::ignore = m_allocF(m_allocUD, ptr_, osize_, 0);
+    }
 };
 
+// ################################################################################################
+
 // mutex-protected allocator for use with Lua states that share a non-threadsafe allocator
-struct ProtectedAllocator
+class ProtectedAllocator : public AllocatorDefinition
 {
-    AllocatorDefinition definition;
-    std::mutex lock;
+    private:
+
+    std::mutex m_lock;
+
+    static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
+    {
+        ProtectedAllocator* const s{ static_cast<ProtectedAllocator*>(ud_) };
+        std::lock_guard<std::mutex> guard{ s->m_lock };
+        return s->m_allocF(s->m_allocUD, ptr_, osize_, nsize_);
+    }
+
+    public:
+
+    // we are not like our base class: we can't be created inside a full userdata (or we would have to install a metatable and __gc handler to destroy ourselves properly)
+    static void* operator new(size_t
size_, lua_State* L) noexcept = delete; + static void operator delete(void* p_, lua_State* L) = delete; + + AllocatorDefinition makeDefinition() + { + return AllocatorDefinition{ protected_lua_Alloc, this}; + } + + void installIn(lua_State* L) + { + lua_setallocf(L, protected_lua_Alloc, this); + } + + void removeFrom(lua_State* L) + { + // remove the protected allocator, if any + if (m_allocF != nullptr) + { + // install the non-protected allocator + lua_setallocf(L, m_allocF, m_allocUD); + } + } }; // ################################################################################################ diff --git a/tests/protect_allocator.lua b/tests/protect_allocator.lua index 593261e..6eead5d 100644 --- a/tests/protect_allocator.lua +++ b/tests/protect_allocator.lua @@ -47,4 +47,6 @@ end -- wait for completion linda:receive( linda.batched, "key", COUNT) +print "waiting a bit more ..." +linda:receive( 1, "foo") print "SUCCESS" -- cgit v1.2.3-55-g6feb