From 84889233bfec4ad11ee1160fe63acbbbba7275e7 Mon Sep 17 00:00:00 2001
From: Benoit Germain
Date: Thu, 2 May 2024 10:18:37 +0200
Subject: Progressively applying the coding rules

---
 src/cancel.cpp         |   6 +--
 src/keeper.cpp         |  14 +++---
 src/keeper.h           |   2 +-
 src/lanes.cpp          | 134 ++++++++++++++++++++++++-------------------------
 src/lanes_private.h    |  16 +++---
 src/linda.cpp          |  72 +++++++++++++-------------
 src/linda.h            |  18 +++----
 src/macros_and_utils.h |   2 +-
 src/state.cpp          |  34 ++++++-------
 src/state.h            |   4 +-
 src/tools.cpp          |  64 ++++++++++++-----------
 src/universe.cpp       |   2 +-
 src/universe.h         |  60 +++++++++++-----------
 13 files changed, 213 insertions(+), 215 deletions(-)

diff --git a/src/cancel.cpp b/src/cancel.cpp
index ed450f0..dd848a7 100644
--- a/src/cancel.cpp
+++ b/src/cancel.cpp
@@ -55,7 +55,7 @@ THE SOFTWARE.
 {
     Lane* const lane{ kLanePointerRegKey.readLightUserDataValue(L_) };
     // 'lane' is nullptr for the original main state (and no-one can cancel that)
-    return lane ? lane->cancel_request : CancelRequest::None;
+    return lane ? lane->cancelRequest : CancelRequest::None;
 }
 
 // #################################################################################################
@@ -109,7 +109,7 @@ LUAG_FUNC(cancel_test)
 
 [[nodiscard]] static CancelResult thread_cancel_soft(Lane* lane_, lua_Duration duration_, bool wakeLane_)
 {
-    lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop
+    lane_->cancelRequest = CancelRequest::Soft; // it's now signaled to stop
     // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
     if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired
         std::condition_variable* const waiting_on{ lane_->waiting_on };
@@ -125,7 +125,7 @@
 
 [[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wakeLane_)
 {
-    lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop
+    lane_->cancelRequest = CancelRequest::Hard; // it's now signaled to stop
     // lane_->thread.get_stop_source().request_stop();
     if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired
         std::condition_variable* waiting_on = lane_->waiting_on;
diff --git a/src/keeper.cpp b/src/keeper.cpp
index 5350d26..763bcf7 100644
--- a/src/keeper.cpp
+++ b/src/keeper.cpp
@@ -580,7 +580,7 @@ void close_keepers(Universe* U_)
             U_->keepers->keeper_array[i].~Keeper();
         }
         // free the keeper bookkeeping structure
-        U_->internal_allocator.free(U_->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper));
+        U_->internalAllocator.free(U_->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper));
         U_->keepers = nullptr;
     }
 }
@@ -618,7 +618,7 @@ void init_keepers(Universe* U_, lua_State* L_)
     // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states
     {
         size_t const bytes = sizeof(Keepers) + (nb_keepers - 1) * sizeof(Keeper);
-        U_->keepers = static_cast<Keepers*>(U_->internal_allocator.alloc(bytes));
+        U_->keepers = static_cast<Keepers*>(U_->internalAllocator.alloc(bytes));
         if (U_->keepers == nullptr) {
             raise_luaL_error(L_, "init_keepers() failed while creating keeper array; out of memory");
         }
@@ -675,7 +675,7 @@ void init_keepers(Universe* U_, lua_State* L_)
         // attempt to call on_state_create(), if we have one and it is a C function
         // (only support a C function because we can't transfer executable Lua code in keepers)
        // will raise an error in L_ in case of problem
-        call_on_state_create(U_, K, L_, LookupMode::ToKeeper);
+        callOnStateCreate(U_, K, L_, LookupMode::ToKeeper);
 
         // to see VM name in Decoda debugger
         lua_pushfstring(K, "Keeper #%d", i + 1); // L_: settings K: "Keeper #n"
@@ -694,8 +694,8 @@ Keeper* Linda::acquireKeeper() const
     int const nbKeepers{ U->keepers->nb_keepers };
     // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers)
     if (nbKeepers) {
-        Keeper* const K{ &U->keepers->keeper_array[m_keeper_index] };
-        K->m_mutex.lock();
+        Keeper* const K{ &U->keepers->keeper_array[keeperIndex] };
+        K->mutex.lock();
         return K;
     }
     return nullptr;
@@ -706,8 +706,8 @@ void Linda::releaseKeeper(Keeper* K_) const
 {
     if (K_) { // can be nullptr if we tried to acquire during shutdown
-        assert(K_ == &U->keepers->keeper_array[m_keeper_index]);
-        K_->m_mutex.unlock();
+        assert(K_ == &U->keepers->keeper_array[keeperIndex]);
+        K_->mutex.unlock();
     }
 }
diff --git a/src/keeper.h b/src/keeper.h
index 275d134..37642fd 100644
--- a/src/keeper.h
+++ b/src/keeper.h
@@ -24,7 +24,7 @@ using KeeperState = Unique<lua_State*>;
 
 struct Keeper
 {
-    std::mutex m_mutex;
+    std::mutex mutex;
     KeeperState L{ nullptr };
     // int count;
 };
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 38fe2b9..d027cff 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -106,7 +106,7 @@ THE SOFTWARE.
 
 #if HAVE_LANE_TRACKING()
 // The chain is ended by '(Lane*)(-1)', not nullptr:
-// 'tracking_first -> ... -> ... -> (-1)'
+// 'trackingFirst -> ... -> ... -> (-1)'
 #define TRACKING_END ((Lane*) (-1))
 
 /*
 * Add the lane to tracking chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
 static void tracking_add(Lane* lane_)
 {
-    std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->trackingMutex };
     assert(lane_->tracking_next == nullptr);
 
-    lane_->tracking_next = lane_->U->tracking_first;
-    lane_->U->tracking_first = lane_;
+    lane_->tracking_next = lane_->U->trackingFirst;
+    lane_->U->trackingFirst = lane_;
 }
 
 // #################################################################################################
@@ -130,13 +130,13 @@ static void tracking_add(Lane* lane_)
 /*
 * A free-running lane has ended; remove it from tracking chain
 */
 [[nodiscard]] static bool tracking_remove(Lane* lane_)
 {
     bool found{ false };
-    std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->trackingMutex };
 
     // Make sure (within the MUTEX) that we actually are in the chain
     // still (at process exit they will remove us from chain and then
     // cancel/kill).
     // if (lane_->tracking_next != nullptr) {
-    Lane** ref = (Lane**) &lane_->U->tracking_first;
+    Lane** ref = (Lane**) &lane_->U->trackingFirst;
     while (*ref != TRACKING_END) {
         if (*ref == lane_) {
@@ -161,7 +161,7 @@ Lane::Lane(Universe* U_, lua_State* L_)
 , L{ L_ }
 {
 #if HAVE_LANE_TRACKING()
-    if (U->tracking_first) {
+    if (U->trackingFirst) {
         tracking_add(this);
     }
 #endif // HAVE_LANE_TRACKING()
@@ -176,10 +176,10 @@ bool Lane::waitForCompletion(lua_Duration duration_)
         until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
     }
 
-    std::unique_lock<std::mutex> lock{ done_mutex };
-    // std::stop_token token{ thread.get_stop_token() };
-    // return done_signal.wait_until(lock, token, secs_, [this](){ return status >= Lane::Done; });
-    return done_signal.wait_until(lock, until, [this]() { return status >= Lane::Done; });
+    std::unique_lock<std::mutex> lock{ doneMutex };
+    // std::stop_token token{ thread.get_stop_token() };
+    // return doneCondVar.wait_until(lock, token, secs_, [this](){ return status >= Lane::Done; });
+    return doneCondVar.wait_until(lock, until, [this]() { return status >= Lane::Done; });
 }
@@ -189,7 +189,7 @@ void Lane::startThread(int priority_)
 {
     thread = std::jthread([this]() { lane_main(this); });
     if (priority_ != kThreadPrioDefault) {
-        JTHREAD_SET_PRIORITY(thread, priority_, U->m_sudo);
+        JTHREAD_SET_PRIORITY(thread, priority_, U->sudo);
     }
 }
@@ -208,9 +208,9 @@ static void securize_debug_threadname(lua_State* L_, Lane* lane_)
     STACK_GROW(L_, 3);
     lua_getiuservalue(L_, 1, 1);
     lua_newtable(L_);
-    // Lua 5.1 can't do 'lane_->debug_name = lua_pushstring(L_, lane_->debug_name);'
-    lua_pushstring(L_, lane_->debug_name);
-    lane_->debug_name = lua_tostring(L_, -1);
+    // Lua 5.1 can't do 'lane_->debugName = lua_pushstring(L_, lane_->debugName);'
+    lua_pushstring(L_, lane_->debugName);
+    lane_->debugName = lua_tostring(L_, -1);
     lua_rawset(L_, -3);
     lua_pop(L_, 1);
     STACK_CHECK(L_, 0);
@@ -242,7 +242,7 @@ Lane::~Lane()
     // Clean up after a (finished) thread
     //
 #if HAVE_LANE_TRACKING()
-    if (U->tracking_first != nullptr) {
+    if (U->trackingFirst != nullptr) {
         // Lane was cleaned up, no need to handle at process termination
         std::ignore = tracking_remove(this);
     }
@@ -414,7 +414,7 @@ static void push_stack_trace(lua_State* L_, int rc_, int stk_base_)
 
 #define SELFDESTRUCT_END ((Lane*) (-1))
 //
 // The chain is ended by '(Lane*)(-1)', not nullptr:
-// 'selfdestruct_first -> ... -> ... -> (-1)'
+// 'selfdestructFirst -> ... -> ... -> (-1)'
 
 /*
 * Add the lane to selfdestruct chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
 static void selfdestruct_add(Lane* lane_)
 {
-    std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->selfdestructMutex };
     assert(lane_->selfdestruct_next == nullptr);
 
-    lane_->selfdestruct_next = lane_->U->selfdestruct_first;
-    lane_->U->selfdestruct_first = lane_;
+    lane_->selfdestruct_next = lane_->U->selfdestructFirst;
+    lane_->U->selfdestructFirst = lane_;
 }
 
 // #################################################################################################
@@ -435,20 +435,20 @@ static void selfdestruct_add(Lane* lane_)
 [[nodiscard]] static bool selfdestruct_remove(Lane* lane_)
 {
     bool found{ false };
-    std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->selfdestructMutex };
 
     // Make sure (within the MUTEX) that we actually are in the chain
     // still (at process exit they will remove us from chain and then
     // cancel/kill).
     // if (lane_->selfdestruct_next != nullptr) {
-    Lane* volatile* ref = static_cast<Lane* volatile*>(&lane_->U->selfdestruct_first);
+    Lane* volatile* ref = static_cast<Lane* volatile*>(&lane_->U->selfdestructFirst);
     while (*ref != SELFDESTRUCT_END) {
         if (*ref == lane_) {
             *ref = lane_->selfdestruct_next;
             lane_->selfdestruct_next = nullptr;
             // the terminal shutdown should wait until the lane is done with its lua_close()
-            lane_->U->selfdestructing_count.fetch_add(1, std::memory_order_release);
+            lane_->U->selfdestructingCount.fetch_add(1, std::memory_order_release);
             found = true;
             break;
         }
@@ -469,11 +469,11 @@
     [[maybe_unused]] char const* const op_string{ lua_tostring(L_, lua_upvalueindex(2)) };
     CancelOp const op{ which_cancel_op(op_string) };
 
-    if (U->selfdestruct_first != SELFDESTRUCT_END) {
+    if (U->selfdestructFirst != SELFDESTRUCT_END) {
         // Signal _all_ still running threads to exit (including the timer thread)
         {
-            std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-            Lane* lane{ U->selfdestruct_first };
+            std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+            Lane* lane{ U->selfdestructFirst };
             lua_Duration timeout{ 1us };
             while (lane != SELFDESTRUCT_END) {
                 // attempt the requested cancel with a small timeout.
@@ -490,16 +490,16 @@
         {
             std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
-            while (U->selfdestruct_first != SELFDESTRUCT_END) {
+            while (U->selfdestructFirst != SELFDESTRUCT_END) {
                 // give threads time to act on their cancel
                 std::this_thread::yield();
                 // count the number of cancelled thread that didn't have the time to act yet
                 int n{ 0 };
                 {
-                    std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-                    Lane* lane{ U->selfdestruct_first };
+                    std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+                    Lane* lane{ U->selfdestructFirst };
                     while (lane != SELFDESTRUCT_END) {
-                        if (lane->cancel_request != CancelRequest::None)
+                        if (lane->cancelRequest != CancelRequest::None)
                             ++n;
                         lane = lane->selfdestruct_next;
                     }
                 }
 
         // If some lanes are currently cleaning after themselves, wait until they are done.
         // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
-        while (U->selfdestructing_count.load(std::memory_order_acquire) > 0) {
+        while (U->selfdestructingCount.load(std::memory_order_acquire) > 0) {
             std::this_thread::yield();
         }
     }
 
     // If after all this, we still have some free-running lanes, it's an external user error, they should have stopped appropriately
     {
-        std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-        Lane* lane{ U->selfdestruct_first };
+        std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+        Lane* lane{ U->selfdestructFirst };
         if (lane != SELFDESTRUCT_END) {
             // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it)
-            raise_luaL_error(L_, "Zombie thread %s refuses to die!", lane->debug_name);
+            raise_luaL_error(L_, "Zombie thread %s refuses to die!", lane->debugName);
         }
     }
 
     // no need to mutex-protect this as all threads in the universe are gone at that point
-    if (U->timer_deep != nullptr) { // test ins case some early internal error prevented Lanes from creating the deep timer
-        [[maybe_unused]] int const prev_ref_count{ U->timer_deep->refcount.fetch_sub(1, std::memory_order_relaxed) };
+    if (U->timerLinda != nullptr) { // test in case some early internal error prevented Lanes from creating the deep timer
+        [[maybe_unused]] int const prev_ref_count{ U->timerLinda->refcount.fetch_sub(1, std::memory_order_relaxed) };
         LUA_ASSERT(L_, prev_ref_count == 1); // this should be the last reference
-        DeepFactory::DeleteDeepObject(L_, U->timer_deep);
-        U->timer_deep = nullptr;
+        DeepFactory::DeleteDeepObject(L_, U->timerLinda);
+        U->timerLinda = nullptr;
     }
 
     close_keepers(U);
 
     // remove the protected allocator, if any
-    U->protected_allocator.removeFrom(L_);
+    U->protectedAllocator.removeFrom(L_);
 
     U->Universe::~Universe();
@@ -701,9 +701,9 @@ LUAG_FUNC(set_debug_threadname)
     // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global...
     hidden_regkey.setValue(L_, [](lua_State* L_) { lua_pushvalue(L_, -2); });
     STACK_CHECK(L_, 1);
-    lane->debug_name = lua_tostring(L_, -1);
+    lane->debugName = lua_tostring(L_, -1);
     // keep a direct pointer on the string
-    THREAD_SETNAME(lane->debug_name);
+    THREAD_SETNAME(lane->debugName);
     // to see VM name in Decoda debugger Virtual Machine window
     lua_setglobal(L_, "decoda_name"); //
     STACK_CHECK(L_, 0);
@@ -716,7 +716,7 @@ LUAG_FUNC(get_debug_threadname)
 {
     Lane* const lane{ ToLane(L_, 1) };
     luaL_argcheck(L_, lua_gettop(L_) == 1, 2, "too many arguments");
-    lua_pushstring(L_, lane->debug_name);
+    lua_pushstring(L_, lane->debugName);
     return 1;
 }
@@ -731,7 +731,7 @@ LUAG_FUNC(set_thread_priority)
     if (prio < kThreadPrioMin || prio > kThreadPrioMax) {
         raise_luaL_error(L_, "priority out of range: %d..+%d (%d)", kThreadPrioMin, kThreadPrioMax, prio);
     }
-    THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L_)->m_sudo);
+    THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L_)->sudo);
     return 0;
 }
@@ -843,10 +843,10 @@ static void lane_main(Lane* lane_)
         // We're a free-running thread and no-one's there to clean us up.
         lua_close(lane_->L);
         lane_->L = nullptr; // just in case
-        lane_->U->selfdestruct_cs.lock();
-        // done with lua_close(), terminal shutdown sequence may proceed
-        lane_->U->selfdestructing_count.fetch_sub(1, std::memory_order_release);
-        lane_->U->selfdestruct_cs.unlock();
+        lane_->U->selfdestructMutex.lock();
+        // done with lua_close(), terminal shutdown sequence may proceed
+        lane_->U->selfdestructingCount.fetch_sub(1, std::memory_order_release);
+        lane_->U->selfdestructMutex.unlock();
 
         // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea
         lane_->thread.detach();
@@ -860,10 +860,10 @@ static void lane_main(Lane* lane_)
         Lane::Status const st = (rc == LUA_OK) ? Lane::Done : kCancelError.equals(L, 1) ? Lane::Cancelled : Lane::Error;
         {
-            // 'done_mutex' protects the -> Done|Error|Cancelled state change
-            std::lock_guard<std::mutex> lock{ lane_->done_mutex };
+            // 'doneMutex' protects the -> Done|Error|Cancelled state change
+            std::lock_guard<std::mutex> lock{ lane_->doneMutex };
             lane_->status = st;
-            lane_->done_signal.notify_one(); // wake up master (while 'lane_->done_mutex' is on)
+            lane_->doneCondVar.notify_one(); // wake up master (while 'lane_->doneMutex' is on)
         }
     }
 }
@@ -994,9 +994,9 @@ LUAG_FUNC(lane_new)
             lua_settop(m_lane->L, 0);
             kCancelError.pushKey(m_lane->L);
             {
-                std::lock_guard<std::mutex> lock{ m_lane->done_mutex };
+                std::lock_guard<std::mutex> lock{ m_lane->doneMutex };
                 m_lane->status = Lane::Cancelled;
-                m_lane->done_signal.notify_one(); // wake up master (while 'lane->done_mutex' is on)
+                m_lane->doneCondVar.notify_one(); // wake up master (while 'lane->doneMutex' is on)
             }
             // unblock the thread so that it can terminate gracefully
             m_lane->ready.count_down();
@@ -1207,7 +1207,7 @@ LUAG_FUNC(lane_new)
         lua_rawget(L_, -2); // L_: ud uservalue gc_cb|nil
         if (!lua_isnil(L_, -1)) {
             lua_remove(L_, -2); // L_: ud gc_cb|nil
-            lua_pushstring(L_, lane->debug_name); // L_: ud gc_cb name
+            lua_pushstring(L_, lane->debugName); // L_: ud gc_cb name
             have_gc_cb = true;
         } else {
             lua_pop(L_, 2); // L_: ud
@@ -1228,7 +1228,7 @@ LUAG_FUNC(lane_new)
         lua_close(lane->L);
         lane->L = nullptr; // just in case, but s will be freed soon so...
-        lane->debug_name = "";
+        lane->debugName = "";
     }
 
     // Clean up after a (finished) thread
@@ -1307,7 +1307,7 @@ LUAG_FUNC(thread_join)
     int ret{ 0 };
     Universe* const U{ lane->U };
-    // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
+    // debugName is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
     // so store it in the userdata uservalue at a key that can't possibly collide
     securize_debug_threadname(L_, lane);
     switch (lane->status) {
@@ -1508,15 +1508,15 @@ LUAG_FUNC(threads)
     // List _all_ still running threads
     //
-    std::lock_guard<std::mutex> guard{ U->tracking_cs };
-    if (U->tracking_first && U->tracking_first != TRACKING_END) {
-        Lane* lane{ U->tracking_first };
-        int index = 0;
+    std::lock_guard<std::mutex> guard{ U->trackingMutex };
+    if (U->trackingFirst && U->trackingFirst != TRACKING_END) {
+        Lane* lane{ U->trackingFirst };
+        int index{ 0 };
         lua_newtable(L_); // L_: {}
         while (lane != TRACKING_END) {
             // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other
             lua_newtable(L_); // L_: {} {}
-            lua_pushstring(L_, lane->debug_name); // L_: {} {} "name"
+            lua_pushstring(L_, lane->debugName); // L_: {} {} "name"
             lua_setfield(L_, -2, "name"); // L_: {} {}
             lane->pushThreadStatus(L_); // L_: {} {} "status"
             lua_setfield(L_, -2, "status"); // L_: {} {}
@@ -1663,26 +1663,26 @@ LUAG_FUNC(configure)
         lua_pop(L_, 1); // L_: settings
 #if HAVE_LANE_TRACKING()
         lua_getfield(L_, 1, "track_lanes"); // L_: settings track_lanes
-        U->tracking_first = lua_toboolean(L_, -1) ? TRACKING_END : nullptr;
+        U->trackingFirst = lua_toboolean(L_, -1) ? TRACKING_END : nullptr;
         lua_pop(L_, 1); // L_: settings
 #endif // HAVE_LANE_TRACKING()
 
         // Linked chains handling
-        U->selfdestruct_first = SELFDESTRUCT_END;
+        U->selfdestructFirst = SELFDESTRUCT_END;
         initialize_allocator_function(U, L_);
-        initialize_on_state_create(U, L_);
+        initializeOnStateCreate(U, L_);
         init_keepers(U, L_);
         STACK_CHECK(L_, 1);
 
-        // Initialize 'timer_deep'; a common Linda object shared by all states
+        // Initialize 'timerLinda'; a common Linda object shared by all states
         lua_pushcfunction(L_, LG_linda); // L_: settings lanes.linda
         lua_pushliteral(L_, "lanes-timer"); // L_: settings lanes.linda "lanes-timer"
         lua_call(L_, 1, 1); // L_: settings linda
         STACK_CHECK(L_, 2);
 
         // Proxy userdata contents is only a 'DeepPrelude*' pointer
-        U->timer_deep = *lua_tofulluserdata<DeepPrelude*>(L_, -1);
+        U->timerLinda = *lua_tofulluserdata<DeepPrelude*>(L_, -1);
         // increment refcount so that this linda remains alive as long as the universe exists.
-        U->timer_deep->refcount.fetch_add(1, std::memory_order_relaxed);
+        U->timerLinda->refcount.fetch_add(1, std::memory_order_relaxed);
         lua_pop(L_, 1); // L_: settings
     }
     STACK_CHECK(L_, 1);
@@ -1699,7 +1699,7 @@ LUAG_FUNC(configure)
     luaG_registerlibfuncs(L_, lanes_functions);
 #if HAVE_LANE_TRACKING()
     // register core.threads() only if settings say it should be available
-    if (U->tracking_first != nullptr) {
+    if (U->trackingFirst != nullptr) {
         lua_pushcfunction(L_, LG_threads); // L_: settings M LG_threads()
         lua_setfield(L_, -2, "threads"); // L_: settings M
     }
@@ -1708,8 +1708,8 @@ LUAG_FUNC(configure)
 
     {
         char const* errmsg{
-            DeepFactory::PushDeepProxy(DestState{ L_ }, U->timer_deep, 0, LookupMode::LaneBody)
-        }; // L_: settings M timer_deep
+            DeepFactory::PushDeepProxy(DestState{ L_ }, U->timerLinda, 0, LookupMode::LaneBody)
+        }; // L_: settings M timerLinda
         if (errmsg != nullptr) {
             raise_luaL_error(L_, errmsg);
         }
diff --git a/src/lanes_private.h b/src/lanes_private.h
index 1d476cf..01630ba 100644
--- a/src/lanes_private.h
+++ b/src/lanes_private.h
@@ -36,14 +36,14 @@ class Lane
     std::jthread thread;
     // a latch to wait for the lua_State to be ready
     std::latch ready{ 1 };
-    // to wait for stop requests through m_thread's stop_source
-    std::mutex done_mutex;
-    std::condition_variable done_signal; // use condition_variable_any if waiting for a stop_token
+    // to wait for stop requests through thread's stop_source
+    std::mutex doneMutex;
+    std::condition_variable doneCondVar; // use condition_variable_any if waiting for a stop_token
     //
     // M: sub-thread OS thread
     // S: not used
 
-    char const* debug_name{ "" };
+    char const* debugName{ "" };
 
     Universe* const U;
     lua_State* L;
@@ -60,7 +60,7 @@ class Lane
     //
     // When status is Waiting, points on the linda's signal the thread waits on, else nullptr
 
-    CancelRequest volatile cancel_request{ CancelRequest::None };
+    CancelRequest volatile cancelRequest{ CancelRequest::None };
     //
     // M: sets to false, flags true for cancel request
     // S: reads to see if cancel is requested
@@ -77,11 +77,11 @@ class Lane
     //
     // For tracking only
 
-    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
+    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internalAllocator.alloc(size_); }
     // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
-    static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Lane)); }
+    static void operator delete(void* p_, Universe* U_) { U_->internalAllocator.free(p_, sizeof(Lane)); }
     // this one is for us, to make sure memory is freed by the correct allocator
-    static void operator delete(void* p_) { static_cast<Lane*>(p_)->U->internal_allocator.free(p_, sizeof(Lane)); }
+    static void operator delete(void* p_) { static_cast<Lane*>(p_)->U->internalAllocator.free(p_, sizeof(Lane)); }
 
     Lane(Universe* U_, lua_State* L_);
     ~Lane();
diff --git a/src/linda.cpp b/src/linda.cpp
index cda3a63..bbfbd69 100644
--- a/src/linda.cpp
+++ b/src/linda.cpp
@@ -53,7 +53,7 @@ static constexpr uintptr_t kPointerMagicShift{ 3 };
 Linda::Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_)
 : DeepPrelude{ LindaFactory::Instance }
 , U{ U_ }
-, m_keeper_index{ (group_ ? group_ : static_cast<int>(std::bit_cast<uintptr_t>(this) >> kPointerMagicShift)) % U_->keepers->nb_keepers }
+, keeperIndex{ (group_ ? group_ : static_cast<int>(std::bit_cast<uintptr_t>(this) >> kPointerMagicShift)) % U_->keepers->nb_keepers }
 {
     setName(name_, len_);
 }
@@ -62,9 +62,9 @@ Linda::Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_)
 
 Linda::~Linda()
 {
-    if (std::holds_alternative<AllocatedName>(m_name)) {
-        AllocatedName& name = std::get<AllocatedName>(m_name);
-        U->internal_allocator.free(name.name, name.len);
+    if (std::holds_alternative<AllocatedName>(nameVariant)) {
+        AllocatedName& name = std::get<AllocatedName>(nameVariant);
+        U->internalAllocator.free(name.name, name.len);
     }
 }
@@ -78,12 +78,12 @@ void Linda::setName(char const* name_, size_t len_)
     }
     ++len_; // don't forget terminating 0
     if (len_ < kEmbeddedNameLength) {
-        m_name.emplace<std::array<char, kEmbeddedNameLength>>();
-        char* const name{ std::get<std::array<char, kEmbeddedNameLength>>(m_name).data() };
+        nameVariant.emplace<std::array<char, kEmbeddedNameLength>>();
+        char* const name{ std::get<std::array<char, kEmbeddedNameLength>>(nameVariant).data() };
         memcpy(name, name_, len_);
     } else {
-        AllocatedName& name = std::get<AllocatedName>(m_name);
-        name.name = static_cast<char*>(U->internal_allocator.alloc(len_));
+        AllocatedName& name = std::get<AllocatedName>(nameVariant);
+        name.name = static_cast<char*>(U->internalAllocator.alloc(len_));
         name.len = len_;
         memcpy(name.name, name_, len_);
     }
@@ -93,12 +93,12 @@ void Linda::setName(char const* name_, size_t len_)
 
 char const* Linda::getName() const
 {
-    if (std::holds_alternative<AllocatedName>(m_name)) {
-        AllocatedName const& name = std::get<AllocatedName>(m_name);
+    if (std::holds_alternative<AllocatedName>(nameVariant)) {
+        AllocatedName const& name = std::get<AllocatedName>(nameVariant);
         return name.name;
     }
-    if (std::holds_alternative<std::array<char, kEmbeddedNameLength>>(m_name)) {
-        char const* const name{ std::get<std::array<char, kEmbeddedNameLength>>(m_name).data() };
+    if (std::holds_alternative<std::array<char, kEmbeddedNameLength>>(nameVariant)) {
+        char const* const name{ std::get<std::array<char, kEmbeddedNameLength>>(nameVariant).data() };
         return name;
     }
     return nullptr;
 }
@@ -241,9 +241,9 @@ LUAG_FUNC(linda_send)
         STACK_CHECK_START_REL(KL, 0);
         for (bool try_again{ true };;) {
             if (lane != nullptr) {
-                cancel = lane->cancel_request;
+                cancel = lane->cancelRequest;
             }
-            cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel;
+            cancel = (cancel != CancelRequest::None) ? cancel : linda->cancelRequest;
             // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
             if (!try_again || cancel != CancelRequest::None) {
                 pushed.emplace(0);
                 break;
             }
@@ -262,7 +262,7 @@ LUAG_FUNC(linda_send)
 
             if (ret) {
                 // Wake up ALL waiting threads
-                linda->m_write_happened.notify_all();
+                linda->writeHappened.notify_all();
                 break;
             }
@@ -280,11 +280,11 @@ LUAG_FUNC(linda_send)
                     LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case
                     lane->status = Lane::Waiting;
                     LUA_ASSERT(L_, lane->waiting_on == nullptr);
-                    lane->waiting_on = &linda->m_read_happened;
+                    lane->waiting_on = &linda->readHappened;
                 }
                 // could not send because no room: wait until some data was read before trying again, or until timeout is reached
-                std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
-                std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) };
+                std::unique_lock<std::mutex> keeper_lock{ K->mutex, std::adopt_lock };
+                std::cv_status const status{ linda->readHappened.wait_until(keeper_lock, until) };
                 keeper_lock.release(); // we don't want to release the lock!
                 try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
                 if (lane != nullptr) {
@@ -390,9 +390,9 @@ LUAG_FUNC(linda_receive)
         STACK_CHECK_START_REL(KL, 0);
         for (bool try_again{ true };;) {
             if (lane != nullptr) {
-                cancel = lane->cancel_request;
+                cancel = lane->cancelRequest;
             }
-            cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel;
+            cancel = (cancel != CancelRequest::None) ? cancel : linda->cancelRequest;
             // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
             if (!try_again || cancel != CancelRequest::None) {
                 pushed.emplace(0);
                 break;
             }
@@ -410,7 +410,7 @@ LUAG_FUNC(linda_receive)
                 keeper_toggle_nil_sentinels(L_, lua_gettop(L_) - pushed.value(), LookupMode::FromKeeper);
                 // To be done from within the 'K' locking area
                 //
-                linda->m_read_happened.notify_all();
+                linda->readHappened.notify_all();
                 break;
             }
@@ -427,11 +427,11 @@ LUAG_FUNC(linda_receive)
                     LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case
                     lane->status = Lane::Waiting;
                     LUA_ASSERT(L_, lane->waiting_on == nullptr);
-                    lane->waiting_on = &linda->m_write_happened;
+                    lane->waiting_on = &linda->writeHappened;
                 }
                 // not enough data to read: wakeup when data was sent, or when timeout is reached
-                std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
-                std::cv_status const status{ linda->m_write_happened.wait_until(keeper_lock, until) };
+                std::unique_lock<std::mutex> keeper_lock{ K->mutex, std::adopt_lock };
+                std::cv_status const status{ linda->writeHappened.wait_until(keeper_lock, until) };
                 keeper_lock.release(); // we don't want to release the lock!
                 try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
                 if (lane != nullptr) {
@@ -483,7 +483,7 @@ LUAG_FUNC(linda_set)
     Keeper* const K{ linda->whichKeeper() };
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         if (has_value) {
             // convert nils to some special non-nil sentinel in sent values
             keeper_toggle_nil_sentinels(L_, 3, LookupMode::ToKeeper);
         }
@@ -494,12 +494,12 @@ LUAG_FUNC(linda_set)
             if (has_value) {
                 // we put some data in the slot, tell readers that they should wake
-                linda->m_write_happened.notify_all(); // To be done from within the 'K' locking area
+                linda->writeHappened.notify_all(); // To be done from within the 'K' locking area
             }
             if (pushed.value() == 1) {
                 // the key was full, but it is no longer the case, tell writers they should wake
                 LUA_ASSERT(L_, lua_type(L_, -1) == LUA_TBOOLEAN && lua_toboolean(L_, -1) == 1);
-                linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
+                linda->readHappened.notify_all(); // To be done from within the 'K' locking area
             }
         }
     } else { // linda is cancelled
@@ -553,7 +553,7 @@ LUAG_FUNC(linda_get)
     check_key_types(L_, 2, 2);
 
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         Keeper* const K{ linda->whichKeeper() };
         pushed = keeper_call(linda->U, K->L, KEEPER_API(get), L_, linda, 2);
         if (pushed.value_or(0) > 0) {
@@ -590,13 +590,13 @@ LUAG_FUNC(linda_limit)
     check_key_types(L_, 2, 2);
 
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         Keeper* const K{ linda->whichKeeper() };
         pushed = keeper_call(linda->U, K->L, KEEPER_API(limit), L_, linda, 2);
         LUA_ASSERT(L_, pushed.has_value() && (pushed.value() == 0 || pushed.value() == 1)); // no error, optional boolean value saying if we should wake blocked writer threads
         if (pushed.value() == 1) {
             LUA_ASSERT(L_, lua_type(L_, -1) == LUA_TBOOLEAN && lua_toboolean(L_, -1) == 1);
-            linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
+            linda->readHappened.notify_all(); // To be done from within the 'K' locking area
         }
     } else { // linda is cancelled
         // do nothing and return lanes.cancel_error
@@ -623,16 +623,16 @@ LUAG_FUNC(linda_cancel)
     // make sure we got 3 arguments: the linda, a key and a limit
     luaL_argcheck(L_, lua_gettop(L_) <= 2, 2, "wrong number of arguments");
 
-    linda->simulate_cancel = CancelRequest::Soft;
+    linda->cancelRequest = CancelRequest::Soft;
     if (strcmp(who, "both") == 0) { // tell everyone writers to wake up
-        linda->m_write_happened.notify_all();
-        linda->m_read_happened.notify_all();
+        linda->writeHappened.notify_all();
+        linda->readHappened.notify_all();
     } else if (strcmp(who, "none") == 0) { // reset flag
-        linda->simulate_cancel = CancelRequest::None;
+        linda->cancelRequest = CancelRequest::None;
     } else if (strcmp(who, "read") == 0) { // tell blocked readers to wake up
-        linda->m_write_happened.notify_all();
+        linda->writeHappened.notify_all();
     } else if (strcmp(who, "write") == 0) { // tell blocked writers to wake up
-        linda->m_read_happened.notify_all();
+        linda->readHappened.notify_all();
     } else {
         raise_luaL_error(L_, "unknown wake hint '%s'", who);
     }
diff --git a/src/linda.h b/src/linda.h
index 7a21571..56941a1 100644
--- a/src/linda.h
+++ b/src/linda.h
@@ -32,23 +32,23 @@ class Linda
         char* name{ nullptr };
     };
     // depending on the name length, it is either embedded inside the Linda, or allocated separately
-    std::variant<std::array<char, kEmbeddedNameLength>, AllocatedName> m_name;
+    std::variant<std::array<char, kEmbeddedNameLength>, AllocatedName> nameVariant;
 
     public:
-    std::condition_variable m_read_happened;
-    std::condition_variable m_write_happened;
+    std::condition_variable readHappened;
+    std::condition_variable writeHappened;
     Universe* const U{ nullptr }; // the universe this linda belongs to
-    int const m_keeper_index{ -1 }; // the keeper associated to this linda
-    CancelRequest simulate_cancel{ CancelRequest::None };
+    int const keeperIndex{ -1 }; // the keeper associated to this linda
+    CancelRequest cancelRequest{ CancelRequest::None };
 
     public:
     // a fifo full userdata has one uservalue, the table that holds the actual fifo contents
-    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
+    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internalAllocator.alloc(size_); }
     // always embedded somewhere else or "in-place constructed" as a full userdata
     // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
-    static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Linda)); }
+    static void operator delete(void* p_, Universe* U_) { U_->internalAllocator.free(p_, sizeof(Linda)); }
     // this one is for us, to make sure memory is freed by the correct allocator
-    static void operator delete(void* p_) { static_cast<Linda*>(p_)->U->internal_allocator.free(p_, sizeof(Linda)); }
+    static void operator delete(void* p_) { static_cast<Linda*>(p_)->U->internalAllocator.free(p_, sizeof(Linda)); }
 
     ~Linda();
     Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_);
@@ -66,7 +66,7 @@ class Linda
 
     public:
     [[nodiscard]] char const* getName() const;
-    [[nodiscard]] Keeper* whichKeeper() const { return U->keepers->nb_keepers ? &U->keepers->keeper_array[m_keeper_index] : nullptr; }
+    [[nodiscard]] Keeper* whichKeeper() const { return U->keepers->nb_keepers ? &U->keepers->keeper_array[keeperIndex] : nullptr; }
     [[nodiscard]] Keeper* acquireKeeper() const;
     void releaseKeeper(Keeper* keeper_) const;
 };
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index 58567ac..a1f6cba 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -64,7 +64,7 @@ template
 #define USE_DEBUG_SPEW() 0
 #if USE_DEBUG_SPEW()
 #define INDENT_BEGIN "%.*s "
-#define INDENT_END(U_) , (U_ ? U_->debugspew_indent_depth.load(std::memory_order_relaxed) : 0), DebugSpewIndentScope::debugspew_indent
+#define INDENT_END(U_) , (U_ ? U_->debugspewIndentDepth.load(std::memory_order_relaxed) : 0), DebugSpewIndentScope::debugspew_indent
 #define DEBUGSPEW_CODE(_code) _code
 #define DEBUGSPEW_OR_NOT(a_, b_) a_
 #define DEBUGSPEW_PARAM_COMMA(param_) param_,
diff --git a/src/state.cpp b/src/state.cpp
index ebb24dd..2893907 100644
--- a/src/state.cpp
+++ b/src/state.cpp
@@ -64,10 +64,10 @@ THE SOFTWARE.
 
     // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would
     // leave us locked, blocking any future 'require' calls from other lanes.
-    U->require_cs.lock();
+    U->requireMutex.lock();
     // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET
     rc = lua_pcall(L_, args, LUA_MULTRET, 0 /*errfunc*/); // L_: err|result(s)
-    U->require_cs.unlock();
+    U->requireMutex.unlock();
 
     // the required module (or an error message) is left on the stack as returned value by original require function
@@ -205,14 +205,14 @@ static void copy_one_time_settings(Universe* U_, SourceState L1_, DestState L2_)
 
 // #################################################################################################
 
-void initialize_on_state_create(Universe* U_, lua_State* L_)
+void initializeOnStateCreate(Universe* U_, lua_State* L_)
 {
     STACK_CHECK_START_REL(L_, 1); // L_: settings
     lua_getfield(L_, -1, "on_state_create"); // L_: settings on_state_create|nil
     if (!lua_isnil(L_, -1)) {
         // store C function pointer in an internal variable
-        U_->on_state_create_func = lua_tocfunction(L_, -1); // L_: settings on_state_create
-        if (U_->on_state_create_func != nullptr) {
+        U_->onStateCreateFunc = lua_tocfunction(L_, -1); // L_: settings on_state_create
+        if (U_->onStateCreateFunc != nullptr) {
             // make sure the function doesn't have upvalues
             char const* upname = lua_getupvalue(L_, -1, 1); // L_: settings on_state_create upval?
             if (upname != nullptr) { // should be "" for C functions with upvalues if any
@@ -224,7 +224,7 @@ void initialize_on_state_create(Universe* U_, lua_State* L_)
             lua_setfield(L_, -3, "on_state_create"); // L_: settings on_state_create
         } else {
             // optim: store marker saying we have such a function in the config table
-            U_->on_state_create_func = (lua_CFunction) initialize_on_state_create;
+            U_->onStateCreateFunc = reinterpret_cast<lua_CFunction>(initializeOnStateCreate);
         }
     }
     lua_pop(L_, 1); // L_: settings
@@ -240,17 +240,17 @@ lua_State* create_state(Universe* U_, lua_State* from_)
     // for some reason, LuaJIT 64 bits does not support creating a state with lua_newstate...
     L = luaL_newstate();
 #else // LUAJIT_FLAVOR() == 64
-    if (U_->provide_allocator != nullptr) { // we have a function we can call to obtain an allocator
-        lua_pushcclosure(from_, U_->provide_allocator, 0);
+    if (U_->provideAllocator != nullptr) { // we have a function we can call to obtain an allocator
+        lua_pushcclosure(from_, U_->provideAllocator, 0);
         lua_call(from_, 0, 1);
         {
             AllocatorDefinition* const def{ lua_tofulluserdata<AllocatorDefinition>(from_, -1) };
-            L = lua_newstate(def->m_allocF, def->m_allocUD);
+            L = lua_newstate(def->allocF, def->allocUD);
         }
         lua_pop(from_, 1);
     } else {
         // reuse the allocator provided when the master state was created
-        L = lua_newstate(U_->protected_allocator.m_allocF, U_->protected_allocator.m_allocUD);
+        L = lua_newstate(U_->protectedAllocator.allocF, U_->protectedAllocator.allocUD);
     }
 #endif // LUAJIT_FLAVOR() == 64
@@ -262,14 +262,14 @@ lua_State* create_state(Universe* U_, lua_State* from_)
 
 // #################################################################################################
 
-void call_on_state_create(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_)
+void callOnStateCreate(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_)
 {
-    if (U_->on_state_create_func != nullptr) {
+    if (U_->onStateCreateFunc != nullptr) {
         STACK_CHECK_START_REL(L_, 0);
         DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END(U_)));
-        if (U_->on_state_create_func != (lua_CFunction) initialize_on_state_create) {
+        if (U_->onStateCreateFunc != reinterpret_cast<lua_CFunction>(initializeOnStateCreate)) {
             // C function: recreate a closure in the new state, bypassing the lookup scheme
-            lua_pushcfunction(L_, U_->on_state_create_func); // on_state_create()
+            lua_pushcfunction(L_, U_->onStateCreateFunc); // on_state_create()
         } else { // Lua function located in the config table, copied when we opened "lanes.core"
             if (mode_ != LookupMode::LaneBody) {
                 // if attempting to call in a keeper state, do nothing because the function doesn't exist there
@@ -323,7 +323,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
     STACK_CHECK(L, 0);
 
     // neither libs (not even 'base') nor special init func: we are done
-    if (libs_ == nullptr && U_->on_state_create_func == nullptr) {
+    if (libs_ == nullptr && U_->onStateCreateFunc == nullptr) {
         DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_newstate(nullptr)\n" INDENT_END(U_)));
         return L;
     }
@@ -384,7 +384,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
 
     // call this after the base libraries are loaded and GC is restarted
     // will raise an error in from_ in case of problem
-    call_on_state_create(U_, L, from_, LookupMode::LaneBody);
+    callOnStateCreate(U_, L, from_, LookupMode::LaneBody);
     STACK_CHECK(L, 0);
 
     // after all this, register everything we find in our name<->function database
@@ -398,7 +398,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
         lua_pushnil(L); // L: {} nil
         while (lua_next(L, -2)) { // L: {} k v
             lua_getglobal(L, "print"); // L: {} k v print
-            int const indent{ U_->debugspew_indent_depth.load(std::memory_order_relaxed) };
+            int const indent{ U_->debugspewIndentDepth.load(std::memory_order_relaxed) };
             lua_pushlstring(L, DebugSpewIndentScope::debugspew_indent, indent); // L: {} k v print " "
             lua_pushvalue(L, -4); // L: {} k v print " " k
             lua_pushvalue(L, -4); // L: {} k v print " " k v
diff --git a/src/state.h b/src/state.h
index 197e052..1b25736 100644
--- a/src/state.h
+++ b/src/state.h
@@ -15,5 +15,5 @@ void serialize_require(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_);
 
 // #################################################################################################
 
-void initialize_on_state_create(Universe* U_, lua_State* L_);
-void call_on_state_create(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_);
+void initializeOnStateCreate(Universe* U_, lua_State* L_);
+void callOnStateCreate(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_);
diff --git a/src/tools.cpp b/src/tools.cpp
index c4ce24f..0495561 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -101,7 +101,7 @@ extern "C" [[nodiscard]] static void* libc_lua_Alloc([[maybe_unused]] void* ud,
 {
     Universe* const U{ universe_get(L_) };
     // push a new full userdata on the stack, giving access to the universe's protected allocator
-    [[maybe_unused]] AllocatorDefinition* const def{ new (L_) AllocatorDefinition{ U->protected_allocator.makeDefinition() } };
+    [[maybe_unused]] AllocatorDefinition* const def{ new (L_) AllocatorDefinition{ U->protectedAllocator.makeDefinition() } };
     return 1;
 }
@@ -115,8 +115,8 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
     lua_getfield(L_, -1, "allocator"); // L_: settings allocator|nil|"protected"
     if (!lua_isnil(L_, -1)) {
         // store C function pointer in an internal variable
-        U_->provide_allocator = lua_tocfunction(L_, -1); // L_: settings allocator
-        if (U_->provide_allocator != nullptr) {
+        U_->provideAllocator = lua_tocfunction(L_, -1); // L_: settings allocator
+        if (U_->provideAllocator != nullptr) {
             // make sure the function doesn't have upvalues
             char const* upname = lua_getupvalue(L_, -1, 1); // L_: settings allocator upval?
             if (upname != nullptr) { // should be "" for C functions with upvalues if any
@@ -129,14 +129,14 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
         } else if (lua_type(L_, -1) == LUA_TSTRING) { // should be "protected"
             LUA_ASSERT(L_, strcmp(lua_tostring(L_, -1), "protected") == 0);
             // set the original allocator to call from inside protection by the mutex
-            U_->protected_allocator.initFrom(L_);
-            U_->protected_allocator.installIn(L_);
+            U_->protectedAllocator.initFrom(L_);
+            U_->protectedAllocator.installIn(L_);
             // before a state is created, this function will be called to obtain the allocator
-            U_->provide_allocator = luaG_provide_protected_allocator;
+            U_->provideAllocator = luaG_provide_protected_allocator;
         }
     } else {
         // just grab whatever allocator was provided to lua_newstate
-        U_->protected_allocator.initFrom(L_);
+        U_->protectedAllocator.initFrom(L_);
     }
     lua_pop(L_, 1); // L_: settings
     STACK_CHECK(L_, 1);
@@ -145,13 +145,13 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
     {
         char const* allocator = lua_tostring(L_, -1);
         if (strcmp(allocator, "libc") == 0) {
-            U_->internal_allocator = AllocatorDefinition{ libc_lua_Alloc, nullptr };
-        } else if (U_->provide_allocator == luaG_provide_protected_allocator) {
+            U_->internalAllocator = AllocatorDefinition{ libc_lua_Alloc, nullptr };
+        } else if (U_->provideAllocator == luaG_provide_protected_allocator) {
             // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case.
-            U_->internal_allocator = U_->protected_allocator.makeDefinition();
+            U_->internalAllocator = U_->protectedAllocator.makeDefinition();
         } else {
             // no protection required, just use whatever we have as-is.
-            U_->internal_allocator = U_->protected_allocator;
+            U_->internalAllocator = U_->protectedAllocator;
         }
     }
     lua_pop(L_, 1); // L_: settings
@@ -314,14 +314,13 @@
 
 // #################################################################################################
 
-static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_, int ctxBase_, int i_, int depth_)
+static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_, int dbIdx_, int i_, int depth_)
 {
-    // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot i_
-    int const fqn = ctxBase_ + 1;
-    // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops
-    int const cache = ctxBase_ + 2;
-    // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search)
-    int const breadth_first_cache = lua_gettop(L_) + 1;
+    // slot dbIdx_ contains the lookup database table
+    // slot dbIdx_ + 1 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot i_
+    int const fqn{ dbIdx_ + 1 };
+    // slot dbIdx_ + 2 contains a cache that stores all already visited tables to avoid infinite recursion loops
+    int const cache{ dbIdx_ + 2 };
 
     DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END(U_)));
     DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ });
@@ -352,9 +351,9 @@
     lua_rawset(L_, cache); // L_: ... {i_}
     STACK_CHECK(L_, 0);
 
-    // this table is at breadth_first_cache index
-    lua_newtable(L_); // L_: ... {i_} {bfc}
-    LUA_ASSERT(L_, lua_gettop(L_) == breadth_first_cache);
+    // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search)
+    lua_newtable(L_); // L_: ... {i_} {bfc}
+    int const breadthFirstCache{ lua_gettop(L_) };
     // iterate over all entries in the processed table
     lua_pushnil(L_); // L_: ... {i_} {bfc} nil
     while (lua_next(L_, i_) != 0) { // L_: ... {i_} {bfc} k v
@@ -373,13 +372,13 @@
             // store the table in the breadth-first cache
             lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k
             lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k {}
-            lua_rawset(L_, breadth_first_cache); // L_: ... {i_} {bfc} k {}
+            lua_rawset(L_, breadthFirstCache); // L_: ... {i_} {bfc} k {}
             // generate a name, and if we already had one name, keep whichever is the shorter
-            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, depth_); // L_: ... {i_} {bfc} k
+            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, depth_); // L_: ... {i_} {bfc} k
         } else if (lua_isfunction(L_, -1) && (luaG_getfuncsubtype(L_, -1) != FuncSubType::Bytecode)) {
             // generate a name, and if we already had one name, keep whichever is the shorter
             // this pops the function from the stack
-            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, depth_); // L_: ... {i_} {bfc} k
+            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, depth_); // L_: ... {i_} {bfc} k
         } else {
             lua_pop(L_, 1); // L_: ... {i_} {bfc} k
         }
@@ -388,7 +387,7 @@
     // now process the tables we encountered at that depth
     ++depth_;
     lua_pushnil(L_); // L_: ... {i_} {bfc} nil
-    while (lua_next(L_, breadth_first_cache) != 0) { // L_: ... {i_} {bfc} k {}
+    while (lua_next(L_, breadthFirstCache) != 0) { // L_: ... {i_} {bfc} k {}
         DEBUGSPEW_CODE(char const* key = (lua_type(L_, -2) == LUA_TSTRING) ? lua_tostring(L_, -2) : "not a string");
         DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "table '%s'\n" INDENT_END(U_), key));
         DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ });
@@ -408,7 +407,7 @@
         // push table name in fqn stack (note that concatenation will crash if name is a not string!)
         lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k
         lua_rawseti(L_, fqn, depth_); // L_: ... {i_} {bfc} k {}
-        populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, lua_gettop(L_), depth_);
+        populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, lua_gettop(L_), depth_);
         lua_pop(L_, 1); // L_: ... {i_} {bfc} k
         STACK_CHECK(L_, 2);
     }
@@ -427,15 +426,14 @@
 // create a "fully.qualified.name" <-> function equivalence database
 void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
 {
-    int const ctx_base = lua_gettop(L_) + 1;
     int const in_base = lua_absindex(L_, i_);
-    int start_depth = 0;
     DEBUGSPEW_CODE(Universe* U = universe_get(L_));
     DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END(U), L_, name_ ? name_ : "nullptr"));
     DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U });
     STACK_GROW(L_, 3);
     STACK_CHECK_START_REL(L_, 0);
     kLookupRegKey.pushValue(L_); // L_: {}
+    int const dbIdx{ lua_gettop(L_) };
     STACK_CHECK(L_, 1);
     LUA_ASSERT(L_, lua_istable(L_, -1));
     if (lua_type(L_, in_base) == LUA_TFUNCTION) { // for example when a module is a simple function
@@ -449,15 +447,15 @@ void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
         lua_pop(L_, 1); // L_:
     } else if (lua_type(L_, in_base) == LUA_TTABLE) {
         lua_newtable(L_); // L_: {} {fqn}
+        int startDepth{ 0 };
         if (name_) {
             STACK_CHECK(L_, 2);
             lua_pushstring(L_, name_); // L_: {} {fqn} "name"
             // generate a name, and if we already had one name, keep whichever is the shorter
             lua_pushvalue(L_, in_base); // L_: {} {fqn} "name" t
-            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U) L_, ctx_base, start_depth); // L_: {} {fqn} "name"
+            update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U) L_, dbIdx, startDepth); // L_: {} {fqn} "name"
             // don't forget to store the name at the bottom of the fqn stack
-            ++start_depth;
-            lua_rawseti(L_, -2, start_depth); // L_: {} {fqn}
+            lua_rawseti(L_, -2, ++startDepth); // L_: {} {fqn}
             STACK_CHECK(L_, 2);
         }
         // retrieve the cache, create it if we haven't done it yet
@@ -469,8 +467,8 @@ void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
             STACK_CHECK(L_, 3);
         }
         // process everything we find in that table, filling in lookup data for all functions and tables we see there
-        populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U) L_, ctx_base, in_base, start_depth);
-        lua_pop(L_, 3);
+        populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U) L_, dbIdx, in_base, startDepth);
+        lua_pop(L_, 3); // L_:
     } else {
         lua_pop(L_, 1); // L_:
         raise_luaL_error(L_, "unsupported module type %s", lua_typename(L_, lua_type(L_, in_base)));
     }
@@ -502,7 +500,7 @@ static constexpr RegistryUniqueKey kMtIdRegKey{ 0xA8895DCF4EC3FE3Cull };
     STACK_CHECK(L_, 1);
 
     if (id == 0) {
-        id = U_->next_mt_id.fetch_add(1, std::memory_order_relaxed);
+        id = U_->nextMetatableId.fetch_add(1, std::memory_order_relaxed);
 
         // Create two-way references: id_uint <-> table
         lua_pushvalue(L_, idx_); // L_: ... _R[kMtIdRegKey] {mt}
diff --git a/src/universe.cpp b/src/universe.cpp
index 4dce427..bf64560 100644
--- a/src/universe.cpp
+++ b/src/universe.cpp
@@ -60,7 +60,7 @@ Universe::Universe()
     // the launched threads (even -2).
     //
 #ifdef LINUX_SCHED_RR
-    if (m_sudo) {
+    if (sudo) {
         struct sched_param sp;
         sp.sched_priority = _PRIO_0;
         PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp));
diff --git a/src/universe.h b/src/universe.h
index c6c9c03..b2107af 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -30,8 +30,8 @@ class Lane;
 class AllocatorDefinition
 {
     public:
-    lua_Alloc m_allocF{ nullptr };
-    void* m_allocUD{ nullptr };
+    lua_Alloc allocF{ nullptr };
+    void* allocUD{ nullptr };
 
     [[nodiscard]] static void* operator new(size_t size_) noexcept = delete; // can't create one outside of a Lua state
     [[nodiscard]] static void* operator new(size_t size_, lua_State* L_) noexcept { return lua_newuserdatauv(L_, size_, 0); }
@@ -40,8 +40,8 @@ class AllocatorDefinition
     static void operator delete([[maybe_unused]] void* p_, lua_State* L_) { LUA_ASSERT(L_, !"should never be called"); }
 
     AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept
-    : m_allocF{ allocF_ }
-    , m_allocUD{ allocUD_ }
+    : allocF{ allocF_ }
+    , allocUD{ allocUD_ }
     {
     }
     AllocatorDefinition() = default;
@@ -52,22 +52,22 @@ class AllocatorDefinition
 
     void initFrom(lua_State* L_)
     {
-        m_allocF = lua_getallocf(L_, &m_allocUD);
+        allocF = lua_getallocf(L_, &allocUD);
     }
 
     void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_)
     {
-        m_allocF(m_allocUD, ptr_, osize_, nsize_);
+        return allocF(allocUD, ptr_, osize_, nsize_);
     }
 
     void* alloc(size_t nsize_)
     {
-        return m_allocF(m_allocUD, nullptr, 0, nsize_);
+        return allocF(allocUD, nullptr, 0, nsize_);
     }
 
     void free(void* ptr_, size_t osize_)
     {
-        std::ignore = m_allocF(m_allocUD, ptr_, osize_, 0);
+        std::ignore = allocF(allocUD, ptr_, osize_, 0);
     }
 };
@@ -78,13 +78,13 @@ class ProtectedAllocator
 : public AllocatorDefinition
 {
     private:
-    std::mutex m_lock;
+    std::mutex mutex;
 
     [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
     {
         ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
-        std::lock_guard<std::mutex> guard{ allocator->m_lock };
-        return allocator->m_allocF(allocator->m_allocUD, ptr_, osize_, nsize_);
+        std::lock_guard<std::mutex> guard{ allocator->mutex };
+        return allocator->allocF(allocator->allocUD, ptr_, osize_, nsize_);
     }
 
     public:
@@ -105,9 +105,9 @@ class ProtectedAllocator
     void removeFrom(lua_State* L_)
     {
         // remove the protected allocator, if any
-        if (m_allocF != nullptr) {
+        if (allocF != nullptr) {
             // install the non-protected allocator
-            lua_setallocf(L_, m_allocF, m_allocUD);
+            lua_setallocf(L_, allocF, allocUD);
         }
     }
 };
@@ -121,9 +121,9 @@ class Universe
     public:
 #ifdef PLATFORM_LINUX
     // Linux needs to check, whether it's been run as root
-    bool const m_sudo{ geteuid() == 0 };
+    bool const sudo{ geteuid() == 0 };
 #else
-    bool const m_sudo{ false };
+    bool const sudo{ false };
 #endif // PLATFORM_LINUX
 
     // for verbose errors
@@ -132,44 +132,44 @@ class Universe
     bool demoteFullUserdata{ false };
 
     // before a state is created, this function will be called to obtain the allocator
-    lua_CFunction provide_allocator{ nullptr };
+    lua_CFunction provideAllocator{ nullptr };
 
     // after a state is created, this function will be called right after the bases libraries are loaded
-    lua_CFunction on_state_create_func{ nullptr };
+    lua_CFunction onStateCreateFunc{ nullptr };
 
     // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex
     // contains a mutex and the original allocator definition
-    ProtectedAllocator protected_allocator;
+    ProtectedAllocator protectedAllocator;
 
-    AllocatorDefinition internal_allocator;
+    AllocatorDefinition internalAllocator;
 
     Keepers* keepers{ nullptr };
 
     // Initialized by 'init_once_LOCKED()': the deep userdata Linda object
     // used for timers (each lane will get a proxy to this)
-    DeepPrelude* timer_deep{ nullptr };
+    DeepPrelude* timerLinda{ nullptr };
 
 #if HAVE_LANE_TRACKING()
-    std::mutex tracking_cs;
-    Lane* volatile tracking_first{ nullptr }; // will change to TRACKING_END if we want to activate tracking
+    std::mutex trackingMutex;
+    Lane* volatile trackingFirst{ nullptr }; // will change to TRACKING_END if we want to activate tracking
 #endif // HAVE_LANE_TRACKING()
 
-    std::mutex selfdestruct_cs;
+    std::mutex selfdestructMutex;
 
     // require() serialization
-    std::recursive_mutex require_cs;
+    std::recursive_mutex requireMutex;
 
     // metatable unique identifiers
-    std::atomic<int> next_mt_id{ 1 };
+    std::atomic<int> nextMetatableId{ 1 };
 
 #if USE_DEBUG_SPEW()
-    std::atomic<int> debugspew_indent_depth{ 0 };
+    std::atomic<int> debugspewIndentDepth{ 0 };
 #endif // USE_DEBUG_SPEW()
 
-    Lane* volatile selfdestruct_first{ nullptr };
+    Lane* volatile selfdestructFirst{ nullptr };
 
     // After a lane has removed itself from the chain, it still performs some processing.
     // The terminal desinit sequence should wait for all such processing to terminate before force-killing threads
-    std::atomic<int> selfdestructing_count{ 0 };
+    std::atomic<int> selfdestructingCount{ 0 };
 
     Universe();
     ~Universe() = default;
@@ -201,13 +201,13 @@ class DebugSpewIndentScope
     : U{ U_ }
     {
         if (U)
-            U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed);
+            U->debugspewIndentDepth.fetch_add(1, std::memory_order_relaxed);
    }
 
     ~DebugSpewIndentScope()
     {
         if (U)
-            U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed);
+            U->debugspewIndentDepth.fetch_sub(1, std::memory_order_relaxed);
     }
 };
 #endif // USE_DEBUG_SPEW()
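
For reference, here is a self-contained C++ sketch of the mutex-protected allocator pattern that this commit renames (AllocatorDefinition / ProtectedAllocator / protected_lua_Alloc). It is not part of the patch: the lua_Alloc calling convention (nsize == 0 frees, anything else behaves like realloc) is from the Lua reference manual, and libc_alloc plus the main() driver are illustrative stand-ins.

#include <cstddef>
#include <cstdlib>
#include <mutex>

// lua_Alloc-compatible signature: nsize_ == 0 must free, otherwise realloc-like behavior
using lua_Alloc = void* (*)(void* ud_, void* ptr_, size_t osize_, size_t nsize_);

// minimal stand-in for AllocatorDefinition: an allocation function plus its opaque userdata
struct AllocatorDefinition
{
    lua_Alloc allocF{ nullptr };
    void* allocUD{ nullptr };
};

// same shape as the patch's ProtectedAllocator: serialize every allocation through a
// mutex so that a non-thread-safe allocator can safely back several lua_States at once
struct ProtectedAllocator : AllocatorDefinition
{
    std::mutex mutex;

    static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
    {
        ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
        std::lock_guard<std::mutex> guard{ allocator->mutex };
        return allocator->allocF(allocator->allocUD, ptr_, osize_, nsize_);
    }
};

// a libc-backed allocator, equivalent in spirit to libc_lua_Alloc in tools.cpp
static void* libc_alloc(void*, void* ptr_, size_t, size_t nsize_)
{
    if (nsize_ == 0) {
        free(ptr_);
        return nullptr;
    }
    return realloc(ptr_, nsize_);
}

int main()
{
    ProtectedAllocator prot;
    prot.allocF = libc_alloc;
    prot.allocUD = nullptr;
    // a real state would be created with lua_newstate(ProtectedAllocator::protected_lua_Alloc, &prot);
    // here the wrapper is exercised directly:
    void* p = ProtectedAllocator::protected_lua_Alloc(&prot, nullptr, 0, 64); // allocate 64 bytes
    p = ProtectedAllocator::protected_lua_Alloc(&prot, p, 64, 128);           // grow to 128 bytes
    ProtectedAllocator::protected_lua_Alloc(&prot, p, 128, 0);                // free
    return 0;
}

This also illustrates why, when allocator="protected" is configured, U_->internalAllocator is set from U_->protectedAllocator.makeDefinition(): every allocation, including Lanes' own bookkeeping, then funnels through the same lock.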
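Likewise, the wait loops in linda_send/linda_receive rely on a subtle detail: the keeper mutex is already locked when the lane decides to block, so it is adopted into a std::unique_lock for the condition-variable wait and then release()d so that the caller still owns the lock afterwards. A standalone sketch under assumed names (KeeperLike, dataReady and wait_for_write are not from the patch):

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

struct KeeperLike
{
    std::mutex mutex;
};

static std::condition_variable writeHappened;
static bool dataReady{ false };

static bool wait_for_write(KeeperLike& k_, std::chrono::steady_clock::time_point until_)
{
    // precondition: k_.mutex is already locked by this thread (as after acquireKeeper())
    std::unique_lock<std::mutex> keeper_lock{ k_.mutex, std::adopt_lock };
    std::cv_status const status{ writeHappened.wait_until(keeper_lock, until_) };
    keeper_lock.release(); // don't let the destructor unlock: the caller keeps ownership
    return status == std::cv_status::no_timeout;
}

int main()
{
    KeeperLike k;
    std::thread writer{ [&k] {
        std::this_thread::sleep_for(std::chrono::milliseconds{ 10 });
        {
            std::lock_guard<std::mutex> guard{ k.mutex };
            dataReady = true;
        }
        writeHappened.notify_all();
    } };
    k.mutex.lock(); // acquire the "keeper"
    while (!dataReady && wait_for_write(k, std::chrono::steady_clock::now() + std::chrono::seconds{ 1 })) {
        // loop to re-check the condition, like the try_again logic guarding spurious wakeups
    }
    k.mutex.unlock(); // release the "keeper"
    writer.join();
    return 0;
}

The release() call is the load-bearing line: letting the unique_lock unlock in its destructor would drop a mutex that the surrounding keeper_call/releaseKeeper logic still expects to hold.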