path: root/src
author: Benoit Germain <benoit.germain@ubisoft.com> 2024-05-02 10:18:37 +0200
committer: Benoit Germain <benoit.germain@ubisoft.com> 2024-05-02 10:18:37 +0200
commit: 84889233bfec4ad11ee1160fe63acbbbba7275e7 (patch)
tree: 236643b48bbc1a31f26a70d702a7e3f6b93b723f /src
parent: 8e64f794f08cb3e4f930df5bb17c3a7061516cca (diff)
Progressively applying the coding rules
Diffstat (limited to 'src')
-rw-r--r--  src/cancel.cpp          |   6
-rw-r--r--  src/keeper.cpp          |  14
-rw-r--r--  src/keeper.h            |   2
-rw-r--r--  src/lanes.cpp           | 134
-rw-r--r--  src/lanes_private.h     |  16
-rw-r--r--  src/linda.cpp           |  72
-rw-r--r--  src/linda.h             |  18
-rw-r--r--  src/macros_and_utils.h  |   2
-rw-r--r--  src/state.cpp           |  34
-rw-r--r--  src/state.h             |   4
-rw-r--r--  src/tools.cpp           |  64
-rw-r--r--  src/universe.cpp        |   2
-rw-r--r--  src/universe.h          |  60
13 files changed, 213 insertions(+), 215 deletions(-)
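
Most of this commit is a mechanical rename from snake_case (often with an m_ prefix) to lowerCamelCase members, per the coding rules being applied. A minimal before/after sketch of the convention, using members that appear in the diffs below (the struct names are illustrative, not from the codebase):

    #include <condition_variable>
    #include <mutex>

    enum class CancelRequest { None, Soft, Hard };

    // before: snake_case and m_ prefixes
    struct LaneBefore {
        std::mutex done_mutex;
        std::condition_variable done_signal;
        char const* debug_name{ "<unnamed>" };
        CancelRequest volatile cancel_request{ CancelRequest::None };
    };

    // after: lowerCamelCase, no prefix; a few renames are also semantic
    // (done_signal -> doneCondVar, simulate_cancel -> cancelRequest, timer_deep -> timerLinda)
    struct LaneAfter {
        std::mutex doneMutex;
        std::condition_variable doneCondVar;
        char const* debugName{ "<unnamed>" };
        CancelRequest volatile cancelRequest{ CancelRequest::None };
    };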
diff --git a/src/cancel.cpp b/src/cancel.cpp
index ed450f0..dd848a7 100644
--- a/src/cancel.cpp
+++ b/src/cancel.cpp
@@ -55,7 +55,7 @@ THE SOFTWARE.
 {
     Lane* const lane{ kLanePointerRegKey.readLightUserDataValue<Lane>(L_) };
     // 'lane' is nullptr for the original main state (and no-one can cancel that)
-    return lane ? lane->cancel_request : CancelRequest::None;
+    return lane ? lane->cancelRequest : CancelRequest::None;
 }
 
 // #################################################################################################
@@ -109,7 +109,7 @@ LUAG_FUNC(cancel_test)
 
 [[nodiscard]] static CancelResult thread_cancel_soft(Lane* lane_, lua_Duration duration_, bool wakeLane_)
 {
-    lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop
+    lane_->cancelRequest = CancelRequest::Soft; // it's now signaled to stop
     // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
     if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired
         std::condition_variable* const waiting_on{ lane_->waiting_on };
@@ -125,7 +125,7 @@ LUAG_FUNC(cancel_test)
 
 [[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wakeLane_)
 {
-    lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop
+    lane_->cancelRequest = CancelRequest::Hard; // it's now signaled to stop
     // lane_->thread.get_stop_source().request_stop();
     if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired
         std::condition_variable* waiting_on = lane_->waiting_on;
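
Both helpers follow the same cooperative-cancellation shape: set the lane's cancelRequest flag, then optionally poke the condition variable the lane is blocked on so it re-checks the flag. A minimal standalone sketch of that idiom (simplified types, not the actual Lanes API):

    #include <condition_variable>

    enum class CancelRequest { None, Soft, Hard };

    struct MiniLane {
        CancelRequest volatile cancelRequest{ CancelRequest::None };
        std::condition_variable* waiting_on{ nullptr }; // set while blocked on a linda
    };

    // signal the lane to stop, and wake it if it is blocked on a condition variable
    void request_cancel(MiniLane& lane_, CancelRequest req_, bool wakeLane_)
    {
        lane_.cancelRequest = req_; // the lane polls this flag in cancel_test()
        if (wakeLane_) {
            if (std::condition_variable* const cv{ lane_.waiting_on }) {
                cv->notify_all(); // the pending wait returns and the flag is re-checked
            }
        }
    }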
diff --git a/src/keeper.cpp b/src/keeper.cpp
index 5350d26..763bcf7 100644
--- a/src/keeper.cpp
+++ b/src/keeper.cpp
@@ -580,7 +580,7 @@ void close_keepers(Universe* U_)
             U_->keepers->keeper_array[i].~Keeper();
         }
         // free the keeper bookkeeping structure
-        U_->internal_allocator.free(U_->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper));
+        U_->internalAllocator.free(U_->keepers, sizeof(Keepers) + (nbKeepers - 1) * sizeof(Keeper));
         U_->keepers = nullptr;
     }
 }
@@ -618,7 +618,7 @@ void init_keepers(Universe* U_, lua_State* L_)
     // Keepers contains an array of 1 Keeper, adjust for the actual number of keeper states
     {
         size_t const bytes = sizeof(Keepers) + (nb_keepers - 1) * sizeof(Keeper);
-        U_->keepers = static_cast<Keepers*>(U_->internal_allocator.alloc(bytes));
+        U_->keepers = static_cast<Keepers*>(U_->internalAllocator.alloc(bytes));
         if (U_->keepers == nullptr) {
             raise_luaL_error(L_, "init_keepers() failed while creating keeper array; out of memory");
         }
@@ -675,7 +675,7 @@ void init_keepers(Universe* U_, lua_State* L_)
         // attempt to call on_state_create(), if we have one and it is a C function
         // (only support a C function because we can't transfer executable Lua code in keepers)
         // will raise an error in L_ in case of problem
-        call_on_state_create(U_, K, L_, LookupMode::ToKeeper);
+        callOnStateCreate(U_, K, L_, LookupMode::ToKeeper);
 
         // to see VM name in Decoda debugger
         lua_pushfstring(K, "Keeper #%d", i + 1); // L_: settings K: "Keeper #n"
@@ -694,8 +694,8 @@ Keeper* Linda::acquireKeeper() const
     int const nbKeepers{ U->keepers->nb_keepers };
     // can be 0 if this happens during main state shutdown (lanes is being GC'ed -> no keepers)
     if (nbKeepers) {
-        Keeper* const K{ &U->keepers->keeper_array[m_keeper_index] };
-        K->m_mutex.lock();
+        Keeper* const K{ &U->keepers->keeper_array[keeperIndex] };
+        K->mutex.lock();
         return K;
     }
     return nullptr;
@@ -706,8 +706,8 @@ Keeper* Linda::acquireKeeper() const
 void Linda::releaseKeeper(Keeper* K_) const
 {
     if (K_) { // can be nullptr if we tried to acquire during shutdown
-        assert(K_ == &U->keepers->keeper_array[m_keeper_index]);
-        K_->m_mutex.unlock();
+        assert(K_ == &U->keepers->keeper_array[keeperIndex]);
+        K_->mutex.unlock();
     }
 }
 
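
acquireKeeper()/releaseKeeper() remain a manual lock/unlock pair around the keeper's renamed mutex member. A hedged sketch of an RAII wrapper one could layer on that pair; KeeperGuard is hypothetical and not part of this patch:

    // RAII helper pairing Linda::acquireKeeper() with Linda::releaseKeeper(),
    // so the keeper mutex is released even if an exception unwinds the stack
    class KeeperGuard
    {
        Linda const& linda;
        Keeper* const keeper;

        public:
        KeeperGuard(Linda const& linda_)
        : linda{ linda_ }
        , keeper{ linda_.acquireKeeper() } // locks K->mutex, or nullptr during shutdown
        {
        }
        ~KeeperGuard() { linda.releaseKeeper(keeper); } // unlocks K->mutex if non-null
        Keeper* get() const { return keeper; }
    };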
diff --git a/src/keeper.h b/src/keeper.h
index 275d134..37642fd 100644
--- a/src/keeper.h
+++ b/src/keeper.h
@@ -24,7 +24,7 @@ using KeeperState = Unique<lua_State*>;
 
 struct Keeper
 {
-    std::mutex m_mutex;
+    std::mutex mutex;
     KeeperState L{ nullptr };
     // int count;
 };
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 38fe2b9..d027cff 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -106,7 +106,7 @@ THE SOFTWARE.
 #if HAVE_LANE_TRACKING()
 
 // The chain is ended by '(Lane*)(-1)', not nullptr:
-// 'tracking_first -> ... -> ... -> (-1)'
+// 'trackingFirst -> ... -> ... -> (-1)'
 #define TRACKING_END ((Lane*) (-1))
 
 /*
@@ -115,11 +115,11 @@ THE SOFTWARE.
  */
 static void tracking_add(Lane* lane_)
 {
-    std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->trackingMutex };
     assert(lane_->tracking_next == nullptr);
 
-    lane_->tracking_next = lane_->U->tracking_first;
-    lane_->U->tracking_first = lane_;
+    lane_->tracking_next = lane_->U->trackingFirst;
+    lane_->U->trackingFirst = lane_;
 }
 
 // #################################################################################################
@@ -130,13 +130,13 @@ static void tracking_add(Lane* lane_)
 [[nodiscard]] static bool tracking_remove(Lane* lane_)
 {
     bool found{ false };
-    std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->trackingMutex };
     // Make sure (within the MUTEX) that we actually are in the chain
     // still (at process exit they will remove us from chain and then
     // cancel/kill).
     //
     if (lane_->tracking_next != nullptr) {
-        Lane** ref = (Lane**) &lane_->U->tracking_first;
+        Lane** ref = (Lane**) &lane_->U->trackingFirst;
 
         while (*ref != TRACKING_END) {
             if (*ref == lane_) {
@@ -161,7 +161,7 @@ Lane::Lane(Universe* U_, lua_State* L_)
 , L{ L_ }
 {
 #if HAVE_LANE_TRACKING()
-    if (U->tracking_first) {
+    if (U->trackingFirst) {
         tracking_add(this);
     }
 #endif // HAVE_LANE_TRACKING()
@@ -176,10 +176,10 @@ bool Lane::waitForCompletion(lua_Duration duration_)
         until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
     }
 
-    std::unique_lock lock{ done_mutex };
+    std::unique_lock lock{ doneMutex };
     // std::stop_token token{ thread.get_stop_token() };
-    // return done_signal.wait_until(lock, token, secs_, [this](){ return status >= Lane::Done; });
-    return done_signal.wait_until(lock, until, [this]() { return status >= Lane::Done; });
+    // return doneCondVar.wait_until(lock, token, secs_, [this](){ return status >= Lane::Done; });
+    return doneCondVar.wait_until(lock, until, [this]() { return status >= Lane::Done; });
 }
 
 // #################################################################################################
@@ -189,7 +189,7 @@ void Lane::startThread(int priority_)
 {
     thread = std::jthread([this]() { lane_main(this); });
     if (priority_ != kThreadPrioDefault) {
-        JTHREAD_SET_PRIORITY(thread, priority_, U->m_sudo);
+        JTHREAD_SET_PRIORITY(thread, priority_, U->sudo);
     }
 }
 
@@ -208,9 +208,9 @@ static void securize_debug_threadname(lua_State* L_, Lane* lane_)
     STACK_GROW(L_, 3);
     lua_getiuservalue(L_, 1, 1);
     lua_newtable(L_);
-    // Lua 5.1 can't do 'lane_->debug_name = lua_pushstring(L_, lane_->debug_name);'
-    lua_pushstring(L_, lane_->debug_name);
-    lane_->debug_name = lua_tostring(L_, -1);
+    // Lua 5.1 can't do 'lane_->debugName = lua_pushstring(L_, lane_->debugName);'
+    lua_pushstring(L_, lane_->debugName);
+    lane_->debugName = lua_tostring(L_, -1);
     lua_rawset(L_, -3);
     lua_pop(L_, 1);
     STACK_CHECK(L_, 0);
@@ -242,7 +242,7 @@ Lane::~Lane()
     // Clean up after a (finished) thread
     //
 #if HAVE_LANE_TRACKING()
-    if (U->tracking_first != nullptr) {
+    if (U->trackingFirst != nullptr) {
         // Lane was cleaned up, no need to handle at process termination
         std::ignore = tracking_remove(this);
     }
@@ -414,7 +414,7 @@ static void push_stack_trace(lua_State* L_, int rc_, int stk_base_)
 #define SELFDESTRUCT_END ((Lane*) (-1))
 //
 // The chain is ended by '(Lane*)(-1)', not nullptr:
-// 'selfdestruct_first -> ... -> ... -> (-1)'
+// 'selfdestructFirst -> ... -> ... -> (-1)'
 
 /*
  * Add the lane to selfdestruct chain; the ones still running at the end of the
@@ -422,11 +422,11 @@ static void push_stack_trace(lua_State* L_, int rc_, int stk_base_)
  */
 static void selfdestruct_add(Lane* lane_)
 {
-    std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->selfdestructMutex };
     assert(lane_->selfdestruct_next == nullptr);
 
-    lane_->selfdestruct_next = lane_->U->selfdestruct_first;
-    lane_->U->selfdestruct_first = lane_;
+    lane_->selfdestruct_next = lane_->U->selfdestructFirst;
+    lane_->U->selfdestructFirst = lane_;
 }
 
 // #################################################################################################
@@ -435,20 +435,20 @@ static void selfdestruct_add(Lane* lane_)
 [[nodiscard]] static bool selfdestruct_remove(Lane* lane_)
 {
     bool found{ false };
-    std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
+    std::lock_guard<std::mutex> guard{ lane_->U->selfdestructMutex };
     // Make sure (within the MUTEX) that we actually are in the chain
     // still (at process exit they will remove us from chain and then
     // cancel/kill).
     //
     if (lane_->selfdestruct_next != nullptr) {
-        Lane* volatile* ref = static_cast<Lane* volatile*>(&lane_->U->selfdestruct_first);
+        Lane* volatile* ref = static_cast<Lane* volatile*>(&lane_->U->selfdestructFirst);
 
         while (*ref != SELFDESTRUCT_END) {
             if (*ref == lane_) {
                 *ref = lane_->selfdestruct_next;
                 lane_->selfdestruct_next = nullptr;
                 // the terminal shutdown should wait until the lane is done with its lua_close()
-                lane_->U->selfdestructing_count.fetch_add(1, std::memory_order_release);
+                lane_->U->selfdestructingCount.fetch_add(1, std::memory_order_release);
                 found = true;
                 break;
             }
@@ -469,11 +469,11 @@ static void selfdestruct_add(Lane* lane_)
     [[maybe_unused]] char const* const op_string{ lua_tostring(L_, lua_upvalueindex(2)) };
     CancelOp const op{ which_cancel_op(op_string) };
 
-    if (U->selfdestruct_first != SELFDESTRUCT_END) {
+    if (U->selfdestructFirst != SELFDESTRUCT_END) {
         // Signal _all_ still running threads to exit (including the timer thread)
         {
-            std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-            Lane* lane{ U->selfdestruct_first };
+            std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+            Lane* lane{ U->selfdestructFirst };
             lua_Duration timeout{ 1us };
             while (lane != SELFDESTRUCT_END) {
                 // attempt the requested cancel with a small timeout.
@@ -490,16 +490,16 @@ static void selfdestruct_add(Lane* lane_)
         {
             std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
 
-            while (U->selfdestruct_first != SELFDESTRUCT_END) {
+            while (U->selfdestructFirst != SELFDESTRUCT_END) {
                 // give threads time to act on their cancel
                 std::this_thread::yield();
                 // count the number of cancelled thread that didn't have the time to act yet
                 int n{ 0 };
                 {
-                    std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-                    Lane* lane{ U->selfdestruct_first };
+                    std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+                    Lane* lane{ U->selfdestructFirst };
                     while (lane != SELFDESTRUCT_END) {
-                        if (lane->cancel_request != CancelRequest::None)
+                        if (lane->cancelRequest != CancelRequest::None)
                             ++n;
                         lane = lane->selfdestruct_next;
                     }
@@ -515,33 +515,33 @@ static void selfdestruct_add(Lane* lane_)
 
         // If some lanes are currently cleaning after themselves, wait until they are done.
         // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
-        while (U->selfdestructing_count.load(std::memory_order_acquire) > 0) {
+        while (U->selfdestructingCount.load(std::memory_order_acquire) > 0) {
             std::this_thread::yield();
         }
     }
 
     // If after all this, we still have some free-running lanes, it's an external user error, they should have stopped appropriately
     {
-        std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-        Lane* lane{ U->selfdestruct_first };
+        std::lock_guard<std::mutex> guard{ U->selfdestructMutex };
+        Lane* lane{ U->selfdestructFirst };
         if (lane != SELFDESTRUCT_END) {
             // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it)
-            raise_luaL_error(L_, "Zombie thread %s refuses to die!", lane->debug_name);
+            raise_luaL_error(L_, "Zombie thread %s refuses to die!", lane->debugName);
         }
     }
 
     // no need to mutex-protect this as all threads in the universe are gone at that point
-    if (U->timer_deep != nullptr) { // test ins case some early internal error prevented Lanes from creating the deep timer
-        [[maybe_unused]] int const prev_ref_count{ U->timer_deep->refcount.fetch_sub(1, std::memory_order_relaxed) };
+    if (U->timerLinda != nullptr) { // test in case some early internal error prevented Lanes from creating the deep timer
+        [[maybe_unused]] int const prev_ref_count{ U->timerLinda->refcount.fetch_sub(1, std::memory_order_relaxed) };
         LUA_ASSERT(L_, prev_ref_count == 1); // this should be the last reference
-        DeepFactory::DeleteDeepObject(L_, U->timer_deep);
-        U->timer_deep = nullptr;
+        DeepFactory::DeleteDeepObject(L_, U->timerLinda);
+        U->timerLinda = nullptr;
     }
 
     close_keepers(U);
 
     // remove the protected allocator, if any
-    U->protected_allocator.removeFrom(L_);
+    U->protectedAllocator.removeFrom(L_);
 
     U->Universe::~Universe();
 
@@ -701,9 +701,9 @@ LUAG_FUNC(set_debug_threadname)
     // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global...
     hidden_regkey.setValue(L_, [](lua_State* L_) { lua_pushvalue(L_, -2); });
     STACK_CHECK(L_, 1);
-    lane->debug_name = lua_tostring(L_, -1);
+    lane->debugName = lua_tostring(L_, -1);
     // keep a direct pointer on the string
-    THREAD_SETNAME(lane->debug_name);
+    THREAD_SETNAME(lane->debugName);
     // to see VM name in Decoda debugger Virtual Machine window
     lua_setglobal(L_, "decoda_name"); //
     STACK_CHECK(L_, 0);
@@ -716,7 +716,7 @@ LUAG_FUNC(get_debug_threadname)
 {
     Lane* const lane{ ToLane(L_, 1) };
     luaL_argcheck(L_, lua_gettop(L_) == 1, 2, "too many arguments");
-    lua_pushstring(L_, lane->debug_name);
+    lua_pushstring(L_, lane->debugName);
     return 1;
 }
 
@@ -731,7 +731,7 @@ LUAG_FUNC(set_thread_priority)
     if (prio < kThreadPrioMin || prio > kThreadPrioMax) {
         raise_luaL_error(L_, "priority out of range: %d..+%d (%d)", kThreadPrioMin, kThreadPrioMax, prio);
     }
-    THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L_)->m_sudo);
+    THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L_)->sudo);
     return 0;
 }
 
@@ -843,10 +843,10 @@ static void lane_main(Lane* lane_)
         // We're a free-running thread and no-one's there to clean us up.
         lua_close(lane_->L);
         lane_->L = nullptr; // just in case
-        lane_->U->selfdestruct_cs.lock();
+        lane_->U->selfdestructMutex.lock();
         // done with lua_close(), terminal shutdown sequence may proceed
-        lane_->U->selfdestructing_count.fetch_sub(1, std::memory_order_release);
-        lane_->U->selfdestruct_cs.unlock();
+        lane_->U->selfdestructingCount.fetch_sub(1, std::memory_order_release);
+        lane_->U->selfdestructMutex.unlock();
 
         // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea
         lane_->thread.detach();
@@ -860,10 +860,10 @@ static void lane_main(Lane* lane_)
         Lane::Status const st = (rc == LUA_OK) ? Lane::Done : kCancelError.equals(L, 1) ? Lane::Cancelled : Lane::Error;
 
         {
-            // 'done_mutex' protects the -> Done|Error|Cancelled state change
-            std::lock_guard lock{ lane_->done_mutex };
+            // 'doneMutex' protects the -> Done|Error|Cancelled state change
+            std::lock_guard lock{ lane_->doneMutex };
             lane_->status = st;
-            lane_->done_signal.notify_one(); // wake up master (while 'lane_->done_mutex' is on)
+            lane_->doneCondVar.notify_one(); // wake up master (while 'lane_->doneMutex' is on)
         }
     }
 }
@@ -994,9 +994,9 @@ LUAG_FUNC(lane_new)
         lua_settop(m_lane->L, 0);
         kCancelError.pushKey(m_lane->L);
         {
-            std::lock_guard lock{ m_lane->done_mutex };
+            std::lock_guard lock{ m_lane->doneMutex };
             m_lane->status = Lane::Cancelled;
-            m_lane->done_signal.notify_one(); // wake up master (while 'lane->done_mutex' is on)
+            m_lane->doneCondVar.notify_one(); // wake up master (while 'lane->doneMutex' is on)
         }
         // unblock the thread so that it can terminate gracefully
         m_lane->ready.count_down();
@@ -1207,7 +1207,7 @@ LUAG_FUNC(lane_new)
     lua_rawget(L_, -2); // L_: ud uservalue gc_cb|nil
     if (!lua_isnil(L_, -1)) {
         lua_remove(L_, -2); // L_: ud gc_cb|nil
-        lua_pushstring(L_, lane->debug_name); // L_: ud gc_cb name
+        lua_pushstring(L_, lane->debugName); // L_: ud gc_cb name
         have_gc_cb = true;
     } else {
         lua_pop(L_, 2); // L_: ud
@@ -1228,7 +1228,7 @@ LUAG_FUNC(lane_new)
         lua_close(lane->L);
         lane->L = nullptr;
         // just in case, but s will be freed soon so...
-        lane->debug_name = "<gc>";
+        lane->debugName = "<gc>";
     }
 
     // Clean up after a (finished) thread
@@ -1307,7 +1307,7 @@ LUAG_FUNC(thread_join)
 
     int ret{ 0 };
     Universe* const U{ lane->U };
-    // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
+    // debugName is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
     // so store it in the userdata uservalue at a key that can't possibly collide
     securize_debug_threadname(L_, lane);
     switch (lane->status) {
@@ -1508,15 +1508,15 @@ LUAG_FUNC(threads)
 
     // List _all_ still running threads
     //
-    std::lock_guard<std::mutex> guard{ U->tracking_cs };
-    if (U->tracking_first && U->tracking_first != TRACKING_END) {
-        Lane* lane{ U->tracking_first };
-        int index = 0;
+    std::lock_guard<std::mutex> guard{ U->trackingMutex };
+    if (U->trackingFirst && U->trackingFirst != TRACKING_END) {
+        Lane* lane{ U->trackingFirst };
+        int index{ 0 };
         lua_newtable(L_); // L_: {}
         while (lane != TRACKING_END) {
             // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other
             lua_newtable(L_); // L_: {} {}
-            lua_pushstring(L_, lane->debug_name); // L_: {} {} "name"
+            lua_pushstring(L_, lane->debugName); // L_: {} {} "name"
             lua_setfield(L_, -2, "name"); // L_: {} {}
             lane->pushThreadStatus(L_); // L_: {} {} "status"
             lua_setfield(L_, -2, "status"); // L_: {} {}
@@ -1663,26 +1663,26 @@ LUAG_FUNC(configure)
         lua_pop(L_, 1); // L_: settings
 #if HAVE_LANE_TRACKING()
         lua_getfield(L_, 1, "track_lanes"); // L_: settings track_lanes
-        U->tracking_first = lua_toboolean(L_, -1) ? TRACKING_END : nullptr;
+        U->trackingFirst = lua_toboolean(L_, -1) ? TRACKING_END : nullptr;
         lua_pop(L_, 1); // L_: settings
 #endif // HAVE_LANE_TRACKING()
         // Linked chains handling
-        U->selfdestruct_first = SELFDESTRUCT_END;
+        U->selfdestructFirst = SELFDESTRUCT_END;
         initialize_allocator_function(U, L_);
-        initialize_on_state_create(U, L_);
+        initializeOnStateCreate(U, L_);
         init_keepers(U, L_);
         STACK_CHECK(L_, 1);
 
-        // Initialize 'timer_deep'; a common Linda object shared by all states
+        // Initialize 'timerLinda'; a common Linda object shared by all states
         lua_pushcfunction(L_, LG_linda); // L_: settings lanes.linda
         lua_pushliteral(L_, "lanes-timer"); // L_: settings lanes.linda "lanes-timer"
         lua_call(L_, 1, 1); // L_: settings linda
         STACK_CHECK(L_, 2);
 
         // Proxy userdata contents is only a 'DeepPrelude*' pointer
-        U->timer_deep = *lua_tofulluserdata<DeepPrelude*>(L_, -1);
+        U->timerLinda = *lua_tofulluserdata<DeepPrelude*>(L_, -1);
         // increment refcount so that this linda remains alive as long as the universe exists.
-        U->timer_deep->refcount.fetch_add(1, std::memory_order_relaxed);
+        U->timerLinda->refcount.fetch_add(1, std::memory_order_relaxed);
         lua_pop(L_, 1); // L_: settings
     }
     STACK_CHECK(L_, 1);
@@ -1699,7 +1699,7 @@ LUAG_FUNC(configure)
     luaG_registerlibfuncs(L_, lanes_functions);
 #if HAVE_LANE_TRACKING()
     // register core.threads() only if settings say it should be available
-    if (U->tracking_first != nullptr) {
+    if (U->trackingFirst != nullptr) {
         lua_pushcfunction(L_, LG_threads); // L_: settings M LG_threads()
         lua_setfield(L_, -2, "threads"); // L_: settings M
     }
@@ -1708,8 +1708,8 @@ LUAG_FUNC(configure)
 
     {
         char const* errmsg{
-            DeepFactory::PushDeepProxy(DestState{ L_ }, U->timer_deep, 0, LookupMode::LaneBody)
-        }; // L_: settings M timer_deep
+            DeepFactory::PushDeepProxy(DestState{ L_ }, U->timerLinda, 0, LookupMode::LaneBody)
+        }; // L_: settings M timerLinda
         if (errmsg != nullptr) {
             raise_luaL_error(L_, errmsg);
         }
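
The doneMutex/doneCondVar pair renamed throughout this file implements the classic monitor handshake: the worker publishes the final status and notifies under the mutex, and the master waits with a deadline plus predicate. A compact standalone sketch of that shape (simplified, not the actual Lane class):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct MiniLane {
        enum Status { Pending, Running, Done } status{ Pending };
        std::mutex doneMutex;
        std::condition_variable doneCondVar;

        // worker side: publish the final status under the mutex, then notify
        void finish() {
            std::lock_guard lock{ doneMutex };
            status = Done;
            doneCondVar.notify_one(); // wake up master (while doneMutex is held)
        }

        // master side: wait until the worker is done, or the deadline passes
        bool waitForCompletion(std::chrono::steady_clock::time_point until_) {
            std::unique_lock lock{ doneMutex };
            return doneCondVar.wait_until(lock, until_, [this]() { return status >= Done; });
        }
    };

Notifying while the mutex is still held (as the diff's comments note) guarantees the waiter cannot observe a torn status/notification ordering.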
diff --git a/src/lanes_private.h b/src/lanes_private.h
index 1d476cf..01630ba 100644
--- a/src/lanes_private.h
+++ b/src/lanes_private.h
@@ -36,14 +36,14 @@ class Lane
     std::jthread thread;
     // a latch to wait for the lua_State to be ready
     std::latch ready{ 1 };
-    // to wait for stop requests through m_thread's stop_source
-    std::mutex done_mutex;
-    std::condition_variable done_signal; // use condition_variable_any if waiting for a stop_token
+    // to wait for stop requests through thread's stop_source
+    std::mutex doneMutex;
+    std::condition_variable doneCondVar; // use condition_variable_any if waiting for a stop_token
     //
     // M: sub-thread OS thread
     // S: not used
 
-    char const* debug_name{ "<unnamed>" };
+    char const* debugName{ "<unnamed>" };
 
     Universe* const U;
     lua_State* L;
@@ -60,7 +60,7 @@ class Lane
     //
     // When status is Waiting, points on the linda's signal the thread waits on, else nullptr
 
-    CancelRequest volatile cancel_request{ CancelRequest::None };
+    CancelRequest volatile cancelRequest{ CancelRequest::None };
     //
     // M: sets to false, flags true for cancel request
     // S: reads to see if cancel is requested
@@ -77,11 +77,11 @@ class Lane
     //
     // For tracking only
 
-    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
+    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internalAllocator.alloc(size_); }
     // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
-    static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Lane)); }
+    static void operator delete(void* p_, Universe* U_) { U_->internalAllocator.free(p_, sizeof(Lane)); }
     // this one is for us, to make sure memory is freed by the correct allocator
-    static void operator delete(void* p_) { static_cast<Lane*>(p_)->U->internal_allocator.free(p_, sizeof(Lane)); }
+    static void operator delete(void* p_) { static_cast<Lane*>(p_)->U->internalAllocator.free(p_, sizeof(Lane)); }
 
     Lane(Universe* U_, lua_State* L_);
     ~Lane();
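
Lane routes all of its allocation through the universe's renamed internalAllocator via class-specific operator new/delete. A hedged sketch of that pattern in isolation; Arena and Widget are stand-ins, not the real types:

    #include <cstddef>
    #include <cstdlib>

    // stand-in for the universe's internalAllocator
    struct Arena {
        void* alloc(size_t n_) { return malloc(n_); }
        void free(void* p_, size_t) { ::free(p_); }
    };

    struct Widget {
        Arena* const arena;

        Widget(Arena* arena_) : arena{ arena_ } {}

        // placement form, used as 'Widget* w = new (&a) Widget{ &a };'
        static void* operator new(size_t size_, Arena* arena_) noexcept { return arena_->alloc(size_); }
        // matching form, only invoked by the compiler if the constructor throws
        static void operator delete(void* p_, Arena* arena_) { arena_->free(p_, sizeof(Widget)); }
        // regular 'delete w': recover the arena from the object so the right allocator frees it
        static void operator delete(void* p_) { static_cast<Widget*>(p_)->arena->free(p_, sizeof(Widget)); }
    };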
diff --git a/src/linda.cpp b/src/linda.cpp
index cda3a63..bbfbd69 100644
--- a/src/linda.cpp
+++ b/src/linda.cpp
@@ -53,7 +53,7 @@ static constexpr uintptr_t kPointerMagicShift{ 3 };
 Linda::Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_)
 : DeepPrelude{ LindaFactory::Instance }
 , U{ U_ }
-, m_keeper_index{ (group_ ? group_ : static_cast<int>(std::bit_cast<uintptr_t>(this) >> kPointerMagicShift)) % U_->keepers->nb_keepers }
+, keeperIndex{ (group_ ? group_ : static_cast<int>(std::bit_cast<uintptr_t>(this) >> kPointerMagicShift)) % U_->keepers->nb_keepers }
 {
     setName(name_, len_);
 }
@@ -62,9 +62,9 @@ Linda::Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_)
 
 Linda::~Linda()
 {
-    if (std::holds_alternative<AllocatedName>(m_name)) {
-        AllocatedName& name = std::get<AllocatedName>(m_name);
-        U->internal_allocator.free(name.name, name.len);
+    if (std::holds_alternative<AllocatedName>(nameVariant)) {
+        AllocatedName& name = std::get<AllocatedName>(nameVariant);
+        U->internalAllocator.free(name.name, name.len);
     }
 }
 
@@ -78,12 +78,12 @@ void Linda::setName(char const* name_, size_t len_)
     }
     ++len_; // don't forget terminating 0
     if (len_ < kEmbeddedNameLength) {
-        m_name.emplace<EmbeddedName>();
-        char* const name{ std::get<EmbeddedName>(m_name).data() };
+        nameVariant.emplace<EmbeddedName>();
+        char* const name{ std::get<EmbeddedName>(nameVariant).data() };
         memcpy(name, name_, len_);
     } else {
-        AllocatedName& name = std::get<AllocatedName>(m_name);
-        name.name = static_cast<char*>(U->internal_allocator.alloc(len_));
+        AllocatedName& name = std::get<AllocatedName>(nameVariant);
+        name.name = static_cast<char*>(U->internalAllocator.alloc(len_));
         name.len = len_;
         memcpy(name.name, name_, len_);
     }
@@ -93,12 +93,12 @@ void Linda::setName(char const* name_, size_t len_)
 
 char const* Linda::getName() const
 {
-    if (std::holds_alternative<AllocatedName>(m_name)) {
-        AllocatedName const& name = std::get<AllocatedName>(m_name);
+    if (std::holds_alternative<AllocatedName>(nameVariant)) {
+        AllocatedName const& name = std::get<AllocatedName>(nameVariant);
         return name.name;
     }
-    if (std::holds_alternative<EmbeddedName>(m_name)) {
-        char const* const name{ std::get<EmbeddedName>(m_name).data() };
+    if (std::holds_alternative<EmbeddedName>(nameVariant)) {
+        char const* const name{ std::get<EmbeddedName>(nameVariant).data() };
         return name;
     }
     return nullptr;
@@ -241,9 +241,9 @@ LUAG_FUNC(linda_send)
     STACK_CHECK_START_REL(KL, 0);
     for (bool try_again{ true };;) {
         if (lane != nullptr) {
-            cancel = lane->cancel_request;
+            cancel = lane->cancelRequest;
         }
-        cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel;
+        cancel = (cancel != CancelRequest::None) ? cancel : linda->cancelRequest;
         // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
         if (!try_again || cancel != CancelRequest::None) {
             pushed.emplace(0);
@@ -262,7 +262,7 @@ LUAG_FUNC(linda_send)
 
         if (ret) {
             // Wake up ALL waiting threads
-            linda->m_write_happened.notify_all();
+            linda->writeHappened.notify_all();
             break;
         }
 
@@ -280,11 +280,11 @@ LUAG_FUNC(linda_send)
             LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case
             lane->status = Lane::Waiting;
             LUA_ASSERT(L_, lane->waiting_on == nullptr);
-            lane->waiting_on = &linda->m_read_happened;
+            lane->waiting_on = &linda->readHappened;
         }
         // could not send because no room: wait until some data was read before trying again, or until timeout is reached
-        std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
-        std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) };
+        std::unique_lock<std::mutex> keeper_lock{ K->mutex, std::adopt_lock };
+        std::cv_status const status{ linda->readHappened.wait_until(keeper_lock, until) };
         keeper_lock.release(); // we don't want to release the lock!
         try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
         if (lane != nullptr) {
@@ -390,9 +390,9 @@ LUAG_FUNC(linda_receive)
     STACK_CHECK_START_REL(KL, 0);
     for (bool try_again{ true };;) {
         if (lane != nullptr) {
-            cancel = lane->cancel_request;
+            cancel = lane->cancelRequest;
         }
-        cancel = (cancel != CancelRequest::None) ? cancel : linda->simulate_cancel;
+        cancel = (cancel != CancelRequest::None) ? cancel : linda->cancelRequest;
         // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
         if (!try_again || cancel != CancelRequest::None) {
             pushed.emplace(0);
@@ -410,7 +410,7 @@ LUAG_FUNC(linda_receive)
             keeper_toggle_nil_sentinels(L_, lua_gettop(L_) - pushed.value(), LookupMode::FromKeeper);
             // To be done from within the 'K' locking area
             //
-            linda->m_read_happened.notify_all();
+            linda->readHappened.notify_all();
             break;
         }
 
@@ -427,11 +427,11 @@ LUAG_FUNC(linda_receive)
             LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case
             lane->status = Lane::Waiting;
             LUA_ASSERT(L_, lane->waiting_on == nullptr);
-            lane->waiting_on = &linda->m_write_happened;
+            lane->waiting_on = &linda->writeHappened;
         }
         // not enough data to read: wakeup when data was sent, or when timeout is reached
-        std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock };
-        std::cv_status const status{ linda->m_write_happened.wait_until(keeper_lock, until) };
+        std::unique_lock<std::mutex> keeper_lock{ K->mutex, std::adopt_lock };
+        std::cv_status const status{ linda->writeHappened.wait_until(keeper_lock, until) };
         keeper_lock.release(); // we don't want to release the lock!
         try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups
         if (lane != nullptr) {
@@ -483,7 +483,7 @@ LUAG_FUNC(linda_set)
 
     Keeper* const K{ linda->whichKeeper() };
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         if (has_value) {
             // convert nils to some special non-nil sentinel in sent values
             keeper_toggle_nil_sentinels(L_, 3, LookupMode::ToKeeper);
@@ -494,12 +494,12 @@ LUAG_FUNC(linda_set)
 
             if (has_value) {
                 // we put some data in the slot, tell readers that they should wake
-                linda->m_write_happened.notify_all(); // To be done from within the 'K' locking area
+                linda->writeHappened.notify_all(); // To be done from within the 'K' locking area
             }
             if (pushed.value() == 1) {
                 // the key was full, but it is no longer the case, tell writers they should wake
                 LUA_ASSERT(L_, lua_type(L_, -1) == LUA_TBOOLEAN && lua_toboolean(L_, -1) == 1);
-                linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
+                linda->readHappened.notify_all(); // To be done from within the 'K' locking area
             }
         }
     } else { // linda is cancelled
@@ -553,7 +553,7 @@ LUAG_FUNC(linda_get)
     check_key_types(L_, 2, 2);
 
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         Keeper* const K{ linda->whichKeeper() };
         pushed = keeper_call(linda->U, K->L, KEEPER_API(get), L_, linda, 2);
         if (pushed.value_or(0) > 0) {
@@ -590,13 +590,13 @@ LUAG_FUNC(linda_limit)
     check_key_types(L_, 2, 2);
 
     KeeperCallResult pushed;
-    if (linda->simulate_cancel == CancelRequest::None) {
+    if (linda->cancelRequest == CancelRequest::None) {
         Keeper* const K{ linda->whichKeeper() };
         pushed = keeper_call(linda->U, K->L, KEEPER_API(limit), L_, linda, 2);
         LUA_ASSERT(L_, pushed.has_value() && (pushed.value() == 0 || pushed.value() == 1)); // no error, optional boolean value saying if we should wake blocked writer threads
         if (pushed.value() == 1) {
             LUA_ASSERT(L_, lua_type(L_, -1) == LUA_TBOOLEAN && lua_toboolean(L_, -1) == 1);
-            linda->m_read_happened.notify_all(); // To be done from within the 'K' locking area
+            linda->readHappened.notify_all(); // To be done from within the 'K' locking area
         }
     } else { // linda is cancelled
         // do nothing and return lanes.cancel_error
@@ -623,16 +623,16 @@ LUAG_FUNC(linda_cancel)
     // make sure we got 3 arguments: the linda, a key and a limit
     luaL_argcheck(L_, lua_gettop(L_) <= 2, 2, "wrong number of arguments");
 
-    linda->simulate_cancel = CancelRequest::Soft;
+    linda->cancelRequest = CancelRequest::Soft;
     if (strcmp(who, "both") == 0) { // tell everyone writers to wake up
-        linda->m_write_happened.notify_all();
-        linda->m_read_happened.notify_all();
+        linda->writeHappened.notify_all();
+        linda->readHappened.notify_all();
     } else if (strcmp(who, "none") == 0) { // reset flag
-        linda->simulate_cancel = CancelRequest::None;
+        linda->cancelRequest = CancelRequest::None;
    } else if (strcmp(who, "read") == 0) { // tell blocked readers to wake up
-        linda->m_write_happened.notify_all();
+        linda->writeHappened.notify_all();
     } else if (strcmp(who, "write") == 0) { // tell blocked writers to wake up
-        linda->m_read_happened.notify_all();
+        linda->readHappened.notify_all();
     } else {
         raise_luaL_error(L_, "unknown wake hint '%s'", who);
     }
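
The renamed readHappened/writeHappened waits reuse the keeper mutex that acquireKeeper() already locked: the code adopts it into a unique_lock so the condition variable can use it, waits, then calls release() so the wrapper gives up ownership without unlocking. A minimal standalone sketch of that idiom (simplified, not the actual linda code):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex keeperMutex;               // assumed already locked, e.g. by acquireKeeper()
    std::condition_variable readHappened;

    void wait_for_read(std::chrono::steady_clock::time_point until_)
    {
        // adopt the already-held lock so the condition variable can wait on it
        std::unique_lock<std::mutex> keeper_lock{ keeperMutex, std::adopt_lock };
        std::cv_status const status{ readHappened.wait_until(keeper_lock, until_) };
        // detach without unlocking: the caller still owns the mutex and will
        // release it through the normal releaseKeeper() path
        keeper_lock.release();
        (void) status; // no_timeout vs timeout distinguishes a wakeup from the deadline
    }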
diff --git a/src/linda.h b/src/linda.h
index 7a21571..56941a1 100644
--- a/src/linda.h
+++ b/src/linda.h
@@ -32,23 +32,23 @@ class Linda
         char* name{ nullptr };
     };
     // depending on the name length, it is either embedded inside the Linda, or allocated separately
-    std::variant<AllocatedName, EmbeddedName> m_name;
+    std::variant<AllocatedName, EmbeddedName> nameVariant;
 
     public:
-    std::condition_variable m_read_happened;
-    std::condition_variable m_write_happened;
+    std::condition_variable readHappened;
+    std::condition_variable writeHappened;
     Universe* const U{ nullptr }; // the universe this linda belongs to
-    int const m_keeper_index{ -1 }; // the keeper associated to this linda
-    CancelRequest simulate_cancel{ CancelRequest::None };
+    int const keeperIndex{ -1 }; // the keeper associated to this linda
+    CancelRequest cancelRequest{ CancelRequest::None };
 
     public:
     // a fifo full userdata has one uservalue, the table that holds the actual fifo contents
-    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internal_allocator.alloc(size_); }
+    [[nodiscard]] static void* operator new(size_t size_, Universe* U_) noexcept { return U_->internalAllocator.alloc(size_); }
     // always embedded somewhere else or "in-place constructed" as a full userdata
     // can't actually delete the operator because the compiler generates stack unwinding code that could call it in case of exception
-    static void operator delete(void* p_, Universe* U_) { U_->internal_allocator.free(p_, sizeof(Linda)); }
+    static void operator delete(void* p_, Universe* U_) { U_->internalAllocator.free(p_, sizeof(Linda)); }
     // this one is for us, to make sure memory is freed by the correct allocator
-    static void operator delete(void* p_) { static_cast<Linda*>(p_)->U->internal_allocator.free(p_, sizeof(Linda)); }
+    static void operator delete(void* p_) { static_cast<Linda*>(p_)->U->internalAllocator.free(p_, sizeof(Linda)); }
 
     ~Linda();
     Linda(Universe* U_, LindaGroup group_, char const* name_, size_t len_);
@@ -66,7 +66,7 @@ class Linda
 
     public:
     [[nodiscard]] char const* getName() const;
-    [[nodiscard]] Keeper* whichKeeper() const { return U->keepers->nb_keepers ? &U->keepers->keeper_array[m_keeper_index] : nullptr; }
+    [[nodiscard]] Keeper* whichKeeper() const { return U->keepers->nb_keepers ? &U->keepers->keeper_array[keeperIndex] : nullptr; }
     [[nodiscard]] Keeper* acquireKeeper() const;
     void releaseKeeper(Keeper* keeper_) const;
 };
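
nameVariant keeps short names embedded inside the Linda and heap-allocates long ones, a small-buffer optimization expressed with std::variant. A hedged standalone sketch of the same shape; the kEmbeddedNameLength value is illustrative, and std::string stands in for the custom-allocator path the real code uses:

    #include <array>
    #include <cstddef>
    #include <cstring>
    #include <string>
    #include <variant>

    constexpr size_t kEmbeddedNameLength{ 24 };
    using EmbeddedName = std::array<char, kEmbeddedNameLength>;

    struct Name {
        std::variant<std::string, EmbeddedName> storage; // short names stay in-place

        void set(char const* name_, size_t len_) {
            if (len_ + 1 < kEmbeddedNameLength) {
                auto& embedded = storage.emplace<EmbeddedName>();
                memcpy(embedded.data(), name_, len_ + 1); // include the terminating 0
            } else {
                storage.emplace<std::string>(name_, len_); // falls back to the heap
            }
        }

        char const* get() const {
            if (auto const* embedded = std::get_if<EmbeddedName>(&storage)) {
                return embedded->data();
            }
            return std::get<std::string>(storage).c_str();
        }
    };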
diff --git a/src/macros_and_utils.h b/src/macros_and_utils.h
index 58567ac..a1f6cba 100644
--- a/src/macros_and_utils.h
+++ b/src/macros_and_utils.h
@@ -64,7 +64,7 @@ template <typename... ARGS>
 #define USE_DEBUG_SPEW() 0
 #if USE_DEBUG_SPEW()
 #define INDENT_BEGIN "%.*s "
-#define INDENT_END(U_) , (U_ ? U_->debugspew_indent_depth.load(std::memory_order_relaxed) : 0), DebugSpewIndentScope::debugspew_indent
+#define INDENT_END(U_) , (U_ ? U_->debugspewIndentDepth.load(std::memory_order_relaxed) : 0), DebugSpewIndentScope::debugspew_indent
 #define DEBUGSPEW_CODE(_code) _code
 #define DEBUGSPEW_OR_NOT(a_, b_) a_
 #define DEBUGSPEW_PARAM_COMMA(param_) param_,
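
INDENT_END expands to the trailing printf arguments for the "%.*s" specifier in INDENT_BEGIN: a precision read from the (renamed) atomic depth counter, plus a constant run of indent characters. A tiny sketch of the underlying printf idiom:

    #include <cstdio>

    // "%.*s" prints at most 'depth_' characters of the padding string,
    // giving nesting-proportional indentation without any allocation
    void spew(int depth_, char const* msg_)
    {
        static constexpr char const* indent{ "................" };
        fprintf(stderr, "%.*s %s\n", depth_, indent, msg_);
    }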
diff --git a/src/state.cpp b/src/state.cpp
index ebb24dd..2893907 100644
--- a/src/state.cpp
+++ b/src/state.cpp
@@ -64,10 +64,10 @@ THE SOFTWARE.
     // Using 'lua_pcall()' to catch errors; otherwise a failing 'require' would
     // leave us locked, blocking any future 'require' calls from other lanes.
 
-    U->require_cs.lock();
+    U->requireMutex.lock();
     // starting with Lua 5.4, require may return a second optional value, so we need LUA_MULTRET
     rc = lua_pcall(L_, args, LUA_MULTRET, 0 /*errfunc*/); // L_: err|result(s)
-    U->require_cs.unlock();
+    U->requireMutex.unlock();
 
     // the required module (or an error message) is left on the stack as returned value by original require function
 
@@ -205,14 +205,14 @@ static void copy_one_time_settings(Universe* U_, SourceState L1_, DestState L2_)
 
 // #################################################################################################
 
-void initialize_on_state_create(Universe* U_, lua_State* L_)
+void initializeOnStateCreate(Universe* U_, lua_State* L_)
 {
     STACK_CHECK_START_REL(L_, 1); // L_: settings
     lua_getfield(L_, -1, "on_state_create"); // L_: settings on_state_create|nil
     if (!lua_isnil(L_, -1)) {
         // store C function pointer in an internal variable
-        U_->on_state_create_func = lua_tocfunction(L_, -1); // L_: settings on_state_create
-        if (U_->on_state_create_func != nullptr) {
+        U_->onStateCreateFunc = lua_tocfunction(L_, -1); // L_: settings on_state_create
+        if (U_->onStateCreateFunc != nullptr) {
             // make sure the function doesn't have upvalues
             char const* upname = lua_getupvalue(L_, -1, 1); // L_: settings on_state_create upval?
             if (upname != nullptr) { // should be "" for C functions with upvalues if any
@@ -224,7 +224,7 @@ void initialize_on_state_create(Universe* U_, lua_State* L_)
224 lua_setfield(L_, -3, "on_state_create"); // L_: settings on_state_create 224 lua_setfield(L_, -3, "on_state_create"); // L_: settings on_state_create
225 } else { 225 } else {
226 // optim: store marker saying we have such a function in the config table 226 // optim: store marker saying we have such a function in the config table
227 U_->on_state_create_func = (lua_CFunction) initialize_on_state_create; 227 U_->onStateCreateFunc = reinterpret_cast<lua_CFunction>(initializeOnStateCreate);
228 } 228 }
229 } 229 }
230 lua_pop(L_, 1); // L_: settings 230 lua_pop(L_, 1); // L_: settings
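
A valid on_state_create entry must be either a plain C function without upvalues (the lua_getupvalue check above rejects closures) or a Lua function handled through the config-table marker. A hedged sketch of a conforming C callback; the module name and loader below are placeholders:

    // hypothetical callback, invoked in each new state right after the base libraries are opened
    static int my_on_state_create(lua_State* L)
    {
        luaL_requiref(L, "mymodule", luaopen_mymodule, 0); // luaopen_mymodule is assumed to exist
        lua_pop(L, 1); // luaL_requiref leaves the module on the stack
        return 0;
    }
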
@@ -240,17 +240,17 @@ lua_State* create_state(Universe* U_, lua_State* from_)
240 // for some reason, 64-bit LuaJIT does not support creating a state with lua_newstate... 240 // for some reason, 64-bit LuaJIT does not support creating a state with lua_newstate...
241 L = luaL_newstate(); 241 L = luaL_newstate();
242#else // LUAJIT_FLAVOR() == 64 242#else // LUAJIT_FLAVOR() == 64
243 if (U_->provide_allocator != nullptr) { // we have a function we can call to obtain an allocator 243 if (U_->provideAllocator != nullptr) { // we have a function we can call to obtain an allocator
244 lua_pushcclosure(from_, U_->provide_allocator, 0); 244 lua_pushcclosure(from_, U_->provideAllocator, 0);
245 lua_call(from_, 0, 1); 245 lua_call(from_, 0, 1);
246 { 246 {
247 AllocatorDefinition* const def{ lua_tofulluserdata<AllocatorDefinition>(from_, -1) }; 247 AllocatorDefinition* const def{ lua_tofulluserdata<AllocatorDefinition>(from_, -1) };
248 L = lua_newstate(def->m_allocF, def->m_allocUD); 248 L = lua_newstate(def->allocF, def->allocUD);
249 } 249 }
250 lua_pop(from_, 1); 250 lua_pop(from_, 1);
251 } else { 251 } else {
252 // reuse the allocator provided when the master state was created 252 // reuse the allocator provided when the master state was created
253 L = lua_newstate(U_->protected_allocator.m_allocF, U_->protected_allocator.m_allocUD); 253 L = lua_newstate(U_->protectedAllocator.allocF, U_->protectedAllocator.allocUD);
254 } 254 }
255#endif // LUAJIT_FLAVOR() == 64 255#endif // LUAJIT_FLAVOR() == 64
256 256
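
The provideAllocator branch above calls the stored C function with no arguments and expects one return value: a full userdata holding an AllocatorDefinition whose allocF/allocUD pair feeds lua_newstate. A sketch of a user-supplied provider, mirroring what luaG_provide_protected_allocator does in tools.cpp:

    // sketch: push an AllocatorDefinition describing this state's current allocator
    static int provide_my_allocator(lua_State* L)
    {
        AllocatorDefinition* const def{ new (L) AllocatorDefinition{} }; // full userdata, now on the stack
        def->initFrom(L); // capture the allocator already installed in L
        return 1;
    }
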
@@ -262,14 +262,14 @@ lua_State* create_state(Universe* U_, lua_State* from_)
262 262
263// ################################################################################################# 263// #################################################################################################
264 264
265void call_on_state_create(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_) 265void callOnStateCreate(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_)
266{ 266{
267 if (U_->on_state_create_func != nullptr) { 267 if (U_->onStateCreateFunc != nullptr) {
268 STACK_CHECK_START_REL(L_, 0); 268 STACK_CHECK_START_REL(L_, 0);
269 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END(U_))); 269 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "calling on_state_create()\n" INDENT_END(U_)));
270 if (U_->on_state_create_func != (lua_CFunction) initialize_on_state_create) { 270 if (U_->onStateCreateFunc != reinterpret_cast<lua_CFunction>(initializeOnStateCreate)) {
271 // C function: recreate a closure in the new state, bypassing the lookup scheme 271 // C function: recreate a closure in the new state, bypassing the lookup scheme
272 lua_pushcfunction(L_, U_->on_state_create_func); // on_state_create() 272 lua_pushcfunction(L_, U_->onStateCreateFunc); // on_state_create()
273 } else { // Lua function located in the config table, copied when we opened "lanes.core" 273 } else { // Lua function located in the config table, copied when we opened "lanes.core"
274 if (mode_ != LookupMode::LaneBody) { 274 if (mode_ != LookupMode::LaneBody) {
275 // if attempting to call in a keeper state, do nothing because the function doesn't exist there 275 // if attempting to call in a keeper state, do nothing because the function doesn't exist there
@@ -323,7 +323,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
323 STACK_CHECK(L, 0); 323 STACK_CHECK(L, 0);
324 324
325 // neither libs (not even 'base') nor special init func: we are done 325 // neither libs (not even 'base') nor special init func: we are done
326 if (libs_ == nullptr && U_->on_state_create_func == nullptr) { 326 if (libs_ == nullptr && U_->onStateCreateFunc == nullptr) {
327 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_newstate(nullptr)\n" INDENT_END(U_))); 327 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "luaG_newstate(nullptr)\n" INDENT_END(U_)));
328 return L; 328 return L;
329 } 329 }
@@ -384,7 +384,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
384 384
385 // call this after the base libraries are loaded and GC is restarted 385 // call this after the base libraries are loaded and GC is restarted
386 // will raise an error in from_ in case of problem 386 // will raise an error in from_ in case of problem
387 call_on_state_create(U_, L, from_, LookupMode::LaneBody); 387 callOnStateCreate(U_, L, from_, LookupMode::LaneBody);
388 388
389 STACK_CHECK(L, 0); 389 STACK_CHECK(L, 0);
390 // after all this, register everything we find in our name<->function database 390 // after all this, register everything we find in our name<->function database
@@ -398,7 +398,7 @@ lua_State* luaG_newstate(Universe* U_, SourceState from_, char const* libs_)
398 lua_pushnil(L); // L: {} nil 398 lua_pushnil(L); // L: {} nil
399 while (lua_next(L, -2)) { // L: {} k v 399 while (lua_next(L, -2)) { // L: {} k v
400 lua_getglobal(L, "print"); // L: {} k v print 400 lua_getglobal(L, "print"); // L: {} k v print
401 int const indent{ U_->debugspew_indent_depth.load(std::memory_order_relaxed) }; 401 int const indent{ U_->debugspewIndentDepth.load(std::memory_order_relaxed) };
402 lua_pushlstring(L, DebugSpewIndentScope::debugspew_indent, indent); // L: {} k v print " " 402 lua_pushlstring(L, DebugSpewIndentScope::debugspew_indent, indent); // L: {} k v print " "
403 lua_pushvalue(L, -4); // L: {} k v print " " k 403 lua_pushvalue(L, -4); // L: {} k v print " " k
404 lua_pushvalue(L, -4); // L: {} k v print " " k v 404 lua_pushvalue(L, -4); // L: {} k v print " " k v
diff --git a/src/state.h b/src/state.h
index 197e052..1b25736 100644
--- a/src/state.h
+++ b/src/state.h
@@ -15,5 +15,5 @@ void serialize_require(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_);
15 15
16// ################################################################################################# 16// #################################################################################################
17 17
18void initialize_on_state_create(Universe* U_, lua_State* L_); 18void initializeOnStateCreate(Universe* U_, lua_State* L_);
19void call_on_state_create(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_); 19void callOnStateCreate(Universe* U_, lua_State* L_, lua_State* from_, LookupMode mode_);
diff --git a/src/tools.cpp b/src/tools.cpp
index c4ce24f..0495561 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -101,7 +101,7 @@ extern "C" [[nodiscard]] static void* libc_lua_Alloc([[maybe_unused]] void* ud,
101{ 101{
102 Universe* const U{ universe_get(L_) }; 102 Universe* const U{ universe_get(L_) };
103 // push a new full userdata on the stack, giving access to the universe's protected allocator 103 // push a new full userdata on the stack, giving access to the universe's protected allocator
104 [[maybe_unused]] AllocatorDefinition* const def{ new (L_) AllocatorDefinition{ U->protected_allocator.makeDefinition() } }; 104 [[maybe_unused]] AllocatorDefinition* const def{ new (L_) AllocatorDefinition{ U->protectedAllocator.makeDefinition() } };
105 return 1; 105 return 1;
106} 106}
107 107
@@ -115,8 +115,8 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
115 lua_getfield(L_, -1, "allocator"); // L_: settings allocator|nil|"protected" 115 lua_getfield(L_, -1, "allocator"); // L_: settings allocator|nil|"protected"
116 if (!lua_isnil(L_, -1)) { 116 if (!lua_isnil(L_, -1)) {
117 // store C function pointer in an internal variable 117 // store C function pointer in an internal variable
118 U_->provide_allocator = lua_tocfunction(L_, -1); // L_: settings allocator 118 U_->provideAllocator = lua_tocfunction(L_, -1); // L_: settings allocator
119 if (U_->provide_allocator != nullptr) { 119 if (U_->provideAllocator != nullptr) {
120 // make sure the function doesn't have upvalues 120 // make sure the function doesn't have upvalues
121 char const* upname = lua_getupvalue(L_, -1, 1); // L_: settings allocator upval? 121 char const* upname = lua_getupvalue(L_, -1, 1); // L_: settings allocator upval?
122 if (upname != nullptr) { // lua_getupvalue returns "" (not nullptr) when a C function has upvalues 122 if (upname != nullptr) { // lua_getupvalue returns "" (not nullptr) when a C function has upvalues
@@ -129,14 +129,14 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
129 } else if (lua_type(L_, -1) == LUA_TSTRING) { // should be "protected" 129 } else if (lua_type(L_, -1) == LUA_TSTRING) { // should be "protected"
130 LUA_ASSERT(L_, strcmp(lua_tostring(L_, -1), "protected") == 0); 130 LUA_ASSERT(L_, strcmp(lua_tostring(L_, -1), "protected") == 0);
131 // capture the original allocator so it can be invoked from inside the mutex-protected wrapper 131 // capture the original allocator so it can be invoked from inside the mutex-protected wrapper
132 U_->protected_allocator.initFrom(L_); 132 U_->protectedAllocator.initFrom(L_);
133 U_->protected_allocator.installIn(L_); 133 U_->protectedAllocator.installIn(L_);
134 // before a state is created, this function will be called to obtain the allocator 134 // before a state is created, this function will be called to obtain the allocator
135 U_->provide_allocator = luaG_provide_protected_allocator; 135 U_->provideAllocator = luaG_provide_protected_allocator;
136 } 136 }
137 } else { 137 } else {
138 // just grab whatever allocator was provided to lua_newstate 138 // just grab whatever allocator was provided to lua_newstate
139 U_->protected_allocator.initFrom(L_); 139 U_->protectedAllocator.initFrom(L_);
140 } 140 }
141 lua_pop(L_, 1); // L_: settings 141 lua_pop(L_, 1); // L_: settings
142 STACK_CHECK(L_, 1); 142 STACK_CHECK(L_, 1);
@@ -145,13 +145,13 @@ void initialize_allocator_function(Universe* U_, lua_State* L_)
145 { 145 {
146 char const* allocator = lua_tostring(L_, -1); 146 char const* allocator = lua_tostring(L_, -1);
147 if (strcmp(allocator, "libc") == 0) { 147 if (strcmp(allocator, "libc") == 0) {
148 U_->internal_allocator = AllocatorDefinition{ libc_lua_Alloc, nullptr }; 148 U_->internalAllocator = AllocatorDefinition{ libc_lua_Alloc, nullptr };
149 } else if (U_->provide_allocator == luaG_provide_protected_allocator) { 149 } else if (U_->provideAllocator == luaG_provide_protected_allocator) {
150 // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case. 150 // user wants mutex protection on the state's allocator. Use protection for our own allocations too, just in case.
151 U_->internal_allocator = U_->protected_allocator.makeDefinition(); 151 U_->internalAllocator = U_->protectedAllocator.makeDefinition();
152 } else { 152 } else {
153 // no protection required, just use whatever we have as-is. 153 // no protection required, just use whatever we have as-is.
154 U_->internal_allocator = U_->protected_allocator; 154 U_->internalAllocator = U_->protectedAllocator;
155 } 155 }
156 } 156 }
157 lua_pop(L_, 1); // L_: settings 157 lua_pop(L_, 1); // L_: settings
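
libc_lua_Alloc (whose declaration opens this file's hunks) is presumably the canonical realloc/free-based lua_Alloc from the Lua manual; a sketch under that assumption:

    // sketch: allocator that bypasses Lua's default and goes straight to the C runtime
    extern "C" [[nodiscard]] static void* libc_lua_Alloc([[maybe_unused]] void* ud, void* ptr_, [[maybe_unused]] size_t osize_, size_t nsize_)
    {
        if (nsize_ == 0) {
            free(ptr_);
            return nullptr;
        }
        return realloc(ptr_, nsize_);
    }
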
@@ -314,14 +314,13 @@ static void update_lookup_entry(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L
314 314
315// ################################################################################################# 315// #################################################################################################
316 316
317static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_, int ctxBase_, int i_, int depth_) 317static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L_, int dbIdx_, int i_, int depth_)
318{ 318{
319 // slot 2 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot i_ 319 // slot dbIdx_ contains the lookup database table
320 int const fqn = ctxBase_ + 1; 320 // slot dbIdx_ + 1 contains a table that, when concatenated, produces the fully qualified name of scanned elements in the table provided at slot i_
321 // slot 3 contains a cache that stores all already visited tables to avoid infinite recursion loops 321 int const fqn{ dbIdx_ + 1 };
322 int const cache = ctxBase_ + 2; 322 // slot dbIdx_ + 2 contains a cache that stores all already visited tables to avoid infinite recursion loops
323 // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search) 323 int const cache{ dbIdx_ + 2 };
324 int const breadth_first_cache = lua_gettop(L_) + 1;
325 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END(U_))); 324 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "populate_func_lookup_table_recur()\n" INDENT_END(U_)));
326 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ }); 325 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ });
327 326
@@ -352,9 +351,9 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_)
352 lua_rawset(L_, cache); // L_: ... {i_} 351 lua_rawset(L_, cache); // L_: ... {i_}
353 STACK_CHECK(L_, 0); 352 STACK_CHECK(L_, 0);
354 353
355 // this table is at breadth_first_cache index 354 // we need to remember subtables to process them after functions encountered at the current depth (breadth-first search)
356 lua_newtable(L_); // L_: ... {i_} {bfc} 355 lua_newtable(L_); // L_: ... {i_} {bfc}
357 LUA_ASSERT(L_, lua_gettop(L_) == breadth_first_cache); 356 int const breadthFirstCache{ lua_gettop(L_) };
358 // iterate over all entries in the processed table 357 // iterate over all entries in the processed table
359 lua_pushnil(L_); // L_: ... {i_} {bfc} nil 358 lua_pushnil(L_); // L_: ... {i_} {bfc} nil
360 while (lua_next(L_, i_) != 0) { // L_: ... {i_} {bfc} k v 359 while (lua_next(L_, i_) != 0) { // L_: ... {i_} {bfc} k v
@@ -373,13 +372,13 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_)
373 // store the table in the breadth-first cache 372 // store the table in the breadth-first cache
374 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k 373 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k
375 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k {} 374 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k {}
376 lua_rawset(L_, breadth_first_cache); // L_: ... {i_} {bfc} k {} 375 lua_rawset(L_, breadthFirstCache); // L_: ... {i_} {bfc} k {}
377 // generate a name, and if we already had one name, keep whichever is the shorter 376 // generate a name, and if we already had one name, keep whichever is the shorter
378 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, depth_); // L_: ... {i_} {bfc} k 377 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, depth_); // L_: ... {i_} {bfc} k
379 } else if (lua_isfunction(L_, -1) && (luaG_getfuncsubtype(L_, -1) != FuncSubType::Bytecode)) { 378 } else if (lua_isfunction(L_, -1) && (luaG_getfuncsubtype(L_, -1) != FuncSubType::Bytecode)) {
380 // generate a name, and if we already had one name, keep whichever is the shorter 379 // generate a name, and if we already had one name, keep whichever is the shorter
381 // this pops the function from the stack 380 // this pops the function from the stack
382 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, depth_); // L_: ... {i_} {bfc} k 381 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, depth_); // L_: ... {i_} {bfc} k
383 } else { 382 } else {
384 lua_pop(L_, 1); // L_: ... {i_} {bfc} k 383 lua_pop(L_, 1); // L_: ... {i_} {bfc} k
385 } 384 }
@@ -388,7 +387,7 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_)
388 // now process the tables we encountered at that depth 387 // now process the tables we encountered at that depth
389 ++depth_; 388 ++depth_;
390 lua_pushnil(L_); // L_: ... {i_} {bfc} nil 389 lua_pushnil(L_); // L_: ... {i_} {bfc} nil
391 while (lua_next(L_, breadth_first_cache) != 0) { // L_: ... {i_} {bfc} k {} 390 while (lua_next(L_, breadthFirstCache) != 0) { // L_: ... {i_} {bfc} k {}
392 DEBUGSPEW_CODE(char const* key = (lua_type(L_, -2) == LUA_TSTRING) ? lua_tostring(L_, -2) : "not a string"); 391 DEBUGSPEW_CODE(char const* key = (lua_type(L_, -2) == LUA_TSTRING) ? lua_tostring(L_, -2) : "not a string");
393 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "table '%s'\n" INDENT_END(U_), key)); 392 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "table '%s'\n" INDENT_END(U_), key));
394 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ }); 393 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U_ });
@@ -408,7 +407,7 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_)
408 // push table name in fqn stack (note that concatenation will crash if the name is not a string!) 407 // push table name in fqn stack (note that concatenation will crash if the name is not a string!)
409 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k 408 lua_pushvalue(L_, -2); // L_: ... {i_} {bfc} k {} k
410 lua_rawseti(L_, fqn, depth_); // L_: ... {i_} {bfc} k {} 409 lua_rawseti(L_, fqn, depth_); // L_: ... {i_} {bfc} k {}
411 populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U_) L_, ctxBase_, lua_gettop(L_), depth_); 410 populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U_) L_, dbIdx_, lua_gettop(L_), depth_);
412 lua_pop(L_, 1); // L_: ... {i_} {bfc} k 411 lua_pop(L_, 1); // L_: ... {i_} {bfc} k
413 STACK_CHECK(L_, 2); 412 STACK_CHECK(L_, 2);
414 } 413 }
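
update_lookup_entry (not shown in this hunk) maintains the database's central invariant: each object is stored under its fully qualified name and the name under the object, so translation works in both directions. A minimal sketch of that two-way write, assuming the database table sits at dbIdx:

    // sketch: record a "fully.qualified.name" <-> object equivalence in the db at dbIdx
    static void register_two_way(lua_State* L, int dbIdx, char const* fqn, int objIdx)
    {
        dbIdx = lua_absindex(L, dbIdx);
        objIdx = lua_absindex(L, objIdx);
        lua_pushstring(L, fqn);   // ... fqn
        lua_pushvalue(L, objIdx); // ... fqn obj
        lua_rawset(L, dbIdx);     // db[fqn] = obj
        lua_pushvalue(L, objIdx); // ... obj
        lua_pushstring(L, fqn);   // ... obj fqn
        lua_rawset(L, dbIdx);     // db[obj] = fqn
    }
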
@@ -427,15 +426,14 @@ static void populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(Universe* U_)
427// create a "fully.qualified.name" <-> function equivalence database 426// create a "fully.qualified.name" <-> function equivalence database
428void populate_func_lookup_table(lua_State* L_, int i_, char const* name_) 427void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
429{ 428{
430 int const ctx_base = lua_gettop(L_) + 1;
431 int const in_base = lua_absindex(L_, i_); 429 int const in_base = lua_absindex(L_, i_);
432 int start_depth = 0;
433 DEBUGSPEW_CODE(Universe* U = universe_get(L_)); 430 DEBUGSPEW_CODE(Universe* U = universe_get(L_));
434 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END(U), L_, name_ ? name_ : "nullptr")); 431 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: populate_func_lookup_table('%s')\n" INDENT_END(U), L_, name_ ? name_ : "nullptr"));
435 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U }); 432 DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U });
436 STACK_GROW(L_, 3); 433 STACK_GROW(L_, 3);
437 STACK_CHECK_START_REL(L_, 0); 434 STACK_CHECK_START_REL(L_, 0);
438 kLookupRegKey.pushValue(L_); // L_: {} 435 kLookupRegKey.pushValue(L_); // L_: {}
436 int const dbIdx{ lua_gettop(L_) };
439 STACK_CHECK(L_, 1); 437 STACK_CHECK(L_, 1);
440 LUA_ASSERT(L_, lua_istable(L_, -1)); 438 LUA_ASSERT(L_, lua_istable(L_, -1));
441 if (lua_type(L_, in_base) == LUA_TFUNCTION) { // for example when a module is a simple function 439 if (lua_type(L_, in_base) == LUA_TFUNCTION) { // for example when a module is a simple function
@@ -449,15 +447,15 @@ void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
449 lua_pop(L_, 1); // L_: 447 lua_pop(L_, 1); // L_:
450 } else if (lua_type(L_, in_base) == LUA_TTABLE) { 448 } else if (lua_type(L_, in_base) == LUA_TTABLE) {
451 lua_newtable(L_); // L_: {} {fqn} 449 lua_newtable(L_); // L_: {} {fqn}
450 int startDepth{ 0 };
452 if (name_) { 451 if (name_) {
453 STACK_CHECK(L_, 2); 452 STACK_CHECK(L_, 2);
454 lua_pushstring(L_, name_); // L_: {} {fqn} "name" 453 lua_pushstring(L_, name_); // L_: {} {fqn} "name"
455 // generate a name, and if we already had one name, keep whichever is the shorter 454 // generate a name, and if we already had one name, keep whichever is the shorter
456 lua_pushvalue(L_, in_base); // L_: {} {fqn} "name" t 455 lua_pushvalue(L_, in_base); // L_: {} {fqn} "name" t
457 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U) L_, ctx_base, start_depth); // L_: {} {fqn} "name" 456 update_lookup_entry(DEBUGSPEW_PARAM_COMMA(U) L_, dbIdx, startDepth); // L_: {} {fqn} "name"
458 // don't forget to store the name at the bottom of the fqn stack 457 // don't forget to store the name at the bottom of the fqn stack
459 ++start_depth; 458 lua_rawseti(L_, -2, ++startDepth); // L_: {} {fqn}
460 lua_rawseti(L_, -2, start_depth); // L_: {} {fqn}
461 STACK_CHECK(L_, 2); 459 STACK_CHECK(L_, 2);
462 } 460 }
463 // retrieve the cache, create it if we haven't done it yet 461 // retrieve the cache, create it if we haven't done it yet
@@ -469,8 +467,8 @@ void populate_func_lookup_table(lua_State* L_, int i_, char const* name_)
469 STACK_CHECK(L_, 3); 467 STACK_CHECK(L_, 3);
470 } 468 }
471 // process everything we find in that table, filling in lookup data for all functions and tables we see there 469 // process everything we find in that table, filling in lookup data for all functions and tables we see there
472 populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U) L_, ctx_base, in_base, start_depth); 470 populate_func_lookup_table_recur(DEBUGSPEW_PARAM_COMMA(U) L_, dbIdx, in_base, startDepth);
473 lua_pop(L_, 3); 471 lua_pop(L_, 3); // L_:
474 } else { 472 } else {
475 lua_pop(L_, 1); // L_: 473 lua_pop(L_, 1); // L_:
476 raise_luaL_error(L_, "unsupported module type %s", lua_typename(L_, lua_type(L_, in_base))); 474 raise_luaL_error(L_, "unsupported module type %s", lua_typename(L_, lua_type(L_, in_base)));
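
A typical call site, sketched with a placeholder module: load the table, then register its contents so lanes can later transfer those functions by qualified name.

    luaL_requiref(L, "mymodule", luaopen_mymodule, 0); // hypothetical module and loader
    populate_func_lookup_table(L, -1, "mymodule");     // walk the table, fill the lookup db
    lua_pop(L, 1);
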
@@ -502,7 +500,7 @@ static constexpr RegistryUniqueKey kMtIdRegKey{ 0xA8895DCF4EC3FE3Cull };
502 STACK_CHECK(L_, 1); 500 STACK_CHECK(L_, 1);
503 501
504 if (id == 0) { 502 if (id == 0) {
505 id = U_->next_mt_id.fetch_add(1, std::memory_order_relaxed); 503 id = U_->nextMetatableId.fetch_add(1, std::memory_order_relaxed);
506 504
507 // Create two-way references: id_uint <-> table 505 // Create two-way references: id_uint <-> table
508 lua_pushvalue(L_, idx_); // L_: ... _R[kMtIdRegKey] {mt} 506 lua_pushvalue(L_, idx_); // L_: ... _R[kMtIdRegKey] {mt}
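
nextMetatableId.fetch_add() hands out process-wide unique metatable ids without a lock; relaxed ordering suffices because only uniqueness matters, not ordering relative to other memory operations:

    std::atomic<lua_Integer> nextMetatableId{ 1 };
    lua_Integer const id{ nextMetatableId.fetch_add(1, std::memory_order_relaxed) }; // unique even under contention
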
diff --git a/src/universe.cpp b/src/universe.cpp
index 4dce427..bf64560 100644
--- a/src/universe.cpp
+++ b/src/universe.cpp
@@ -60,7 +60,7 @@ Universe::Universe()
60 // the launched threads (even -2). 60 // the launched threads (even -2).
61 // 61 //
62#ifdef LINUX_SCHED_RR 62#ifdef LINUX_SCHED_RR
63 if (m_sudo) { 63 if (sudo) {
64 struct sched_param sp; 64 struct sched_param sp;
65 sp.sched_priority = _PRIO_0; 65 sp.sched_priority = _PRIO_0;
66 PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp)); 66 PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp));
diff --git a/src/universe.h b/src/universe.h
index c6c9c03..b2107af 100644
--- a/src/universe.h
+++ b/src/universe.h
@@ -30,8 +30,8 @@ class Lane;
30class AllocatorDefinition 30class AllocatorDefinition
31{ 31{
32 public: 32 public:
33 lua_Alloc m_allocF{ nullptr }; 33 lua_Alloc allocF{ nullptr };
34 void* m_allocUD{ nullptr }; 34 void* allocUD{ nullptr };
35 35
36 [[nodiscard]] static void* operator new(size_t size_) noexcept = delete; // can't create one outside of a Lua state 36 [[nodiscard]] static void* operator new(size_t size_) noexcept = delete; // can't create one outside of a Lua state
37 [[nodiscard]] static void* operator new(size_t size_, lua_State* L_) noexcept { return lua_newuserdatauv(L_, size_, 0); } 37 [[nodiscard]] static void* operator new(size_t size_, lua_State* L_) noexcept { return lua_newuserdatauv(L_, size_, 0); }
@@ -40,8 +40,8 @@ class AllocatorDefinition
40 static void operator delete([[maybe_unused]] void* p_, lua_State* L_) { LUA_ASSERT(L_, !"should never be called"); } 40 static void operator delete([[maybe_unused]] void* p_, lua_State* L_) { LUA_ASSERT(L_, !"should never be called"); }
41 41
42 AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept 42 AllocatorDefinition(lua_Alloc allocF_, void* allocUD_) noexcept
43 : m_allocF{ allocF_ } 43 : allocF{ allocF_ }
44 , m_allocUD{ allocUD_ } 44 , allocUD{ allocUD_ }
45 { 45 {
46 } 46 }
47 AllocatorDefinition() = default; 47 AllocatorDefinition() = default;
@@ -52,22 +52,22 @@ class AllocatorDefinition
52 52
53 void initFrom(lua_State* L_) 53 void initFrom(lua_State* L_)
54 { 54 {
55 m_allocF = lua_getallocf(L_, &m_allocUD); 55 allocF = lua_getallocf(L_, &allocUD);
56 } 56 }
57 57
58 void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_) 58 void* lua_alloc(void* ptr_, size_t osize_, size_t nsize_)
59 { 59 {
60 m_allocF(m_allocUD, ptr_, osize_, nsize_); 60 return allocF(allocUD, ptr_, osize_, nsize_);
61 } 61 }
62 62
63 void* alloc(size_t nsize_) 63 void* alloc(size_t nsize_)
64 { 64 {
65 return m_allocF(m_allocUD, nullptr, 0, nsize_); 65 return allocF(allocUD, nullptr, 0, nsize_);
66 } 66 }
67 67
68 void free(void* ptr_, size_t osize_) 68 void free(void* ptr_, size_t osize_)
69 { 69 {
70 std::ignore = m_allocF(m_allocUD, ptr_, osize_, 0); 70 std::ignore = allocF(allocUD, ptr_, osize_, 0);
71 } 71 }
72}; 72};
73 73
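
With operator new(size_t) deleted and operator new(size_t, lua_State*) forwarding to lua_newuserdatauv, an AllocatorDefinition can only ever live inside a Lua full userdata. A sketch of constructing one on the stack of a state L:

    void* ud{ nullptr };
    lua_Alloc const allocF{ lua_getallocf(L, &ud) }; // the allocator currently installed in L
    AllocatorDefinition* const def{ new (L) AllocatorDefinition{ allocF, ud } }; // userdata is now on the stack
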
@@ -78,13 +78,13 @@ class ProtectedAllocator
78: public AllocatorDefinition 78: public AllocatorDefinition
79{ 79{
80 private: 80 private:
81 std::mutex m_lock; 81 std::mutex mutex;
82 82
83 [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_) 83 [[nodiscard]] static void* protected_lua_Alloc(void* ud_, void* ptr_, size_t osize_, size_t nsize_)
84 { 84 {
85 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) }; 85 ProtectedAllocator* const allocator{ static_cast<ProtectedAllocator*>(ud_) };
86 std::lock_guard<std::mutex> guard{ allocator->m_lock }; 86 std::lock_guard<std::mutex> guard{ allocator->mutex };
87 return allocator->m_allocF(allocator->m_allocUD, ptr_, osize_, nsize_); 87 return allocator->allocF(allocator->allocUD, ptr_, osize_, nsize_);
88 } 88 }
89 89
90 public: 90 public:
@@ -105,9 +105,9 @@ class ProtectedAllocator
105 void removeFrom(lua_State* L_) 105 void removeFrom(lua_State* L_)
106 { 106 {
107 // remove the protected allocator, if any 107 // remove the protected allocator, if any
108 if (m_allocF != nullptr) { 108 if (allocF != nullptr) {
109 // install the non-protected allocator 109 // install the non-protected allocator
110 lua_setallocf(L_, m_allocF, m_allocUD); 110 lua_setallocf(L_, allocF, allocUD);
111 } 111 }
112 } 112 }
113}; 113};
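
installIn itself is outside this hunk; presumably it reroutes all of the state's allocations through the mutex-guarded protected_lua_Alloc, passing the allocator object as the opaque pointer. A sketch under that assumption:

    void installIn(lua_State* L_)
    {
        // after this, every allocation in L_ serializes on this->mutex
        lua_setallocf(L_, protected_lua_Alloc, this);
    }
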
@@ -121,9 +121,9 @@ class Universe
121 public: 121 public:
122#ifdef PLATFORM_LINUX 122#ifdef PLATFORM_LINUX
123 // Linux needs to check whether it's been run as root 123 // Linux needs to check whether it's been run as root
124 bool const m_sudo{ geteuid() == 0 }; 124 bool const sudo{ geteuid() == 0 };
125#else 125#else
126 bool const m_sudo{ false }; 126 bool const sudo{ false };
127#endif // PLATFORM_LINUX 127#endif // PLATFORM_LINUX
128 128
129 // for verbose errors 129 // for verbose errors
@@ -132,44 +132,44 @@ class Universe
132 bool demoteFullUserdata{ false }; 132 bool demoteFullUserdata{ false };
133 133
134 // before a state is created, this function will be called to obtain the allocator 134 // before a state is created, this function will be called to obtain the allocator
135 lua_CFunction provide_allocator{ nullptr }; 135 lua_CFunction provideAllocator{ nullptr };
136 136
137 // after a state is created, this function will be called right after the base libraries are loaded 137 // after a state is created, this function will be called right after the base libraries are loaded
138 lua_CFunction on_state_create_func{ nullptr }; 138 lua_CFunction onStateCreateFunc{ nullptr };
139 139
140 // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex 140 // if allocator="protected" is found in the configuration settings, a wrapper allocator will protect all allocator calls with a mutex
141 // contains a mutex and the original allocator definition 141 // contains a mutex and the original allocator definition
142 ProtectedAllocator protected_allocator; 142 ProtectedAllocator protectedAllocator;
143 143
144 AllocatorDefinition internal_allocator; 144 AllocatorDefinition internalAllocator;
145 145
146 Keepers* keepers{ nullptr }; 146 Keepers* keepers{ nullptr };
147 147
148 // Initialized by 'init_once_LOCKED()': the deep userdata Linda object 148 // Initialized by 'init_once_LOCKED()': the deep userdata Linda object
149 // used for timers (each lane will get a proxy to this) 149 // used for timers (each lane will get a proxy to this)
150 DeepPrelude* timer_deep{ nullptr }; 150 DeepPrelude* timerLinda{ nullptr };
151 151
152#if HAVE_LANE_TRACKING() 152#if HAVE_LANE_TRACKING()
153 std::mutex tracking_cs; 153 std::mutex trackingMutex;
154 Lane* volatile tracking_first{ nullptr }; // will change to TRACKING_END if we want to activate tracking 154 Lane* volatile trackingFirst{ nullptr }; // will change to TRACKING_END if we want to activate tracking
155#endif // HAVE_LANE_TRACKING() 155#endif // HAVE_LANE_TRACKING()
156 156
157 std::mutex selfdestruct_cs; 157 std::mutex selfdestructMutex;
158 158
159 // require() serialization 159 // require() serialization
160 std::recursive_mutex require_cs; 160 std::recursive_mutex requireMutex;
161 161
162 // metatable unique identifiers 162 // metatable unique identifiers
163 std::atomic<lua_Integer> next_mt_id{ 1 }; 163 std::atomic<lua_Integer> nextMetatableId{ 1 };
164 164
165#if USE_DEBUG_SPEW() 165#if USE_DEBUG_SPEW()
166 std::atomic<int> debugspew_indent_depth{ 0 }; 166 std::atomic<int> debugspewIndentDepth{ 0 };
167#endif // USE_DEBUG_SPEW() 167#endif // USE_DEBUG_SPEW()
168 168
169 Lane* volatile selfdestruct_first{ nullptr }; 169 Lane* volatile selfdestructFirst{ nullptr };
170 // After a lane has removed itself from the chain, it still performs some processing. 170 // After a lane has removed itself from the chain, it still performs some processing.
171 // The terminal deinit sequence should wait for all such processing to terminate before force-killing threads 171 // The terminal deinit sequence should wait for all such processing to terminate before force-killing threads
172 std::atomic<int> selfdestructing_count{ 0 }; 172 std::atomic<int> selfdestructingCount{ 0 };
173 173
174 Universe(); 174 Universe();
175 ~Universe() = default; 175 ~Universe() = default;
@@ -201,13 +201,13 @@ class DebugSpewIndentScope
201 : U{ U_ } 201 : U{ U_ }
202 { 202 {
203 if (U) 203 if (U)
204 U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed); 204 U->debugspewIndentDepth.fetch_add(1, std::memory_order_relaxed);
205 } 205 }
206 206
207 ~DebugSpewIndentScope() 207 ~DebugSpewIndentScope()
208 { 208 {
209 if (U) 209 if (U)
210 U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed); 210 U->debugspewIndentDepth.fetch_sub(1, std::memory_order_relaxed);
211 } 211 }
212}; 212};
213#endif // USE_DEBUG_SPEW() 213#endif // USE_DEBUG_SPEW()
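
Usage pattern, as seen throughout the diff: constructing a DebugSpewIndentScope bumps the shared indent depth, and the destructor restores it on every exit path, so nested spew stays aligned even across early returns and errors:

    {
        DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "outer\n" INDENT_END(U)));
        DEBUGSPEW_CODE(DebugSpewIndentScope scope{ U }); // +1 indent for the rest of this block
        DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "nested\n" INDENT_END(U)));
    } // scope destructor: indent returns to its previous depth
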