author    | Benoit Germain <benoit.germain@ubisoft.com> | 2024-05-02 09:05:36 +0200
committer | Benoit Germain <benoit.germain@ubisoft.com> | 2024-05-02 09:05:36 +0200
commit    | 8e64f794f08cb3e4f930df5bb17c3a7061516cca (patch)
tree      | 5f4475a38af6a9458ab954fbd03df769ef63d59c /src
parent    | 261a42021e44e1d3c3cfb3fc6576d3d269241c93 (diff)
Progressively applying the coding rules
Diffstat (limited to 'src')
-rw-r--r-- | src/cancel.cpp      | 12
-rw-r--r-- | src/deep.cpp        | 16
-rw-r--r-- | src/deep.h          |  8
-rw-r--r-- | src/lanes.cpp       | 66
-rw-r--r-- | src/lanes_private.h | 12
-rw-r--r-- | src/linda.cpp       | 28
-rw-r--r-- | src/tools.cpp       | 32
-rw-r--r-- | src/uniquekey.h     | 14
8 files changed, 94 insertions, 94 deletions
diff --git a/src/cancel.cpp b/src/cancel.cpp
index 9887cba..ed450f0 100644
--- a/src/cancel.cpp
+++ b/src/cancel.cpp
@@ -112,8 +112,8 @@ LUAG_FUNC(cancel_test) | |||
112 | lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop | 112 | lane_->cancel_request = CancelRequest::Soft; // it's now signaled to stop |
113 | // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own | 113 | // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own |
114 | if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired | 114 | if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired |
115 | std::condition_variable* const waiting_on{ lane_->m_waiting_on }; | 115 | std::condition_variable* const waiting_on{ lane_->waiting_on }; |
116 | if (lane_->m_status == Lane::Waiting && waiting_on != nullptr) { | 116 | if (lane_->status == Lane::Waiting && waiting_on != nullptr) { |
117 | waiting_on->notify_all(); | 117 | waiting_on->notify_all(); |
118 | } | 118 | } |
119 | } | 119 | } |
@@ -126,10 +126,10 @@ LUAG_FUNC(cancel_test) | |||
126 | [[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wakeLane_) | 126 | [[nodiscard]] static CancelResult thread_cancel_hard(Lane* lane_, lua_Duration duration_, bool wakeLane_) |
127 | { | 127 | { |
128 | lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop | 128 | lane_->cancel_request = CancelRequest::Hard; // it's now signaled to stop |
129 | // lane_->m_thread.get_stop_source().request_stop(); | 129 | // lane_->thread.get_stop_source().request_stop(); |
130 | if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired | 130 | if (wakeLane_) { // wake the thread so that execution returns from any pending linda operation if desired |
131 | std::condition_variable* waiting_on = lane_->m_waiting_on; | 131 | std::condition_variable* waiting_on = lane_->waiting_on; |
132 | if (lane_->m_status == Lane::Waiting && waiting_on != nullptr) { | 132 | if (lane_->status == Lane::Waiting && waiting_on != nullptr) { |
133 | waiting_on->notify_all(); | 133 | waiting_on->notify_all(); |
134 | } | 134 | } |
135 | } | 135 | } |
@@ -144,7 +144,7 @@ CancelResult thread_cancel(Lane* lane_, CancelOp op_, int hookCount_, lua_Durati | |||
144 | { | 144 | { |
145 | // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here | 145 | // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here |
146 | // We can read 'lane_->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) | 146 | // We can read 'lane_->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN) |
147 | if (lane_->m_status >= Lane::Done) { | 147 | if (lane_->status >= Lane::Done) { |
148 | // say "ok" by default, including when lane is already done | 148 | // say "ok" by default, including when lane is already done |
149 | return CancelResult::Cancelled; | 149 | return CancelResult::Cancelled; |
150 | } | 150 | } |
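Both cancel paths above share the same wake-up idiom: the lane publishes the `std::condition_variable` it is about to block on in `waiting_on`, and the canceller calls `notify_all()` on it when the lane is in the `Waiting` state. The fragment below is a sketch of that idiom outside of Lanes; every name in it is invented for the example, and it uses `std::atomic` where the Lane class uses a `volatile` member.

```cpp
// Illustrative sketch only, not Lanes code: a worker publishes the condition
// variable it blocks on so another thread can request cancellation and wake it.
#include <atomic>
#include <condition_variable>
#include <mutex>

struct Waiter
{
    std::mutex m;
    std::condition_variable cv;
    bool cancel_requested{ false };                              // guarded by m
    std::atomic<std::condition_variable*> waiting_on{ nullptr }; // published for the canceller

    void wait_for_work()
    {
        std::unique_lock lock{ m };
        waiting_on = &cv;                                        // publish before blocking
        cv.wait(lock, [this] { return cancel_requested; });
        waiting_on = nullptr;                                    // unpublish once awake
    }

    void cancel()
    {
        {
            std::lock_guard lock{ m };
            cancel_requested = true;                             // set the flag under the same mutex
        }
        if (std::condition_variable* const cv_{ waiting_on.load() }) {
            cv_->notify_all();                                   // wake a pending wait, if any
        }
    }
};
```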
diff --git a/src/deep.cpp b/src/deep.cpp
index 6358745..a824f72 100644
--- a/src/deep.cpp
+++ b/src/deep.cpp
@@ -111,7 +111,7 @@ static void LookupDeep(lua_State* L_) | |||
111 | if (mode_ == LookupMode::FromKeeper) { | 111 | if (mode_ == LookupMode::FromKeeper) { |
112 | DeepPrelude* const proxy{ *lua_tofulluserdata<DeepPrelude*>(L_, index_) }; | 112 | DeepPrelude* const proxy{ *lua_tofulluserdata<DeepPrelude*>(L_, index_) }; |
113 | // we can (and must) cast and fetch the internally stored factory | 113 | // we can (and must) cast and fetch the internally stored factory |
114 | return &proxy->m_factory; | 114 | return &proxy->factory; |
115 | } else { | 115 | } else { |
116 | // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/factory database | 116 | // essentially we are making sure that the metatable of the object we want to copy is stored in our metatable/factory database |
117 | // it is the only way to ensure that the userdata is indeed a deep userdata! | 117 | // it is the only way to ensure that the userdata is indeed a deep userdata! |
@@ -138,7 +138,7 @@ static void LookupDeep(lua_State* L_) | |||
138 | void DeepFactory::DeleteDeepObject(lua_State* L_, DeepPrelude* o_) | 138 | void DeepFactory::DeleteDeepObject(lua_State* L_, DeepPrelude* o_) |
139 | { | 139 | { |
140 | STACK_CHECK_START_REL(L_, 0); | 140 | STACK_CHECK_START_REL(L_, 0); |
141 | o_->m_factory.deleteDeepObjectInternal(L_, o_); | 141 | o_->factory.deleteDeepObjectInternal(L_, o_); |
142 | STACK_CHECK(L_, 0); | 142 | STACK_CHECK(L_, 0); |
143 | } | 143 | } |
144 | 144 | ||
@@ -157,7 +157,7 @@ void DeepFactory::DeleteDeepObject(lua_State* L_, DeepPrelude* o_) | |||
157 | 157 | ||
158 | // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded | 158 | // can work without a universe if creating a deep userdata from some external C module when Lanes isn't loaded |
159 | // in that case, we are not multithreaded and locking isn't necessary anyway | 159 | // in that case, we are not multithreaded and locking isn't necessary anyway |
160 | bool const isLastRef{ p->m_refcount.fetch_sub(1, std::memory_order_relaxed) == 1 }; | 160 | bool const isLastRef{ p->refcount.fetch_sub(1, std::memory_order_relaxed) == 1 }; |
161 | 161 | ||
162 | if (isLastRef) { | 162 | if (isLastRef) { |
163 | // retrieve wrapped __gc | 163 | // retrieve wrapped __gc |
@@ -205,10 +205,10 @@ char const* DeepFactory::PushDeepProxy(DestState L_, DeepPrelude* prelude_, int | |||
205 | DeepPrelude** const proxy{ lua_newuserdatauv<DeepPrelude*>(L_, nuv_) }; // L_: DPC proxy | 205 | DeepPrelude** const proxy{ lua_newuserdatauv<DeepPrelude*>(L_, nuv_) }; // L_: DPC proxy |
206 | LUA_ASSERT(L_, proxy); | 206 | LUA_ASSERT(L_, proxy); |
207 | *proxy = prelude_; | 207 | *proxy = prelude_; |
208 | prelude_->m_refcount.fetch_add(1, std::memory_order_relaxed); // one more proxy pointing to this deep data | 208 | prelude_->refcount.fetch_add(1, std::memory_order_relaxed); // one more proxy pointing to this deep data |
209 | 209 | ||
210 | // Get/create metatable for 'factory' (in this state) | 210 | // Get/create metatable for 'factory' (in this state) |
211 | DeepFactory& factory = prelude_->m_factory; | 211 | DeepFactory& factory = prelude_->factory; |
212 | lua_pushlightuserdata(L_, std::bit_cast<void*>(&factory)); // L_: DPC proxy factory | 212 | lua_pushlightuserdata(L_, std::bit_cast<void*>(&factory)); // L_: DPC proxy factory |
213 | LookupDeep(L_); // L_: DPC proxy metatable|nil | 213 | LookupDeep(L_); // L_: DPC proxy metatable|nil |
214 | 214 | ||
@@ -323,14 +323,14 @@ int DeepFactory::pushDeepUserdata(DestState L_, int nuv_) const | |||
323 | raise_luaL_error(L_, "DeepFactory::newDeepObjectInternal failed to create deep userdata (out of memory)"); | 323 | raise_luaL_error(L_, "DeepFactory::newDeepObjectInternal failed to create deep userdata (out of memory)"); |
324 | } | 324 | } |
325 | 325 | ||
326 | if (prelude->m_magic != kDeepVersion) { | 326 | if (prelude->magic != kDeepVersion) { |
327 | // just in case, don't leak the newly allocated deep userdata object | 327 | // just in case, don't leak the newly allocated deep userdata object |
328 | deleteDeepObjectInternal(L_, prelude); | 328 | deleteDeepObjectInternal(L_, prelude); |
329 | raise_luaL_error(L_, "Bad Deep Factory: kDeepVersion is incorrect, rebuild your implementation with the latest deep implementation"); | 329 | raise_luaL_error(L_, "Bad Deep Factory: kDeepVersion is incorrect, rebuild your implementation with the latest deep implementation"); |
330 | } | 330 | } |
331 | 331 | ||
332 | LUA_ASSERT(L_, prelude->m_refcount.load(std::memory_order_relaxed) == 0); // 'DeepFactory::PushDeepProxy' will lift it to 1 | 332 | LUA_ASSERT(L_, prelude->refcount.load(std::memory_order_relaxed) == 0); // 'DeepFactory::PushDeepProxy' will lift it to 1 |
333 | LUA_ASSERT(L_, &prelude->m_factory == this); | 333 | LUA_ASSERT(L_, &prelude->factory == this); |
334 | 334 | ||
335 | if (lua_gettop(L_) - oldtop != 0) { | 335 | if (lua_gettop(L_) - oldtop != 0) { |
336 | // just in case, don't leak the newly allocated deep userdata object | 336 | // just in case, don't leak the newly allocated deep userdata object |
diff --git a/src/deep.h b/src/deep.h
@@ -38,14 +38,14 @@ static constexpr UniqueKey kDeepVersion{ 0x91171AEC6641E9DBull, "kDeepVersion" }
38 | // a deep userdata is a full userdata that stores a single pointer to the actual DeepPrelude-derived object | 38 | // a deep userdata is a full userdata that stores a single pointer to the actual DeepPrelude-derived object |
39 | struct DeepPrelude | 39 | struct DeepPrelude |
40 | { | 40 | { |
41 | UniqueKey const m_magic{ kDeepVersion }; | 41 | UniqueKey const magic{ kDeepVersion }; |
42 | // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the factory | 42 | // when stored in a keeper state, the full userdata doesn't have a metatable, so we need direct access to the factory |
43 | class DeepFactory& m_factory; | 43 | class DeepFactory& factory; |
44 | // data is destroyed when refcount is 0 | 44 | // data is destroyed when refcount is 0 |
45 | std::atomic<int> m_refcount{ 0 }; | 45 | std::atomic<int> refcount{ 0 }; |
46 | 46 | ||
47 | DeepPrelude(DeepFactory& factory_) | 47 | DeepPrelude(DeepFactory& factory_) |
48 | : m_factory{ factory_ } | 48 | : factory{ factory_ } |
49 | { | 49 | { |
50 | } | 50 | } |
51 | }; | 51 | }; |
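`DeepPrelude` is the header every deep userdata implementation embeds: one refcounted instance shared by proxy userdata living in several lua_States, carrying a reference back to the factory that created it. The fragment below is an invented example of that layout (only `DeepPrelude` and `DeepFactory` come from Lanes), not code from the repository.

```cpp
// Hypothetical deep type, shown only to illustrate the layout above.
// DeepFactory::PushDeepProxy() bumps refcount for every proxy created for the
// object, and DeepFactory::DeleteDeepObject() hands it back to its factory
// once the last proxy is collected (see deep.cpp above).
struct MyDeepObject : public DeepPrelude
{
    int payload{ 0 };

    MyDeepObject(DeepFactory& factory_)
    : DeepPrelude{ factory_ } // the prelude remembers which factory created it
    {
    }
};
```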
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 91a2f8b..38fe2b9 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -176,10 +176,10 @@ bool Lane::waitForCompletion(lua_Duration duration_) | |||
176 | until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_); | 176 | until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_); |
177 | } | 177 | } |
178 | 178 | ||
179 | std::unique_lock lock{ m_done_mutex }; | 179 | std::unique_lock lock{ done_mutex }; |
180 | // std::stop_token token{ m_thread.get_stop_token() }; | 180 | // std::stop_token token{ thread.get_stop_token() }; |
181 | // return m_done_signal.wait_until(lock, token, secs_, [this](){ return m_status >= Lane::Done; }); | 181 | // return done_signal.wait_until(lock, token, secs_, [this](){ return status >= Lane::Done; }); |
182 | return m_done_signal.wait_until(lock, until, [this]() { return m_status >= Lane::Done; }); | 182 | return done_signal.wait_until(lock, until, [this]() { return status >= Lane::Done; }); |
183 | } | 183 | } |
184 | 184 | ||
185 | // ################################################################################################# | 185 | // ################################################################################################# |
@@ -187,9 +187,9 @@ bool Lane::waitForCompletion(lua_Duration duration_) | |||
187 | static void lane_main(Lane* lane); | 187 | static void lane_main(Lane* lane); |
188 | void Lane::startThread(int priority_) | 188 | void Lane::startThread(int priority_) |
189 | { | 189 | { |
190 | m_thread = std::jthread([this]() { lane_main(this); }); | 190 | thread = std::jthread([this]() { lane_main(this); }); |
191 | if (priority_ != kThreadPrioDefault) { | 191 | if (priority_ != kThreadPrioDefault) { |
192 | JTHREAD_SET_PRIORITY(m_thread, priority_, U->m_sudo); | 192 | JTHREAD_SET_PRIORITY(thread, priority_, U->m_sudo); |
193 | } | 193 | } |
194 | } | 194 | } |
195 | 195 | ||
@@ -441,7 +441,7 @@ static void selfdestruct_add(Lane* lane_) | |||
441 | // cancel/kill). | 441 | // cancel/kill). |
442 | // | 442 | // |
443 | if (lane_->selfdestruct_next != nullptr) { | 443 | if (lane_->selfdestruct_next != nullptr) { |
444 | Lane** ref = (Lane**) &lane_->U->selfdestruct_first; | 444 | Lane* volatile* ref = static_cast<Lane* volatile*>(&lane_->U->selfdestruct_first); |
445 | 445 | ||
446 | while (*ref != SELFDESTRUCT_END) { | 446 | while (*ref != SELFDESTRUCT_END) { |
447 | if (*ref == lane_) { | 447 | if (*ref == lane_) { |
@@ -452,7 +452,7 @@ static void selfdestruct_add(Lane* lane_) | |||
452 | found = true; | 452 | found = true; |
453 | break; | 453 | break; |
454 | } | 454 | } |
455 | ref = (Lane**) &((*ref)->selfdestruct_next); | 455 | ref = static_cast<Lane* volatile*>(&((*ref)->selfdestruct_next)); |
456 | } | 456 | } |
457 | assert(found); | 457 | assert(found); |
458 | } | 458 | } |
@@ -479,7 +479,7 @@ static void selfdestruct_add(Lane* lane_) | |||
479 | // attempt the requested cancel with a small timeout. | 479 | // attempt the requested cancel with a small timeout. |
480 | // if waiting on a linda, they will raise a cancel_error. | 480 | // if waiting on a linda, they will raise a cancel_error. |
481 | // if a cancellation hook is desired, it will be installed to try to raise an error | 481 | // if a cancellation hook is desired, it will be installed to try to raise an error |
482 | if (lane->m_thread.joinable()) { | 482 | if (lane->thread.joinable()) { |
483 | std::ignore = thread_cancel(lane, op, 1, timeout, true); | 483 | std::ignore = thread_cancel(lane, op, 1, timeout, true); |
484 | } | 484 | } |
485 | lane = lane->selfdestruct_next; | 485 | lane = lane->selfdestruct_next; |
@@ -532,7 +532,7 @@ static void selfdestruct_add(Lane* lane_) | |||
532 | 532 | ||
533 | // no need to mutex-protect this as all threads in the universe are gone at that point | 533 | // no need to mutex-protect this as all threads in the universe are gone at that point |
534 | if (U->timer_deep != nullptr) { // test in case some early internal error prevented Lanes from creating the deep timer | 534 | if (U->timer_deep != nullptr) { // test in case some early internal error prevented Lanes from creating the deep timer |
535 | [[maybe_unused]] int const prev_ref_count{ U->timer_deep->m_refcount.fetch_sub(1, std::memory_order_relaxed) }; | 535 | [[maybe_unused]] int const prev_ref_count{ U->timer_deep->refcount.fetch_sub(1, std::memory_order_relaxed) }; |
536 | LUA_ASSERT(L_, prev_ref_count == 1); // this should be the last reference | 536 | LUA_ASSERT(L_, prev_ref_count == 1); // this should be the last reference |
537 | DeepFactory::DeleteDeepObject(L_, U->timer_deep); | 537 | DeepFactory::DeleteDeepObject(L_, U->timer_deep); |
538 | U->timer_deep = nullptr; | 538 | U->timer_deep = nullptr; |
@@ -784,13 +784,13 @@ static void lane_main(Lane* lane_) | |||
784 | { | 784 | { |
785 | lua_State* const L{ lane_->L }; | 785 | lua_State* const L{ lane_->L }; |
786 | // wait until the launching thread has finished preparing L | 786 | // wait until the launching thread has finished preparing L |
787 | lane_->m_ready.wait(); | 787 | lane_->ready.wait(); |
788 | int rc{ LUA_ERRRUN }; | 788 | int rc{ LUA_ERRRUN }; |
789 | if (lane_->m_status == Lane::Pending) { // nothing wrong happened during preparation, we can work | 789 | if (lane_->status == Lane::Pending) { // nothing wrong happened during preparation, we can work |
790 | // At this point, the lane function and arguments are on the stack | 790 | // At this point, the lane function and arguments are on the stack |
791 | int const nargs{ lua_gettop(L) - 1 }; | 791 | int const nargs{ lua_gettop(L) - 1 }; |
792 | DEBUGSPEW_CODE(Universe* U = universe_get(L)); | 792 | DEBUGSPEW_CODE(Universe* U = universe_get(L)); |
793 | lane_->m_status = Lane::Running; // Pending -> Running | 793 | lane_->status = Lane::Running; // Pending -> Running |
794 | 794 | ||
795 | // Tie "set_finalizer()" to the state | 795 | // Tie "set_finalizer()" to the state |
796 | lua_pushcfunction(L, LG_set_finalizer); | 796 | lua_pushcfunction(L, LG_set_finalizer); |
@@ -838,7 +838,7 @@ static void lane_main(Lane* lane_) | |||
838 | // the finalizer generated an error, and left its own error message [and stack trace] on the stack | 838 | // the finalizer generated an error, and left its own error message [and stack trace] on the stack |
839 | rc = rc2; // we're overruling the earlier script error or normal return | 839 | rc = rc2; // we're overruling the earlier script error or normal return |
840 | } | 840 | } |
841 | lane_->m_waiting_on = nullptr; // just in case | 841 | lane_->waiting_on = nullptr; // just in case |
842 | if (selfdestruct_remove(lane_)) { // check and remove (under lock!) | 842 | if (selfdestruct_remove(lane_)) { // check and remove (under lock!) |
843 | // We're a free-running thread and no-one's there to clean us up. | 843 | // We're a free-running thread and no-one's there to clean us up. |
844 | lua_close(lane_->L); | 844 | lua_close(lane_->L); |
@@ -849,7 +849,7 @@ static void lane_main(Lane* lane_) | |||
849 | lane_->U->selfdestruct_cs.unlock(); | 849 | lane_->U->selfdestruct_cs.unlock(); |
850 | 850 | ||
851 | // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea | 851 | // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea |
852 | lane_->m_thread.detach(); | 852 | lane_->thread.detach(); |
853 | delete lane_; | 853 | delete lane_; |
854 | lane_ = nullptr; | 854 | lane_ = nullptr; |
855 | } | 855 | } |
@@ -860,10 +860,10 @@ static void lane_main(Lane* lane_) | |||
860 | Lane::Status const st = (rc == LUA_OK) ? Lane::Done : kCancelError.equals(L, 1) ? Lane::Cancelled : Lane::Error; | 860 | Lane::Status const st = (rc == LUA_OK) ? Lane::Done : kCancelError.equals(L, 1) ? Lane::Cancelled : Lane::Error; |
861 | 861 | ||
862 | { | 862 | { |
863 | // 'm_done_mutex' protects the -> Done|Error|Cancelled state change | 863 | // 'done_mutex' protects the -> Done|Error|Cancelled state change |
864 | std::lock_guard lock{ lane_->m_done_mutex }; | 864 | std::lock_guard lock{ lane_->done_mutex }; |
865 | lane_->m_status = st; | 865 | lane_->status = st; |
866 | lane_->m_done_signal.notify_one(); // wake up master (while 'lane_->m_done_mutex' is on) | 866 | lane_->done_signal.notify_one(); // wake up master (while 'lane_->done_mutex' is on) |
867 | } | 867 | } |
868 | } | 868 | } |
869 | } | 869 | } |
@@ -994,12 +994,12 @@ LUAG_FUNC(lane_new) | |||
994 | lua_settop(m_lane->L, 0); | 994 | lua_settop(m_lane->L, 0); |
995 | kCancelError.pushKey(m_lane->L); | 995 | kCancelError.pushKey(m_lane->L); |
996 | { | 996 | { |
997 | std::lock_guard lock{ m_lane->m_done_mutex }; | 997 | std::lock_guard lock{ m_lane->done_mutex }; |
998 | m_lane->m_status = Lane::Cancelled; | 998 | m_lane->status = Lane::Cancelled; |
999 | m_lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on) | 999 | m_lane->done_signal.notify_one(); // wake up master (while 'lane->done_mutex' is on) |
1000 | } | 1000 | } |
1001 | // unblock the thread so that it can terminate gracefully | 1001 | // unblock the thread so that it can terminate gracefully |
1002 | m_lane->m_ready.count_down(); | 1002 | m_lane->ready.count_down(); |
1003 | } | 1003 | } |
1004 | } | 1004 | } |
1005 | 1005 | ||
@@ -1037,7 +1037,7 @@ LUAG_FUNC(lane_new) | |||
1037 | void success() | 1037 | void success() |
1038 | { | 1038 | { |
1039 | prepareUserData(); | 1039 | prepareUserData(); |
1040 | m_lane->m_ready.count_down(); | 1040 | m_lane->ready.count_down(); |
1041 | m_lane = nullptr; | 1041 | m_lane = nullptr; |
1042 | } | 1042 | } |
1043 | } onExit{ L_, lane, gc_cb_idx DEBUGSPEW_COMMA_PARAM(U) }; | 1043 | } onExit{ L_, lane, gc_cb_idx DEBUGSPEW_COMMA_PARAM(U) }; |
@@ -1214,7 +1214,7 @@ LUAG_FUNC(lane_new) | |||
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | // We can read 'lane->status' without locks, but not wait for it | 1216 | // We can read 'lane->status' without locks, but not wait for it |
1217 | if (lane->m_status < Lane::Done) { | 1217 | if (lane->status < Lane::Done) { |
1218 | // still running: will have to be cleaned up later | 1218 | // still running: will have to be cleaned up later |
1219 | selfdestruct_add(lane); | 1219 | selfdestruct_add(lane); |
1220 | assert(lane->selfdestruct_next); | 1220 | assert(lane->selfdestruct_next); |
@@ -1272,7 +1272,7 @@ LUAG_FUNC(lane_new) | |||
1272 | 1272 | ||
1273 | void Lane::pushThreadStatus(lua_State* L_) | 1273 | void Lane::pushThreadStatus(lua_State* L_) |
1274 | { | 1274 | { |
1275 | char const* const str{ thread_status_string(m_status) }; | 1275 | char const* const str{ thread_status_string(status) }; |
1276 | LUA_ASSERT(L_, str); | 1276 | LUA_ASSERT(L_, str); |
1277 | 1277 | ||
1278 | lua_pushstring(L_, str); | 1278 | lua_pushstring(L_, str); |
@@ -1294,7 +1294,7 @@ LUAG_FUNC(thread_join) | |||
1294 | lua_Duration const duration{ luaL_optnumber(L_, 2, -1.0) }; | 1294 | lua_Duration const duration{ luaL_optnumber(L_, 2, -1.0) }; |
1295 | lua_State* const L2{ lane->L }; | 1295 | lua_State* const L2{ lane->L }; |
1296 | 1296 | ||
1297 | bool const done{ !lane->m_thread.joinable() || lane->waitForCompletion(duration) }; | 1297 | bool const done{ !lane->thread.joinable() || lane->waitForCompletion(duration) }; |
1298 | if (!done || !L2) { | 1298 | if (!done || !L2) { |
1299 | STACK_GROW(L_, 2); | 1299 | STACK_GROW(L_, 2); |
1300 | lua_pushnil(L_); // L_: lane timeout? nil | 1300 | lua_pushnil(L_); // L_: lane timeout? nil |
@@ -1310,7 +1310,7 @@ LUAG_FUNC(thread_join) | |||
1310 | // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed | 1310 | // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed |
1311 | // so store it in the userdata uservalue at a key that can't possibly collide | 1311 | // so store it in the userdata uservalue at a key that can't possibly collide |
1312 | securize_debug_threadname(L_, lane); | 1312 | securize_debug_threadname(L_, lane); |
1313 | switch (lane->m_status) { | 1313 | switch (lane->status) { |
1314 | case Lane::Done: | 1314 | case Lane::Done: |
1315 | { | 1315 | { |
1316 | int const n{ lua_gettop(L2) }; // whole L2 stack | 1316 | int const n{ lua_gettop(L2) }; // whole L2 stack |
@@ -1343,7 +1343,7 @@ LUAG_FUNC(thread_join) | |||
1343 | break; | 1343 | break; |
1344 | 1344 | ||
1345 | default: | 1345 | default: |
1346 | DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->m_status)); | 1346 | DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->status)); |
1347 | LUA_ASSERT(L_, false); | 1347 | LUA_ASSERT(L_, false); |
1348 | ret = 0; | 1348 | ret = 0; |
1349 | } | 1349 | } |
@@ -1399,12 +1399,12 @@ LUAG_FUNC(thread_index) | |||
1399 | lua_pushcfunction(L_, LG_thread_join); | 1399 | lua_pushcfunction(L_, LG_thread_join); |
1400 | lua_pushvalue(L_, kSelf); | 1400 | lua_pushvalue(L_, kSelf); |
1401 | lua_call(L_, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ | 1401 | lua_call(L_, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ |
1402 | switch (lane->m_status) { | 1402 | switch (lane->status) { |
1403 | default: | 1403 | default: |
1404 | // this is an internal error, we probably never get here | 1404 | // this is an internal error, we probably never get here |
1405 | lua_settop(L_, 0); | 1405 | lua_settop(L_, 0); |
1406 | lua_pushliteral(L_, "Unexpected status: "); | 1406 | lua_pushliteral(L_, "Unexpected status: "); |
1407 | lua_pushstring(L_, thread_status_string(lane->m_status)); | 1407 | lua_pushstring(L_, thread_status_string(lane->status)); |
1408 | lua_concat(L_, 2); | 1408 | lua_concat(L_, 2); |
1409 | raise_lua_error(L_); | 1409 | raise_lua_error(L_); |
1410 | [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack | 1410 | [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack |
@@ -1666,7 +1666,7 @@ LUAG_FUNC(configure) | |||
1666 | U->tracking_first = lua_toboolean(L_, -1) ? TRACKING_END : nullptr; | 1666 | U->tracking_first = lua_toboolean(L_, -1) ? TRACKING_END : nullptr; |
1667 | lua_pop(L_, 1); // L_: settings | 1667 | lua_pop(L_, 1); // L_: settings |
1668 | #endif // HAVE_LANE_TRACKING() | 1668 | #endif // HAVE_LANE_TRACKING() |
1669 | // Linked chains handling | 1669 | // Linked chains handling |
1670 | U->selfdestruct_first = SELFDESTRUCT_END; | 1670 | U->selfdestruct_first = SELFDESTRUCT_END; |
1671 | initialize_allocator_function(U, L_); | 1671 | initialize_allocator_function(U, L_); |
1672 | initialize_on_state_create(U, L_); | 1672 | initialize_on_state_create(U, L_); |
@@ -1682,7 +1682,7 @@ LUAG_FUNC(configure) | |||
1682 | // Proxy userdata contents is only a 'DeepPrelude*' pointer | 1682 | // Proxy userdata contents is only a 'DeepPrelude*' pointer |
1683 | U->timer_deep = *lua_tofulluserdata<DeepPrelude*>(L_, -1); | 1683 | U->timer_deep = *lua_tofulluserdata<DeepPrelude*>(L_, -1); |
1684 | // increment refcount so that this linda remains alive as long as the universe exists. | 1684 | // increment refcount so that this linda remains alive as long as the universe exists. |
1685 | U->timer_deep->m_refcount.fetch_add(1, std::memory_order_relaxed); | 1685 | U->timer_deep->refcount.fetch_add(1, std::memory_order_relaxed); |
1686 | lua_pop(L_, 1); // L_: settings | 1686 | lua_pop(L_, 1); // L_: settings |
1687 | } | 1687 | } |
1688 | STACK_CHECK(L_, 1); | 1688 | STACK_CHECK(L_, 1); |
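The `waitForCompletion()`/`lane_main()` pair above boils down to a standard completion handshake: the lane thread publishes its final status under `done_mutex` and signals `done_signal`, while the joining thread waits with a deadline and a predicate so spurious wake-ups are harmless. A condensed sketch of that handshake, with invented names, no Lua state handling, and assuming a positive timeout:

```cpp
// Condensed illustration of the completion handshake, not the Lane class itself.
#include <chrono>
#include <condition_variable>
#include <mutex>

enum class Status { Pending, Running, Done, Error, Cancelled };

struct Worker
{
    std::mutex done_mutex;
    std::condition_variable done_signal;
    Status status{ Status::Pending };

    // joining side: wait until the worker reports Done/Error/Cancelled or the deadline passes
    bool waitForCompletion(std::chrono::duration<double> timeout_)
    {
        auto const until{ std::chrono::steady_clock::now()
                          + std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout_) };
        std::unique_lock lock{ done_mutex };
        return done_signal.wait_until(lock, until, [this] { return status >= Status::Done; });
    }

    // worker side: publish the final status under the mutex, then notify
    void finish(Status final_)
    {
        std::lock_guard lock{ done_mutex };
        status = final_;
        done_signal.notify_one(); // wake the joiner while 'done_mutex' is still held
    }
};
```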
diff --git a/src/lanes_private.h b/src/lanes_private.h
index 083ac4e..1d476cf 100644
--- a/src/lanes_private.h
+++ b/src/lanes_private.h
@@ -33,12 +33,12 @@ class Lane | |||
33 | using enum Status; | 33 | using enum Status; |
34 | 34 | ||
35 | // the thread | 35 | // the thread |
36 | std::jthread m_thread; | 36 | std::jthread thread; |
37 | // a latch to wait for the lua_State to be ready | 37 | // a latch to wait for the lua_State to be ready |
38 | std::latch m_ready{ 1 }; | 38 | std::latch ready{ 1 }; |
39 | // to wait for stop requests through m_thread's stop_source | 39 | // to wait for stop requests through m_thread's stop_source |
40 | std::mutex m_done_mutex; | 40 | std::mutex done_mutex; |
41 | std::condition_variable m_done_signal; // use condition_variable_any if waiting for a stop_token | 41 | std::condition_variable done_signal; // use condition_variable_any if waiting for a stop_token |
42 | // | 42 | // |
43 | // M: sub-thread OS thread | 43 | // M: sub-thread OS thread |
44 | // S: not used | 44 | // S: not used |
@@ -51,12 +51,12 @@ class Lane | |||
51 | // M: prepares the state, and reads results | 51 | // M: prepares the state, and reads results |
52 | // S: while S is running, M must keep out of modifying the state | 52 | // S: while S is running, M must keep out of modifying the state |
53 | 53 | ||
54 | Status volatile m_status{ Pending }; | 54 | Status volatile status{ Pending }; |
55 | // | 55 | // |
56 | // M: sets to Pending (before launching) | 56 | // M: sets to Pending (before launching) |
57 | // S: updates -> Running/Waiting -> Done/Error/Cancelled | 57 | // S: updates -> Running/Waiting -> Done/Error/Cancelled |
58 | 58 | ||
59 | std::condition_variable* volatile m_waiting_on{ nullptr }; | 59 | std::condition_variable* volatile waiting_on{ nullptr }; |
60 | // | 60 | // |
61 | // When status is Waiting, points on the linda's signal the thread waits on, else nullptr | 61 | // When status is Waiting, points on the linda's signal the thread waits on, else nullptr |
62 | 62 | ||
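`ready` is a one-shot `std::latch`: the launching thread counts it down once the lane's lua_State is fully prepared (or marked Cancelled on failure), and `lane_main()` blocks on it before touching the state, as the lanes.cpp hunks above show. A minimal illustration of that startup handshake, with invented names:

```cpp
// Startup handshake sketch (invented names); C++20 <latch> and <thread>.
#include <latch>
#include <thread>

struct StartupHandshake
{
    std::latch ready{ 1 };

    void worker()
    {
        ready.wait();       // block until the launcher has finished preparing shared state
        // ... run the lane body ...
    }

    void launch()
    {
        std::jthread t{ [this] { worker(); } };
        // ... prepare the lua_State, push the lane function and its arguments ...
        ready.count_down(); // release the worker exactly once
    }                       // the jthread joins when 't' goes out of scope
};
```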
diff --git a/src/linda.cpp b/src/linda.cpp
index 82f5f98..cda3a63 100644
--- a/src/linda.cpp
+++ b/src/linda.cpp
@@ -133,7 +133,7 @@ static void check_key_types(lua_State* L_, int start_, int end_) | |||
133 | static constexpr std::array<std::reference_wrapper<UniqueKey const>, 3> kKeysToCheck{ kLindaBatched, kCancelError, kNilSentinel }; | 133 | static constexpr std::array<std::reference_wrapper<UniqueKey const>, 3> kKeysToCheck{ kLindaBatched, kCancelError, kNilSentinel }; |
134 | for (UniqueKey const& key : kKeysToCheck) { | 134 | for (UniqueKey const& key : kKeysToCheck) { |
135 | if (key.equals(L_, i)) { | 135 | if (key.equals(L_, i)) { |
136 | raise_luaL_error(L_, "argument #%d: can't use %s as a key", i, key.m_debugName); | 136 | raise_luaL_error(L_, "argument #%d: can't use %s as a key", i, key.debugName); |
137 | break; | 137 | break; |
138 | } | 138 | } |
139 | } | 139 | } |
@@ -276,20 +276,20 @@ LUAG_FUNC(linda_send) | |||
276 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings | 276 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings |
277 | if (lane != nullptr) { | 277 | if (lane != nullptr) { |
278 | // change status of lane to "waiting" | 278 | // change status of lane to "waiting" |
279 | prev_status = lane->m_status; // Running, most likely | 279 | prev_status = lane->status; // Running, most likely |
280 | LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case | 280 | LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case |
281 | lane->m_status = Lane::Waiting; | 281 | lane->status = Lane::Waiting; |
282 | LUA_ASSERT(L_, lane->m_waiting_on == nullptr); | 282 | LUA_ASSERT(L_, lane->waiting_on == nullptr); |
283 | lane->m_waiting_on = &linda->m_read_happened; | 283 | lane->waiting_on = &linda->m_read_happened; |
284 | } | 284 | } |
285 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached | 285 | // could not send because no room: wait until some data was read before trying again, or until timeout is reached |
286 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; | 286 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; |
287 | std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) }; | 287 | std::cv_status const status{ linda->m_read_happened.wait_until(keeper_lock, until) }; |
288 | keeper_lock.release(); // we don't want to release the lock! | 288 | keeper_lock.release(); // we don't want to release the lock! |
289 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups | 289 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups |
290 | if (lane != nullptr) { | 290 | if (lane != nullptr) { |
291 | lane->m_waiting_on = nullptr; | 291 | lane->waiting_on = nullptr; |
292 | lane->m_status = prev_status; | 292 | lane->status = prev_status; |
293 | } | 293 | } |
294 | } | 294 | } |
295 | } | 295 | } |
@@ -423,11 +423,11 @@ LUAG_FUNC(linda_receive) | |||
423 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings | 423 | Lane::Status prev_status{ Lane::Error }; // prevent 'might be used uninitialized' warnings |
424 | if (lane != nullptr) { | 424 | if (lane != nullptr) { |
425 | // change status of lane to "waiting" | 425 | // change status of lane to "waiting" |
426 | prev_status = lane->m_status; // Running, most likely | 426 | prev_status = lane->status; // Running, most likely |
427 | LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case | 427 | LUA_ASSERT(L_, prev_status == Lane::Running); // but check, just in case |
428 | lane->m_status = Lane::Waiting; | 428 | lane->status = Lane::Waiting; |
429 | LUA_ASSERT(L_, lane->m_waiting_on == nullptr); | 429 | LUA_ASSERT(L_, lane->waiting_on == nullptr); |
430 | lane->m_waiting_on = &linda->m_write_happened; | 430 | lane->waiting_on = &linda->m_write_happened; |
431 | } | 431 | } |
432 | // not enough data to read: wakeup when data was sent, or when timeout is reached | 432 | // not enough data to read: wakeup when data was sent, or when timeout is reached |
433 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; | 433 | std::unique_lock<std::mutex> keeper_lock{ K->m_mutex, std::adopt_lock }; |
@@ -435,8 +435,8 @@ LUAG_FUNC(linda_receive) | |||
435 | keeper_lock.release(); // we don't want to release the lock! | 435 | keeper_lock.release(); // we don't want to release the lock! |
436 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups | 436 | try_again = (status == std::cv_status::no_timeout); // detect spurious wakeups |
437 | if (lane != nullptr) { | 437 | if (lane != nullptr) { |
438 | lane->m_waiting_on = nullptr; | 438 | lane->waiting_on = nullptr; |
439 | lane->m_status = prev_status; | 439 | lane->status = prev_status; |
440 | } | 440 | } |
441 | } | 441 | } |
442 | } | 442 | } |
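Both `linda_send` and `linda_receive` wait on the linda's condition variable while the keeper mutex is already held by the enclosing code, hence the `std::adopt_lock` / `release()` pair: the `unique_lock` borrows ownership just long enough for `wait_until()` to unlock and relock, then gives it back without unlocking. A sketch of that idiom in isolation, with invented names:

```cpp
// Waiting on a condition variable whose mutex is owned by the caller.
#include <chrono>
#include <condition_variable>
#include <mutex>

bool wait_for_signal(std::mutex& held_mutex,                     // already locked by the caller
                     std::condition_variable& signal,
                     std::chrono::steady_clock::time_point until)
{
    std::unique_lock<std::mutex> lock{ held_mutex, std::adopt_lock }; // adopt, don't re-lock
    std::cv_status const status{ signal.wait_until(lock, until) };
    lock.release();                                              // hand ownership back without unlocking
    return status == std::cv_status::no_timeout;                 // false on timeout; true may still be spurious
}
```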
diff --git a/src/tools.cpp b/src/tools.cpp
index f4fbf46..c4ce24f 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -648,14 +648,14 @@ static constexpr RegistryUniqueKey kMtIdRegKey{ 0xA8895DCF4EC3FE3Cull }; | |||
648 | lua_getglobal(L2, "decoda_name"); // L1: ... t ... L2: {} t decoda_name | 648 | lua_getglobal(L2, "decoda_name"); // L1: ... t ... L2: {} t decoda_name |
649 | to = lua_tostring(L2, -1); | 649 | to = lua_tostring(L2, -1); |
650 | lua_pop(L2, 1); // L1: ... t ... L2: {} t | 650 | lua_pop(L2, 1); // L1: ... t ... L2: {} t |
651 | // when mode_ == LookupMode::FromKeeper, L is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error | 651 | // when mode_ == LookupMode::FromKeeper, L1 is a keeper state and L2 is not, therefore L2 is the state where we want to raise the error |
652 | raise_luaL_error( | 652 | raise_luaL_error( |
653 | (mode == LookupMode::FromKeeper) ? L2 : L1, | 653 | (mode == LookupMode::FromKeeper) ? L2 : L1, |
654 | "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database.", | 654 | "INTERNAL ERROR IN %s: table '%s' not found in %s destination transfer database.", |
655 | from ? from : "main", | 655 | from ? from : "main", |
656 | fqn, | 656 | fqn, |
657 | to ? to : "main"); | 657 | to ? to : "main" |
658 | return false; | 658 | ); |
659 | } | 659 | } |
660 | lua_remove(L2, -2); // L1: ... t ... L2: t | 660 | lua_remove(L2, -2); // L1: ... t ... L2: t |
661 | break; | 661 | break; |
@@ -1025,7 +1025,7 @@ void InterCopyContext::copy_func() const | |||
1025 | 1025 | ||
1026 | // transfer the bytecode, then the upvalues, to create a similar closure | 1026 | // transfer the bytecode, then the upvalues, to create a similar closure |
1027 | { | 1027 | { |
1028 | char const* name = nullptr; | 1028 | char const* fname = nullptr; |
1029 | #define LOG_FUNC_INFO 0 | 1029 | #define LOG_FUNC_INFO 0 |
1030 | #if LOG_FUNC_INFO | 1030 | #if LOG_FUNC_INFO |
1031 | // "To get information about a function you push it onto the | 1031 | // "To get information about a function you push it onto the |
@@ -1034,9 +1034,9 @@ void InterCopyContext::copy_func() const | |||
1034 | { | 1034 | { |
1035 | lua_Debug ar; | 1035 | lua_Debug ar; |
1036 | lua_pushvalue(L1, L1_i); // L1: ... b f | 1036 | lua_pushvalue(L1, L1_i); // L1: ... b f |
1037 | // fills 'name' 'namewhat' and 'linedefined', pops function | 1037 | // fills 'fname' 'namewhat' and 'linedefined', pops function |
1038 | lua_getinfo(L1, ">nS", &ar); // L1: ... b | 1038 | lua_getinfo(L1, ">nS", &ar); // L1: ... b |
1039 | name = ar.namewhat; | 1039 | fname = ar.namewhat; |
1040 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "FNAME: %s @ %d" INDENT_END(U), ar.short_src, ar.linedefined)); // just gives nullptr | 1040 | DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "FNAME: %s @ %d" INDENT_END(U), ar.short_src, ar.linedefined)); // just gives nullptr |
1041 | } | 1041 | } |
1042 | #endif // LOG_FUNC_INFO | 1042 | #endif // LOG_FUNC_INFO |
@@ -1046,17 +1046,17 @@ void InterCopyContext::copy_func() const | |||
1046 | LUA_ASSERT(L1, s && sz); | 1046 | LUA_ASSERT(L1, s && sz); |
1047 | STACK_GROW(L2, 2); | 1047 | STACK_GROW(L2, 2); |
1048 | // Note: Line numbers seem to be taken precisely from the | 1048 | // Note: Line numbers seem to be taken precisely from the |
1049 | // original function. 'name' is not used since the chunk | 1049 | // original function. 'fname' is not used since the chunk |
1050 | // is precompiled (it seems...). | 1050 | // is precompiled (it seems...). |
1051 | // | 1051 | // |
1052 | // TBD: Can we get the function's original name through, as well? | 1052 | // TBD: Can we get the function's original name through, as well? |
1053 | // | 1053 | // |
1054 | if (luaL_loadbuffer(L2, s, sz, name) != 0) { // L2: ... {cache} ... p function | 1054 | if (luaL_loadbuffer(L2, s, sz, fname) != 0) { // L2: ... {cache} ... p function |
1055 | // chunk is precompiled so only LUA_ERRMEM can happen | 1055 | // chunk is precompiled so only LUA_ERRMEM can happen |
1056 | // "Otherwise, it pushes an error message" | 1056 | // "Otherwise, it pushes an error message" |
1057 | // | 1057 | // |
1058 | STACK_GROW(L1, 1); | 1058 | STACK_GROW(L1, 1); |
1059 | raise_luaL_error(L1, "%s: %s", name, lua_tostring(L2, -1)); | 1059 | raise_luaL_error(L1, "%s: %s", fname, lua_tostring(L2, -1)); |
1060 | } | 1060 | } |
1061 | // remove the dumped string | 1061 | // remove the dumped string |
1062 | lua_pop(L1, 1); // ... | 1062 | lua_pop(L1, 1); // ... |
@@ -1285,8 +1285,8 @@ void InterCopyContext::inter_copy_keyvaluepair() const | |||
1285 | 1285 | ||
1286 | [[nodiscard]] bool InterCopyContext::tryCopyClonable() const | 1286 | [[nodiscard]] bool InterCopyContext::tryCopyClonable() const |
1287 | { | 1287 | { |
1288 | SourceIndex const L1_i{ lua_absindex(L1, this->L1_i) }; | 1288 | SourceIndex const L1i{ lua_absindex(L1, L1_i) }; |
1289 | void* const source{ lua_touserdata(L1, L1_i) }; | 1289 | void* const source{ lua_touserdata(L1, L1i) }; |
1290 | 1290 | ||
1291 | STACK_CHECK_START_REL(L1, 0); | 1291 | STACK_CHECK_START_REL(L1, 0); |
1292 | STACK_CHECK_START_REL(L2, 0); | 1292 | STACK_CHECK_START_REL(L2, 0); |
@@ -1303,7 +1303,7 @@ void InterCopyContext::inter_copy_keyvaluepair() const | |||
1303 | STACK_CHECK(L2, 0); | 1303 | STACK_CHECK(L2, 0); |
1304 | 1304 | ||
1305 | // no metatable? -> not clonable | 1305 | // no metatable? -> not clonable |
1306 | if (!lua_getmetatable(L1, L1_i)) { // L1: ... mt? | 1306 | if (!lua_getmetatable(L1, L1i)) { // L1: ... mt? |
1307 | STACK_CHECK(L1, 0); | 1307 | STACK_CHECK(L1, 0); |
1308 | return false; | 1308 | return false; |
1309 | } | 1309 | } |
@@ -1319,10 +1319,10 @@ void InterCopyContext::inter_copy_keyvaluepair() const | |||
1319 | // we need to copy over the uservalues of the userdata as well | 1319 | // we need to copy over the uservalues of the userdata as well |
1320 | { | 1320 | { |
1321 | int const mt{ lua_absindex(L1, -2) }; // L1: ... mt __lanesclone | 1321 | int const mt{ lua_absindex(L1, -2) }; // L1: ... mt __lanesclone |
1322 | size_t const userdata_size{ lua_rawlen(L1, L1_i) }; | 1322 | size_t const userdata_size{ lua_rawlen(L1, L1i) }; |
1323 | // extract all the uservalues, but don't transfer them yet | 1323 | // extract all the uservalues, but don't transfer them yet |
1324 | int uvi = 0; | 1324 | int uvi = 0; |
1325 | while (lua_getiuservalue(L1, L1_i, ++uvi) != LUA_TNONE) {} // L1: ... mt __lanesclone [uv]+ nil | 1325 | while (lua_getiuservalue(L1, L1i, ++uvi) != LUA_TNONE) {} // L1: ... mt __lanesclone [uv]+ nil |
1326 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now | 1326 | // when lua_getiuservalue() returned LUA_TNONE, it pushed a nil. pop it now |
1327 | lua_pop(L1, 1); // L1: ... mt __lanesclone [uv]+ | 1327 | lua_pop(L1, 1); // L1: ... mt __lanesclone [uv]+ |
1328 | --uvi; | 1328 | --uvi; |
@@ -1811,7 +1811,7 @@ void InterCopyContext::inter_copy_keyvaluepair() const | |||
1811 | DEBUGSPEW_CODE(DebugSpewIndentScope m_scope); | 1811 | DEBUGSPEW_CODE(DebugSpewIndentScope m_scope); |
1812 | 1812 | ||
1813 | public: | 1813 | public: |
1814 | OnExit(Universe* U_, lua_State* L2_) | 1814 | OnExit(DEBUGSPEW_PARAM_COMMA(Universe* U_) lua_State* L2_) |
1815 | : L2{ L2_ } | 1815 | : L2{ L2_ } |
1816 | , top_L2{ lua_gettop(L2) } DEBUGSPEW_COMMA_PARAM(m_scope{ U_ }) | 1816 | , top_L2{ lua_gettop(L2) } DEBUGSPEW_COMMA_PARAM(m_scope{ U_ }) |
1817 | { | 1817 | { |
@@ -1821,7 +1821,7 @@ void InterCopyContext::inter_copy_keyvaluepair() const | |||
1821 | { | 1821 | { |
1822 | lua_settop(L2, top_L2); | 1822 | lua_settop(L2, top_L2); |
1823 | } | 1823 | } |
1824 | } onExit{ U, L2 }; | 1824 | } onExit{ DEBUGSPEW_PARAM_COMMA(U) L2 }; |
1825 | 1825 | ||
1826 | STACK_CHECK_START_REL(L1, 0); | 1826 | STACK_CHECK_START_REL(L1, 0); |
1827 | if (lua_type_as_enum(L1, L1_i) != LuaType::TABLE) { | 1827 | if (lua_type_as_enum(L1, L1_i) != LuaType::TABLE) { |
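`copy_func()` transfers a closure between two independent lua_States by dumping its bytecode in the source state and reloading it with `luaL_loadbuffer()` in the destination, which is why only `LUA_ERRMEM` is expected there. The sketch below shows that dump/reload round-trip with plain Lua C API calls; it is a simplified stand-in, not the Lanes code path, and it ignores upvalues and the lookup databases handled above.

```cpp
// Simplified bytecode round-trip between two states (not the Lanes implementation).
#include <string>
extern "C" {
#include "lua.h"
#include "lauxlib.h"
}

static int string_writer(lua_State*, void const* p_, size_t sz_, void* ud_)
{
    static_cast<std::string*>(ud_)->append(static_cast<char const*>(p_), sz_);
    return 0; // 0 tells lua_dump to keep writing
}

// assumes a (non-C) function sits on top of L1's stack; leaves the loaded copy on L2's stack
static bool transfer_function(lua_State* L1, lua_State* L2, char const* chunkname_)
{
    std::string bytecode;
    if (lua_dump(L1, string_writer, &bytecode, 0) != 0) { // Lua 5.4 signature (strip = 0)
        return false;
    }
    // reload in the destination; for a precompiled chunk only LUA_ERRMEM can happen
    return luaL_loadbuffer(L2, bytecode.data(), bytecode.size(), chunkname_) == LUA_OK;
}
```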
diff --git a/src/uniquekey.h b/src/uniquekey.h
index 984ef50..da699b0 100644
--- a/src/uniquekey.h
+++ b/src/uniquekey.h
@@ -10,19 +10,19 @@ | |||
10 | class UniqueKey | 10 | class UniqueKey |
11 | { | 11 | { |
12 | protected: | 12 | protected: |
13 | uintptr_t const m_storage{ 0 }; | 13 | uintptr_t const storage{ 0 }; |
14 | 14 | ||
15 | public: | 15 | public: |
16 | char const* m_debugName{ nullptr }; | 16 | char const* debugName{ nullptr }; |
17 | 17 | ||
18 | // --------------------------------------------------------------------------------------------- | 18 | // --------------------------------------------------------------------------------------------- |
19 | constexpr explicit UniqueKey(uint64_t val_, char const* debugName_ = nullptr) | 19 | constexpr explicit UniqueKey(uint64_t val_, char const* debugName_ = nullptr) |
20 | #if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations | 20 | #if LUAJIT_FLAVOR() == 64 // building against LuaJIT headers for 64 bits, light userdata is restricted to 47 significant bits, because LuaJIT uses the other bits for internal optimizations |
21 | : m_storage{ static_cast<uintptr_t>(val_ & 0x7FFFFFFFFFFFull) } | 21 | : storage{ static_cast<uintptr_t>(val_ & 0x7FFFFFFFFFFFull) } |
22 | #else // LUAJIT_FLAVOR() | 22 | #else // LUAJIT_FLAVOR() |
23 | : m_storage{ static_cast<uintptr_t>(val_) } | 23 | : storage{ static_cast<uintptr_t>(val_) } |
24 | #endif // LUAJIT_FLAVOR() | 24 | #endif // LUAJIT_FLAVOR() |
25 | , m_debugName{ debugName_ } | 25 | , debugName{ debugName_ } |
26 | { | 26 | { |
27 | } | 27 | } |
28 | // --------------------------------------------------------------------------------------------- | 28 | // --------------------------------------------------------------------------------------------- |
@@ -32,12 +32,12 @@ class UniqueKey | |||
32 | // --------------------------------------------------------------------------------------------- | 32 | // --------------------------------------------------------------------------------------------- |
33 | bool equals(lua_State* const L_, int i_) const | 33 | bool equals(lua_State* const L_, int i_) const |
34 | { | 34 | { |
35 | return lua_touserdata(L_, i_) == std::bit_cast<void*>(m_storage); | 35 | return lua_touserdata(L_, i_) == std::bit_cast<void*>(storage); |
36 | } | 36 | } |
37 | // --------------------------------------------------------------------------------------------- | 37 | // --------------------------------------------------------------------------------------------- |
38 | void pushKey(lua_State* const L_) const | 38 | void pushKey(lua_State* const L_) const |
39 | { | 39 | { |
40 | lua_pushlightuserdata(L_, std::bit_cast<void*>(m_storage)); | 40 | lua_pushlightuserdata(L_, std::bit_cast<void*>(storage)); |
41 | } | 41 | } |
42 | }; | 42 | }; |
43 | 43 | ||
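`UniqueKey` packs a fixed 64-bit constant into a light userdata (masked to 47 bits when building against 64-bit LuaJIT, which reserves the upper bits), giving a registry key that no Lua script can forge. A hypothetical usage sketch, with an invented key value and invented function names:

```cpp
// Hypothetical usage of UniqueKey; the constant and the helpers are made up.
static constexpr UniqueKey kExampleKey{ 0x1122334455667788ull, "kExampleKey" };

// stash a value in the registry under the unforgeable key
void stash_example(lua_State* L_, lua_Integer value_)
{
    kExampleKey.pushKey(L_);           // registry key, pushed as light userdata
    lua_pushinteger(L_, value_);
    lua_rawset(L_, LUA_REGISTRYINDEX); // registry[kExampleKey] = value_
}

// detect whether the value at a stack index is the key itself
bool is_example_key(lua_State* L_, int idx_)
{
    return kExampleKey.equals(L_, idx_);
}
```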