Diffstat (limited to 'src/lanes.cpp')
-rw-r--r--   src/lanes.cpp   650
1 file changed, 251 insertions, 399 deletions
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 47ca79a..1f795cc 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -99,6 +99,8 @@ THE SOFTWARE.
99# include <sys/types.h> 99# include <sys/types.h>
100#endif 100#endif
101 101
102#include <atomic>
103
102// forwarding (will do things better later) 104// forwarding (will do things better later)
103static void tracking_add(Lane* lane_); 105static void tracking_add(Lane* lane_);
104 106
@@ -106,11 +108,6 @@ Lane::Lane(Universe* U_, lua_State* L_)
106: U{ U_ } 108: U{ U_ }
107, L{ L_ } 109, L{ L_ }
108{ 110{
109#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
110 MUTEX_INIT(&done_lock);
111 SIGNAL_INIT(&done_signal);
112#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
113
114#if HAVE_LANE_TRACKING() 111#if HAVE_LANE_TRACKING()
115 if (U->tracking_first) 112 if (U->tracking_first)
116 { 113 {
@@ -119,6 +116,29 @@ Lane::Lane(Universe* U_, lua_State* L_)
119#endif // HAVE_LANE_TRACKING() 116#endif // HAVE_LANE_TRACKING()
120} 117}
121 118
119bool Lane::waitForCompletion(lua_Duration duration_)
120{
121 std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
122 if (duration_.count() >= 0.0)
123 {
124 until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
125 }
126
127 std::unique_lock lock{ m_done_mutex };
128 //std::stop_token token{ m_thread.get_stop_token() };
129 //return m_done_signal.wait_until(lock, token, secs_, [this](){ return m_status >= Lane::Done; });
130 return m_done_signal.wait_until(lock, until, [this](){ return m_status >= Lane::Done; });
131}
132
133static void lane_main(Lane* lane);
134void Lane::startThread(int priority_)
135{
136 m_thread = std::jthread([this]() { lane_main(this); });
137 if (priority_ != THREAD_PRIO_DEFAULT)
138 {
139 JTHREAD_SET_PRIORITY(m_thread, priority_, U->m_sudo);
140 }
141}
122 142
123/* Do you want full call stacks, or just the line where the error happened? 143/* Do you want full call stacks, or just the line where the error happened?
124* 144*
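
The new Lane::waitForCompletion() and Lane::startThread() members replace the old THREADWAIT/THREAD_CREATE macro layer with a std::condition_variable guarded by m_done_mutex plus a std::jthread. Below is a minimal standalone sketch of that wait/notify pattern; Worker, Status and Seconds are illustrative names for this example only, not the actual Lane API, and the negative-duration convention mirrors the -1.0 default used by thread_join.

    // Sketch: completion signalling with std::jthread + std::condition_variable.
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    using Seconds = std::chrono::duration<double>; // stands in for lua_Duration

    struct Worker
    {
        enum Status { Pending, Running, Done };

        std::mutex m_done_mutex;
        std::condition_variable m_done_signal;
        Status m_status{ Pending };
        std::jthread m_thread;

        void start()
        {
            m_thread = std::jthread([this]() {
                // ... the lane body would run here ...
                std::lock_guard lock{ m_done_mutex };
                m_status = Done;
                m_done_signal.notify_one(); // wake up whoever waits in waitForCompletion()
            });
        }

        // a negative duration means "wait forever"
        bool waitForCompletion(Seconds duration_)
        {
            auto until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
            if (duration_.count() >= 0.0)
            {
                until = std::chrono::steady_clock::now()
                    + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
            }
            std::unique_lock lock{ m_done_mutex };
            return m_done_signal.wait_until(lock, until, [this]() { return m_status >= Done; });
        }
    };

    int main()
    {
        Worker w;
        w.start();
        bool const done{ w.waitForCompletion(Seconds{ 1.0 }) };
        std::printf("done: %s\n", done ? "yes" : "no");
    }
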
@@ -142,7 +162,7 @@ static void securize_debug_threadname(lua_State* L, Lane* lane_)
142} 162}
143 163
144#if ERROR_FULL_STACK 164#if ERROR_FULL_STACK
145static int lane_error( lua_State* L); 165[[nodiscard]] static int lane_error(lua_State* L);
146// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/ 166// crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/
147static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full }; 167static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full };
148#endif // ERROR_FULL_STACK 168#endif // ERROR_FULL_STACK
@@ -168,7 +188,7 @@ static constexpr UniqueKey FINALIZER_REGKEY{ 0x188fccb8bf348e09ull };
168* Returns: true if a table was pushed 188* Returns: true if a table was pushed
169* false if no table found, not created, and nothing pushed 189* false if no table found, not created, and nothing pushed
170*/ 190*/
171static bool push_registry_table( lua_State* L, UniqueKey key, bool create) 191[[nodiscard]] static bool push_registry_table(lua_State* L, UniqueKey key, bool create)
172{ 192{
173 STACK_GROW(L, 3); 193 STACK_GROW(L, 3);
174 STACK_CHECK_START_REL(L, 0); 194 STACK_CHECK_START_REL(L, 0);
@@ -217,7 +237,7 @@ static void tracking_add(Lane* lane_)
217/* 237/*
218 * A free-running lane has ended; remove it from tracking chain 238 * A free-running lane has ended; remove it from tracking chain
219 */ 239 */
220static bool tracking_remove(Lane* lane_) 240[[nodiscard]] static bool tracking_remove(Lane* lane_)
221{ 241{
222 bool found{ false }; 242 bool found{ false };
223 std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs }; 243 std::lock_guard<std::mutex> guard{ lane_->U->tracking_cs };
@@ -253,16 +273,11 @@ Lane::~Lane()
253{ 273{
254 // Clean up after a (finished) thread 274 // Clean up after a (finished) thread
255 // 275 //
256#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
257 SIGNAL_FREE(&done_signal);
258 MUTEX_FREE(&done_lock);
259#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
260
261#if HAVE_LANE_TRACKING() 276#if HAVE_LANE_TRACKING()
262 if (U->tracking_first != nullptr) 277 if (U->tracking_first != nullptr)
263 { 278 {
264 // Lane was cleaned up, no need to handle at process termination 279 // Lane was cleaned up, no need to handle at process termination
265 tracking_remove(this); 280 std::ignore = tracking_remove(this);
266 } 281 }
267#endif // HAVE_LANE_TRACKING() 282#endif // HAVE_LANE_TRACKING()
268} 283}
@@ -285,10 +300,10 @@ LUAG_FUNC( set_finalizer)
285{ 300{
286 luaL_argcheck(L, lua_isfunction(L, 1), 1, "finalizer should be a function"); 301 luaL_argcheck(L, lua_isfunction(L, 1), 1, "finalizer should be a function");
287 luaL_argcheck(L, lua_gettop( L) == 1, 1, "too many arguments"); 302 luaL_argcheck(L, lua_gettop( L) == 1, 1, "too many arguments");
288 // Get the current finalizer table (if any) 303 // Get the current finalizer table (if any), create one if it doesn't exist
289 push_registry_table(L, FINALIZER_REGKEY, true /*do create if none*/); // finalizer {finalisers} 304 std::ignore = push_registry_table(L, FINALIZER_REGKEY, true); // finalizer {finalisers}
290 STACK_GROW(L, 2); 305 STACK_GROW(L, 2);
291 lua_pushinteger(L, lua_rawlen(L, -1) + 1); // finalizer {finalisers} idx 306 lua_pushinteger(L, lua_rawlen(L, -1) + 1); // finalizer {finalisers} idx
292 lua_pushvalue(L, 1); // finalizer {finalisers} idx finalizer 307 lua_pushvalue(L, 1); // finalizer {finalisers} idx finalizer
293 lua_rawset(L, -3); // finalizer {finalisers} 308 lua_rawset(L, -3); // finalizer {finalisers}
294 lua_pop(L, 2); // 309 lua_pop(L, 2); //
@@ -311,7 +326,7 @@ LUAG_FUNC( set_finalizer)
311// 326//
312static void push_stack_trace( lua_State* L, int rc_, int stk_base_); 327static void push_stack_trace( lua_State* L, int rc_, int stk_base_);
313 328
314static int run_finalizers( lua_State* L, int lua_rc) 329[[nodiscard]] static int run_finalizers(lua_State* L, int lua_rc)
315{ 330{
316 int finalizers_index; 331 int finalizers_index;
317 int n; 332 int n;
@@ -415,7 +430,7 @@ static void selfdestruct_add(Lane* lane_)
415/* 430/*
416 * A free-running lane has ended; remove it from selfdestruct chain 431 * A free-running lane has ended; remove it from selfdestruct chain
417 */ 432 */
418static bool selfdestruct_remove(Lane* lane_) 433[[nodiscard]] static bool selfdestruct_remove(Lane* lane_)
419{ 434{
420 bool found{ false }; 435 bool found{ false };
421 std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs }; 436 std::lock_guard<std::mutex> guard{ lane_->U->selfdestruct_cs };
@@ -434,7 +449,7 @@ static bool selfdestruct_remove(Lane* lane_)
434 *ref = lane_->selfdestruct_next; 449 *ref = lane_->selfdestruct_next;
435 lane_->selfdestruct_next = nullptr; 450 lane_->selfdestruct_next = nullptr;
436 // the terminal shutdown should wait until the lane is done with its lua_close() 451 // the terminal shutdown should wait until the lane is done with its lua_close()
437 ++lane_->U->selfdestructing_count; 452 lane_->U->selfdestructing_count.fetch_add(1, std::memory_order_release);
438 found = true; 453 found = true;
439 break; 454 break;
440 } 455 }
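
selfdestructing_count is now a std::atomic, so universe_gc can poll it without holding selfdestruct_cs: the lanes publish with release ordering and the shutdown loop reads with acquire. A small self-contained illustration of that pairing follows; the names and the fixed lane count are invented for the example.

    // Sketch: release-decrement / acquire-load pairing for a "lanes still cleaning up" counter.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    std::atomic<int> selfdestructing_count{ 0 };

    void lane_cleanup()
    {
        // ... the lua_close() equivalent would happen here ...
        // release: makes the cleanup writes above visible to the acquiring reader below
        selfdestructing_count.fetch_sub(1, std::memory_order_release);
    }

    int main()
    {
        std::vector<std::jthread> lanes;
        for (int i = 0; i < 4; ++i)
        {
            // registered before the thread even starts, so the counter can never be seen
            // at zero while a lane still has cleanup pending
            selfdestructing_count.fetch_add(1, std::memory_order_relaxed);
            lanes.emplace_back(lane_cleanup);
        }
        // terminal shutdown: wait until every lane is done with its cleanup
        while (selfdestructing_count.load(std::memory_order_acquire) > 0)
            std::this_thread::yield();
        std::puts("all lanes cleaned up");
    }
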
@@ -450,29 +465,30 @@ static bool selfdestruct_remove(Lane* lane_)
450/* 465/*
451* Process end; cancel any still free-running threads 466* Process end; cancel any still free-running threads
452*/ 467*/
453static int universe_gc( lua_State* L) 468[[nodiscard]] static int universe_gc(lua_State* L)
454{ 469{
455 Universe* const U{ lua_tofulluserdata<Universe>(L, 1) }; 470 Universe* const U{ lua_tofulluserdata<Universe>(L, 1) };
471 lua_Duration const shutdown_timeout{ lua_tonumber(L, lua_upvalueindex(1)) };
472 [[maybe_unused]] char const* const op_string{ lua_tostring(L, lua_upvalueindex(2)) };
473 CancelOp const op{ which_cancel_op(op_string) };
456 474
457 while (U->selfdestruct_first != SELFDESTRUCT_END) // true at most once! 475 if (U->selfdestruct_first != SELFDESTRUCT_END)
458 { 476 {
477
459 // Signal _all_ still running threads to exit (including the timer thread) 478 // Signal _all_ still running threads to exit (including the timer thread)
460 // 479 //
461 { 480 {
462 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs }; 481 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
463 Lane* lane{ U->selfdestruct_first }; 482 Lane* lane{ U->selfdestruct_first };
483 lua_Duration timeout{ 1us };
464 while (lane != SELFDESTRUCT_END) 484 while (lane != SELFDESTRUCT_END)
465 { 485 {
466 // attempt a regular unforced hard cancel with a small timeout 486 // attempt the requested cancel with a small timeout.
467 bool const cancelled{ THREAD_ISNULL(lane->thread) || thread_cancel(L, lane, CancelOp::Hard, 0.0001, false, 0.0) != CancelResult::Timeout }; 487 // if waiting on a linda, they will raise a cancel_error.
468 // if we failed, and we know the thread is waiting on a linda 488 // if a cancellation hook is desired, it will be installed to try to raise an error
469 if (cancelled == false && lane->status == WAITING && lane->waiting_on != nullptr) 489 if (lane->m_thread.joinable())
470 { 490 {
471 // signal the linda to wake up the thread so that it can react to the cancel query 491 std::ignore = thread_cancel(lane, op, 1, timeout, true);
472 // let us hope we never land here with a pointer on a linda that has been destroyed...
473 SIGNAL_T* const waiting_on{ lane->waiting_on };
474 // lane->waiting_on = nullptr; // useful, or not?
475 SIGNAL_ALL(waiting_on);
476 } 492 }
477 lane = lane->selfdestruct_next; 493 lane = lane->selfdestruct_next;
478 } 494 }
@@ -480,98 +496,52 @@ static int universe_gc( lua_State* L)
480 496
481 // When noticing their cancel, the lanes will remove themselves from 497 // When noticing their cancel, the lanes will remove themselves from
482 // the selfdestruct chain. 498 // the selfdestruct chain.
483
484 // TBD: Not sure if Windows (multi core) will require the timed approach,
485 // or single Yield. I don't have machine to test that (so leaving
486 // for timed approach). -- AKa 25-Oct-2008
487
488 // OS X 10.5 (Intel) needs more to avoid segfaults.
489 //
490 // "make test" is okay. 100's of "make require" are okay.
491 //
492 // Tested on MacBook Core Duo 2GHz and 10.5.5:
493 // -- AKa 25-Oct-2008
494 //
495 { 499 {
496 lua_Number const shutdown_timeout = lua_tonumber(L, lua_upvalueindex(1)); 500 std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
497 double const t_until = now_secs() + shutdown_timeout;
498 501
499 while (U->selfdestruct_first != SELFDESTRUCT_END) 502 while (U->selfdestruct_first != SELFDESTRUCT_END)
500 { 503 {
501 YIELD(); // give threads time to act on their cancel 504 // give threads time to act on their cancel
505 std::this_thread::yield();
506 // count the number of cancelled thread that didn't have the time to act yet
507 int n{ 0 };
502 { 508 {
503 // count the number of cancelled thread that didn't have the time to act yet 509 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
504 int n = 0; 510 Lane* lane{ U->selfdestruct_first };
505 double t_now = 0.0; 511 while (lane != SELFDESTRUCT_END)
506 {
507 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
508 Lane* lane{ U->selfdestruct_first };
509 while (lane != SELFDESTRUCT_END)
510 {
511 if (lane->cancel_request == CancelRequest::Hard)
512 ++n;
513 lane = lane->selfdestruct_next;
514 }
515 }
516 // if timeout elapsed, or we know all threads have acted, stop waiting
517 t_now = now_secs();
518 if (n == 0 || (t_now >= t_until))
519 { 512 {
520 DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now))); 513 if (lane->cancel_request != CancelRequest::None)
521 break; 514 ++n;
515 lane = lane->selfdestruct_next;
522 } 516 }
523 } 517 }
518 // if timeout elapsed, or we know all threads have acted, stop waiting
519 std::chrono::time_point<std::chrono::steady_clock> t_now = std::chrono::steady_clock::now();
520 if (n == 0 || (t_now >= t_until))
521 {
522 DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout.count()));
523 break;
524 }
524 } 525 }
525 } 526 }
526 527
527 // If some lanes are currently cleaning after themselves, wait until they are done. 528 // If some lanes are currently cleaning after themselves, wait until they are done.
528 // They are no longer listed in the selfdestruct chain, but they still have to lua_close(). 529 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
529 while (U->selfdestructing_count > 0) 530 while (U->selfdestructing_count.load(std::memory_order_acquire) > 0)
530 { 531 {
531 YIELD(); 532 std::this_thread::yield();
532 }
533
534 //---
535 // Kill the still free running threads
536 //
537 if (U->selfdestruct_first != SELFDESTRUCT_END)
538 {
539 unsigned int n = 0;
540 // first thing we did was to raise the linda signals the threads were waiting on (if any)
541 // therefore, any well-behaved thread should be in CANCELLED state
542 // these are not running, and the state can be closed
543 {
544 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
545 Lane* lane{ U->selfdestruct_first };
546 while (lane != SELFDESTRUCT_END)
547 {
548 Lane* const next_s{ lane->selfdestruct_next };
549 lane->selfdestruct_next = nullptr; // detach from selfdestruct chain
550 if (!THREAD_ISNULL(lane->thread)) // can be nullptr if previous 'soft' termination succeeded
551 {
552 THREAD_KILL(&lane->thread);
553#if THREADAPI == THREADAPI_PTHREAD
554 // pthread: make sure the thread is really stopped!
555 THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
556#endif // THREADAPI == THREADAPI_PTHREAD
557 }
558 // NO lua_close() in this case because we don't know where execution of the state was interrupted
559 delete lane;
560 lane = next_s;
561 ++n;
562 }
563 U->selfdestruct_first = SELFDESTRUCT_END;
564 }
565
566 DEBUGSPEW_CODE(fprintf(stderr, "Killed %d lane(s) at process end.\n", n));
567 } 533 }
568 } 534 }
569 535
570 // If some lanes are currently cleaning after themselves, wait until they are done. 536 // If after all this, we still have some free-running lanes, it's an external user error, they should have stopped appropriately
571 // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
572 while( U->selfdestructing_count > 0)
573 { 537 {
574 YIELD(); 538 std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
539 Lane* lane{ U->selfdestruct_first };
540 if (lane != SELFDESTRUCT_END)
541 {
542 // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it)
543 luaL_error(L, "Zombie thread %s refuses to die!", lane->debug_name); // doesn't return
544 }
575 } 545 }
576 546
577 // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1 547 // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
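
The shutdown wait above now takes its deadline from std::chrono::steady_clock and yields with std::this_thread::yield() instead of the old now_secs()/YIELD pair. A condensed sketch of that deadline loop, with an atomic counter standing in for the walk over the selfdestruct chain (all names here are illustrative):

    // Sketch: bounded busy-wait against a steady_clock deadline.
    #include <atomic>
    #include <chrono>
    #include <thread>

    std::atomic<int> remaining_lanes{ 3 };

    bool wait_for_shutdown(std::chrono::duration<double> shutdown_timeout)
    {
        auto const t_until{ std::chrono::steady_clock::now()
            + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
        while (remaining_lanes.load(std::memory_order_acquire) > 0)
        {
            if (std::chrono::steady_clock::now() >= t_until)
                return false; // timeout elapsed, some lanes did not act on their cancel
            std::this_thread::yield(); // give the lanes time to act
        }
        return true;
    }

    int main()
    {
        std::jthread winding_down([]() {
            while (remaining_lanes.load(std::memory_order_acquire) > 0)
            {
                std::this_thread::sleep_for(std::chrono::milliseconds{ 10 });
                remaining_lanes.fetch_sub(1, std::memory_order_release);
            }
        });
        return wait_for_shutdown(std::chrono::duration<double>{ 1.0 }) ? 0 : 1;
    }
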
@@ -668,7 +638,7 @@ LUAG_FUNC( set_error_reporting)
668 return 0; 638 return 0;
669} 639}
670 640
671static int lane_error(lua_State* L) 641[[nodiscard]] static int lane_error(lua_State* L)
672{ 642{
673 // error message (any type) 643 // error message (any type)
674 STACK_CHECK_START_ABS(L, 1); // some_error 644 STACK_CHECK_START_ABS(L, 1); // some_error
@@ -814,7 +784,7 @@ LUAG_FUNC(get_debug_threadname)
814 784
815LUAG_FUNC(set_thread_priority) 785LUAG_FUNC(set_thread_priority)
816{ 786{
817 int const prio{ (int) luaL_checkinteger(L, 1) }; 787 lua_Integer const prio{ luaL_checkinteger(L, 1) };
818 // public Lanes API accepts a generic range -3/+3 788 // public Lanes API accepts a generic range -3/+3
819 // that will be remapped into the platform-specific scheduler priority scheme 789 // that will be remapped into the platform-specific scheduler priority scheme
820 // On some platforms, -3 is equivalent to -2 and +3 to +2 790 // On some platforms, -3 is equivalent to -2 and +3 to +2
@@ -822,7 +792,7 @@ LUAG_FUNC(set_thread_priority)
822 { 792 {
823 return luaL_error(L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio); 793 return luaL_error(L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio);
824 } 794 }
825 THREAD_SET_PRIORITY(prio); 795 THREAD_SET_PRIORITY(static_cast<int>(prio), universe_get(L)->m_sudo);
826 return 0; 796 return 0;
827} 797}
828 798
@@ -872,32 +842,18 @@ static char const* get_errcode_name( int _code)
872} 842}
873#endif // USE_DEBUG_SPEW() 843#endif // USE_DEBUG_SPEW()
874 844
875#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD 845static void lane_main(Lane* lane)
876static void thread_cleanup_handler(void* opaque)
877{
878 Lane* lane{ (Lane*) opaque };
879 MUTEX_LOCK(&lane->done_lock);
880 lane->status = CANCELLED;
881 SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on)
882 MUTEX_UNLOCK(&lane->done_lock);
883}
884#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
885
886static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
887{ 846{
888 Lane* lane{ (Lane*) vs };
889 lua_State* const L{ lane->L }; 847 lua_State* const L{ lane->L };
890 // wait until the launching thread has finished preparing L 848 // wait until the launching thread has finished preparing L
891 lane->m_ready.wait(); 849 lane->m_ready.wait();
892 int rc{ LUA_ERRRUN }; 850 int rc{ LUA_ERRRUN };
893 if (lane->status == PENDING) // nothing wrong happened during preparation, we can work 851 if (lane->m_status == Lane::Pending) // nothing wrong happened during preparation, we can work
894 { 852 {
895 // At this point, the lane function and arguments are on the stack 853 // At this point, the lane function and arguments are on the stack
896 int const nargs{ lua_gettop(L) - 1 }; 854 int const nargs{ lua_gettop(L) - 1 };
897 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 855 DEBUGSPEW_CODE(Universe* U = universe_get(L));
898 THREAD_MAKE_ASYNCH_CANCELLABLE(); 856 lane->m_status = Lane::Running; // Pending -> Running
899 THREAD_CLEANUP_PUSH(thread_cleanup_handler, lane);
900 lane->status = RUNNING; // PENDING -> RUNNING
901 857
902 // Tie "set_finalizer()" to the state 858 // Tie "set_finalizer()" to the state
903 lua_pushcfunction(L, LG_set_finalizer); 859 lua_pushcfunction(L, LG_set_finalizer);
@@ -947,18 +903,19 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
947 // the finalizer generated an error, and left its own error message [and stack trace] on the stack 903 // the finalizer generated an error, and left its own error message [and stack trace] on the stack
948 rc = rc2; // we're overruling the earlier script error or normal return 904 rc = rc2; // we're overruling the earlier script error or normal return
949 } 905 }
950 lane->waiting_on = nullptr; // just in case 906 lane->m_waiting_on = nullptr; // just in case
951 if (selfdestruct_remove(lane)) // check and remove (under lock!) 907 if (selfdestruct_remove(lane)) // check and remove (under lock!)
952 { 908 {
953 // We're a free-running thread and no-one's there to clean us up. 909 // We're a free-running thread and no-one's there to clean us up.
954 //
955 lua_close(lane->L); 910 lua_close(lane->L);
956 911 lane->L = nullptr; // just in case
957 lane->U->selfdestruct_cs.lock(); 912 lane->U->selfdestruct_cs.lock();
958 // done with lua_close(), terminal shutdown sequence may proceed 913 // done with lua_close(), terminal shutdown sequence may proceed
959 --lane->U->selfdestructing_count; 914 lane->U->selfdestructing_count.fetch_sub(1, std::memory_order_release);
960 lane->U->selfdestruct_cs.unlock(); 915 lane->U->selfdestruct_cs.unlock();
961 916
917 // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea
918 lane->m_thread.detach();
962 delete lane; 919 delete lane;
963 lane = nullptr; 920 lane = nullptr;
964 } 921 }
@@ -967,24 +924,15 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
967 { 924 {
968 // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them 925 // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them
969 926
970 enum e_status st = (rc == 0) ? DONE : CANCEL_ERROR.equals(L, 1) ? CANCELLED : ERROR_ST; 927 Lane::Status st = (rc == LUA_OK) ? Lane::Done : CANCEL_ERROR.equals(L, 1) ? Lane::Cancelled : Lane::Error;
971 928
972 // Posix no PTHREAD_TIMEDJOIN:
973 // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
974 //
975#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
976 MUTEX_LOCK(&lane->done_lock);
977 { 929 {
978#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR 930 // 'm_done_mutex' protects the -> Done|Error|Cancelled state change
979 lane->status = st; 931 std::lock_guard lock{ lane->m_done_mutex };
980#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 932 lane->m_status = st;
981 SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on) 933 lane->m_done_signal.notify_one();// wake up master (while 'lane->m_done_mutex' is on)
982 } 934 }
983 MUTEX_UNLOCK(&lane->done_lock);
984#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
985 } 935 }
986 THREAD_CLEANUP_POP(false);
987 return 0; // ignored
988} 936}
989 937
990// ################################################################################################# 938// #################################################################################################
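
lane_main now deletes the Lane, and with it its own std::jthread member, from inside the thread that jthread represents; that is why it detaches first, since a jthread destructor joins and a thread cannot join itself. A toy illustration of that self-cleanup rule (SelfCleaning is an invented type; the latch plays the same role as the lane's m_ready, making sure the thread does not touch m_thread before the launcher has finished assigning it):

    // Sketch: detach the jthread handle before a free-running thread deletes its owner.
    #include <chrono>
    #include <cstdio>
    #include <latch>
    #include <thread>

    struct SelfCleaning
    {
        std::latch m_ready{ 1 };
        std::jthread m_thread;

        void start()
        {
            m_thread = std::jthread([this]() {
                m_ready.wait(); // launcher has finished assigning m_thread once this returns
                std::puts("free-running work done, cleaning up after myself");
                m_thread.detach(); // otherwise ~jthread would try to join the current thread
                delete this;       // safe: nothing else references this object anymore
            });
            m_ready.count_down();
        }
    };

    int main()
    {
        (new SelfCleaning)->start();
        // give the detached thread a moment to finish before the process exits
        std::this_thread::sleep_for(std::chrono::milliseconds{ 100 });
    }
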
@@ -1000,13 +948,13 @@ LUAG_FUNC(require)
1000 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 948 DEBUGSPEW_CODE(Universe* U = universe_get(L));
1001 STACK_CHECK_START_REL(L, 0); 949 STACK_CHECK_START_REL(L, 0);
1002 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name)); 950 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name));
1003 DEBUGSPEW_CODE(++U->debugspew_indent_depth); 951 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1004 lua_pushvalue(L, lua_upvalueindex(1)); // "name" require 952 lua_pushvalue(L, lua_upvalueindex(1)); // "name" require
1005 lua_insert(L, 1); // require "name" 953 lua_insert(L, 1); // require "name"
1006 lua_call(L, nargs, 1); // module 954 lua_call(L, nargs, 1); // module
1007 populate_func_lookup_table(L, -1, name); 955 populate_func_lookup_table(L, -1, name);
1008 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name)); 956 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name));
1009 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 957 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1010 STACK_CHECK(L, 0); 958 STACK_CHECK(L, 0);
1011 return 1; 959 return 1;
1012} 960}
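
debugspew_indent_depth is likewise an atomic now; since it only drives the indentation of debug traces and orders nothing else, relaxed increments and decrements are enough. A tiny sketch of that usage (names invented for the example):

    // Sketch: a debug-only indentation counter with relaxed atomics.
    #include <atomic>
    #include <cstdio>

    std::atomic<int> debugspew_indent_depth{ 0 };

    void trace(char const* msg_)
    {
        int const depth{ debugspew_indent_depth.load(std::memory_order_relaxed) };
        std::fprintf(stderr, "%*s%s\n", depth * 2, "", msg_);
    }

    int main()
    {
        trace("lanes.require BEGIN");
        debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed);
        trace("populate_func_lookup_table");
        debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed);
        trace("lanes.require END");
    }
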
@@ -1019,17 +967,17 @@ LUAG_FUNC(require)
1019LUAG_FUNC(register) 967LUAG_FUNC(register)
1020{ 968{
1021 char const* name = luaL_checkstring(L, 1); 969 char const* name = luaL_checkstring(L, 1);
1022 int const mod_type = lua_type(L, 2); 970 LuaType const mod_type{ lua_type_as_enum(L, 2) };
1023 // ignore extra parameters, just in case 971 // ignore extra parameters, just in case
1024 lua_settop(L, 2); 972 lua_settop(L, 2);
1025 luaL_argcheck(L, (mod_type == LUA_TTABLE) || (mod_type == LUA_TFUNCTION), 2, "unexpected module type"); 973 luaL_argcheck(L, (mod_type == LuaType::TABLE) || (mod_type == LuaType::FUNCTION), 2, "unexpected module type");
1026 DEBUGSPEW_CODE(Universe* U = universe_get(L)); 974 DEBUGSPEW_CODE(Universe* U = universe_get(L));
1027 STACK_CHECK_START_REL(L, 0); // "name" mod_table 975 STACK_CHECK_START_REL(L, 0); // "name" mod_table
1028 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name)); 976 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s BEGIN\n" INDENT_END, name));
1029 DEBUGSPEW_CODE(++U->debugspew_indent_depth); 977 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1030 populate_func_lookup_table(L, -1, name); 978 populate_func_lookup_table(L, -1, name);
1031 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name)); 979 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lanes.register %s END\n" INDENT_END, name));
1032 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 980 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1033 STACK_CHECK(L, 0); 981 STACK_CHECK(L, 0);
1034 return 0; 982 return 0;
1035} 983}
@@ -1076,10 +1024,10 @@ LUAG_FUNC(lane_new)
1076 1024
1077 /* --- Create and prepare the sub state --- */ 1025 /* --- Create and prepare the sub state --- */
1078 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END)); 1026 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: setup\n" INDENT_END));
1079 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1027 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1080 1028
1081 // populate with selected libraries at the same time 1029 // populate with selected libraries at the same time
1082 lua_State* const L2{ luaG_newstate(U, L, libs_str) }; // L // L2 1030 lua_State* const L2{ luaG_newstate(U, Source{ L }, libs_str) }; // L // L2
1083 1031
1084 // 'lane' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread) 1032 // 'lane' is allocated from heap, not Lua, since its life span may surpass the handle's (if free running thread)
1085 Lane* const lane{ new (U) Lane{ U, L2 } }; 1033 Lane* const lane{ new (U) Lane{ U, L2 } };
@@ -1095,13 +1043,15 @@ LUAG_FUNC(lane_new)
1095 lua_State* const m_L; 1043 lua_State* const m_L;
1096 Lane* m_lane{ nullptr }; 1044 Lane* m_lane{ nullptr };
1097 int const m_gc_cb_idx; 1045 int const m_gc_cb_idx;
1046 DEBUGSPEW_CODE(Universe* const U); // for DEBUGSPEW only (hence the absence of m_ prefix)
1098 1047
1099 public: 1048 public:
1100 1049
1101 OnExit(lua_State* L_, Lane* lane_, int gc_cb_idx_) 1050 OnExit(lua_State* L_, Lane* lane_, int gc_cb_idx_ DEBUGSPEW_COMMA_PARAM(Universe* U_))
1102 : m_L{ L_ } 1051 : m_L{ L_ }
1103 , m_lane{ lane_ } 1052 , m_lane{ lane_ }
1104 , m_gc_cb_idx{ gc_cb_idx_ } 1053 , m_gc_cb_idx{ gc_cb_idx_ }
1054 DEBUGSPEW_COMMA_PARAM(U{ U_ })
1105 {} 1055 {}
1106 1056
1107 ~OnExit() 1057 ~OnExit()
@@ -1113,13 +1063,11 @@ LUAG_FUNC(lane_new)
1113 // leave a single cancel_error on the stack for the caller 1063 // leave a single cancel_error on the stack for the caller
1114 lua_settop(m_lane->L, 0); 1064 lua_settop(m_lane->L, 0);
1115 CANCEL_ERROR.pushKey(m_lane->L); 1065 CANCEL_ERROR.pushKey(m_lane->L);
1116#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 1066 {
1117 MUTEX_LOCK(&m_lane->done_lock); 1067 std::lock_guard lock{ m_lane->m_done_mutex };
1118#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR 1068 m_lane->m_status = Lane::Cancelled;
1119 m_lane->status = CANCELLED; 1069 m_lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on)
1120#if THREADWAIT_METHOD == THREADWAIT_CONDVAR 1070 }
1121 MUTEX_UNLOCK(&m_lane->done_lock);
1122#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
1123 // unblock the thread so that it can terminate gracefully 1071 // unblock the thread so that it can terminate gracefully
1124 m_lane->m_ready.count_down(); 1072 m_lane->m_ready.count_down();
1125 } 1073 }
@@ -1162,12 +1110,13 @@ LUAG_FUNC(lane_new)
1162 void success() 1110 void success()
1163 { 1111 {
1164 prepareUserData(); 1112 prepareUserData();
1113 m_lane->m_ready.count_down();
1165 m_lane = nullptr; 1114 m_lane = nullptr;
1166 } 1115 }
1167 } onExit{ L, lane, gc_cb_idx }; 1116 } onExit{ L, lane, gc_cb_idx DEBUGSPEW_COMMA_PARAM(U) };
1168 // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation 1117 // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation
1169 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END)); 1118 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END));
1170 THREAD_CREATE(&lane->thread, lane_main, lane, priority); 1119 lane->startThread(priority);
1171 1120
1172 STACK_GROW( L2, nargs + 3); // 1121 STACK_GROW( L2, nargs + 3); //
1173 STACK_CHECK_START_REL(L2, 0); 1122 STACK_CHECK_START_REL(L2, 0);
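
lane_new launches the OS thread early and lets it warm up while L2 is being prepared; the OnExit guard guarantees that whichever way lane_new leaves (normal return or a Lua error), the new thread is released from the m_ready latch, either to run the body or to shut down as Cancelled. A reduced sketch of that guard pattern; FakeLane and this OnExit are stand-ins for the example, not the real classes:

    // Sketch: RAII guard that always releases the startup latch.
    #include <cstdio>
    #include <latch>

    struct FakeLane
    {
        enum Status { Pending, Cancelled } m_status{ Pending };
        std::latch m_ready{ 1 };
    };

    class OnExit
    {
        FakeLane* m_lane;

        public:

        explicit OnExit(FakeLane* lane_) : m_lane{ lane_ } {}

        ~OnExit()
        {
            if (m_lane != nullptr) // success() was never reached: preparation failed
            {
                m_lane->m_status = FakeLane::Cancelled;
                m_lane->m_ready.count_down(); // unblock the thread so it can terminate gracefully
            }
        }

        void success()
        {
            m_lane->m_ready.count_down(); // preparation complete, let the thread run the body
            m_lane = nullptr;             // disarm the destructor
        }
    };

    int main()
    {
        FakeLane lane;
        {
            OnExit guard{ &lane };
            // ... transfer the function, arguments and globals into the lane's state ...
            guard.success();
        }
        std::printf("lane status: %s\n", lane.m_status == FakeLane::Pending ? "pending" : "cancelled");
    }
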
@@ -1185,7 +1134,8 @@ LUAG_FUNC(lane_new)
1185 { 1134 {
1186 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END)); 1135 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: update 'package'\n" INDENT_END));
1187 // when copying with mode LookupMode::LaneBody, should raise an error in case of problem, not leave it on the stack 1136
1188 (void) luaG_inter_copy_package(U, L, L2, package_idx, LookupMode::LaneBody); 1137 [[maybe_unused]] InterCopyResult const ret{ luaG_inter_copy_package(U, Source{ L }, Dest{ L2 }, package_idx, LookupMode::LaneBody) };
1138 ASSERT_L(ret == InterCopyResult::Success); // either all went well, or we should not even get here
1189 } 1139 }
1190 1140
1191 // modules to require in the target lane *before* the function is transferred! 1141
@@ -1193,19 +1143,19 @@ LUAG_FUNC(lane_new)
1193 { 1143 {
1194 int nbRequired = 1; 1144 int nbRequired = 1;
1195 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END)); 1145 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: require 'required' list\n" INDENT_END));
1196 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1146 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1197 // should not happen, was checked in lanes.lua before calling lane_new() 1147 // should not happen, was checked in lanes.lua before calling lane_new()
1198 if (lua_type(L, required_idx) != LUA_TTABLE) 1148 if (lua_type(L, required_idx) != LUA_TTABLE)
1199 { 1149 {
1200 return luaL_error(L, "expected required module list as a table, got %s", luaL_typename(L, required_idx)); 1150 luaL_error(L, "expected required module list as a table, got %s", luaL_typename(L, required_idx)); // doesn't return
1201 } 1151 }
1202 1152
1203 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil 1153 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil
1204 while( lua_next(L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname" 1154 while (lua_next(L, required_idx) != 0) // func libs priority globals package required gc_cb [... args ...] n "modname"
1205 { 1155 {
1206 if (lua_type(L, -1) != LUA_TSTRING || lua_type(L, -2) != LUA_TNUMBER || lua_tonumber(L, -2) != nbRequired) 1156 if (lua_type(L, -1) != LUA_TSTRING || lua_type(L, -2) != LUA_TNUMBER || lua_tonumber(L, -2) != nbRequired)
1207 { 1157 {
1208 return luaL_error(L, "required module list should be a list of strings"); 1158 luaL_error(L, "required module list should be a list of strings"); // doesn't return
1209 } 1159 }
1210 else 1160 else
1211 { 1161 {
@@ -1219,7 +1169,7 @@ LUAG_FUNC(lane_new)
1219 if (lua_isnil( L2, -1)) 1169 if (lua_isnil( L2, -1))
1220 { 1170 {
1221 lua_pop( L2, 1); // 1171 lua_pop( L2, 1); //
1222 return luaL_error(L, "cannot pre-require modules without loading 'package' library first"); 1172 luaL_error(L, "cannot pre-require modules without loading 'package' library first"); // doesn't return
1223 } 1173 }
1224 else 1174 else
1225 { 1175 {
@@ -1227,7 +1177,10 @@ LUAG_FUNC(lane_new)
1227 if (lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode 1177 if (lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode
1228 { 1178 {
1229 // propagate error to main state if any 1179 // propagate error to main state if any
1230 luaG_inter_move(U, L2, L, 1, LookupMode::LaneBody); // func libs priority globals package required gc_cb [... args ...] n "modname" error 1180 std::ignore = luaG_inter_move(U
1181 , Source{ L2 }, Dest{ L }
1182 , 1, LookupMode::LaneBody
1183 ); // func libs priority globals package required gc_cb [... args ...] n "modname" error
1231 raise_lua_error(L); 1184 raise_lua_error(L);
1232 } 1185 }
1233 // after requiring the module, register the functions it exported in our name<->function database 1186 // after requiring the module, register the functions it exported in our name<->function database
@@ -1238,7 +1191,7 @@ LUAG_FUNC(lane_new)
1238 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] n 1191 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] n
1239 ++ nbRequired; 1192 ++ nbRequired;
1240 } // func libs priority globals package required gc_cb [... args ...] 1193 } // func libs priority globals package required gc_cb [... args ...]
1241 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1194 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1242 } 1195 }
1243 STACK_CHECK(L, 0); 1196 STACK_CHECK(L, 0);
1244 STACK_CHECK(L2, 0); // 1197 STACK_CHECK(L2, 0); //
@@ -1251,49 +1204,54 @@ LUAG_FUNC(lane_new)
1251 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END)); 1204 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer globals\n" INDENT_END));
1252 if (!lua_istable(L, globals_idx)) 1205 if (!lua_istable(L, globals_idx))
1253 { 1206 {
1254 return luaL_error(L, "Expected table, got %s", luaL_typename(L, globals_idx)); 1207 luaL_error(L, "Expected table, got %s", luaL_typename(L, globals_idx)); // doesn't return
1255 } 1208 }
1256 1209
1257 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1210 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1258 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil 1211 lua_pushnil(L); // func libs priority globals package required gc_cb [... args ...] nil
1259 // Lua 5.2 wants us to push the globals table on the stack 1212 // Lua 5.2 wants us to push the globals table on the stack
1260 lua_pushglobaltable(L2); // _G 1213 lua_pushglobaltable(L2); // _G
1261 while( lua_next(L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v 1214 while( lua_next(L, globals_idx)) // func libs priority globals package required gc_cb [... args ...] k v
1262 { 1215 {
1263 luaG_inter_copy(U, L, L2, 2, LookupMode::LaneBody); // _G k v 1216 std::ignore = luaG_inter_copy(U, Source{ L }, Dest{ L2 }, 2, LookupMode::LaneBody); // _G k v
1264 // assign it in L2's globals table 1217 // assign it in L2's globals table
1265 lua_rawset(L2, -3); // _G 1218 lua_rawset(L2, -3); // _G
1266 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] k 1219 lua_pop(L, 1); // func libs priority globals package required gc_cb [... args ...] k
1267 } // func libs priority globals package required gc_cb [... args ...] 1220 } // func libs priority globals package required gc_cb [... args ...]
1268 lua_pop( L2, 1); // 1221 lua_pop( L2, 1); //
1269 1222
1270 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1223 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1271 } 1224 }
1272 STACK_CHECK(L, 0); 1225 STACK_CHECK(L, 0);
1273 STACK_CHECK(L2, 0); 1226 STACK_CHECK(L2, 0);
1274 1227
1275 // Lane main function 1228 // Lane main function
1276 if (lua_type(L, 1) == LUA_TFUNCTION) 1229 LuaType const func_type{ lua_type_as_enum(L, 1) };
1230 if (func_type == LuaType::FUNCTION)
1277 { 1231 {
1278 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END)); 1232 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane body\n" INDENT_END));
1279 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1233 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1280 lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func 1234 lua_pushvalue(L, 1); // func libs priority globals package required gc_cb [... args ...] func
1281 int const res{ luaG_inter_move(U, L, L2, 1, LookupMode::LaneBody) };// func libs priority globals package required gc_cb [... args ...] // func 1235 InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, 1, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb [... args ...] // func
1282 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1236 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1283 if (res != 0) 1237 if (res != InterCopyResult::Success)
1284 { 1238 {
1285 return luaL_error(L, "tried to copy unsupported types"); 1239 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1286 } 1240 }
1287 } 1241 }
1288 else if (lua_type(L, 1) == LUA_TSTRING) 1242 else if (func_type == LuaType::STRING)
1289 { 1243 {
1290 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: compile lane body\n" INDENT_END)); 1244 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: compile lane body\n" INDENT_END));
1291 // compile the string 1245 // compile the string
1292 if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func 1246 if (luaL_loadstring(L2, lua_tostring(L, 1)) != 0) // func
1293 { 1247 {
1294 return luaL_error(L, "error when parsing lane function code"); 1248 luaL_error(L, "error when parsing lane function code"); // doesn't return
1295 } 1249 }
1296 } 1250 }
1251 else
1252 {
1253 luaL_error(L, "Expected function, got %s", lua_typename(L, func_type)); // doesn't return
1254 }
1297 STACK_CHECK(L, 0); 1255 STACK_CHECK(L, 0);
1298 STACK_CHECK(L2, 1); 1256 STACK_CHECK(L2, 1);
1299 ASSERT_L(lua_isfunction(L2, 1)); 1257 ASSERT_L(lua_isfunction(L2, 1));
@@ -1301,14 +1259,13 @@ LUAG_FUNC(lane_new)
1301 // revive arguments 1259 // revive arguments
1302 if (nargs > 0) 1260 if (nargs > 0)
1303 { 1261 {
1304 int res; 1262 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END));
1305 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lane_new: transfer lane arguments\n" INDENT_END)); 1263 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1306 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1264 InterCopyResult const res{ luaG_inter_move(U, Source{ L }, Dest{ L2 }, nargs, LookupMode::LaneBody) }; // func libs priority globals package required gc_cb // func [... args ...]
1307 res = luaG_inter_move(U, L, L2, nargs, LookupMode::LaneBody); // func libs priority globals package required gc_cb // func [... args ...] 1265 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1308 DEBUGSPEW_CODE( -- U->debugspew_indent_depth); 1266 if (res != InterCopyResult::Success)
1309 if (res != 0)
1310 { 1267 {
1311 return luaL_error(L, "tried to copy unsupported types"); 1268 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1312 } 1269 }
1313 } 1270 }
1314 STACK_CHECK(L, -nargs); 1271 STACK_CHECK(L, -nargs);
@@ -1323,8 +1280,7 @@ LUAG_FUNC(lane_new)
1323 onExit.success(); 1280 onExit.success();
1324 // we should have the lane userdata on top of the stack 1281 // we should have the lane userdata on top of the stack
1325 STACK_CHECK(L, 1); 1282 STACK_CHECK(L, 1);
1326 lane->m_ready.count_down(); 1283 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
1327 DEBUGSPEW_CODE(--U->debugspew_indent_depth);
1328 return 1; 1284 return 1;
1329} 1285}
1330 1286
@@ -1342,10 +1298,10 @@ LUAG_FUNC(lane_new)
1342// and the issue of canceling/killing threads at gc is not very nice, either 1298// and the issue of canceling/killing threads at gc is not very nice, either
1343// (would easily cause waits at gc cycle, which we don't want). 1299// (would easily cause waits at gc cycle, which we don't want).
1344// 1300//
1345static int lane_gc(lua_State* L) 1301[[nodiscard]] static int lane_gc(lua_State* L)
1346{ 1302{
1347 bool have_gc_cb{ false }; 1303 bool have_gc_cb{ false };
1348 Lane* lane{ lua_toLane(L, 1) }; // ud 1304 Lane* const lane{ lua_toLane(L, 1) }; // ud
1349 1305
1350 // is there a gc callback? 1306
1351 lua_getiuservalue(L, 1, 1); // ud uservalue 1307 lua_getiuservalue(L, 1, 1); // ud uservalue
@@ -1363,30 +1319,7 @@ static int lane_gc(lua_State* L)
1363 } 1319 }
1364 1320
1365 // We can read 'lane->status' without locks, but not wait for it 1321 // We can read 'lane->status' without locks, but not wait for it
1366 // test Killed state first, as it doesn't need to enter the selfdestruct chain 1322 if (lane->m_status < Lane::Done)
1367 if (lane->mstatus == Lane::Killed)
1368 {
1369 // Make sure a kill has proceeded, before cleaning up the data structure.
1370 //
1371 // NO lua_close() in this case because we don't know where execution of the state was interrupted
1372 DEBUGSPEW_CODE(fprintf(stderr, "** Joining with a killed thread (needs testing) **"));
1373 // make sure the thread is no longer running, just like thread_join()
1374 if (!THREAD_ISNULL(lane->thread))
1375 {
1376 THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
1377 }
1378 if (lane->status >= DONE && lane->L)
1379 {
1380 // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing
1381 // now, thread_cancel() will not forcefully kill a lane with lane->status >= DONE, so I am not sure it can ever happen
1382 lua_close(lane->L);
1383 lane->L = nullptr;
1384 // just in case, but s will be freed soon so...
1385 lane->debug_name = "<gc>";
1386 }
1387 DEBUGSPEW_CODE(fprintf(stderr, "** Joined ok **"));
1388 }
1389 else if (lane->status < DONE)
1390 { 1323 {
1391 // still running: will have to be cleaned up later 1324 // still running: will have to be cleaned up later
1392 selfdestruct_add(lane); 1325 selfdestruct_add(lane);
@@ -1431,29 +1364,27 @@ static int lane_gc(lua_State* L)
1431// / "error" finished at an error, error value is there 1364// / "error" finished at an error, error value is there
1432// / "cancelled" execution cancelled by M (state gone) 1365// / "cancelled" execution cancelled by M (state gone)
1433// 1366//
1434static char const * thread_status_string(Lane* lane_) 1367[[nodiscard]] static char const* thread_status_string(Lane* lane_)
1435{ 1368{
1436 enum e_status const st{ lane_->status }; // read just once (volatile) 1369 Lane::Status const st{ lane_->m_status }; // read just once (volatile)
1437 char const* str = 1370 char const* str =
1438 (lane_->mstatus == Lane::Killed) ? "killed" : // new to v3.3.0! 1371 (st == Lane::Pending) ? "pending" :
1439 (st == PENDING) ? "pending" : 1372 (st == Lane::Running) ? "running" : // like in 'co.status()'
1440 (st == RUNNING) ? "running" : // like in 'co.status()' 1373 (st == Lane::Waiting) ? "waiting" :
1441 (st == WAITING) ? "waiting" : 1374 (st == Lane::Done) ? "done" :
1442 (st == DONE) ? "done" : 1375 (st == Lane::Error) ? "error" :
1443 (st == ERROR_ST) ? "error" : 1376 (st == Lane::Cancelled) ? "cancelled" : nullptr;
1444 (st == CANCELLED) ? "cancelled" : nullptr;
1445 return str; 1377 return str;
1446} 1378}
1447 1379
1448// ################################################################################################# 1380// #################################################################################################
1449 1381
1450int push_thread_status(lua_State* L, Lane* lane_) 1382void push_thread_status(lua_State* L, Lane* lane_)
1451{ 1383{
1452 char const* const str{ thread_status_string(lane_) }; 1384 char const* const str{ thread_status_string(lane_) };
1453 ASSERT_L(str); 1385 ASSERT_L(str);
1454 1386
1455 lua_pushstring(L, str); 1387 std::ignore = lua_pushstring(L, str);
1456 return 1;
1457} 1388}
1458 1389
1459// ################################################################################################# 1390// #################################################################################################
@@ -1469,9 +1400,10 @@ int push_thread_status(lua_State* L, Lane* lane_)
1469LUAG_FUNC(thread_join) 1400LUAG_FUNC(thread_join)
1470{ 1401{
1471 Lane* const lane{ lua_toLane(L, 1) }; 1402 Lane* const lane{ lua_toLane(L, 1) };
1472 lua_Number const wait_secs{ luaL_optnumber(L, 2, -1.0) }; 1403 lua_Duration const duration{ luaL_optnumber(L, 2, -1.0) };
1473 lua_State* const L2{ lane->L }; 1404 lua_State* const L2{ lane->L };
1474 bool const done{ THREAD_ISNULL(lane->thread) || THREAD_WAIT(&lane->thread, wait_secs, &lane->done_signal, &lane->done_lock, &lane->status) }; 1405
1406 bool const done{ !lane->m_thread.joinable() || lane->waitForCompletion(duration) };
1475 if (!done || !L2) 1407 if (!done || !L2)
1476 { 1408 {
1477 STACK_GROW(L, 2); 1409 STACK_GROW(L, 2);
@@ -1481,61 +1413,50 @@ LUAG_FUNC(thread_join)
1481 } 1413 }
1482 1414
1483 STACK_CHECK_START_REL(L, 0); 1415 STACK_CHECK_START_REL(L, 0);
1484 // Thread is DONE/ERROR_ST/CANCELLED; all ours now 1416 // Thread is Done/Error/Cancelled; all ours now
1485 1417
1486 int ret{ 0 }; 1418 int ret{ 0 };
1487 if (lane->mstatus == Lane::Killed) // OS thread was killed if thread_cancel was forced 1419 Universe* const U{ lane->U };
1488 { 1420 // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
1489 // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values 1421 // so store it in the userdata uservalue at a key that can't possibly collide
1490 STACK_GROW(L, 2); 1422 securize_debug_threadname(L, lane);
1491 lua_pushnil(L); 1423 switch (lane->m_status)
1492 lua_pushliteral(L, "killed");
1493 ret = 2;
1494 }
1495 else
1496 { 1424 {
1497 Universe* const U{ lane->U }; 1425 case Lane::Done:
1498 // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
1499 // so store it in the userdata uservalue at a key that can't possibly collide
1500 securize_debug_threadname(L, lane);
1501 switch (lane->status)
1502 { 1426 {
1503 case DONE: 1427 int const n{ lua_gettop(L2) }; // whole L2 stack
1428 if ((n > 0) && (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success))
1504 { 1429 {
1505 int const n{ lua_gettop(L2) }; // whole L2 stack 1430 luaL_error(L, "tried to copy unsupported types"); // doesn't return
1506 if ((n > 0) && (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0))
1507 {
1508 return luaL_error(L, "tried to copy unsupported types");
1509 }
1510 ret = n;
1511 } 1431 }
1512 break; 1432 ret = n;
1433 }
1434 break;
1513 1435
1514 case ERROR_ST: 1436 case Lane::Error:
1437 {
1438 int const n{ lua_gettop(L2) };
1439 STACK_GROW(L, 3);
1440 lua_pushnil(L);
1441 // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
1442 if (luaG_inter_move(U, Source{ L2 }, Dest{ L }, n, LookupMode::LaneBody) != InterCopyResult::Success) // nil "err" [trace]
1515 { 1443 {
1516 int const n{ lua_gettop(L2) }; 1444 luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n)); // doesn't return
1517 STACK_GROW(L, 3);
1518 lua_pushnil(L);
1519 // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
1520 if (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0) // nil "err" [trace]
1521 {
1522 return luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n));
1523 }
1524 ret = 1 + n;
1525 } 1445 }
1526 break; 1446 ret = 1 + n;
1447 }
1448 break;
1527 1449
1528 case CANCELLED: 1450 case Lane::Cancelled:
1529 ret = 0; 1451 ret = 0;
1530 break; 1452 break;
1531 1453
1532 default: 1454 default:
1533 DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->status)); 1455 DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->m_status));
1534 ASSERT_L(false); 1456 ASSERT_L(false);
1535 ret = 0; 1457 ret = 0;
1536 }
1537 lua_close(L2);
1538 } 1458 }
1459 lua_close(L2);
1539 lane->L = nullptr; 1460 lane->L = nullptr;
1540 STACK_CHECK(L, ret); 1461 STACK_CHECK(L, ret);
1541 return ret; 1462 return ret;
@@ -1591,21 +1512,18 @@ LUAG_FUNC(thread_index)
1591 lua_pushcfunction(L, LG_thread_join); 1512 lua_pushcfunction(L, LG_thread_join);
1592 lua_pushvalue(L, UD); 1513 lua_pushvalue(L, UD);
1593 lua_call(L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+ 1514 lua_call(L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+
1594 switch (lane->status) 1515 switch (lane->m_status)
1595 { 1516 {
1596 default: 1517 default:
1597 if (lane->mstatus != Lane::Killed) 1518 // this is an internal error, we probably never get here
1598 { 1519 lua_settop(L, 0);
1599 // this is an internal error, we probably never get here 1520 lua_pushliteral(L, "Unexpected status: ");
1600 lua_settop(L, 0); 1521 lua_pushstring(L, thread_status_string(lane));
1601 lua_pushliteral(L, "Unexpected status: "); 1522 lua_concat(L, 2);
1602 lua_pushstring(L, thread_status_string(lane)); 1523 raise_lua_error(L);
1603 lua_concat(L, 2);
1604 raise_lua_error(L);
1605 }
1606 [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack 1524 [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack
1607 1525
1608 case DONE: // got regular return values 1526 case Lane::Done: // got regular return values
1609 { 1527 {
1610 int const nvalues{ lua_gettop(L) - 3 }; 1528 int const nvalues{ lua_gettop(L) - 3 };
1611 for (int i = nvalues; i > 0; --i) 1529 for (int i = nvalues; i > 0; --i)
@@ -1616,7 +1534,7 @@ LUAG_FUNC(thread_index)
1616 } 1534 }
1617 break; 1535 break;
1618 1536
1619 case ERROR_ST: // got 3 values: nil, errstring, callstack table 1537 case Lane::Error: // got 3 values: nil, errstring, callstack table
1620 // me[-2] could carry the stack table, but even 1538 // me[-2] could carry the stack table, but even
1621 // me[-1] is rather unnecessary (and undocumented); 1539 // me[-1] is rather unnecessary (and undocumented);
1622 // use ':join()' instead. --AKa 22-Jan-2009 1540 // use ':join()' instead. --AKa 22-Jan-2009
@@ -1627,7 +1545,7 @@ LUAG_FUNC(thread_index)
1627 lua_rawset(L, USR); 1545 lua_rawset(L, USR);
1628 break; 1546 break;
1629 1547
1630 case CANCELLED: 1548 case Lane::Cancelled:
1631 // do nothing 1549 // do nothing
1632 break; 1550 break;
1633 } 1551 }
@@ -1669,11 +1587,12 @@ LUAG_FUNC(thread_index)
1669 } 1587 }
1670 if (lua_type(L, KEY) == LUA_TSTRING) 1588 if (lua_type(L, KEY) == LUA_TSTRING)
1671 { 1589 {
1672 char const * const keystr = lua_tostring(L, KEY); 1590 char const* const keystr{ lua_tostring(L, KEY) };
1673 lua_settop(L, 2); // keep only our original arguments on the stack 1591 lua_settop(L, 2); // keep only our original arguments on the stack
1674 if (strcmp( keystr, "status") == 0) 1592 if (strcmp( keystr, "status") == 0)
1675 { 1593 {
1676 return push_thread_status(L, lane); // push the string representing the status 1594 push_thread_status(L, lane); // push the string representing the status
1595 return 1;
1677 } 1596 }
1678 // return UD.metatable[key] 1597 // return UD.metatable[key]
1679 lua_getmetatable(L, UD); // UD KEY mt 1598 lua_getmetatable(L, UD); // UD KEY mt
@@ -1713,20 +1632,20 @@ LUAG_FUNC(threads)
1713 { 1632 {
1714 Lane* lane{ U->tracking_first }; 1633 Lane* lane{ U->tracking_first };
1715 int index = 0; 1634 int index = 0;
1716 lua_newtable(L); // {} 1635 lua_newtable(L); // {}
1717 while (lane != TRACKING_END) 1636 while (lane != TRACKING_END)
1718 { 1637 {
1719 // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other 1638 // insert a { name, status } tuple, so that several lanes with the same name can't clobber each other
1720 lua_newtable(L); // {} {} 1639 lua_newtable(L); // {} {}
1721 lua_pushstring(L, lane->debug_name); // {} {} "name" 1640 lua_pushstring(L, lane->debug_name); // {} {} "name"
1722 lua_setfield(L, -2, "name"); // {} {} 1641 lua_setfield(L, -2, "name"); // {} {}
1723 push_thread_status(L, lane); // {} {} "status" 1642 push_thread_status(L, lane); // {} {} "status"
1724 lua_setfield(L, -2, "status"); // {} {} 1643 lua_setfield(L, -2, "status"); // {} {}
1725 lua_rawseti(L, -2, ++index); // {} 1644 lua_rawseti(L, -2, ++index); // {}
1726 lane = lane->tracking_next; 1645 lane = lane->tracking_next;
1727 } 1646 }
1728 } 1647 }
1729 return lua_gettop(L) - top; // 0 or 1 1648 return lua_gettop(L) - top; // 0 or 1
1730} 1649}
1731#endif // HAVE_LANE_TRACKING() 1650#endif // HAVE_LANE_TRACKING()
1732 1651
@@ -1737,13 +1656,17 @@ LUAG_FUNC(threads)
1737 */ 1656 */
1738 1657
1739/* 1658/*
1740* secs= now_secs() 1659* secs = now_secs()
1741* 1660*
1742* Returns the current time, as seconds (millisecond resolution). 1661* Returns the current time, as seconds. Resolution depends on std::system_clock implementation
1662* Can't use std::chrono::steady_clock because we need the same baseline as std::mktime
1743*/ 1663*/
1744LUAG_FUNC(now_secs) 1664LUAG_FUNC(now_secs)
1745{ 1665{
1746 lua_pushnumber(L, now_secs()); 1666 auto const now{ std::chrono::system_clock::now() };
1667 lua_Duration duration { now.time_since_epoch() };
1668
1669 lua_pushnumber(L, duration.count());
1747 return 1; 1670 return 1;
1748} 1671}
1749 1672
@@ -1788,8 +1711,7 @@ LUAG_FUNC(wakeup_conv)
1788 lua_pop(L,1); 1711 lua_pop(L,1);
1789 STACK_CHECK(L, 0); 1712 STACK_CHECK(L, 0);
1790 1713
1791 struct tm t; 1714 std::tm t{};
1792 memset(&t, 0, sizeof(t));
1793 t.tm_year = year - 1900; 1715 t.tm_year = year - 1900;
1794 t.tm_mon= month-1; // 0..11 1716 t.tm_mon= month-1; // 0..11
1795 t.tm_mday= day; // 1..31 1717 t.tm_mday= day; // 1..31
@@ -1798,7 +1720,7 @@ LUAG_FUNC(wakeup_conv)
1798 t.tm_sec= sec; // 0..60 1720 t.tm_sec= sec; // 0..60
1799 t.tm_isdst= isdst; // 0/1/negative 1721 t.tm_isdst= isdst; // 0/1/negative
1800 1722
1801 lua_pushnumber(L, static_cast<lua_Number>(mktime(&t))); // ms=0 1723 lua_pushnumber(L, static_cast<lua_Number>(std::mktime(&t))); // resolution: 1 second
1802 return 1; 1724 return 1;
1803} 1725}
1804 1726
@@ -1809,7 +1731,7 @@ LUAG_FUNC(wakeup_conv)
1809 */ 1731 */
1810 1732
1811extern int LG_linda(lua_State* L); 1733extern int LG_linda(lua_State* L);
1812static const struct luaL_Reg lanes_functions[] = 1734static struct luaL_Reg const lanes_functions[] =
1813{ 1735{
1814 { "linda", LG_linda }, 1736 { "linda", LG_linda },
1815 { "now_secs", LG_now_secs }, 1737 { "now_secs", LG_now_secs },
@@ -1822,116 +1744,46 @@ static const struct luaL_Reg lanes_functions[] =
1822 { nullptr, nullptr } 1744 { nullptr, nullptr }
1823}; 1745};
1824 1746
1825/*
1826 * One-time initializations
1827 * settings table it at position 1 on the stack
1828 * pushes an error string on the stack in case of problem
1829 */
1830static void init_once_LOCKED( void)
1831{
1832#if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
1833 now_secs(); // initialize 'now_secs()' internal offset
1834#endif
1835
1836#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
1837 chudInitialize();
1838#endif
1839
1840 //---
1841 // Linux needs SCHED_RR to change thread priorities, and that is only
1842 // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
1843 // SCHED_OTHER threads are always lower priority than SCHED_RR.
1844 //
1845 // ^-- those apply to 2.6 kernel. IF **wishful thinking** these
1846 // constraints will change in the future, non-sudo priorities can
1847 // be enabled also for Linux.
1848 //
1849#ifdef PLATFORM_LINUX
1850 sudo = (geteuid() == 0); // we are root?
1851
1852 // If lower priorities (-2..-1) are wanted, we need to lift the main
1853 // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
1854 // the launched threads (even -2).
1855 //
1856#ifdef LINUX_SCHED_RR
1857 if (sudo)
1858 {
1859 struct sched_param sp;
1860 sp.sched_priority = _PRIO_0;
1861 PT_CALL(pthread_setschedparam(pthread_self(), SCHED_RR, &sp));
1862 }
1863#endif // LINUX_SCHED_RR
1864#endif // PLATFORM_LINUX
1865}
1866
1867// ################################################################################################# 1747// #################################################################################################
1868 1748
1869static volatile long s_initCount = 0;
1870
1871// upvalue 1: module name 1749// upvalue 1: module name
1872// upvalue 2: module table 1750// upvalue 2: module table
1873// param 1: settings table 1751// param 1: settings table
1874LUAG_FUNC(configure) 1752LUAG_FUNC(configure)
1875{ 1753{
1876 Universe* U = universe_get(L); 1754 // start with one-time initializations.
1877 bool const from_master_state{ U == nullptr };
1878 char const* name = luaL_checkstring(L, lua_upvalueindex(1));
1879 ASSERT_L(lua_type(L, 1) == LUA_TTABLE);
1880
1881 /*
1882 ** Making one-time initializations.
1883 **
1884 ** When the host application is single-threaded (and all threading happens via Lanes)
1885 ** there is no problem. But if the host is multithreaded, we need to lock around the
1886 ** initializations.
1887 */
1888#if THREADAPI == THREADAPI_WINDOWS
1889 { 1755 {
1890 static volatile int /*bool*/ go_ahead; // = 0 1756 // C++ guarantees that the static variable initialization is threadsafe.
1891 if (InterlockedCompareExchange(&s_initCount, 1, 0) == 0) 1757 static auto _ = std::invoke(
1892 { 1758 []()
1893 init_once_LOCKED();
1894 go_ahead = 1; // let others pass
1895 }
1896 else
1897 {
1898 while (!go_ahead)
1899 {
1900 Sleep(1);
1901 } // changes threads
1902 }
1903 }
1904#else // THREADAPI == THREADAPI_PTHREAD
1905 if (s_initCount == 0)
1906 {
1907 static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
1908 pthread_mutex_lock(&my_lock);
1909 {
1910 // Recheck now that we're within the lock
1911 //
1912 if (s_initCount == 0)
1913 { 1759 {
1914 init_once_LOCKED(); 1760#if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
1915 s_initCount = 1; 1761 chudInitialize();
1762#endif
1763 return false;
1916 } 1764 }
1917 } 1765 );
1918 pthread_mutex_unlock(&my_lock);
1919 } 1766 }
1920#endif // THREADAPI == THREADAPI_PTHREAD 1767
1768 Universe* U = universe_get(L);
1769 bool const from_master_state{ U == nullptr };
1770 char const* name = luaL_checkstring(L, lua_upvalueindex(1));
1771 ASSERT_L(lua_type(L, 1) == LUA_TTABLE);
1921 1772
1922 STACK_GROW(L, 4); 1773 STACK_GROW(L, 4);
1923 STACK_CHECK_START_ABS(L, 1); // settings 1774 STACK_CHECK_START_ABS(L, 1); // settings
1924 1775
1925 DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L)); 1776 DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
1926 DEBUGSPEW_CODE( if (U) ++ U->debugspew_indent_depth); 1777 DEBUGSPEW_CODE(if (U) U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1927 1778
1928 if(U == nullptr) 1779 if (U == nullptr)
1929 { 1780 {
1930 U = universe_create( L); // settings universe 1781 U = universe_create(L); // settings universe
1931 DEBUGSPEW_CODE( ++ U->debugspew_indent_depth); 1782 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
1932 lua_newtable( L); // settings universe mt 1783 lua_newtable( L); // settings universe mt
1933 lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout 1784 lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout
1934 lua_pushcclosure(L, universe_gc, 1); // settings universe mt universe_gc 1785 lua_getfield(L, 1, "shutdown_mode"); // settings universe mt shutdown_timeout shutdown_mode
1786 lua_pushcclosure(L, universe_gc, 2); // settings universe mt universe_gc
1935 lua_setfield(L, -2, "__gc"); // settings universe mt 1787 lua_setfield(L, -2, "__gc"); // settings universe mt
1936 lua_setmetatable(L, -2); // settings universe 1788 lua_setmetatable(L, -2); // settings universe
1937 lua_pop(L, 1); // settings 1789 lua_pop(L, 1); // settings
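
The one-time initialization that used to go through InterlockedCompareExchange or a pthread mutex is now handled by a function-local static: C++11 guarantees its initializer runs exactly once even if several host threads call lanes.configure() concurrently. A minimal demonstration of that idiom (the work done inside the lambda is just a placeholder):

    // Sketch: thread-safe one-time initialization via a function-local static.
    #include <cstdio>
    #include <functional>
    #include <thread>
    #include <vector>

    void configure_like()
    {
        static bool const init_done{ std::invoke([]() {
            std::puts("one-time platform initialization"); // printed exactly once
            return true;
        }) };
        (void) init_done;
    }

    int main()
    {
        std::vector<std::jthread> hosts;
        for (int i = 0; i < 8; ++i)
            hosts.emplace_back(configure_like);
    }
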
@@ -1988,7 +1840,7 @@ LUAG_FUNC(configure)
1988 STACK_CHECK(L, 2); 1840 STACK_CHECK(L, 2);
1989 1841
1990 { 1842 {
1991 char const* errmsg{ push_deep_proxy(L, U->timer_deep, 0, LookupMode::LaneBody) }; // settings M timer_deep 1843 char const* errmsg{ push_deep_proxy(Dest{ L }, U->timer_deep, 0, LookupMode::LaneBody) }; // settings M timer_deep
1992 if (errmsg != nullptr) 1844 if (errmsg != nullptr)
1993 { 1845 {
1994 return luaL_error(L, errmsg); 1846 return luaL_error(L, errmsg);
@@ -2070,7 +1922,7 @@ LUAG_FUNC(configure)
2070 CONFIG_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); }); 1922 CONFIG_REGKEY.setValue(L, [](lua_State* L) { lua_pushvalue(L, -2); });
2071 STACK_CHECK(L, 1); 1923 STACK_CHECK(L, 1);
2072 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L)); 1924 DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L));
2073 DEBUGSPEW_CODE(--U->debugspew_indent_depth); 1925 DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_sub(1, std::memory_order_relaxed));
2074 // Return the settings table 1926 // Return the settings table
2075 return 1; 1927 return 1;
2076} 1928}
@@ -2178,9 +2030,9 @@ LANES_API int luaopen_lanes_core( lua_State* L)
2178 return 1; 2030 return 1;
2179} 2031}
2180 2032
2181static int default_luaopen_lanes( lua_State* L) 2033[[nodiscard]] static int default_luaopen_lanes(lua_State* L)
2182{ 2034{
2183 int rc = luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0); 2035 int const rc{ luaL_loadfile(L, "lanes.lua") || lua_pcall(L, 0, 1, 0) };
2184 if (rc != LUA_OK) 2036 if (rc != LUA_OK)
2185 { 2037 {
2186 return luaL_error(L, "failed to initialize embedded Lanes"); 2038 return luaL_error(L, "failed to initialize embedded Lanes");