path: root/src/lanes.cpp
Diffstat (limited to 'src/lanes.cpp')
-rw-r--r--   src/lanes.cpp   337
1 file changed, 123 insertions(+), 214 deletions(-)
diff --git a/src/lanes.cpp b/src/lanes.cpp
index 08584a2..4dd9b46 100644
--- a/src/lanes.cpp
+++ b/src/lanes.cpp
@@ -108,11 +108,6 @@ Lane::Lane(Universe* U_, lua_State* L_)
 : U{ U_ }
 , L{ L_ }
 {
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-    MUTEX_INIT(&done_lock);
-    SIGNAL_INIT(&done_signal);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-
 #if HAVE_LANE_TRACKING()
     if (U->tracking_first)
     {
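
The removed done_lock/done_signal pair is superseded by standard C++ synchronization members that the rest of this patch uses (m_done_mutex, m_done_signal, m_thread, m_ready). A minimal sketch of what the corresponding Lane declarations presumably look like; the header is not part of this diff, so the exact layout is an assumption:

    // Assumed shape of the Lane members this patch relies on (the real
    // declarations live in the project headers, which are not shown here):
    #include <condition_variable>
    #include <latch>
    #include <mutex>
    #include <thread>

    class Lane
    {
        public:
        std::jthread m_thread;                 // replaces THREAD_T thread managed with THREAD_CREATE()/THREAD_KILL()
        std::condition_variable m_done_signal; // replaces SIGNAL_T done_signal
        std::mutex m_done_mutex;               // replaces MUTEX_T done_lock
        std::latch m_ready{ 1 };               // lane_main() blocks on this until the launcher has prepared the lane state
        // ... status, L, U, selfdestruct_next, etc. as before ...
    };
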
@@ -121,6 +116,29 @@ Lane::Lane(Universe* U_, lua_State* L_)
 #endif // HAVE_LANE_TRACKING()
 }
 
+bool Lane::waitForCompletion(lua_Duration duration_)
+{
+    std::chrono::time_point<std::chrono::steady_clock> until{ std::chrono::time_point<std::chrono::steady_clock>::max() };
+    if (duration_.count() >= 0.0)
+    {
+        until = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
+    }
+
+    std::unique_lock lock{ m_done_mutex };
+    //std::stop_token token{ m_thread.get_stop_token() };
+    //return m_done_signal.wait_for(lock, token, secs_, [this](){ return status >= DONE; });
+    return m_done_signal.wait_until(lock, until, [this](){ return status >= DONE; });
+}
+
+static void lane_main(Lane* lane);
+void Lane::startThread(int priority_)
+{
+    m_thread = std::jthread([this]() { lane_main(this); });
+    if (priority_ != THREAD_PRIO_DEFAULT)
+    {
+        JTHREAD_SET_PRIORITY(m_thread, priority_);
+    }
+}
 
 /* Do you want full call stacks, or just the line where the error happened?
 *
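
waitForCompletion() is the standard condition-variable wait idiom: take the mutex, then wait_until() with a predicate, which returns false on timeout and true once the predicate holds, re-checking it after every (possibly spurious) wakeup. A minimal self-contained illustration of that idiom, independent of the Lane types:

    // Standalone sketch of the wait pattern used by Lane::waitForCompletion().
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    int main()
    {
        std::mutex m;
        std::condition_variable cv;
        bool done{ false };

        std::jthread worker{ [&]() {
            std::this_thread::sleep_for(std::chrono::milliseconds{ 50 });
            std::lock_guard lock{ m }; // state change and notification happen under the same mutex
            done = true;
            cv.notify_one();
        } };

        auto const until{ std::chrono::steady_clock::now() + std::chrono::seconds{ 1 } };
        std::unique_lock lock{ m };
        // returns true if the predicate became true before the deadline, false on timeout
        bool const completed{ cv.wait_until(lock, until, [&]() { return done; }) };
        return completed ? 0 : 1;
    }
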
@@ -144,7 +162,7 @@ static void securize_debug_threadname(lua_State* L, Lane* lane_)
 }
 
 #if ERROR_FULL_STACK
-static int lane_error( lua_State* L);
+static int lane_error(lua_State* L);
 // crc64/we of string "STACKTRACE_REGKEY" generated at http://www.nitrxgen.net/hashgen/
 static constexpr UniqueKey STACKTRACE_REGKEY{ 0x534af7d3226a429full };
 #endif // ERROR_FULL_STACK
@@ -255,11 +273,6 @@ Lane::~Lane()
 {
     // Clean up after a (finished) thread
     //
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-    SIGNAL_FREE(&done_signal);
-    MUTEX_FREE(&done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-
 #if HAVE_LANE_TRACKING()
     if (U->tracking_first != nullptr)
     {
@@ -455,26 +468,27 @@ static bool selfdestruct_remove(Lane* lane_)
 static int universe_gc( lua_State* L)
 {
     Universe* const U{ lua_tofulluserdata<Universe>(L, 1) };
+    lua_Duration const shutdown_timeout{ lua_tonumber(L, lua_upvalueindex(1)) };
+    [[maybe_unused]] char const* const op_string{ lua_tostring(L, lua_upvalueindex(2)) };
+    CancelOp const op{ which_cancel_op(op_string) };
 
-    while (U->selfdestruct_first != SELFDESTRUCT_END) // true at most once!
+    if (U->selfdestruct_first != SELFDESTRUCT_END)
     {
+
         // Signal _all_ still running threads to exit (including the timer thread)
         //
         {
             std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
             Lane* lane{ U->selfdestruct_first };
+            lua_Duration timeout{ 1us };
             while (lane != SELFDESTRUCT_END)
             {
-                // attempt a regular unforced hard cancel with a small timeout
-                bool const cancelled{ THREAD_ISNULL(lane->thread) || thread_cancel(L, lane, CancelOp::Hard, 0.0001, false, 0.0) != CancelResult::Timeout };
-                // if we failed, and we know the thread is waiting on a linda
-                if (cancelled == false && lane->status == WAITING && lane->waiting_on != nullptr)
+                // attempt the requested cancel with a small timeout.
+                // if waiting on a linda, they will raise a cancel_error.
+                // if a cancellation hook is desired, it will be installed to try to raise an error
+                if (lane->m_thread.joinable())
                 {
-                    // signal the linda to wake up the thread so that it can react to the cancel query
-                    // let us hope we never land here with a pointer on a linda that has been destroyed...
-                    SIGNAL_T* const waiting_on{ lane->waiting_on };
-                    // lane->waiting_on = nullptr; // useful, or not?
-                    SIGNAL_ALL(waiting_on);
+                    std::ignore = thread_cancel(lane, op, 1, timeout, true);
                 }
                 lane = lane->selfdestruct_next;
             }
@@ -482,47 +496,32 @@ static int universe_gc( lua_State* L)
 
         // When noticing their cancel, the lanes will remove themselves from
         // the selfdestruct chain.
-
-        // TBD: Not sure if Windows (multi core) will require the timed approach,
-        // or single Yield. I don't have machine to test that (so leaving
-        // for timed approach). -- AKa 25-Oct-2008
-
-        // OS X 10.5 (Intel) needs more to avoid segfaults.
-        //
-        // "make test" is okay. 100's of "make require" are okay.
-        //
-        // Tested on MacBook Core Duo 2GHz and 10.5.5:
-        // -- AKa 25-Oct-2008
-        //
         {
-            lua_Number const shutdown_timeout = lua_tonumber(L, lua_upvalueindex(1));
-            double const t_until = now_secs() + shutdown_timeout;
+            std::chrono::time_point<std::chrono::steady_clock> t_until{ std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(shutdown_timeout) };
 
             while (U->selfdestruct_first != SELFDESTRUCT_END)
             {
-                YIELD(); // give threads time to act on their cancel
+                // give threads time to act on their cancel
+                YIELD();
+                // count the number of cancelled thread that didn't have the time to act yet
+                int n{ 0 };
                 {
-                    // count the number of cancelled thread that didn't have the time to act yet
-                    int n = 0;
-                    double t_now = 0.0;
+                    std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
+                    Lane* lane{ U->selfdestruct_first };
+                    while (lane != SELFDESTRUCT_END)
                     {
-                        std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-                        Lane* lane{ U->selfdestruct_first };
-                        while (lane != SELFDESTRUCT_END)
-                        {
-                            if (lane->cancel_request == CancelRequest::Hard)
-                                ++n;
-                            lane = lane->selfdestruct_next;
-                        }
-                    }
-                    // if timeout elapsed, or we know all threads have acted, stop waiting
-                    t_now = now_secs();
-                    if (n == 0 || (t_now >= t_until))
-                    {
-                        DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
-                        break;
+                        if (lane->cancel_request != CancelRequest::None)
+                            ++n;
+                        lane = lane->selfdestruct_next;
                     }
                 }
+                // if timeout elapsed, or we know all threads have acted, stop waiting
+                std::chrono::time_point<std::chrono::steady_clock> t_now = std::chrono::steady_clock::now();
+                if (n == 0 || (t_now >= t_until))
+                {
+                    DEBUGSPEW_CODE(fprintf(stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout.count()));
+                    break;
+                }
             }
         }
 
@@ -532,48 +531,17 @@ static int universe_gc( lua_State* L)
         {
             YIELD();
         }
-
-        //---
-        // Kill the still free running threads
-        //
-        if (U->selfdestruct_first != SELFDESTRUCT_END)
-        {
-            unsigned int n = 0;
-            // first thing we did was to raise the linda signals the threads were waiting on (if any)
-            // therefore, any well-behaved thread should be in CANCELLED state
-            // these are not running, and the state can be closed
-            {
-                std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
-                Lane* lane{ U->selfdestruct_first };
-                while (lane != SELFDESTRUCT_END)
-                {
-                    Lane* const next_s{ lane->selfdestruct_next };
-                    lane->selfdestruct_next = nullptr; // detach from selfdestruct chain
-                    if (!THREAD_ISNULL(lane->thread)) // can be nullptr if previous 'soft' termination succeeded
-                    {
-                        THREAD_KILL(&lane->thread);
-#if THREADAPI == THREADAPI_PTHREAD
-                        // pthread: make sure the thread is really stopped!
-                        THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
-#endif // THREADAPI == THREADAPI_PTHREAD
-                    }
-                    // NO lua_close() in this case because we don't know where execution of the state was interrupted
-                    delete lane;
-                    lane = next_s;
-                    ++n;
-                }
-                U->selfdestruct_first = SELFDESTRUCT_END;
-            }
-
-            DEBUGSPEW_CODE(fprintf(stderr, "Killed %d lane(s) at process end.\n", n));
-        }
     }
 
-    // If some lanes are currently cleaning after themselves, wait until they are done.
-    // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
-    while (U->selfdestructing_count.load(std::memory_order_acquire) > 0)
+    // If after all this, we still have some free-running lanes, it's an external user error, they should have stopped appropriately
     {
-        YIELD();
+        std::lock_guard<std::mutex> guard{ U->selfdestruct_cs };
+        Lane* lane{ U->selfdestruct_first };
+        if (lane != SELFDESTRUCT_END)
+        {
+            // this causes a leak because we don't call U's destructor (which could be bad if the still running lanes are accessing it)
+            std::ignore = luaL_error(L, "Zombie thread %s refuses to die!", lane->debug_name);
+        }
     }
 
     // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
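
The shutdown path above reduces to a yield-and-poll loop against a steady_clock deadline. A compact standalone sketch of that pattern, with a single atomic counter standing in for the walk over the selfdestruct chain:

    // Sketch only: the timed shutdown wait, reduced to its core.
    #include <atomic>
    #include <chrono>
    #include <thread>

    // Returns true if the counter reached zero before the timeout, false otherwise.
    bool wait_until_all_done(std::atomic_int const& still_running_, std::chrono::duration<double> timeout_)
    {
        auto const until{ std::chrono::steady_clock::now()
                        + std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout_) };
        while (still_running_.load(std::memory_order_acquire) != 0)
        {
            if (std::chrono::steady_clock::now() >= until)
            {
                return false; // deadline elapsed, some workers still haven't acted on their cancel
            }
            std::this_thread::yield(); // same role as YIELD() in the code above
        }
        return true;
    }
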
@@ -874,20 +842,8 @@ static char const* get_errcode_name( int _code)
 }
 #endif // USE_DEBUG_SPEW()
 
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD
-static void thread_cleanup_handler(void* opaque)
+static void lane_main(Lane* lane)
 {
-    Lane* lane{ (Lane*) opaque };
-    MUTEX_LOCK(&lane->done_lock);
-    lane->status = CANCELLED;
-    SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on)
-    MUTEX_UNLOCK(&lane->done_lock);
-}
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-
-static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
-{
-    Lane* lane{ (Lane*) vs };
     lua_State* const L{ lane->L };
     // wait until the launching thread has finished preparing L
     lane->m_ready.wait();
@@ -897,8 +853,6 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
     // At this point, the lane function and arguments are on the stack
     int const nargs{ lua_gettop(L) - 1 };
     DEBUGSPEW_CODE(Universe* U = universe_get(L));
-    THREAD_MAKE_ASYNCH_CANCELLABLE();
-    THREAD_CLEANUP_PUSH(thread_cleanup_handler, lane);
     lane->status = RUNNING; // PENDING -> RUNNING
 
     // Tie "set_finalizer()" to the state
@@ -949,18 +903,19 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
         // the finalizer generated an error, and left its own error message [and stack trace] on the stack
         rc = rc2; // we're overruling the earlier script error or normal return
     }
-    lane->waiting_on = nullptr; // just in case
+    lane->m_waiting_on = nullptr; // just in case
     if (selfdestruct_remove(lane)) // check and remove (under lock!)
     {
         // We're a free-running thread and no-one's there to clean us up.
-        //
         lua_close(lane->L);
-
+        lane->L = nullptr; // just in case
         lane->U->selfdestruct_cs.lock();
         // done with lua_close(), terminal shutdown sequence may proceed
         lane->U->selfdestructing_count.fetch_sub(1, std::memory_order_release);
         lane->U->selfdestruct_cs.unlock();
 
+        // we destroy our jthread member from inside the thread body, so we have to detach so that we don't try to join, as this doesn't seem a good idea
+        lane->m_thread.detach();
         delete lane;
         lane = nullptr;
     }
@@ -972,21 +927,14 @@ static THREAD_RETURN_T THREAD_CALLCONV lane_main(void* vs)
         enum e_status st = (rc == 0) ? DONE : CANCEL_ERROR.equals(L, 1) ? CANCELLED : ERROR_ST;
 
         // Posix no PTHREAD_TIMEDJOIN:
-        // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
+        // 'm_done_mutex' protects the -> DONE|ERROR_ST|CANCELLED state change
         //
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-        MUTEX_LOCK(&lane->done_lock);
         {
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
+            std::lock_guard lock{ lane->m_done_mutex };
             lane->status = st;
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-            SIGNAL_ONE(&lane->done_signal); // wake up master (while 'lane->done_lock' is on)
+            lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on)
         }
-        MUTEX_UNLOCK(&lane->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
     }
-    THREAD_CLEANUP_POP(false);
-    return 0; // ignored
 }
 
 // #################################################################################################
@@ -1115,13 +1063,11 @@ LUAG_FUNC(lane_new)
             // leave a single cancel_error on the stack for the caller
             lua_settop(m_lane->L, 0);
             CANCEL_ERROR.pushKey(m_lane->L);
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-            MUTEX_LOCK(&m_lane->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
-            m_lane->status = CANCELLED;
-#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
-            MUTEX_UNLOCK(&m_lane->done_lock);
-#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
+            {
+                std::lock_guard lock{ m_lane->m_done_mutex };
+                m_lane->status = CANCELLED;
+                m_lane->m_done_signal.notify_one(); // wake up master (while 'lane->m_done_mutex' is on)
+            }
             // unblock the thread so that it can terminate gracefully
             m_lane->m_ready.count_down();
         }
@@ -1170,7 +1116,7 @@ LUAG_FUNC(lane_new)
     } onExit{ L, lane, gc_cb_idx };
     // launch the thread early, it will sync with a std::latch to parallelize OS thread warmup and L2 preparation
     DEBUGSPEW_CODE(fprintf(stderr, INDENT_BEGIN "lane_new: launching thread\n" INDENT_END));
-    THREAD_CREATE(&lane->thread, lane_main, lane, priority);
+    lane->startThread(priority);
 
     STACK_GROW( L2, nargs + 3); //
     STACK_CHECK_START_REL(L2, 0);
@@ -1347,7 +1293,7 @@ LUAG_FUNC(lane_new)
 static int lane_gc(lua_State* L)
 {
     bool have_gc_cb{ false };
-    Lane* lane{ lua_toLane(L, 1) }; // ud
+    Lane* const lane{ lua_toLane(L, 1) }; // ud
 
     // if there a gc callback?
     lua_getiuservalue(L, 1, 1); // ud uservalue
@@ -1365,30 +1311,7 @@ static int lane_gc(lua_State* L)
     }
 
     // We can read 'lane->status' without locks, but not wait for it
-    // test Killed state first, as it doesn't need to enter the selfdestruct chain
-    if (lane->mstatus == Lane::Killed)
-    {
-        // Make sure a kill has proceeded, before cleaning up the data structure.
-        //
-        // NO lua_close() in this case because we don't know where execution of the state was interrupted
-        DEBUGSPEW_CODE(fprintf(stderr, "** Joining with a killed thread (needs testing) **"));
-        // make sure the thread is no longer running, just like thread_join()
-        if (!THREAD_ISNULL(lane->thread))
-        {
-            THREAD_WAIT(&lane->thread, -1, &lane->done_signal, &lane->done_lock, &lane->status);
-        }
-        if (lane->status >= DONE && lane->L)
-        {
-            // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing
-            // now, thread_cancel() will not forcefully kill a lane with lane->status >= DONE, so I am not sure it can ever happen
-            lua_close(lane->L);
-            lane->L = nullptr;
-            // just in case, but s will be freed soon so...
-            lane->debug_name = "<gc>";
-        }
-        DEBUGSPEW_CODE(fprintf(stderr, "** Joined ok **"));
-    }
-    else if (lane->status < DONE)
+    if (lane->status < DONE)
     {
         // still running: will have to be cleaned up later
         selfdestruct_add(lane);
@@ -1437,7 +1360,6 @@ static char const * thread_status_string(Lane* lane_)
 {
     enum e_status const st{ lane_->status }; // read just once (volatile)
     char const* str =
-        (lane_->mstatus == Lane::Killed) ? "killed" : // new to v3.3.0!
         (st == PENDING) ? "pending" :
         (st == RUNNING) ? "running" : // like in 'co.status()'
         (st == WAITING) ? "waiting" :
@@ -1471,9 +1393,10 @@ int push_thread_status(lua_State* L, Lane* lane_)
 LUAG_FUNC(thread_join)
 {
     Lane* const lane{ lua_toLane(L, 1) };
-    lua_Number const wait_secs{ luaL_optnumber(L, 2, -1.0) };
+    lua_Duration const duration{ luaL_optnumber(L, 2, -1.0) };
     lua_State* const L2{ lane->L };
-    bool const done{ THREAD_ISNULL(lane->thread) || THREAD_WAIT(&lane->thread, wait_secs, &lane->done_signal, &lane->done_lock, &lane->status) };
+
+    bool const done{ !lane->m_thread.joinable() || lane->waitForCompletion(duration) };
     if (!done || !L2)
     {
         STACK_GROW(L, 2);
@@ -1486,58 +1409,47 @@ LUAG_FUNC(thread_join)
     // Thread is DONE/ERROR_ST/CANCELLED; all ours now
 
     int ret{ 0 };
-    if (lane->mstatus == Lane::Killed) // OS thread was killed if thread_cancel was forced
-    {
-        // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values
-        STACK_GROW(L, 2);
-        lua_pushnil(L);
-        lua_pushliteral(L, "killed");
-        ret = 2;
-    }
-    else
+    Universe* const U{ lane->U };
+    // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
+    // so store it in the userdata uservalue at a key that can't possibly collide
+    securize_debug_threadname(L, lane);
+    switch (lane->status)
     {
-        Universe* const U{ lane->U };
-        // debug_name is a pointer to string possibly interned in the lane's state, that no longer exists when the state is closed
-        // so store it in the userdata uservalue at a key that can't possibly collide
-        securize_debug_threadname(L, lane);
-        switch (lane->status)
+        case DONE:
         {
-            case DONE:
+            int const n{ lua_gettop(L2) }; // whole L2 stack
+            if ((n > 0) && (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0))
             {
-                int const n{ lua_gettop(L2) }; // whole L2 stack
-                if ((n > 0) && (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0))
-                {
-                    return luaL_error(L, "tried to copy unsupported types");
-                }
-                ret = n;
+                return luaL_error(L, "tried to copy unsupported types");
             }
-            break;
+            ret = n;
+        }
+        break;
 
         case ERROR_ST:
+        {
+            int const n{ lua_gettop(L2) };
+            STACK_GROW(L, 3);
+            lua_pushnil(L);
+            // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
+            if (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0) // nil "err" [trace]
             {
-                int const n{ lua_gettop(L2) };
-                STACK_GROW(L, 3);
-                lua_pushnil(L);
-                // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
-                if (luaG_inter_move(U, L2, L, n, LookupMode::LaneBody) != 0) // nil "err" [trace]
-                {
-                    return luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n));
-                }
-                ret = 1 + n;
+                return luaL_error(L, "tried to copy unsupported types: %s", lua_tostring(L, -n));
             }
-            break;
+            ret = 1 + n;
+        }
+        break;
 
         case CANCELLED:
             ret = 0;
             break;
 
         default:
             DEBUGSPEW_CODE(fprintf(stderr, "Status: %d\n", lane->status));
             ASSERT_L(false);
             ret = 0;
-        }
-        lua_close(L2);
     }
+    lua_close(L2);
     lane->L = nullptr;
     STACK_CHECK(L, ret);
     return ret;
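
The optional join timeout above flows from luaL_optnumber() straight into a lua_Duration, with the -1.0 default meaning "no deadline". A small sketch of that convention, assuming lua_Duration is a std::chrono::duration with lua_Number as its representation (the typedef itself is not shown in this diff):

    #include <chrono>

    using lua_Number = double;                              // as in stock luaconf.h
    using lua_Duration = std::chrono::duration<lua_Number>; // assumption for this sketch

    std::chrono::steady_clock::time_point deadline_for(lua_Duration duration_)
    {
        if (duration_.count() < 0.0)
        {
            // negative duration: wait for the lane indefinitely
            return std::chrono::steady_clock::time_point::max();
        }
        return std::chrono::steady_clock::now()
             + std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration_);
    }
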
@@ -1596,15 +1508,12 @@ LUAG_FUNC(thread_index)
         switch (lane->status)
         {
             default:
-                if (lane->mstatus != Lane::Killed)
-                {
-                    // this is an internal error, we probably never get here
-                    lua_settop(L, 0);
-                    lua_pushliteral(L, "Unexpected status: ");
-                    lua_pushstring(L, thread_status_string(lane));
-                    lua_concat(L, 2);
-                    raise_lua_error(L);
-                }
+                // this is an internal error, we probably never get here
+                lua_settop(L, 0);
+                lua_pushliteral(L, "Unexpected status: ");
+                lua_pushstring(L, thread_status_string(lane));
+                lua_concat(L, 2);
+                raise_lua_error(L);
                 [[fallthrough]]; // fall through if we are killed, as we got nil, "killed" on the stack
 
             case DONE: // got regular return values
@@ -1790,8 +1699,7 @@ LUAG_FUNC(wakeup_conv)
     lua_pop(L,1);
     STACK_CHECK(L, 0);
 
-    struct tm t;
-    memset(&t, 0, sizeof(t));
+    std::tm t{};
     t.tm_year = year - 1900;
     t.tm_mon= month-1; // 0..11
     t.tm_mday= day; // 1..31
@@ -1800,7 +1708,7 @@ LUAG_FUNC(wakeup_conv)
     t.tm_sec= sec; // 0..60
     t.tm_isdst= isdst; // 0/1/negative
 
-    lua_pushnumber(L, static_cast<lua_Number>(mktime(&t))); // ms=0
+    lua_pushnumber(L, static_cast<lua_Number>(std::mktime(&t))); // resolution: 1 second
     return 1;
 }
 
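
The std::tm / std::mktime conversion used above, in isolation: value-initialization ({}) replaces the old memset(), and mktime() interprets the fields as local time with 1-second resolution. A minimal sketch (to_epoch is an illustrative helper, not part of the patch):

    #include <ctime>

    std::time_t to_epoch(int year_, int month_, int day_, int hour_, int min_, int sec_, int isdst_)
    {
        std::tm t{};            // all fields zeroed, no memset needed
        t.tm_year = year_ - 1900;
        t.tm_mon = month_ - 1;  // 0..11
        t.tm_mday = day_;       // 1..31
        t.tm_hour = hour_;
        t.tm_min = min_;
        t.tm_sec = sec_;        // 0..60
        t.tm_isdst = isdst_;    // 0/1/negative
        return std::mktime(&t); // -1 on failure
    }
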
@@ -1909,13 +1817,14 @@ LUAG_FUNC(configure)
     DEBUGSPEW_CODE(fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
     DEBUGSPEW_CODE(if (U) U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
 
-    if(U == nullptr)
+    if (U == nullptr)
     {
-        U = universe_create( L); // settings universe
+        U = universe_create(L); // settings universe
         DEBUGSPEW_CODE(U->debugspew_indent_depth.fetch_add(1, std::memory_order_relaxed));
         lua_newtable( L); // settings universe mt
         lua_getfield(L, 1, "shutdown_timeout"); // settings universe mt shutdown_timeout
-        lua_pushcclosure(L, universe_gc, 1); // settings universe mt universe_gc
+        lua_getfield(L, 1, "shutdown_mode"); // settings universe mt shutdown_timeout shutdown_mode
+        lua_pushcclosure(L, universe_gc, 2); // settings universe mt universe_gc
         lua_setfield(L, -2, "__gc"); // settings universe mt
         lua_setmetatable(L, -2); // settings universe
         lua_pop(L, 1); // settings
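
The new __gc closure carries two upvalues (shutdown_timeout and shutdown_mode), which universe_gc reads back through lua_upvalueindex as shown earlier in this diff. A minimal standalone illustration of that two-upvalue closure pattern with the plain Lua C API (my_gc and install_gc are illustrative names, not part of the patch):

    extern "C" {
    #include "lua.h"
    }

    static int my_gc(lua_State* L)
    {
        // read the values that were pushed before lua_pushcclosure(L, my_gc, 2)
        lua_Number const timeout{ lua_tonumber(L, lua_upvalueindex(1)) };
        char const* const mode{ lua_tostring(L, lua_upvalueindex(2)) };
        (void) timeout;
        (void) mode;
        return 0;
    }

    // assumes the table that will become the metatable is on top of the stack
    void install_gc(lua_State* L)
    {
        lua_pushnumber(L, 0.25);       // upvalue 1: e.g. a shutdown timeout, in seconds
        lua_pushliteral(L, "hard");    // upvalue 2: e.g. a shutdown/cancel mode name
        lua_pushcclosure(L, my_gc, 2); // pops both values, pushes the closure
        lua_setfield(L, -2, "__gc");   // metatable.__gc = closure
    }
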